| code (stringlengths 2-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
"""Philips Hue sensors platform tests."""
import asyncio
from collections import deque
import datetime
import logging
from unittest.mock import Mock
import aiohue
from aiohue.sensors import Sensors
import pytest
from homeassistant import config_entries
from homeassistant.components import hue
from homeassistant.components.hue import sensor_base as hue_sensor_base
_LOGGER = logging.getLogger(__name__)
PRESENCE_SENSOR_1_PRESENT = {
"state": {"presence": True, "lastupdated": "2019-01-01T01:00:00"},
"swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T00:00:00"},
"config": {
"on": True,
"battery": 100,
"reachable": True,
"alert": "none",
"ledindication": False,
"usertest": False,
"sensitivity": 2,
"sensitivitymax": 2,
"pending": [],
},
"name": "Living room sensor",
"type": "ZLLPresence",
"modelid": "SML001",
"manufacturername": "Philips",
"productname": "Hue motion sensor",
"swversion": "6.1.1.27575",
"uniqueid": "00:11:22:33:44:55:66:77-02-0406",
"capabilities": {"certified": True},
}
LIGHT_LEVEL_SENSOR_1 = {
"state": {
"lightlevel": 1,
"dark": True,
"daylight": True,
"lastupdated": "2019-01-01T01:00:00",
},
"swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T00:00:00"},
"config": {
"on": True,
"battery": 100,
"reachable": True,
"alert": "none",
"tholddark": 12467,
"tholdoffset": 7000,
"ledindication": False,
"usertest": False,
"pending": [],
},
"name": "Hue ambient light sensor 1",
"type": "ZLLLightLevel",
"modelid": "SML001",
"manufacturername": "Philips",
"productname": "Hue ambient light sensor",
"swversion": "6.1.1.27575",
"uniqueid": "00:11:22:33:44:55:66:77-02-0400",
"capabilities": {"certified": True},
}
TEMPERATURE_SENSOR_1 = {
"state": {"temperature": 1775, "lastupdated": "2019-01-01T01:00:00"},
"swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T01:00:00"},
"config": {
"on": True,
"battery": 100,
"reachable": True,
"alert": "none",
"ledindication": False,
"usertest": False,
"pending": [],
},
"name": "Hue temperature sensor 1",
"type": "ZLLTemperature",
"modelid": "SML001",
"manufacturername": "Philips",
"productname": "Hue temperature sensor",
"swversion": "6.1.1.27575",
"uniqueid": "00:11:22:33:44:55:66:77-02-0402",
"capabilities": {"certified": True},
}
PRESENCE_SENSOR_2_NOT_PRESENT = {
"state": {"presence": False, "lastupdated": "2019-01-01T00:00:00"},
"swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T01:00:00"},
"config": {
"on": True,
"battery": 100,
"reachable": True,
"alert": "none",
"ledindication": False,
"usertest": False,
"sensitivity": 2,
"sensitivitymax": 2,
"pending": [],
},
"name": "Kitchen sensor",
"type": "ZLLPresence",
"modelid": "SML001",
"manufacturername": "Philips",
"productname": "Hue motion sensor",
"swversion": "6.1.1.27575",
"uniqueid": "00:11:22:33:44:55:66:88-02-0406",
"capabilities": {"certified": True},
}
LIGHT_LEVEL_SENSOR_2 = {
"state": {
"lightlevel": 10001,
"dark": True,
"daylight": True,
"lastupdated": "2019-01-01T01:00:00",
},
"swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T00:00:00"},
"config": {
"on": True,
"battery": 100,
"reachable": True,
"alert": "none",
"tholddark": 12467,
"tholdoffset": 7000,
"ledindication": False,
"usertest": False,
"pending": [],
},
"name": "Hue ambient light sensor 2",
"type": "ZLLLightLevel",
"modelid": "SML001",
"manufacturername": "Philips",
"productname": "Hue ambient light sensor",
"swversion": "6.1.1.27575",
"uniqueid": "00:11:22:33:44:55:66:88-02-0400",
"capabilities": {"certified": True},
}
TEMPERATURE_SENSOR_2 = {
"state": {"temperature": 1875, "lastupdated": "2019-01-01T01:00:00"},
"swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T01:00:00"},
"config": {
"on": True,
"battery": 100,
"reachable": True,
"alert": "none",
"ledindication": False,
"usertest": False,
"pending": [],
},
"name": "Hue temperature sensor 2",
"type": "ZLLTemperature",
"modelid": "SML001",
"manufacturername": "Philips",
"productname": "Hue temperature sensor",
"swversion": "6.1.1.27575",
"uniqueid": "00:11:22:33:44:55:66:88-02-0402",
"capabilities": {"certified": True},
}
PRESENCE_SENSOR_3_PRESENT = {
"state": {"presence": True, "lastupdated": "2019-01-01T01:00:00"},
"swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T00:00:00"},
"config": {
"on": True,
"battery": 100,
"reachable": True,
"alert": "none",
"ledindication": False,
"usertest": False,
"sensitivity": 2,
"sensitivitymax": 2,
"pending": [],
},
"name": "Bedroom sensor",
"type": "ZLLPresence",
"modelid": "SML001",
"manufacturername": "Philips",
"productname": "Hue motion sensor",
"swversion": "6.1.1.27575",
"uniqueid": "00:11:22:33:44:55:66:99-02-0406",
"capabilities": {"certified": True},
}
LIGHT_LEVEL_SENSOR_3 = {
"state": {
"lightlevel": 1,
"dark": True,
"daylight": True,
"lastupdated": "2019-01-01T01:00:00",
},
"swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T00:00:00"},
"config": {
"on": True,
"battery": 100,
"reachable": True,
"alert": "none",
"tholddark": 12467,
"tholdoffset": 7000,
"ledindication": False,
"usertest": False,
"pending": [],
},
"name": "Hue ambient light sensor 3",
"type": "ZLLLightLevel",
"modelid": "SML001",
"manufacturername": "Philips",
"productname": "Hue ambient light sensor",
"swversion": "6.1.1.27575",
"uniqueid": "00:11:22:33:44:55:66:99-02-0400",
"capabilities": {"certified": True},
}
TEMPERATURE_SENSOR_3 = {
"state": {"temperature": 1775, "lastupdated": "2019-01-01T01:00:00"},
"swupdate": {"state": "noupdates", "lastinstall": "2019-01-01T01:00:00"},
"config": {
"on": True,
"battery": 100,
"reachable": True,
"alert": "none",
"ledindication": False,
"usertest": False,
"pending": [],
},
"name": "Hue temperature sensor 3",
"type": "ZLLTemperature",
"modelid": "SML001",
"manufacturername": "Philips",
"productname": "Hue temperature sensor",
"swversion": "6.1.1.27575",
"uniqueid": "00:11:22:33:44:55:66:99-02-0402",
"capabilities": {"certified": True},
}
UNSUPPORTED_SENSOR = {
"state": {"status": 0, "lastupdated": "2019-01-01T01:00:00"},
"config": {"on": True, "reachable": True},
"name": "Unsupported sensor",
"type": "CLIPGenericStatus",
"modelid": "PHWA01",
"manufacturername": "Philips",
"swversion": "1.0",
"uniqueid": "arbitrary",
"recycle": True,
}
SENSOR_RESPONSE = {
"1": PRESENCE_SENSOR_1_PRESENT,
"2": LIGHT_LEVEL_SENSOR_1,
"3": TEMPERATURE_SENSOR_1,
"4": PRESENCE_SENSOR_2_NOT_PRESENT,
"5": LIGHT_LEVEL_SENSOR_2,
"6": TEMPERATURE_SENSOR_2,
}
def create_mock_bridge():
"""Create a mock Hue bridge."""
bridge = Mock(
available=True,
allow_unreachable=False,
allow_groups=False,
api=Mock(),
spec=hue.HueBridge,
)
bridge.mock_requests = []
    # We're using a deque so we can schedule multiple responses; it also means
    # that `popleft()` will blow up if we get more updates than expected.
bridge.mock_sensor_responses = deque()
async def mock_request(method, path, **kwargs):
kwargs["method"] = method
kwargs["path"] = path
bridge.mock_requests.append(kwargs)
if path == "sensors":
return bridge.mock_sensor_responses.popleft()
return None
bridge.api.config.apiversion = "9.9.9"
bridge.api.sensors = Sensors({}, mock_request)
return bridge
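# Usage sketch: each entry queued on `mock_sensor_responses` is consumed by one
# "sensors" poll, so a test queues exactly as many responses as updates it expects.
#
#     bridge = create_mock_bridge()
#     bridge.mock_sensor_responses.append(SENSOR_RESPONSE)  # first update
#     bridge.mock_sensor_responses.append({})               # a later update with no sensors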
@pytest.fixture
def mock_bridge(hass):
"""Mock a Hue bridge."""
return create_mock_bridge()
@pytest.fixture
def increase_scan_interval(hass):
"""Increase the SCAN_INTERVAL to prevent unexpected scans during tests."""
hue_sensor_base.SensorManager.SCAN_INTERVAL = datetime.timedelta(days=365)
async def setup_bridge(hass, mock_bridge, hostname=None):
"""Load the Hue platform with the provided bridge."""
if hostname is None:
hostname = "mock-host"
hass.config.components.add(hue.DOMAIN)
hass.data[hue.DOMAIN] = {hostname: mock_bridge}
config_entry = config_entries.ConfigEntry(
1,
hue.DOMAIN,
"Mock Title",
{"host": hostname},
"test",
config_entries.CONN_CLASS_LOCAL_POLL,
)
await hass.config_entries.async_forward_entry_setup(config_entry, "binary_sensor")
await hass.config_entries.async_forward_entry_setup(config_entry, "sensor")
# and make sure it completes before going further
await hass.async_block_till_done()
async def test_no_sensors(hass, mock_bridge):
"""Test the update_items function when no sensors are found."""
mock_bridge.allow_groups = True
mock_bridge.mock_sensor_responses.append({})
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 1
assert len(hass.states.async_all()) == 0
async def test_sensors_with_multiple_bridges(hass, mock_bridge):
"""Test the update_items function with some sensors."""
mock_bridge_2 = create_mock_bridge()
mock_bridge_2.mock_sensor_responses.append(
{
"1": PRESENCE_SENSOR_3_PRESENT,
"2": LIGHT_LEVEL_SENSOR_3,
"3": TEMPERATURE_SENSOR_3,
}
)
mock_bridge.mock_sensor_responses.append(SENSOR_RESPONSE)
await setup_bridge(hass, mock_bridge)
await setup_bridge(hass, mock_bridge_2, hostname="mock-bridge-2")
assert len(mock_bridge.mock_requests) == 1
assert len(mock_bridge_2.mock_requests) == 1
# 3 "physical" sensors with 3 virtual sensors each
assert len(hass.states.async_all()) == 9
async def test_sensors(hass, mock_bridge):
"""Test the update_items function with some sensors."""
mock_bridge.mock_sensor_responses.append(SENSOR_RESPONSE)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 1
# 2 "physical" sensors with 3 virtual sensors each
assert len(hass.states.async_all()) == 6
presence_sensor_1 = hass.states.get("binary_sensor.living_room_sensor_motion")
light_level_sensor_1 = hass.states.get("sensor.living_room_sensor_light_level")
temperature_sensor_1 = hass.states.get("sensor.living_room_sensor_temperature")
assert presence_sensor_1 is not None
assert presence_sensor_1.state == "on"
assert light_level_sensor_1 is not None
assert light_level_sensor_1.state == "1.0"
assert light_level_sensor_1.name == "Living room sensor light level"
assert temperature_sensor_1 is not None
assert temperature_sensor_1.state == "17.75"
assert temperature_sensor_1.name == "Living room sensor temperature"
presence_sensor_2 = hass.states.get("binary_sensor.kitchen_sensor_motion")
light_level_sensor_2 = hass.states.get("sensor.kitchen_sensor_light_level")
temperature_sensor_2 = hass.states.get("sensor.kitchen_sensor_temperature")
assert presence_sensor_2 is not None
assert presence_sensor_2.state == "off"
assert light_level_sensor_2 is not None
assert light_level_sensor_2.state == "10.0"
assert light_level_sensor_2.name == "Kitchen sensor light level"
assert temperature_sensor_2 is not None
assert temperature_sensor_2.state == "18.75"
assert temperature_sensor_2.name == "Kitchen sensor temperature"
async def test_unsupported_sensors(hass, mock_bridge):
"""Test that unsupported sensors don't get added and don't fail."""
response_with_unsupported = dict(SENSOR_RESPONSE)
response_with_unsupported["7"] = UNSUPPORTED_SENSOR
mock_bridge.mock_sensor_responses.append(response_with_unsupported)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 1
# 2 "physical" sensors with 3 virtual sensors each
assert len(hass.states.async_all()) == 6
async def test_new_sensor_discovered(hass, mock_bridge):
"""Test if 2nd update has a new sensor."""
mock_bridge.mock_sensor_responses.append(SENSOR_RESPONSE)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 1
assert len(hass.states.async_all()) == 6
new_sensor_response = dict(SENSOR_RESPONSE)
new_sensor_response.update(
{
"7": PRESENCE_SENSOR_3_PRESENT,
"8": LIGHT_LEVEL_SENSOR_3,
"9": TEMPERATURE_SENSOR_3,
}
)
mock_bridge.mock_sensor_responses.append(new_sensor_response)
# Force updates to run again
sm_key = hue_sensor_base.SENSOR_MANAGER_FORMAT.format("mock-host")
sm = hass.data[hue.DOMAIN][sm_key]
await sm.async_update_items()
# To flush out the service call to update the group
await hass.async_block_till_done()
assert len(mock_bridge.mock_requests) == 2
assert len(hass.states.async_all()) == 9
presence = hass.states.get("binary_sensor.bedroom_sensor_motion")
assert presence is not None
assert presence.state == "on"
temperature = hass.states.get("sensor.bedroom_sensor_temperature")
assert temperature is not None
assert temperature.state == "17.75"
async def test_update_timeout(hass, mock_bridge):
"""Test bridge marked as not available if timeout error during update."""
mock_bridge.api.sensors.update = Mock(side_effect=asyncio.TimeoutError)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 0
assert len(hass.states.async_all()) == 0
assert mock_bridge.available is False
async def test_update_unauthorized(hass, mock_bridge):
"""Test bridge marked as not available if unauthorized during update."""
mock_bridge.api.sensors.update = Mock(side_effect=aiohue.Unauthorized)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 0
assert len(hass.states.async_all()) == 0
assert mock_bridge.available is False
| fbradyirl/home-assistant | tests/components/hue/test_sensor_base.py | Python | apache-2.0 | 14,721 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pysparkling import *
from pyspark.sql import SparkSession
import h2o
# Start Cluster
spark = SparkSession.builder.appName("App name").getOrCreate()
hc = H2OContext.getOrCreate()
assert h2o.cluster().cloud_size == 3
# Prepare Data
frame = h2o.import_file("https://raw.githubusercontent.com/h2oai/sparkling-water/master/examples/smalldata/prostate/prostate.csv")
sparkDF = hc.asSparkFrame(frame)
sparkDF = sparkDF.withColumn("CAPSULE", sparkDF.CAPSULE.cast("string"))
[trainingDF, testingDF] = sparkDF.randomSplit([0.8, 0.2])
# Train Model
from pysparkling.ml import H2OXGBoost
estimator = H2OXGBoost(labelCol = "CAPSULE")
model = estimator.fit(trainingDF)
# Run Predictions
model.transform(testingDF).collect()
| h2oai/sparkling-water | ci/databricksTests/test.py | Python | apache-2.0 | 1,504 |
# -*- coding: ISO-8859-1 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__reversion__ = "$Revision: 20 $"
__author__ = "$Author: holtwick $"
__date__ = "$Date: 2007-10-09 12:58:24 +0200 (Di, 09 Okt 2007) $"
from pisa_tags import pisaTag
from pisa_util import *
from pisa_reportlab import PmlTable, TableStyle, PmlKeepInFrame
import copy
import sys
from itertools import izip_longest
import logging
log = logging.getLogger("ho.pisa")
try:
from celery.exceptions import SoftTimeLimitExceeded
except ImportError:
SoftTimeLimitExceeded = KeyboardInterrupt
def _width(value=None):
if value is None:
return None
value = str(value)
if value.endswith("%"):
return value
return getSize(value)
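# Behaviour sketch for _width (getSize is imported from pisa_util and is assumed
# to convert CSS-style lengths such as "12cm" into points):
#
#     _width(None)   -> None               # unspecified widths stay undefined
#     _width("50%")  -> "50%"              # percentages are passed through as strings
#     _width("12cm") -> getSize("12cm")    # absolute lengths are resolved by getSize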
class TableData:
def __init__(self):
self.data = []
self.styles = []
self.span = []
self.mode = ""
self.padding = 0
self.col = 0
# self.c = None
def add_cell(self, data=None):
self.col += 1
self.data[len(self.data) - 1].append(data)
def add_style(self, data):
# print self.mode, data
# Do we have color and
# width = data[3]
#if data[0].startswith("LINE"):
# color = data[4]
# if color is None:
# return
self.styles.append(copy.copy(data))
def add_empty(self, x, y):
self.span.append((x, y))
def get_data(self):
data = self.data
for x, y in self.span:
try:
data[y].insert(x, '')
except SoftTimeLimitExceeded:
raise
except:
pass
return data
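    # Worked example (hypothetical 2-column table whose first row holds a colspan=2 cell):
    # with self.data == [['A'], ['B', 'C']] and self.span == [(1, 0)],
    # get_data() pads the spanned slot and returns [['A', ''], ['B', 'C']].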
def add_cell_styles(self, c, begin, end, mode="td"):
def getColor(a, b):
return a
self.mode = mode.upper()
        if c.frag.backColor and mode != "tr": # XXX is this correct?
self.add_style(('BACKGROUND', begin, end, c.frag.backColor))
# print 'BACKGROUND', begin, end, c.frag.backColor
if 0:
log.debug("%r", (
begin,
end,
c.frag.borderTopWidth,
c.frag.borderTopStyle,
c.frag.borderTopColor,
c.frag.borderBottomWidth,
c.frag.borderBottomStyle,
c.frag.borderBottomColor,
c.frag.borderLeftWidth,
c.frag.borderLeftStyle,
c.frag.borderLeftColor,
c.frag.borderRightWidth,
c.frag.borderRightStyle,
c.frag.borderRightColor,
))
if getBorderStyle(c.frag.borderTopStyle) and c.frag.borderTopWidth and c.frag.borderTopColor is not None:
self.add_style(('LINEABOVE', begin, (end[0], begin[1]),
c.frag.borderTopWidth,
c.frag.borderTopColor,
"squared"))
if getBorderStyle(c.frag.borderLeftStyle) and c.frag.borderLeftWidth and c.frag.borderLeftColor is not None:
self.add_style(('LINEBEFORE', begin, (begin[0], end[1]),
c.frag.borderLeftWidth,
c.frag.borderLeftColor,
"squared"))
if getBorderStyle(c.frag.borderRightStyle) and c.frag.borderRightWidth and c.frag.borderRightColor is not None:
self.add_style(('LINEAFTER', (end[0], begin[1]), end,
c.frag.borderRightWidth,
c.frag.borderRightColor,
"squared"))
if getBorderStyle(c.frag.borderBottomStyle) and c.frag.borderBottomWidth and c.frag.borderBottomColor is not None:
self.add_style(('LINEBELOW', (begin[0], end[1]), end,
c.frag.borderBottomWidth,
c.frag.borderBottomColor,
"squared"))
self.add_style(('LEFTPADDING', begin, end, c.frag.paddingLeft or self.padding))
self.add_style(('RIGHTPADDING', begin, end, c.frag.paddingRight or self.padding))
self.add_style(('TOPPADDING', begin, end, c.frag.paddingTop or self.padding))
self.add_style(('BOTTOMPADDING', begin, end, c.frag.paddingBottom or self.padding))
class pisaTagTABLE(pisaTag):
def start(self, c):
c.addPara()
attrs = self.attr
# Swap table data
c.tableData, self.tableData = TableData(), c.tableData
tdata = c.tableData
# border
#tdata.border = attrs.border
#tdata.bordercolor = attrs.bordercolor
begin = (0, 0)
end = (-1, - 1)
if attrs.border and attrs.bordercolor:
frag = c.frag
frag.borderLeftWidth = attrs.border
frag.borderLeftColor = attrs.bordercolor
frag.borderLeftStyle = "solid"
frag.borderRightWidth = attrs.border
frag.borderRightColor = attrs.bordercolor
frag.borderRightStyle = "solid"
frag.borderTopWidth = attrs.border
frag.borderTopColor = attrs.bordercolor
frag.borderTopStyle = "solid"
frag.borderBottomWidth = attrs.border
frag.borderBottomColor = attrs.bordercolor
frag.borderBottomStyle = "solid"
# tdata.add_style(("GRID", begin, end, attrs.border, attrs.bordercolor))
tdata.padding = attrs.cellpadding
#if 0: #attrs.cellpadding:
# tdata.add_style(('LEFTPADDING', begin, end, attrs.cellpadding))
# tdata.add_style(('RIGHTPADDING', begin, end, attrs.cellpadding))
# tdata.add_style(('TOPPADDING', begin, end, attrs.cellpadding))
# tdata.add_style(('BOTTOMPADDING', begin, end, attrs.cellpadding))
# alignment
#~ tdata.add_style(('VALIGN', (0,0), (-1,-1), attrs.valign.upper()))
# Set Border and padding styles
tdata.add_cell_styles(c, (0, 0), (-1, - 1), "table")
# bgcolor
#if attrs.bgcolor is not None:
# tdata.add_style(('BACKGROUND', (0, 0), (-1, -1), attrs.bgcolor))
tdata.align = attrs.align.upper()
tdata.col = 0
tdata.row = 0
tdata.colw = []
tdata.rowh = []
tdata.repeat = attrs.repeat
tdata.width = _width(attrs.width)
# self.tabdata.append(tdata)
def end(self, c):
tdata = c.tableData
data = tdata.get_data()
# Add missing columns so that each row has the same count of columns
# This prevents errors in Reportlab table
try:
maxcols = max([len(row) for row in data] or [0])
except ValueError:
log.warn(c.warning("<table> rows seem to be inconsistent"))
            maxcols = 0
for i, row in enumerate(data):
data[i] += [''] * (maxcols - len(row))
try:
if tdata.data:
# This is a bit of a hack -- ensure there are heights for all rows
# if a <tr></tr> is empty, a row height isn't generated for it. This will
# add a height of None for that row as a default.
if len(tdata.data) > len(tdata.rowh):
new_rowh = []
for idx, (row, height) in enumerate(izip_longest(tdata.data, tdata.rowh)):
if len([i for i in row if i == '']) == len(tdata.colw):
new_rowh.append(None)
if idx < len(tdata.rowh):
new_rowh.append(height)
tdata.rowh = new_rowh
# log.debug("Table sryles %r", tdata.styles)
t = PmlTable(
data,
colWidths=tdata.colw,
rowHeights=tdata.rowh,
# totalWidth = tdata.width,
splitByRow=1,
# repeatCols = 1,
repeatRows=tdata.repeat,
hAlign=tdata.align,
vAlign='TOP',
style=TableStyle(tdata.styles))
t.totalWidth = _width(tdata.width)
t.spaceBefore = c.frag.spaceBefore
t.spaceAfter = c.frag.spaceAfter
# XXX Maybe we need to copy some more properties?
t.keepWithNext = c.frag.keepWithNext
# t.hAlign = tdata.align
c.addStory(t)
else:
log.warn(c.warning("<table> is empty"))
except SoftTimeLimitExceeded:
raise
except:
log.warn(c.warning("<table>"), exc_info=1)
# Cleanup and re-swap table data
c.clearFrag()
c.tableData, self.tableData = self.tableData, None
class pisaTagTR(pisaTag):
def start(self, c):
tdata = c.tableData
row = tdata.row
begin = (0, row)
end = (-1, row)
tdata.add_cell_styles(c, begin, end, "tr")
c.frag.vAlign = self.attr.valign or c.frag.vAlign
tdata.col = 0
tdata.data.append([])
def end(self, c):
c.tableData.row += 1
class pisaTagTD(pisaTag):
def start(self, c):
if self.attr.align is not None:
#print self.attr.align, getAlign(self.attr.align)
c.frag.alignment = getAlign(self.attr.align)
c.clearFrag()
self.story = c.swapStory()
# print "#", len(c.story)
attrs = self.attr
tdata = c.tableData
cspan = attrs.colspan
rspan = attrs.rowspan
row = tdata.row
col = tdata.col
while 1:
for x, y in tdata.span:
if x == col and y == row:
col += 1
tdata.col += 1
break
#cs = 0
#rs = 0
begin = (col, row)
end = (col, row)
if cspan:
end = (end[0] + cspan - 1, end[1])
if rspan:
end = (end[0], end[1] + rspan - 1)
if begin != end:
#~ print begin, end
tdata.add_style(('SPAN', begin, end))
for x in range(begin[0], end[0] + 1):
for y in range(begin[1], end[1] + 1):
if x != begin[0] or y != begin[1]:
tdata.add_empty(x, y)
# Set Border and padding styles
tdata.add_cell_styles(c, begin, end, "td")
# Calculate widths
# Add empty placeholders for new columns
if (col + 1) > len(tdata.colw):
tdata.colw = tdata.colw + ((col + 1 - len(tdata.colw)) * [_width()])
        # Get value of width, if not spanning
if not cspan:
# print c.frag.width
width = c.frag.width or self.attr.width #self._getStyle(None, attrs, "width", "width", mode)
            # If there is a value, set it in the right place in the array
# print width, _width(width)
if width is not None:
tdata.colw[col] = _width(width)
# Calculate heights
if row + 1 > len(tdata.rowh):
tdata.rowh = tdata.rowh + ((row + 1 - len(tdata.rowh)) * [_width()])
if not rspan:
height = None #self._getStyle(None, attrs, "height", "height", mode)
if height is not None:
tdata.rowh[row] = _width(height)
tdata.add_style(('FONTSIZE', begin, end, 1.0))
tdata.add_style(('LEADING', begin, end, 1.0))
# Vertical align
valign = self.attr.valign or c.frag.vAlign
if valign is not None:
tdata.add_style(('VALIGN', begin, end, valign.upper()))
# Reset border, otherwise the paragraph block will have borders too
frag = c.frag
frag.borderLeftWidth = 0
frag.borderLeftColor = None
frag.borderLeftStyle = None
frag.borderRightWidth = 0
frag.borderRightColor = None
frag.borderRightStyle = None
frag.borderTopWidth = 0
frag.borderTopColor = None
frag.borderTopStyle = None
frag.borderBottomWidth = 0
frag.borderBottomColor = None
frag.borderBottomStyle = None
def end(self, c):
tdata = c.tableData
c.addPara()
cell = c.story
# Handle empty cells, they otherwise collapse
#if not cell:
# cell = ' '
        # Keep in frame if needed since Reportlab does not split inside of cells
if (not c.frag.insideStaticFrame) and (c.frag.keepInFrameMode is not None):
# tdata.keepinframe["content"] = cell
cell = PmlKeepInFrame(
maxWidth=0,
maxHeight=0,
mode=c.frag.keepInFrameMode,
content=cell)
c.swapStory(self.story)
tdata.add_cell(cell)
class pisaTagTH(pisaTagTD):
pass
'''
end_th = end_td
def start_keeptogether(self, attrs):
self.story.append([])
self.next_para()
def end_keeptogether(self):
if not self.story[-1]:
self.add_noop()
self.next_para()
s = self.story.pop()
self.add_story(KeepTogether(s))
def start_keepinframe(self, attrs):
self.story.append([])
self.keepinframe = {
"maxWidth": attrs["maxwidth"],
"maxHeight": attrs["maxheight"],
"mode": attrs["mode"],
"name": attrs["name"],
"mergeSpace": attrs["mergespace"]
}
# print self.keepinframe
self.next_para()
def end_keepinframe(self):
if not self.story[-1]:
self.add_noop()
self.next_para()
self.keepinframe["content"] = self.story.pop()
self.add_story(KeepInFrame(**self.keepinframe))
'''
| drchrono/pisa | sx/pisa3/pisa_tables.py | Python | apache-2.0 | 14,153 |
"""
Test basics of Minidump debugging.
"""
from __future__ import print_function
from six import iteritems
import lldb
import os
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class MiniDumpUUIDTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
def setUp(self):
super(MiniDumpUUIDTestCase, self).setUp()
self._initial_platform = lldb.DBG.GetSelectedPlatform()
def tearDown(self):
lldb.DBG.SetSelectedPlatform(self._initial_platform)
super(MiniDumpUUIDTestCase, self).tearDown()
def verify_module(self, module, verify_path, verify_uuid):
uuid = module.GetUUIDString()
self.assertEqual(verify_path, module.GetFileSpec().fullpath)
self.assertEqual(verify_uuid, uuid)
def get_minidump_modules(self, yaml_file):
minidump_path = self.getBuildArtifact(os.path.basename(yaml_file) + ".dmp")
self.yaml2obj(yaml_file, minidump_path)
self.target = self.dbg.CreateTarget(None)
self.process = self.target.LoadCore(minidump_path)
return self.target.modules
def test_zero_uuid_modules(self):
"""
Test multiple modules having a MINIDUMP_MODULE.CvRecord that is valid,
but contains a PDB70 value whose age is zero and whose UUID values are
all zero. Prior to a fix all such modules would be duplicated to the
first one since the UUIDs claimed to be valid and all zeroes. Now we
ensure that the UUID is not valid for each module and that we have
each of the modules in the target after loading the core
"""
modules = self.get_minidump_modules("linux-arm-zero-uuids.yaml")
self.assertEqual(2, len(modules))
self.verify_module(modules[0], "/file/does/not/exist/a", None)
self.verify_module(modules[1], "/file/does/not/exist/b", None)
def test_uuid_modules_no_age(self):
"""
Test multiple modules having a MINIDUMP_MODULE.CvRecord that is valid,
and contains a PDB70 value whose age is zero and whose UUID values are
valid. Ensure we decode the UUID and don't include the age field in the UUID.
"""
modules = self.get_minidump_modules("linux-arm-uuids-no-age.yaml")
modules = self.target.modules
self.assertEqual(2, len(modules))
self.verify_module(modules[0], "/tmp/a", "01020304-0506-0708-090A-0B0C0D0E0F10")
self.verify_module(modules[1], "/tmp/b", "0A141E28-323C-4650-5A64-6E78828C96A0")
def test_uuid_modules_no_age_apple(self):
"""
Test multiple modules having a MINIDUMP_MODULE.CvRecord that is valid,
and contains a PDB70 value whose age is zero and whose UUID values are
valid. Ensure we decode the UUID and don't include the age field in the UUID.
Also ensure that the first uint32_t is byte swapped, along with the next
two uint16_t values. Breakpad incorrectly byte swaps these values when it
saves Darwin minidump files.
"""
modules = self.get_minidump_modules("macos-arm-uuids-no-age.yaml")
modules = self.target.modules
self.assertEqual(2, len(modules))
self.verify_module(modules[0], "/tmp/a", "04030201-0605-0807-090A-0B0C0D0E0F10")
self.verify_module(modules[1], "/tmp/b", "281E140A-3C32-5046-5A64-6E78828C96A0")
def test_uuid_modules_with_age(self):
"""
Test multiple modules having a MINIDUMP_MODULE.CvRecord that is valid,
and contains a PDB70 value whose age is valid and whose UUID values are
valid. Ensure we decode the UUID and include the age field in the UUID.
"""
modules = self.get_minidump_modules("linux-arm-uuids-with-age.yaml")
self.assertEqual(2, len(modules))
self.verify_module(modules[0], "/tmp/a", "01020304-0506-0708-090A-0B0C0D0E0F10-10101010")
self.verify_module(modules[1], "/tmp/b", "0A141E28-323C-4650-5A64-6E78828C96A0-20202020")
def test_uuid_modules_elf_build_id_16(self):
"""
Test multiple modules having a MINIDUMP_MODULE.CvRecord that is valid,
and contains a ELF build ID whose value is valid and is 16 bytes long.
"""
modules = self.get_minidump_modules("linux-arm-uuids-elf-build-id-16.yaml")
self.assertEqual(2, len(modules))
self.verify_module(modules[0], "/tmp/a", "01020304-0506-0708-090A-0B0C0D0E0F10")
self.verify_module(modules[1], "/tmp/b", "0A141E28-323C-4650-5A64-6E78828C96A0")
def test_uuid_modules_elf_build_id_20(self):
"""
Test multiple modules having a MINIDUMP_MODULE.CvRecord that is valid,
and contains a ELF build ID whose value is valid and is 20 bytes long.
"""
modules = self.get_minidump_modules("linux-arm-uuids-elf-build-id-20.yaml")
self.assertEqual(2, len(modules))
self.verify_module(modules[0], "/tmp/a", "01020304-0506-0708-090A-0B0C0D0E0F10-11121314")
self.verify_module(modules[1], "/tmp/b", "0A141E28-323C-4650-5A64-6E78828C96A0-AAB4BEC8")
def test_uuid_modules_elf_build_id_zero(self):
"""
Test multiple modules having a MINIDUMP_MODULE.CvRecord that is valid,
and contains a ELF build ID whose value is all zero.
"""
modules = self.get_minidump_modules("linux-arm-uuids-elf-build-id-zero.yaml")
self.assertEqual(2, len(modules))
self.verify_module(modules[0], "/not/exist/a", None)
self.verify_module(modules[1], "/not/exist/b", None)
@expectedFailureAll(oslist=["windows"])
def test_partial_uuid_match(self):
"""
Breakpad has been known to create minidump files using CvRecord in each
module whose signature is set to PDB70 where the UUID only contains the
first 16 bytes of a 20 byte ELF build ID. Code was added to
ProcessMinidump.cpp to deal with this and allows partial UUID matching.
This test verifies that if we have a minidump with a 16 byte UUID, that
we are able to associate a symbol file with a 20 byte UUID only if the
first 16 bytes match. In this case we will see the path from the file
we found in the test directory and the 20 byte UUID from the actual
file, not the 16 byte shortened UUID from the minidump.
"""
so_path = self.getBuildArtifact("libuuidmatch.so")
self.yaml2obj("libuuidmatch.yaml", so_path)
cmd = 'settings set target.exec-search-paths "%s"' % (os.path.dirname(so_path))
self.dbg.HandleCommand(cmd)
modules = self.get_minidump_modules("linux-arm-partial-uuids-match.yaml")
self.assertEqual(1, len(modules))
self.verify_module(modules[0], so_path,
"7295E17C-6668-9E05-CBB5-DEE5003865D5-5267C116")
def test_partial_uuid_mismatch(self):
"""
Breakpad has been known to create minidump files using CvRecord in each
module whose signature is set to PDB70 where the UUID only contains the
first 16 bytes of a 20 byte ELF build ID. Code was added to
ProcessMinidump.cpp to deal with this and allows partial UUID matching.
This test verifies that if we have a minidump with a 16 byte UUID, that
we are not able to associate a symbol file with a 20 byte UUID only if
any of the first 16 bytes do not match. In this case we will see the UUID
from the minidump file and the path from the minidump file.
"""
so_path = self.getBuildArtifact("libuuidmismatch.so")
self.yaml2obj("libuuidmismatch.yaml", so_path)
cmd = 'settings set target.exec-search-paths "%s"' % (os.path.dirname(so_path))
self.dbg.HandleCommand(cmd)
modules = self.get_minidump_modules("linux-arm-partial-uuids-mismatch.yaml")
self.assertEqual(1, len(modules))
self.verify_module(modules[0],
"/invalid/path/on/current/system/libuuidmismatch.so",
"7295E17C-6668-9E05-CBB5-DEE5003865D5")
def test_relative_module_name(self):
old_cwd = os.getcwd()
self.addTearDownHook(lambda: os.chdir(old_cwd))
os.chdir(self.getBuildDir())
name = "file-with-a-name-unlikely-to-exist-in-the-current-directory.so"
open(name, "a").close()
modules = self.get_minidump_modules(
self.getSourcePath("relative_module_name.yaml"))
self.assertEqual(1, len(modules))
self.verify_module(modules[0], name, None)
| apple/swift-lldb | packages/Python/lldbsuite/test/functionalities/postmortem/minidump-new/TestMiniDumpUUID.py | Python | apache-2.0 | 8,803 |
import numpy as np
import tensorflow as tf
DEFAULT_PADDING = 'SAME'
def layer(op):
'''Decorator for composable network layers.'''
def layer_decorated(self, *args, **kwargs):
# Automatically set a name if not provided.
name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
# Figure out the layer inputs.
if len(self.terminals) == 0:
raise RuntimeError('No input variables found for layer %s.' % name)
elif len(self.terminals) == 1:
layer_input = self.terminals[0]
else:
layer_input = list(self.terminals)
# Perform the operation and get the output.
layer_output = op(self, layer_input, *args, **kwargs)
# Add to layer LUT.
self.layers[name] = layer_output
# This output is now the input for the next layer.
self.feed(layer_output)
# Return self for chained calls.
return self
return layer_decorated
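# Usage sketch (hypothetical subclass): because every decorated op calls
# `self.feed(...)` and returns `self`, layers can be declared as a chain,
# assuming the network was constructed with an input named 'data'.
#
#     class ExampleNet(Network):
#         def setup(self):
#             (self.feed('data')
#                  .conv(3, 3, 64, 1, 1, name='conv1')
#                  .max_pool(2, 2, 2, 2, name='pool1')
#                  .fc(10, relu=False, name='fc1')
#                  .softmax(name='prob'))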
class Network(object):
def __init__(self, inputs, trainable=True):
# The input nodes for this network
self.inputs = inputs
# The current list of terminal nodes
self.terminals = []
# Mapping from layer names to layers
self.layers = dict(inputs)
# If true, the resulting variables are set as trainable
self.trainable = trainable
# Switch variable for dropout
self.use_dropout = tf.placeholder_with_default(tf.constant(1.0),
shape=[],
name='use_dropout')
self.setup()
def setup(self):
'''Construct the network. '''
raise NotImplementedError('Must be implemented by the subclass.')
def load(self, data_path, session, ignore_missing=False):
'''Load network weights.
data_path: The path to the numpy-serialized network weights
session: The current TensorFlow session
ignore_missing: If true, serialized weights for missing layers are ignored.
'''
data_dict = np.load(data_path).item()
for op_name in data_dict:
with tf.variable_scope(op_name, reuse=True):
for param_name, data in data_dict[op_name].iteritems():
try:
var = tf.get_variable(param_name)
session.run(var.assign(data))
except ValueError:
if not ignore_missing:
raise
def feed(self, *args):
'''Set the input(s) for the next operation by replacing the terminal nodes.
The arguments can be either layer names or the actual layers.
'''
assert len(args) != 0
self.terminals = []
for fed_layer in args:
if isinstance(fed_layer, basestring):
try:
fed_layer = self.layers[fed_layer]
except KeyError:
raise KeyError('Unknown layer name fed: %s' % fed_layer)
self.terminals.append(fed_layer)
return self
def get_output(self):
'''Returns the current network output.'''
return self.terminals[-1]
def get_unique_name(self, prefix):
'''Returns an index-suffixed unique name for the given prefix.
This is used for auto-generating layer names based on the type-prefix.
'''
ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1
return '%s_%d' % (prefix, ident)
def make_var(self, name, shape):
'''Creates a new TensorFlow variable.'''
return tf.get_variable(name, shape, trainable=self.trainable)
def validate_padding(self, padding):
'''Verifies that the padding is one of the supported ones.'''
assert padding in ('SAME', 'VALID')
@layer
def conv(self,
input,
k_h,
k_w,
c_o,
s_h,
s_w,
name,
relu=True,
padding=DEFAULT_PADDING,
group=1,
biased=True):
# Verify that the padding is acceptable
self.validate_padding(padding)
# Get the number of channels in the input
c_i = input.get_shape()[-1]
# Verify that the grouping parameter is valid
assert c_i % group == 0
assert c_o % group == 0
# Convolution for a given input and kernel
convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
with tf.variable_scope(name) as scope:
kernel = self.make_var('kernel', shape=[k_h, k_w, c_i / group, c_o])
if group == 1:
# This is the common-case. Convolve the input without any further complications.
output = convolve(input, kernel)
else:
# Split the input into groups and then convolve each of them independently
input_groups = tf.split(input,group, 3)
kernel_groups = tf.split(kernel, group, 3)
output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
# Concatenate the groups
output = tf.concat(3, output_groups)
# Add the bias
if biased:
bias = self.make_var('bias', [c_o])
output = tf.nn.bias_add(output, bias)
if relu:
# ReLU non-linearity
output = tf.nn.relu(output, name=scope.name)
return output
@layer
def relu(self, input, name):
return tf.nn.relu(input, name=name)
@layer
def max_pool(self, input, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
self.validate_padding(padding)
return tf.nn.max_pool(input,
ksize=[1, k_h, k_w, 1],
strides=[1, s_h, s_w, 1],
padding=padding,
name=name)
@layer
def avg_pool(self, input, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
self.validate_padding(padding)
return tf.nn.avg_pool(input,
ksize=[1, k_h, k_w, 1],
strides=[1, s_h, s_w, 1],
padding=padding,
name=name)
@layer
def lrn(self, input, radius, alpha, beta, name, bias=1.0):
return tf.nn.local_response_normalization(input,
depth_radius=radius,
alpha=alpha,
beta=beta,
bias=bias,
name=name)
@layer
def concat(self, inputs, axis, name):
return tf.concat(axis=axis, values=inputs, name=name)
@layer
def add(self, inputs, name):
return tf.add_n(inputs, name=name)
@layer
def fc(self, input, num_out, name, relu=True):
with tf.variable_scope(name) as scope:
input_shape = input.get_shape()
if input_shape.ndims == 4:
# The input is spatial. Vectorize it first.
dim = 1
for d in input_shape[1:].as_list():
dim *= d
feed_in = tf.reshape(input, [-1, dim])
else:
feed_in, dim = (input, input_shape[-1].value)
weights = self.make_var('kernel', shape=[dim, num_out])
bias = self.make_var('bias', [num_out])
op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
fc = op(feed_in, weights, bias, name=scope.name)
return fc
@layer
def softmax(self, input, name):
input_shape = map(lambda v: v.value, input.get_shape())
if len(input_shape) > 2:
# For certain models (like NiN), the singleton spatial dimensions
# need to be explicitly squeezed, since they're not broadcast-able
# in TensorFlow's NHWC ordering (unlike Caffe's NCHW).
if input_shape[1] == 1 and input_shape[2] == 1:
input = tf.squeeze(input, squeeze_dims=[1, 2])
else:
raise ValueError('Rank 2 tensor input expected for softmax!')
return tf.nn.softmax(input, name=name)
@layer
def batch_normalization(self, input, name, scale_offset=True, relu=False):
# NOTE: Currently, only inference is supported
with tf.variable_scope(name) as scope:
shape = [input.get_shape()[-1]]
if scale_offset:
scale = self.make_var('scale', shape=shape)
offset = self.make_var('offset', shape=shape)
else:
scale, offset = (None, None)
output = tf.nn.batch_normalization(
input,
mean=self.make_var('mean', shape=shape),
variance=self.make_var('variance', shape=shape),
offset=offset,
scale=scale,
# TODO: This is the default Caffe batch norm eps
# Get the actual eps from parameters
variance_epsilon=1e-5,
name=name)
if relu:
output = tf.nn.relu(output)
return output
@layer
def dropout(self, input, keep_prob, name):
keep = 1 - self.use_dropout + (self.use_dropout * keep_prob)
return tf.nn.dropout(input, keep, name=name)
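        # Note on `keep` above: use_dropout defaults to 1.0, so keep == keep_prob
        # during training; feeding use_dropout = 0.0 gives keep == 1.0, which turns
        # dropout into a no-op for inference.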
| mlwithtf/mlwithtf | chapter_12/caffe-tensorflow/kaffe/tensorflow/network.py | Python | apache-2.0 | 9,629 |
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-12-26 23:05
# Ported from the PyTorch implementation https://github.com/zysite/biaffine-parser
import tensorflow as tf
from hanlp.utils.tf_util import tf_bernoulli
class Biaffine(tf.keras.layers.Layer):
def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True, trainable=True, name=None, dtype=None, dynamic=False,
**kwargs):
super().__init__(trainable, name, dtype, dynamic, **kwargs)
self.n_in = n_in
self.n_out = n_out
self.bias_x = bias_x
self.bias_y = bias_y
self.weight = None
def build(self, input_shape):
self.weight = self.add_weight(name='kernel',
shape=(self.n_out,
self.n_in + self.bias_x,
self.n_in + self.bias_y),
initializer='zero')
def extra_repr(self):
s = f"n_in={self.n_in}, n_out={self.n_out}"
if self.bias_x:
s += f", bias_x={self.bias_x}"
if self.bias_y:
s += f", bias_y={self.bias_y}"
return s
# noinspection PyMethodOverriding
def call(self, x, y, **kwargs):
if self.bias_x:
x = tf.concat((x, tf.ones_like(x[..., :1])), -1)
if self.bias_y:
y = tf.concat((y, tf.ones_like(y[..., :1])), -1)
# [batch_size, n_out, seq_len, seq_len]
s = tf.einsum('bxi,oij,byj->boxy', x, self.weight, y)
# remove dim 1 if n_out == 1
if self.n_out == 1:
s = tf.squeeze(s, axis=1)
return s
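# Shape sketch (hypothetical sizes): with x and y of shape [batch, seq_len, n_in],
# the einsum above produces [batch, n_out, seq_len, seq_len] pairwise scores,
# squeezed to [batch, seq_len, seq_len] when n_out == 1.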
class MLP(tf.keras.layers.Layer):
def __init__(self, n_hidden, dropout=0, trainable=True, name=None, dtype=None, dynamic=False, **kwargs):
super().__init__(trainable, name, dtype, dynamic, **kwargs)
self.linear = tf.keras.layers.Dense(n_hidden, kernel_initializer='orthogonal')
self.activation = tf.keras.layers.LeakyReLU(0.1)
self.dropout = SharedDropout(p=dropout)
def call(self, x, **kwargs):
x = self.linear(x)
x = self.activation(x)
x = self.dropout(x)
return x
class SharedDropout(tf.keras.layers.Layer):
def __init__(self, p=0.5, batch_first=True, trainable=True, name=None, dtype=None, dynamic=False, **kwargs):
"""Dropout on timesteps with bernoulli distribution"""
super().__init__(trainable, name, dtype, dynamic, **kwargs)
self.p = p
self.batch_first = batch_first
def extra_repr(self):
s = f"p={self.p}"
if self.batch_first:
s += f", batch_first={self.batch_first}"
return s
def call(self, x, training=None, **kwargs):
if training and self.p > 0:
if self.batch_first:
mask = self.get_mask(x[:, 0], self.p)
else:
mask = self.get_mask(x[0], self.p)
x *= tf.expand_dims(mask, axis=1) if self.batch_first else mask
return x
@staticmethod
def get_mask(x, p):
mask = tf_bernoulli(tf.shape(x), 1 - p, x.dtype)
mask = mask / (1 - p)
return mask
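# Note on the scaling above: tf_bernoulli is assumed to draw 0/1 samples that are
# 1 with probability 1 - p, so after dividing by (1 - p) kept positions carry
# 1 / (1 - p) and the mask has expectation 1 (inverted dropout); the same mask is
# then broadcast across every timestep of the sequence.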
class IndependentDropout(tf.keras.layers.Layer):
def __init__(self, p=0.5, trainable=True, name=None, dtype=None, dynamic=False, **kwargs):
"""Dropout on the first two dimensions"""
super().__init__(trainable, name, dtype, dynamic, **kwargs)
self.p = p
def extra_repr(self):
return f"p={self.p}"
def call(self, inputs, training=None, **kwargs):
if training and self.p > 0:
masks = [tf_bernoulli(tf.shape(x)[:2], 1 - self.p)
for x in inputs]
total = sum(masks)
            scale = len(inputs) / tf.math.maximum(total, tf.ones_like(total))
masks = [mask * scale for mask in masks]
inputs = [item * tf.expand_dims(mask, axis=-1)
for item, mask in zip(inputs, masks)]
return inputs
| hankcs/HanLP | hanlp/components/parsers/biaffine_tf/layers.py | Python | apache-2.0 | 4,019 |
'''
Test deleting and expunging images and running ops on VMs created from them.
The key steps:
-add image1
-create vm1 from image1
-export image1
-create image2 from vm1
-export image2
-create vm2 from image2
-del and expunge image1
-change vm2 os
-del image2
-resize data volume on vm2
-expunge image2
-change vm2 state
@author: PxChen
'''
import os
import apibinding.inventory as inventory
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.zstack_test.zstack_test_image as test_image
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.zstack_test.zstack_test_image as zstack_image_header
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
image1 = None
def test():
global image1
global test_obj_dict
#run condition
hosts = res_ops.query_resource(res_ops.HOST)
if len(hosts) <= 1:
        test_util.test_skip("skip test because the host count does not satisfy host_num > 1")
bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, None, fields=['uuid'])
image_name1 = 'image1_a'
image_option = test_util.ImageOption()
image_option.set_format('qcow2')
image_option.set_name(image_name1)
#image_option.set_system_tags('qemuga')
image_option.set_mediaType('RootVolumeTemplate')
image_option.set_url(os.environ.get('imageUrl_s'))
image_option.set_backup_storage_uuid_list([bss[0].uuid])
image_option.set_timeout(3600*1000)
image1 = zstack_image_header.ZstackTestImage()
image1.set_creation_option(image_option)
image1.add_root_volume_template()
image1.check()
#export image
if bss[0].type in [inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE]:
image1.export()
image_name = os.environ.get('imageName_net')
l3_name = os.environ.get('l3VlanNetworkName1')
vm1 = test_stub.create_vm(image_name1, image_name, l3_name)
test_obj_dict.add_vm(vm1)
cond = res_ops.gen_query_conditions('uuid', '=', bss[0].uuid)
bs = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)[0]
if bs.type == 'SftpBackupStorage':
vm1.stop()
#create image by vm root volume
created_vm_img_name = "created_vm_image1"
img_option2 = test_util.ImageOption()
img_option2.set_backup_storage_uuid_list([bss[0].uuid])
img_option2.set_root_volume_uuid(vm1.vm.rootVolumeUuid)
img_option2.set_name(created_vm_img_name)
image2 = test_image.ZstackTestImage()
image2.set_creation_option(img_option2)
image2.create()
test_obj_dict.add_image(image2)
#export image
if bss[0].type in [inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE]:
image2.export()
#create vm
vm2 = test_stub.create_vm('image-vm', created_vm_img_name, l3_name)
#del and expunge image1
image1.delete()
image1.expunge()
# vm ops test
test_stub.vm_ops_test(vm2, "VM_TEST_CHANGE_OS")
#del and expunge image2
image2.delete()
test_stub.vm_ops_test(vm2, "VM_TEST_RESIZE_DVOL")
image2.expunge()
# vm ops test
test_stub.vm_ops_test(vm2, "VM_TEST_STATE")
test_lib.lib_robot_cleanup(test_obj_dict)
test_util.test_pass('Cloned VM ops for BS Success')
# Will be called only if exception happens in test().
def error_cleanup():
global image1
global test_obj_dict
test_lib.lib_error_cleanup(test_obj_dict)
try:
image1.delete()
except:
pass
| zstackio/zstack-woodpecker | integrationtest/vm/multihosts/bs/test_del_exp_img_cpyd_vm_resize_rvol_reimg_mig.py | Python | apache-2.0 | 3,564 |
def nabar_action_handler_nabar_autocomplete(context, action = None, query = '', **args):
''''''
results = []
query = query.replace('%', '').replace("'", '')
try:
db = IN.db
connection = db.connection
limit = 10
# TODO: make it dynamic
cursor = db.execute('''SELECT
n.id,
n.name,
string_agg(l.value, ',') AS emails,
string_agg(l2.value, ',') AS names
FROM
account.nabar n
LEFT JOIN account.login l ON n.nabar_id = l.nabar_id and l.type = 'email'
LEFT JOIN account.login l2 ON n.nabar_id = l2.nabar_id and l2.type = 'name'
WHERE
n.status > 0 AND
l.status > 0 AND
(
n.name like %(query)s OR
l.value like %(query)s OR
l2.value like %(query)s
)
GROUP BY n.id
LIMIT %(limit)s
''', {
'limit' : limit,
'query' : query.join(('%', '%'))
})
if cursor.rowcount >= 0:
for row in cursor:
names = set()
for name in row['names'].split(','):
names.add(name)
for name in row['emails'].split(','):
names.add(name)
results.append({
'id' : row['id'],
'text' : '<h3>' + row['name'] + '</h3> ' + ', '.join(names),
'item' : row['name']
})
except:
IN.logger.debug()
context.response = In.core.response.JSONResponse(output = results)
| vinoth3v/In | In/nabar/page/nabar_autocomplete.py | Python | apache-2.0 | 1,286 |
# -*- coding: utf-8 -*-
"""
docker_registry.drivers.s3
~~~~~~~~~~~~~~~~~~~~~~~~~~
This is a s3 based driver.
"""
# This fixes an issue where boto sends unicode to gevent.socket.getaddrinfo in
# an eventlet causing the event pool to hang in a deadlock state.
# This initiates the unicode => idna conversion outside of getaddrinfo,
# preventing the deadlock.
# See https://github.com/gevent/gevent/issues/349 for context.
u'fix for gevent deadlock'.encode('idna')
import gevent.monkey
gevent.monkey.patch_all()
import docker_registry.core.boto as coreboto
from docker_registry.core import compat
from docker_registry.core import exceptions
from docker_registry.core import lru
import logging
import os
import re
import time
import boto.exception
import boto.s3
import boto.s3.connection
import boto.s3.key
logger = logging.getLogger(__name__)
class Cloudfront():
def __init__(self, awsaccess, awssecret, base, keyid, privatekey):
boto.connect_cloudfront(
awsaccess,
awssecret
)
host = re.compile('^https?://([^/]+)').findall(base)
self.dist = boto.cloudfront.distribution.Distribution(domain_name=host)
self.base = base
self.keyid = keyid
self.privatekey = privatekey
try:
self.privatekey = open(privatekey).read()
except Exception:
logger.debug('Passed private key is not readable. Assume string.')
def sign(self, url, expire_time=0):
path = os.path.join(self.base, url)
if expire_time:
expire_time = time.time() + expire_time
return self.dist.create_signed_url(
path,
self.keyid,
private_key_string=self.privatekey,
expire_time=int(expire_time)
)
def pub(self, path):
return os.path.join(self.base, path)
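# Usage sketch (hypothetical values): when CloudFront is configured, the driver
# signs redirect URLs through this wrapper instead of returning plain S3 URLs.
#
#     cf = Cloudfront('AKIA...', 'secret', 'https://dxxxx.cloudfront.net',
#                     'KEYPAIRID', '/path/to/pk.pem')
#     url = cf.sign('images/abc/layer', expire_time=60)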
class Storage(coreboto.Base):
def __init__(self, path, config):
super(Storage, self).__init__(path, config)
def _build_connection_params(self):
kwargs = super(Storage, self)._build_connection_params()
if self._config.s3_secure is not None:
kwargs['is_secure'] = (self._config.s3_secure is True)
return kwargs
def makeConnection(self):
kwargs = self._build_connection_params()
# Connect cloudfront if we are required to
if self._config.cloudfront:
self.signer = Cloudfront(
self._config.s3_access_key,
self._config.s3_secret_key,
self._config.cloudfront['base'],
self._config.cloudfront['keyid'],
self._config.cloudfront['keysecret']
).sign
else:
self.signer = None
if self._config.s3_use_sigv4 is True:
if self._config.boto_host is None:
logger.warn("No S3 Host specified, Boto won't use SIGV4!")
boto.config.add_section('s3')
boto.config.set('s3', 'use-sigv4', 'True')
if self._config.s3_region is not None:
return boto.s3.connect_to_region(
region_name=self._config.s3_region,
aws_access_key_id=self._config.s3_access_key,
aws_secret_access_key=self._config.s3_secret_key,
**kwargs)
logger.warn("No S3 region specified, using boto default region, " +
"this may affect performance and stability.")
return boto.s3.connection.S3Connection(
self._config.s3_access_key,
self._config.s3_secret_key,
**kwargs)
def makeKey(self, path):
return boto.s3.key.Key(self._boto_bucket, path)
@lru.set
def put_content(self, path, content):
path = self._init_path(path)
key = self.makeKey(path)
key.set_contents_from_string(
content, encrypt_key=(self._config.s3_encrypt is True))
return path
def stream_write(self, path, fp):
        # Minimum upload part size on S3 is 5MB
buffer_size = 5 * 1024 * 1024
if self.buffer_size > buffer_size:
buffer_size = self.buffer_size
path = self._init_path(path)
mp = self._boto_bucket.initiate_multipart_upload(
path, encrypt_key=(self._config.s3_encrypt is True))
num_part = 1
try:
while True:
buf = fp.read(buffer_size)
if not buf:
break
io = compat.StringIO(buf)
mp.upload_part_from_file(io, num_part)
num_part += 1
io.close()
except IOError as e:
raise e
mp.complete_upload()
def content_redirect_url(self, path):
path = self._init_path(path)
key = self.makeKey(path)
if not key.exists():
raise IOError('No such key: \'{0}\''.format(path))
# No cloudfront? Sign to the bucket
if not self.signer:
return key.generate_url(
expires_in=1200,
method='GET',
query_auth=True)
# Have cloudfront? Sign it
return self.signer(path, expire_time=60)
def get_content(self, path, tries=0):
try:
return super(Storage, self).get_content(path)
except exceptions.FileNotFoundError as e:
if tries <= 3:
time.sleep(.1)
return self.get_content(path, tries + 1)
else:
raise e
| baseboxorg/registry-debootstrap | docker_registry/drivers/s3.py | Python | apache-2.0 | 5,503 |
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Filter Scheduler.
"""
import mox
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova.scheduler import driver
from nova.scheduler import filter_scheduler
from nova.scheduler import host_manager
from nova.scheduler import least_cost
from nova.tests.scheduler import fakes
from nova.tests.scheduler import test_scheduler
def fake_filter_hosts(hosts, filter_properties):
return list(hosts)
class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
"""Test case for Filter Scheduler."""
driver_cls = filter_scheduler.FilterScheduler
def test_run_instance_no_hosts(self):
def _fake_empty_call_zone_method(*args, **kwargs):
return []
sched = fakes.FakeFilterScheduler()
uuid = 'fake-uuid1'
fake_context = context.RequestContext('user', 'project')
request_spec = {'instance_type': {'memory_mb': 1, 'root_gb': 1,
'ephemeral_gb': 0},
'instance_properties': {'project_id': 1},
'instance_uuids': [uuid]}
self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
compute_utils.add_instance_fault_from_exc(fake_context,
uuid, mox.IsA(exception.NoValidHost), mox.IgnoreArg())
db.instance_update_and_get_original(fake_context, uuid,
{'vm_state': vm_states.ERROR,
'task_state': None}).AndReturn(({}, {}))
self.mox.ReplayAll()
sched.schedule_run_instance(
fake_context, request_spec, None, None, None, None, {})
def test_run_instance_non_admin(self):
self.was_admin = False
def fake_get(context, *args, **kwargs):
# make sure this is called with admin context, even though
# we're using user context below
self.was_admin = context.is_admin
return {}
sched = fakes.FakeFilterScheduler()
self.stubs.Set(sched.host_manager, 'get_all_host_states', fake_get)
fake_context = context.RequestContext('user', 'project')
uuid = 'fake-uuid1'
request_spec = {'instance_type': {'memory_mb': 1, 'local_gb': 1},
'instance_properties': {'project_id': 1},
'instance_uuids': [uuid]}
self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
compute_utils.add_instance_fault_from_exc(fake_context,
uuid, mox.IsA(exception.NoValidHost), mox.IgnoreArg())
db.instance_update_and_get_original(fake_context, uuid,
{'vm_state': vm_states.ERROR,
'task_state': None}).AndReturn(({}, {}))
self.mox.ReplayAll()
sched.schedule_run_instance(
fake_context, request_spec, None, None, None, None, {})
self.assertTrue(self.was_admin)
def test_schedule_bad_topic(self):
"""Parameter checking."""
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project')
self.assertRaises(NotImplementedError, sched._schedule, fake_context,
"foo", {}, {})
def test_scheduler_includes_launch_index(self):
ctxt = "fake-context"
fake_kwargs = {'fake_kwarg1': 'fake_value1',
'fake_kwarg2': 'fake_value2'}
instance_opts = {'fake_opt1': 'meow'}
request_spec = {'instance_uuids': ['fake-uuid1', 'fake-uuid2'],
'instance_properties': instance_opts}
instance1 = {'uuid': 'fake-uuid1'}
instance2 = {'uuid': 'fake-uuid2'}
def _has_launch_index(expected_index):
"""Return a function that verifies the expected index."""
def _check_launch_index(value):
if 'instance_properties' in value:
if 'launch_index' in value['instance_properties']:
index = value['instance_properties']['launch_index']
if index == expected_index:
return True
return False
return _check_launch_index
class ContextFake(object):
def elevated(self):
return ctxt
context_fake = ContextFake()
self.mox.StubOutWithMock(self.driver, '_schedule')
self.mox.StubOutWithMock(self.driver, '_provision_resource')
self.driver._schedule(context_fake, 'compute',
request_spec, {}, ['fake-uuid1', 'fake-uuid2']
).AndReturn(['host1', 'host2'])
# instance 1
self.driver._provision_resource(
ctxt, 'host1',
mox.Func(_has_launch_index(0)), {},
None, None, None, None,
instance_uuid='fake-uuid1').AndReturn(instance1)
# instance 2
self.driver._provision_resource(
ctxt, 'host2',
mox.Func(_has_launch_index(1)), {},
None, None, None, None,
instance_uuid='fake-uuid2').AndReturn(instance2)
self.mox.ReplayAll()
self.driver.schedule_run_instance(context_fake, request_spec,
None, None, None, None, {})
def test_schedule_happy_day(self):
"""Make sure there's nothing glaringly wrong with _schedule()
by doing a happy day pass through."""
self.next_weight = 1.0
def _fake_weighted_sum(functions, hosts, options):
self.next_weight += 2.0
host_state = hosts[0]
return least_cost.WeightedHost(self.next_weight,
host_state=host_state)
sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project',
is_admin=True)
self.stubs.Set(sched.host_manager, 'filter_hosts',
fake_filter_hosts)
self.stubs.Set(least_cost, 'weighted_sum', _fake_weighted_sum)
fakes.mox_host_manager_db_calls(self.mox, fake_context)
request_spec = {'num_instances': 10,
'instance_type': {'memory_mb': 512, 'root_gb': 512,
'ephemeral_gb': 0,
'vcpus': 1},
'instance_properties': {'project_id': 1,
'root_gb': 512,
'memory_mb': 512,
'ephemeral_gb': 0,
'vcpus': 1}}
self.mox.ReplayAll()
weighted_hosts = sched._schedule(fake_context, 'compute',
request_spec, {})
self.assertEquals(len(weighted_hosts), 10)
for weighted_host in weighted_hosts:
self.assertTrue(weighted_host.host_state is not None)
def test_schedule_prep_resize_doesnt_update_host(self):
fake_context = context.RequestContext('user', 'project',
is_admin=True)
sched = fakes.FakeFilterScheduler()
def _return_hosts(*args, **kwargs):
host_state = host_manager.HostState('host2', 'compute')
return [least_cost.WeightedHost(1.0, host_state=host_state)]
self.stubs.Set(sched, '_schedule', _return_hosts)
info = {'called': 0}
def _fake_instance_update_db(*args, **kwargs):
# This should not be called
info['called'] = 1
self.stubs.Set(driver, 'instance_update_db',
_fake_instance_update_db)
instance = {'uuid': 'fake-uuid', 'host': 'host1'}
sched.schedule_prep_resize(fake_context, {}, {}, {},
instance, {}, None)
self.assertEqual(info['called'], 0)
def test_get_cost_functions(self):
fixture = fakes.FakeFilterScheduler()
fns = fixture.get_cost_functions()
self.assertEquals(len(fns), 1)
weight, fn = fns[0]
self.assertEquals(weight, -1.0)
hostinfo = host_manager.HostState('host', 'compute')
hostinfo.update_from_compute_node(dict(memory_mb=1000,
local_gb=0, vcpus=1, disk_available_least=1000,
free_disk_mb=1000, free_ram_mb=872, vcpus_used=0,
local_gb_used=0, updated_at=None))
self.assertEquals(872, fn(hostinfo, {}))
def test_max_attempts(self):
self.flags(scheduler_max_attempts=4)
sched = fakes.FakeFilterScheduler()
self.assertEqual(4, sched._max_attempts())
def test_invalid_max_attempts(self):
self.flags(scheduler_max_attempts=0)
sched = fakes.FakeFilterScheduler()
self.assertRaises(exception.NovaException, sched._max_attempts)
def test_retry_disabled(self):
"""Retry info should not get populated when re-scheduling is off"""
self.flags(scheduler_max_attempts=1)
sched = fakes.FakeFilterScheduler()
instance_properties = {}
request_spec = dict(instance_properties=instance_properties)
filter_properties = {}
sched._schedule(self.context, 'compute', request_spec,
filter_properties=filter_properties)
# should not have retry info in the populated filter properties:
self.assertFalse("retry" in filter_properties)
def test_retry_attempt_one(self):
"""Test retry logic on initial scheduling attempt"""
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
instance_properties = {}
request_spec = dict(instance_properties=instance_properties)
filter_properties = {}
sched._schedule(self.context, 'compute', request_spec,
filter_properties=filter_properties)
num_attempts = filter_properties['retry']['num_attempts']
self.assertEqual(1, num_attempts)
def test_retry_attempt_two(self):
"""Test retry logic when re-scheduling"""
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
instance_properties = {}
request_spec = dict(instance_properties=instance_properties)
retry = dict(num_attempts=1)
filter_properties = dict(retry=retry)
sched._schedule(self.context, 'compute', request_spec,
filter_properties=filter_properties)
num_attempts = filter_properties['retry']['num_attempts']
self.assertEqual(2, num_attempts)
def test_retry_exceeded_max_attempts(self):
"""Test for necessary explosion when max retries is exceeded"""
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
instance_properties = {}
request_spec = dict(instance_properties=instance_properties)
retry = dict(num_attempts=2)
filter_properties = dict(retry=retry)
self.assertRaises(exception.NoValidHost, sched._schedule, self.context,
'compute', request_spec, filter_properties=filter_properties)
def test_add_retry_host(self):
retry = dict(num_attempts=1, hosts=[])
filter_properties = dict(retry=retry)
host = "fakehost"
sched = fakes.FakeFilterScheduler()
sched._add_retry_host(filter_properties, host)
hosts = filter_properties['retry']['hosts']
self.assertEqual(1, len(hosts))
self.assertEqual(host, hosts[0])
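# --- Editor's note: hedged illustration, not part of the original test module. Pieced
# together only from the assertions above, the dict the retry tests inspect looks
# roughly like:
#     filter_properties = {'retry': {'num_attempts': 2, 'hosts': ['fakehost']}}
# where _add_retry_host() appends each host that was handed out, presumably so a later
# scheduling attempt can steer away from hosts that already failed. ---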
|
paulmathews/nova
|
nova/tests/scheduler/test_filter_scheduler.py
|
Python
|
apache-2.0
| 12,312
|
# -*- coding: utf-8 -*-
'''
Created on Sep 24, 2013
@author: jin
'''
from django.db import models
from django.contrib.auth.models import User
import re
from django.core import validators
class UserProfile(models.Model):
    user = models.OneToOneField(User, verbose_name='user')
    gender = models.CharField(verbose_name='gender', max_length=2, choices=(('M', 'male'), ('F', 'female')))
    IDCard = models.CharField(verbose_name='ID card', max_length=20, unique=True, validators=[
        validators.RegexValidator(re.compile(r'^\d{15}(?:\d{3}|\d{2}[a-zA-Z]|)$'), "ID number has the wrong length or format!", 'invalid')
    ])
    phone = models.CharField(verbose_name='mobile number', max_length=20, validators=[
        validators.RegexValidator(re.compile(r'^\d{11}$'), "Mobile number must be 11 digits!", 'invalid')
    ])
    uid = models.CharField(verbose_name='WeChat uid', max_length=125, unique=True)
    bankCard = models.CharField(verbose_name='credit card', max_length=30)
    data = models.CharField(verbose_name='data', max_length=255)
    class Meta:
        verbose_name = 'user profile'
        verbose_name_plural = 'user profiles'
db_table = "user_profile"
|
SnailJin/house
|
house/models.py
|
Python
|
apache-2.0
| 1,183
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import object
class AvatarPlugin(object):
"""
Abstract interface for all Avatar plugins
Upon start() and stop(), plugins are expected to register/unregister
their own event handlers by the means of :func:`System.register_event_listener`
and :func:`System.unregister_event_listener`
"""
def __init__(self, system):
self._system = system
def init(self, **kwargs):
assert(False) #Not implemented
def start(self, **kwargs):
assert(False) #Not implemented
def stop(self, **kwargs):
assert(False) #Not implemented
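# --- Editor's note: a minimal hedged sketch, not part of the original file, of what a
# concrete plugin could look like. The register/unregister calls mirror the methods
# named in the class docstring above; the LoggingPlugin name and its handler are purely
# illustrative assumptions. ---
class LoggingPlugin(AvatarPlugin):
    """Hypothetical plugin: hooks an event listener on start() and removes it on stop()."""
    def init(self, **kwargs):
        self._handler = lambda event: print("avatar event:", event)
    def start(self, **kwargs):
        self._system.register_event_listener(self._handler)
    def stop(self, **kwargs):
        self._system.unregister_event_listener(self._handler)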
|
Bounti/avatar-python
|
avatar/plugins/avatar_plugin.py
|
Python
|
apache-2.0
| 835
|
import time
from sinchsms import SinchSMS
number = 'your_phone_mobile_number'
message = 'your_sms_message'
#sign up https://www.sinch.com to get app_key & app secret
client = SinchSMS('your_app_key', 'your_app_secret')  # replace with the key and secret from your Sinch app
print("Sending '%s' to %s" % (message, number))
response = client.send_message(number, message)
message_id = response['messageId']
response = client.check_status(message_id)
while response['status'] != 'Successful':
print(response['status'])
time.sleep(1)
response = client.check_status(message_id)
print(response['status'])
|
amanmehara/programming-app-data
|
Python/Sending SMS/SendingSms.py
|
Python
|
apache-2.0
| 559
|
#---------------------------------------------------------------------------
# Introduction to Computer Programming - IPC
# Universidade do Estado do Amazonas - UEA
# Prof. Jucimar Jr
# Adham Lucas da Silva Oliveira 1715310059
# Gabriel Barroso da Silva Lima 1715310011
# Guilherme Silva de Oliveira 1715310034
# Natália Cavalcante Xavier 1715310021
# Tiago Ferreira Aranha 1715310047
#
# Implement a Python algorithm that computes sen(X). The value of X is
# entered in degrees. The sine of X is computed as the sum of the first 15
# terms of the series given in the exercise.
# These terms must be stored in a vector (list) of reals.
#---------------------------------------------------------------------------
import math
X = math.radians(float(input()))  # the angle is entered in degrees; the series needs radians
i = 0
expoente = 1
fatorial = 1
divisor = 1
termos = []
while i < 15:
while fatorial <= expoente:
divisor *= fatorial
fatorial += 1
if i % 2 == 0:
termo = (X**expoente)/divisor
termos.append(termo)
else:
termo = -((X**expoente)/divisor)
termos.append(termo)
expoente += 2
fatorial = 1
divisor = 1
i += 1
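# --- Editor's note: hedged sanity check, not part of the original exercise. With the
# degree-to-radian conversion added above, the sum of the 15 series terms should agree
# with math.sin(X) to within floating-point precision for moderate angles. ---
import math  # harmless if already imported above
print(sum(termos), math.sin(X))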
|
jucimarjr/IPC_2017-1
|
lista06/lista06_lista02_questao18.py
|
Python
|
apache-2.0
| 1,298
|
# prog6_6.py
# This program sums a list of numbers using functions
# <Chad Hobbs>
def sumList(nums): #function sums a list
total = 0
for i in range((len(nums))):
total = total + nums[i]
return total
def main(): # Main program
initial = [2,3,4,5,6,7,8,9]
print("The original list is",initial)
final = sumList(initial)
print("The sum of the list is",final)
main()
|
itsallvoodoo/csci-school
|
CSCI220/Week 08 - MAR05-09/prog6_6.py
|
Python
|
apache-2.0
| 434
|
import torch
from allennlp.training.learning_rate_schedulers.learning_rate_scheduler import LearningRateScheduler
@LearningRateScheduler.register("noam")
class NoamLR(LearningRateScheduler):
"""
Implements the Noam Learning rate schedule. This corresponds to increasing
the learning rate linearly for the first `warmup_steps` training steps, and
decreasing it thereafter proportionally to the inverse square root of the
step number, scaled by the inverse square root of the dimensionality of the
model. Time will tell if this is just madness or it's actually important.
The formula for learning rate when using `NoamLR` is:
`lr`= `factor *` (
`model_size **` (`-0.5`)
`*` min(`step**` (`-0.5`), `step * warmup_steps **` (`-1.5`))
)
Registered as a `LearningRateScheduler` with name "noam".
# Parameters
optimizer : `torch.optim.Optimizer`
This argument does not get an entry in a configuration file for the
object.
model_size : `int`, required.
The hidden size parameter which dominates the number of parameters in
your model.
warmup_steps : `int`, required.
The number of steps to linearly increase the learning rate.
factor : `float`, optional (default = `1.0`).
The overall scale factor for the learning rate decay.
# Example
Config for using `NoamLR` with a model size of `1024`, warmup steps
of `5`, and `factor` of `.25`.
```json
{
...
"trainer":{
...
"learning_rate_scheduler": {
"type": "noam",
"model_size": 1024,
"warmup_steps":5,
"factor":0.25
},
...
}
}
```
Note that you do NOT pass an `optimizer` key to the Learning rate scheduler.
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
model_size: int,
warmup_steps: int,
factor: float = 1.0,
last_epoch: int = -1,
) -> None:
self.warmup_steps = warmup_steps
self.factor = factor
self.model_size = model_size
super().__init__(optimizer, last_epoch=last_epoch)
def step(self, metric: float = None) -> None:
pass
def step_batch(self, batch_num_total: int = None) -> None:
if batch_num_total is None:
self.last_epoch += 1 # type: ignore
else:
self.last_epoch = batch_num_total
for param_group, learning_rate in zip(self.optimizer.param_groups, self.get_values()):
param_group["lr"] = learning_rate
def get_values(self):
step = max(self.last_epoch, 1)
scale = self.factor * (
self.model_size ** (-0.5) * min(step ** (-0.5), step * self.warmup_steps ** (-1.5))
)
return [scale for _ in range(len(self.base_values))]
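# --- Editor's note: hedged worked example, not part of the original module. Using the
# settings from the docstring example (model_size=1024, warmup_steps=5, factor=0.25),
# get_values() yields factor * model_size**-0.5 * min(step**-0.5, step * 5**-1.5):
#     step 1:  0.25 * (1/32) * 1 * 5**-1.5   ~= 6.99e-4
#     step 5:  0.25 * (1/32) * 5**-0.5       ~= 3.49e-3   (peak, end of warmup)
#     step 20: 0.25 * (1/32) * 20**-0.5      ~= 1.75e-3
# i.e. a linear ramp over the warmup steps followed by inverse-square-root decay. ---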
|
allenai/allennlp
|
allennlp/training/learning_rate_schedulers/noam.py
|
Python
|
apache-2.0
| 2,903
|
# -*- coding: utf-8 -*-
'''
Program: focus on Mi1 on ganji & 58
Version: 0.1
@author: Michael Wan
Date: 2015.02.16
Language: Python 2.7
Usage: enter a paginated URL with the trailing page number removed, then set the start and end page numbers.
Purpose: download every page in that range and save each one as an HTML file.
@see: http://blog.csdn.net/pleasecallmewhy/article/details/8927832
'''
import re
import string, urllib2
import sys
import time
import crawler
test = 'a abc abcd abcdef'
matches = re.finditer(r'(\w+)\s+(\w+)', test)
results = [str(match.group(1)) for match in matches]
print(results)
|
onehao/opensource
|
pyml/hackerrank/2ndfun_email_tracker.py
|
Python
|
apache-2.0
| 623
|
__author__ = 'ono'
import unittest
from pubkey import *
class MyTestCase(unittest.TestCase):
def test_get_key_path(self):
self.assertEqual(
get_key_path("~/.ssh/", "id_rsa"),
("~/.ssh/", "id_rsa", "id_rsa.pub")
)
self.assertEqual(
get_key_path("~/.ssh/", "~/.ssh/id_rsa"),
("~/.ssh/", "id_rsa", "id_rsa.pub")
)
self.assertEqual(
get_key_path("~/.ssh/", "~/.ssh/sample"),
("~/.ssh/", "sample", "sample.pub")
)
self.assertEqual(
get_key_path("~/.ssh/", "~/.ssh/sample.key"),
("~/.ssh/", "sample.key", "sample.key.pub")
)
self.assertEqual(
get_key_path("~/.ssh/", "~/.ssh/sample.any"),
("~/.ssh/", "sample.any", "sample.any.pub")
)
self.assertEqual(
get_key_path("~/.ssh/", "~/.ssh/sample.any.key"),
("~/.ssh/", "sample.any.key", "sample.any.key.pub")
)
self.assertEqual(
get_key_path("/home/", "sample.any.key"),
("/home/", "sample.any.key", "sample.any.key.pub")
)
def test_gen_config_data(self):
self.assertEqual(
gen_config_data("~/.ssh/id_rsa", "","abc@home.com"),
"Host home.com\n\thostname home.com\n\tUser abc\n\tIdentityFile ~/.ssh/id_rsa\n\n"
)
self.assertEqual(
gen_config_data("~/.ssh/id_rsa", "server1","abc@home.com"),
"Host server1\n\thostname home.com\n\tUser abc\n\tIdentityFile ~/.ssh/id_rsa\n\n"
)
self.assertEqual(
gen_config_data("~/.ssh/hoge", "server1","1@home.com"),
"Host server1\n\thostname home.com\n\tUser 1\n\tIdentityFile ~/.ssh/hoge\n\n"
)
if __name__ == '__main__':
unittest.main()
|
yuki2006/pubkey
|
test_pubkey.py
|
Python
|
apache-2.0
| 1,837
|
class Grid:
def __init__(self):
self.grid = [["Grass","Dirt","Grass"],["Dirt","Grass","Grass"],["Grass","Rock","Dirt"]]
@property
def height(self):
return len(self.grid)
@property
def width(self):
return len(self.grid[0])
def tile(self,x_pos,y_pos):
try:
return self.grid[y_pos][x_pos]
except IndexError:
return None
class Actor:
def __init__(self,grid):
self.grid = grid
self.x_pos = 0
self.y_pos = 0
    def move_left(self):
        tile = self.grid.tile(self.x_pos - 1, self.y_pos)
        # only step onto a tile that exists and whose terrain is passable
        if self.x_pos > 0 and tile and self.test_terrain(tile):
            self.x_pos = self.x_pos - 1
    def move_right(self):
        if not self.x_pos == self.grid.width - 1:
            self.x_pos = self.x_pos + 1
    def move_up(self):
        # moving up means a smaller y_pos; the guard keeps the actor on the grid
        if not self.y_pos == 0:
            self.y_pos -= 1
    def move_down(self):
        if not self.y_pos == self.grid.height - 1:
            self.y_pos += 1
def test_terrain(self,terrain):
if terrain == "Rock":
return False
else:
return True
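# --- Editor's note: hedged usage sketch, not part of the original kata file; it relies
# only on the Grid and Actor classes above (with the movement-direction fixes applied). ---
grid = Grid()
actor = Actor(grid)
actor.move_right()    # onto the "Dirt" tile at (1, 0)
actor.move_down()     # onto the "Grass" tile at (1, 1)
print(actor.x_pos, actor.y_pos, grid.tile(actor.x_pos, actor.y_pos))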
|
mgrazebrook/dojo_modular_pyg0
|
model.py
|
Python
|
apache-2.0
| 1,146
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
*****************************************
Author: zhlinh
Email: zhlinhng@gmail.com
Version: 0.0.1
Created Time: 2016-01-06
Last_modify: 2016-01-07
******************************************
'''
'''
There are two sorted arrays nums1 and nums2 of size m and n respectively.
Find the median of the two sorted arrays.
The overall run time complexity should be O(log(m+n)).
'''
class Solution(object):
    def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
m, n = len(nums1), len(nums2)
if m > n:
# make sure m < n
nums1, nums2, m, n = nums2, nums1, n, m
# i in (imin, imax)
imin, imax, half_len = 0, m, (m + n + 1) // 2
while imin <= imax:
# binary search
i = (imin + imax) // 2
# i+j = m-i+n-j[or actually (m+n+1)-i-j], so j = (m+n+1)/2-i.
j = half_len - i
# move imin to right
if j > 0 and i < m and nums2[j-1] > nums1[i]:
imin = i + 1
# move imax to left
elif i > 0 and j < n and nums1[i-1] > nums2[j]:
imax = i - 1
# when meeting both two conditions, we get the answer here
else:
                # just in case both inputs are empty.
if i == 0 and j == 0:
return None
elif i == 0:
num1 = nums2[j-1]
elif j == 0:
num1 = nums1[i-1]
else:
num1 = max(nums1[i-1], nums2[j-1])
# if m+n is odd
if (m + n) % 2 == 1:
return num1
# in this case nums1 is all in left
if i == m:
num2 = nums2[j]
# in this case nums2 is all in left
elif j == n:
num2 = nums1[i]
else:
num2 = min(nums1[i], nums2[j])
# if m+n is even
return (num1 + num2) / 2.0
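# --- Editor's note: hedged usage example, not part of the original solution file. The
# method name findMedianSortedArrays is restored from the LeetCode problem this file
# targets (see the "0004.Median of Two Sorted Arrays" path in the metadata below). ---
sol = Solution()
print(sol.findMedianSortedArrays([1, 3], [2]))       # 2
print(sol.findMedianSortedArrays([1, 2], [3, 4]))    # 2.5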
|
zhlinh/leetcode
|
0004.Median of Two Sorted Arrays/solution.py
|
Python
|
apache-2.0
| 2,180
|
"""app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from . import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', views.index, name='index'),
]
|
rnzaws/test-ci-codepipeline-elastic-beanstalk
|
app/app/urls.py
|
Python
|
apache-2.0
| 824
|
# coding: utf-8
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class K8sIoApimachineryPkgApisMetaV1Status(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'code': 'int',
'details': 'K8sIoApimachineryPkgApisMetaV1StatusDetails',
'kind': 'str',
'message': 'str',
'metadata': 'K8sIoApimachineryPkgApisMetaV1ListMeta',
'reason': 'str',
'status': 'str'
}
attribute_map = {
'api_version': 'apiVersion',
'code': 'code',
'details': 'details',
'kind': 'kind',
'message': 'message',
'metadata': 'metadata',
'reason': 'reason',
'status': 'status'
}
def __init__(self, api_version=None, code=None, details=None, kind=None, message=None, metadata=None, reason=None, status=None):
"""
K8sIoApimachineryPkgApisMetaV1Status - a model defined in Swagger
"""
self._api_version = None
self._code = None
self._details = None
self._kind = None
self._message = None
self._metadata = None
self._reason = None
self._status = None
if api_version is not None:
self.api_version = api_version
if code is not None:
self.code = code
if details is not None:
self.details = details
if kind is not None:
self.kind = kind
if message is not None:
self.message = message
if metadata is not None:
self.metadata = metadata
if reason is not None:
self.reason = reason
if status is not None:
self.status = status
@property
def api_version(self):
"""
Gets the api_version of this K8sIoApimachineryPkgApisMetaV1Status.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
:return: The api_version of this K8sIoApimachineryPkgApisMetaV1Status.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this K8sIoApimachineryPkgApisMetaV1Status.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
:param api_version: The api_version of this K8sIoApimachineryPkgApisMetaV1Status.
:type: str
"""
self._api_version = api_version
@property
def code(self):
"""
Gets the code of this K8sIoApimachineryPkgApisMetaV1Status.
Suggested HTTP return code for this status, 0 if not set.
:return: The code of this K8sIoApimachineryPkgApisMetaV1Status.
:rtype: int
"""
return self._code
@code.setter
def code(self, code):
"""
Sets the code of this K8sIoApimachineryPkgApisMetaV1Status.
Suggested HTTP return code for this status, 0 if not set.
:param code: The code of this K8sIoApimachineryPkgApisMetaV1Status.
:type: int
"""
self._code = code
@property
def details(self):
"""
Gets the details of this K8sIoApimachineryPkgApisMetaV1Status.
Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.
:return: The details of this K8sIoApimachineryPkgApisMetaV1Status.
:rtype: K8sIoApimachineryPkgApisMetaV1StatusDetails
"""
return self._details
@details.setter
def details(self, details):
"""
Sets the details of this K8sIoApimachineryPkgApisMetaV1Status.
Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.
:param details: The details of this K8sIoApimachineryPkgApisMetaV1Status.
:type: K8sIoApimachineryPkgApisMetaV1StatusDetails
"""
self._details = details
@property
def kind(self):
"""
Gets the kind of this K8sIoApimachineryPkgApisMetaV1Status.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:return: The kind of this K8sIoApimachineryPkgApisMetaV1Status.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this K8sIoApimachineryPkgApisMetaV1Status.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param kind: The kind of this K8sIoApimachineryPkgApisMetaV1Status.
:type: str
"""
self._kind = kind
@property
def message(self):
"""
Gets the message of this K8sIoApimachineryPkgApisMetaV1Status.
A human-readable description of the status of this operation.
:return: The message of this K8sIoApimachineryPkgApisMetaV1Status.
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""
Sets the message of this K8sIoApimachineryPkgApisMetaV1Status.
A human-readable description of the status of this operation.
:param message: The message of this K8sIoApimachineryPkgApisMetaV1Status.
:type: str
"""
self._message = message
@property
def metadata(self):
"""
Gets the metadata of this K8sIoApimachineryPkgApisMetaV1Status.
Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:return: The metadata of this K8sIoApimachineryPkgApisMetaV1Status.
:rtype: K8sIoApimachineryPkgApisMetaV1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this K8sIoApimachineryPkgApisMetaV1Status.
Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param metadata: The metadata of this K8sIoApimachineryPkgApisMetaV1Status.
:type: K8sIoApimachineryPkgApisMetaV1ListMeta
"""
self._metadata = metadata
@property
def reason(self):
"""
Gets the reason of this K8sIoApimachineryPkgApisMetaV1Status.
A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.
:return: The reason of this K8sIoApimachineryPkgApisMetaV1Status.
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""
Sets the reason of this K8sIoApimachineryPkgApisMetaV1Status.
A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.
:param reason: The reason of this K8sIoApimachineryPkgApisMetaV1Status.
:type: str
"""
self._reason = reason
@property
def status(self):
"""
Gets the status of this K8sIoApimachineryPkgApisMetaV1Status.
Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
:return: The status of this K8sIoApimachineryPkgApisMetaV1Status.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this K8sIoApimachineryPkgApisMetaV1Status.
Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
:param status: The status of this K8sIoApimachineryPkgApisMetaV1Status.
:type: str
"""
self._status = status
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, K8sIoApimachineryPkgApisMetaV1Status):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
kubevirt/client-python
|
kubevirt/models/k8s_io_apimachinery_pkg_apis_meta_v1_status.py
|
Python
|
apache-2.0
| 11,208
|
__author__ = 'kaneg'
import json
import os
server_host = '0.0.0.0'
server_port = 8080
debug = False
lxd_api_url = 'unix:/var/lib/lxd/unix.socket'
ssl_key_path = '~/.config/lxc/client.key'
ssl_crt_path = '~/.config/lxc/client.crt'
web_shell_host = 'http://%(host)s:9090/?arg=%(container)s'
login_required = True
admin_user = 'admin'
admin_password = 'admin'
def get_users():
users = {}
if os.path.exists('users.json'):
users = json.load(open('users.json'))
return users
|
kaneg/lxdui
|
settings.py
|
Python
|
apache-2.0
| 493
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack import service
from rally.task import service as base_service
from tests.unit import test
class DiscoverTestCase(test.TestCase):
def test_discover_network_impl_based_on_service(self):
class SomeService(base_service.UnifiedService):
pass
@service.service("neutron", "network", version="2")
class NeutronV2Service(service.Service):
pass
@service.compat_layer(NeutronV2Service)
class UnifiedNeutronV2Service(SomeService):
pass
clients = mock.MagicMock()
clients.neutron.choose_version.return_value = "2"
clients.services.return_value = {}
clients.services.return_value = {"network": "neutron"}
self.assertIsInstance(SomeService(clients)._impl,
UnifiedNeutronV2Service)
|
yeming233/rally
|
tests/unit/plugins/openstack/test_service.py
|
Python
|
apache-2.0
| 1,441
|
from pyneat import Conf
from pyneat import Innovations
from pyneat.genotype import Genome
import mock
import random
def test_genome():
genome = Genome.minimal_fully_connected(0, (3, 2))
assert genome.neurons == [3, 0, 2]
assert len(genome.genes) == 6
def test_crossover_equal_mom_fit():
innovs = Innovations()
mom = Genome.minimal_fully_connected(0, (3, 2))
dad = mom.duplicate(1)
baby = mom.crossover(dad, 2.0, 1.0, innovs)
assert len(baby.genes) == 6
def test_crossover_equal_dad_fit():
innovs = Innovations()
mom = Genome.minimal_fully_connected(0, (3, 2))
dad = mom.duplicate(1)
baby = mom.crossover(dad, 1.0, 2.0, innovs)
assert len(baby.genes) == 6
def test_crossover_unequal_mom_mut_dad_fit():
innovs = Innovations()
mom = Genome.minimal_fully_connected(0, (3, 2))
innovs.innov = max(map(lambda x: x.innov, mom.genes))
dad = mom.duplicate(1)
mom.mutate_neuron(innovs)
baby = mom.crossover(dad, 2.0, 1.0, innovs)
assert len(baby.genes) == 8
def test_crossover_unequal_mom_mut_mom_fit():
innovs = Innovations()
mom = Genome.minimal_fully_connected(0, (3, 2))
innovs.innov = max(map(lambda x: x.innov, mom.genes))
dad = mom.duplicate(1)
mom.mutate_neuron(innovs)
baby = mom.crossover(dad, 1.0, 2.0, innovs)
assert len(baby.genes) == 6
def test_crossover_unequal_dad_mut_dad_fit():
innovs = Innovations()
mom = Genome.minimal_fully_connected(0, (3, 2))
innovs.innov = max(map(lambda x: x.innov, mom.genes))
dad = mom.duplicate(1)
dad.mutate_neuron(innovs)
baby = mom.crossover(dad, 1.0, 2.0, innovs)
assert len(baby.genes) == 8
def test_crossover_unequal_dad_mut_mom_fit():
innovs = Innovations()
mom = Genome.minimal_fully_connected(0, (3, 2))
innovs.innov = max(map(lambda x: x.innov, mom.genes))
dad = mom.duplicate(1)
dad.mutate_neuron(innovs)
baby = mom.crossover(dad, 2.0, 1.0, innovs)
assert len(baby.genes) == 6
def test_mutate_gene():
genome = Genome.minimal_fully_connected(0, (3, 2))
genome.neurons[1] += 1
innovations = Innovations()
innovations.innov = max(map(lambda x: x.innov, genome.genes))
old = random.choice
random.choice = mock.MagicMock(side_effect=[0, 3])
genome.mutate_gene(innovations)
random.choice = old
assert len(genome.genes) == 7
assert len(innovations.gene_innov) == 1
def test_mutate_gene_none_free():
genome = Genome.minimal_fully_connected(0, (3, 2))
innovations = Innovations()
innovations.innov = max(map(lambda x: x.innov, genome.genes))
old = random.choice
random.choice = mock.MagicMock(side_effect=[0, Genome.MAX_HIDDEN])
genome.mutate_gene(innovations)
random.choice = old
assert len(genome.genes) == 6
assert len(innovations.gene_innov) == 0
def test_mutate_neuron():
genome = Genome.minimal_fully_connected(0, (3, 2))
innovations = Innovations()
innovations.innov = max(map(lambda x: x.innov, genome.genes))
genome.mutate_neuron(innovations)
assert len(genome.genes) == 8
assert sum(genome.neurons) == 6
assert len(innovations.neuron_innov) == 1
def test_compatible():
conf = Conf()
g1 = Genome.minimal_fully_connected(0, (3, 2))
g2 = g1.duplicate(1)
g2.mutate_weights()
assert g1.compatible(conf, g2)
def test_compatible_diff_struct():
conf = Conf()
innovations = Innovations()
g1 = Genome.minimal_fully_connected(0, (3, 2))
innovations.innov = max(map(lambda x: x.innov, g1.genes))
g2 = g1.duplicate(1)
g2.mutate_neuron(innovations)
assert not g1.compatible(conf, g2)
def test_genesis():
genome = Genome.minimal_fully_connected(0, (3, 2))
net = genome.genesis()
assert net
assert net.dim == [3, 0, 2]
|
jasonb5/pyneat
|
tests/test_genome.py
|
Python
|
apache-2.0
| 3,859
|
# Copyright 2013 IBM Corp.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Client side of the conductor RPC API."""
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_versionedobjects import base as ovo_base
import jacket.compute.conf
from jacket.objects.compute import base as objects_base
from jacket import rpc
CONF = jacket.compute.conf.CONF
rpcapi_cap_opt = cfg.StrOpt('conductor',
help='Set a version cap for messages sent to conductor services')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
class ConductorAPI(object):
"""Client side of the conductor RPC API
API version history:
* 1.0 - Initial version.
* 1.1 - Added migration_update
* 1.2 - Added instance_get_by_uuid and instance_get_all_by_host
* 1.3 - Added aggregate_host_add and aggregate_host_delete
* 1.4 - Added migration_get
* 1.5 - Added bw_usage_update
* 1.6 - Added get_backdoor_port()
* 1.7 - Added aggregate_get_by_host, aggregate_metadata_add,
and aggregate_metadata_delete
* 1.8 - Added security_group_get_by_instance and
security_group_rule_get_by_security_group
* 1.9 - Added provider_fw_rule_get_all
* 1.10 - Added agent_build_get_by_triple
* 1.11 - Added aggregate_get
* 1.12 - Added block_device_mapping_update_or_create
* 1.13 - Added block_device_mapping_get_all_by_instance
* 1.14 - Added block_device_mapping_destroy
* 1.15 - Added instance_get_all_by_filters and
instance_get_all_hung_in_rebooting and
instance_get_active_by_window
Deprecated instance_get_all_by_host
* 1.16 - Added instance_destroy
* 1.17 - Added instance_info_cache_delete
* 1.18 - Added instance_type_get
* 1.19 - Added vol_get_usage_by_time and vol_usage_update
* 1.20 - Added migration_get_unconfirmed_by_dest_compute
* 1.21 - Added service_get_all_by
* 1.22 - Added ping
* 1.23 - Added instance_get_all
Un-Deprecate instance_get_all_by_host
* 1.24 - Added instance_get
* 1.25 - Added action_event_start and action_event_finish
* 1.26 - Added instance_info_cache_update
* 1.27 - Added service_create
* 1.28 - Added binary arg to service_get_all_by
* 1.29 - Added service_destroy
* 1.30 - Added migration_create
* 1.31 - Added migration_get_in_progress_by_host_and_node
* 1.32 - Added optional node to instance_get_all_by_host
* 1.33 - Added compute_node_create and compute_node_update
* 1.34 - Added service_update
* 1.35 - Added instance_get_active_by_window_joined
* 1.36 - Added instance_fault_create
* 1.37 - Added task_log_get, task_log_begin_task, task_log_end_task
* 1.38 - Added service name to instance_update
* 1.39 - Added notify_usage_exists
* 1.40 - Added security_groups_trigger_handler and
security_groups_trigger_members_refresh
Remove instance_get_active_by_window
* 1.41 - Added fixed_ip_get_by_instance, network_get,
instance_floating_address_get_all, quota_commit,
quota_rollback
* 1.42 - Added get_ec2_ids, aggregate_metadata_get_by_host
* 1.43 - Added compute_stop
* 1.44 - Added compute_node_delete
* 1.45 - Added project_id to quota_commit and quota_rollback
* 1.46 - Added compute_confirm_resize
* 1.47 - Added columns_to_join to instance_get_all_by_host and
instance_get_all_by_filters
* 1.48 - Added compute_unrescue
... Grizzly supports message version 1.48. So, any changes to existing
methods in 2.x after that point should be done such that they can
handle the version_cap being set to 1.48.
* 1.49 - Added columns_to_join to instance_get_by_uuid
* 1.50 - Added object_action() and object_class_action()
* 1.51 - Added the 'legacy' argument to
block_device_mapping_get_all_by_instance
* 1.52 - Pass instance objects for compute_confirm_resize
* 1.53 - Added compute_reboot
* 1.54 - Added 'update_cells' argument to bw_usage_update
* 1.55 - Pass instance objects for compute_stop
* 1.56 - Remove compute_confirm_resize and
migration_get_unconfirmed_by_dest_compute
* 1.57 - Remove migration_create()
* 1.58 - Remove migration_get()
... Havana supports message version 1.58. So, any changes to existing
methods in 1.x after that point should be done such that they can
handle the version_cap being set to 1.58.
* 1.59 - Remove instance_info_cache_update()
* 1.60 - Remove aggregate_metadata_add() and aggregate_metadata_delete()
* ... - Remove security_group_get_by_instance() and
security_group_rule_get_by_security_group()
* 1.61 - Return deleted instance from instance_destroy()
* 1.62 - Added object_backport()
* 1.63 - Changed the format of values['stats'] from a dict to a JSON string
in compute_node_update()
* 1.64 - Added use_slave to instance_get_all_filters()
- Remove instance_type_get()
- Remove aggregate_get()
- Remove aggregate_get_by_host()
- Remove instance_get()
- Remove migration_update()
- Remove block_device_mapping_destroy()
* 2.0 - Drop backwards compatibility
- Remove quota_rollback() and quota_commit()
- Remove aggregate_host_add() and aggregate_host_delete()
- Remove network_migrate_instance_start() and
network_migrate_instance_finish()
- Remove vol_get_usage_by_time
... Icehouse supports message version 2.0. So, any changes to
existing methods in 2.x after that point should be done such
that they can handle the version_cap being set to 2.0.
* Remove instance_destroy()
* Remove compute_unrescue()
* Remove instance_get_all_by_filters()
* Remove instance_get_active_by_window_joined()
* Remove instance_fault_create()
* Remove action_event_start() and action_event_finish()
* Remove instance_get_by_uuid()
* Remove agent_build_get_by_triple()
... Juno supports message version 2.0. So, any changes to
existing methods in 2.x after that point should be done such
that they can handle the version_cap being set to 2.0.
* 2.1 - Make notify_usage_exists() take an instance object
* Remove bw_usage_update()
* Remove notify_usage_exists()
... Kilo supports message version 2.1. So, any changes to
existing methods in 2.x after that point should be done such
that they can handle the version_cap being set to 2.1.
* Remove get_ec2_ids()
* Remove service_get_all_by()
* Remove service_create()
* Remove service_destroy()
* Remove service_update()
* Remove migration_get_in_progress_by_host_and_node()
* Remove aggregate_metadata_get_by_host()
* Remove block_device_mapping_update_or_create()
* Remove block_device_mapping_get_all_by_instance()
* Remove instance_get_all_by_host()
* Remove compute_node_update()
* Remove compute_node_delete()
* Remove security_groups_trigger_handler()
* Remove task_log_get()
* Remove task_log_begin_task()
* Remove task_log_end_task()
* Remove security_groups_trigger_members_refresh()
* Remove vol_usage_update()
* Remove instance_update()
* 2.2 - Add object_backport_versions()
* 2.3 - Add object_class_action_versions()
* Remove compute_node_create()
* Remove object_backport()
* 3.0 - Drop backwards compatibility
... Liberty supports message version 3.0. So, any changes to
existing methods in 3.x after that point should be done such
that they can handle the version_cap being set to 3.0.
* Remove provider_fw_rule_get_all()
"""
VERSION_ALIASES = {
'grizzly': '1.48',
'havana': '1.58',
'icehouse': '2.0',
'juno': '2.0',
'kilo': '2.1',
'liberty': '3.0',
}
def __init__(self):
super(ConductorAPI, self).__init__()
target = messaging.Target(topic=CONF.conductor.topic, version='3.0')
version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.conductor,
CONF.upgrade_levels.conductor)
serializer = objects_base.NovaObjectSerializer()
self.client = rpc.get_client(target,
version_cap=version_cap,
serializer=serializer)
# TODO(hanlind): This method can be removed once oslo.versionedobjects
# has been converted to use version_manifests in remotable_classmethod
# operations, which will use the new class action handler.
def object_class_action(self, context, objname, objmethod, objver,
args, kwargs):
versions = ovo_base.obj_tree_get_versions(objname)
return self.object_class_action_versions(context,
objname,
objmethod,
versions,
args, kwargs)
def object_class_action_versions(self, context, objname, objmethod,
object_versions, args, kwargs):
cctxt = self.client.prepare()
return cctxt.call(context, 'object_class_action_versions',
objname=objname, objmethod=objmethod,
object_versions=object_versions,
args=args, kwargs=kwargs)
def object_action(self, context, objinst, objmethod, args, kwargs):
cctxt = self.client.prepare()
return cctxt.call(context, 'object_action', objinst=objinst,
objmethod=objmethod, args=args, kwargs=kwargs)
def object_backport_versions(self, context, objinst, object_versions):
cctxt = self.client.prepare()
return cctxt.call(context, 'object_backport_versions', objinst=objinst,
object_versions=object_versions)
class ComputeTaskAPI(object):
"""Client side of the conductor 'compute' namespaced RPC API
API version history:
1.0 - Initial version (empty).
1.1 - Added unified migrate_server call.
1.2 - Added build_instances
1.3 - Added unshelve_instance
1.4 - Added reservations to migrate_server.
    1.5 - Added the legacy_bdm parameter to build_instances
1.6 - Made migrate_server use instance objects
1.7 - Do not send block_device_mapping and legacy_bdm to build_instances
1.8 - Add rebuild_instance
1.9 - Converted requested_networks to NetworkRequestList object
1.10 - Made migrate_server() and build_instances() send flavor objects
1.11 - Added clean_shutdown to migrate_server()
1.12 - Added request_spec to rebuild_instance()
1.13 - Added request_spec to migrate_server()
1.14 - Added request_spec to unshelve_instance()
"""
def __init__(self):
super(ComputeTaskAPI, self).__init__()
target = messaging.Target(topic=CONF.conductor.topic,
namespace='compute_task',
version='1.0')
serializer = objects_base.NovaObjectSerializer()
self.client = rpc.get_client(target, serializer=serializer)
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
flavor, block_migration, disk_over_commit,
reservations=None, clean_shutdown=True, request_spec=None):
kw = {'instance': instance, 'scheduler_hint': scheduler_hint,
'live': live, 'rebuild': rebuild, 'flavor': flavor,
'block_migration': block_migration,
'disk_over_commit': disk_over_commit,
'reservations': reservations,
'clean_shutdown': clean_shutdown,
'request_spec': request_spec,
}
version = '1.13'
if not self.client.can_send_version(version):
del kw['request_spec']
version = '1.11'
if not self.client.can_send_version(version):
del kw['clean_shutdown']
version = '1.10'
if not self.client.can_send_version(version):
kw['flavor'] = objects_base.obj_to_primitive(flavor)
version = '1.6'
if not self.client.can_send_version(version):
kw['instance'] = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
version = '1.4'
cctxt = self.client.prepare(version=version)
return cctxt.call(context, 'migrate_server', **kw)
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
security_groups, block_device_mapping, legacy_bdm=True):
image_p = jsonutils.to_primitive(image)
version = '1.10'
if not self.client.can_send_version(version):
version = '1.9'
if 'instance_type' in filter_properties:
flavor = filter_properties['instance_type']
flavor_p = objects_base.obj_to_primitive(flavor)
filter_properties = dict(filter_properties,
instance_type=flavor_p)
kw = {'instances': instances, 'image': image_p,
'filter_properties': filter_properties,
'admin_password': admin_password,
'injected_files': injected_files,
'requested_networks': requested_networks,
'security_groups': security_groups}
if not self.client.can_send_version(version):
version = '1.8'
kw['requested_networks'] = kw['requested_networks'].as_tuples()
if not self.client.can_send_version('1.7'):
version = '1.5'
bdm_p = objects_base.obj_to_primitive(block_device_mapping)
kw.update({'block_device_mapping': bdm_p,
'legacy_bdm': legacy_bdm})
cctxt = self.client.prepare(version=version)
cctxt.cast(context, 'build_instances', **kw)
def unshelve_instance(self, context, instance, request_spec=None):
version = '1.14'
kw = {'instance': instance,
'request_spec': request_spec
}
if not self.client.can_send_version(version):
version = '1.3'
del kw['request_spec']
cctxt = self.client.prepare(version=version)
cctxt.cast(context, 'unshelve_instance', **kw)
def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
image_ref, orig_image_ref, orig_sys_metadata, bdms,
recreate=False, on_shared_storage=False, host=None,
preserve_ephemeral=False, request_spec=None, kwargs=None):
version = '1.12'
kw = {'instance': instance,
'new_pass': new_pass,
'injected_files': injected_files,
'image_ref': image_ref,
'orig_image_ref': orig_image_ref,
'orig_sys_metadata': orig_sys_metadata,
'bdms': bdms,
'recreate': recreate,
'on_shared_storage': on_shared_storage,
'preserve_ephemeral': preserve_ephemeral,
'host': host,
'request_spec': request_spec,
}
if not self.client.can_send_version(version):
version = '1.8'
del kw['request_spec']
cctxt = self.client.prepare(version=version)
cctxt.cast(ctxt, 'rebuild_instance', **kw)
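# --- Editor's note: hedged observation, not part of the original module. Each cast/call
# above follows the same version-negotiation pattern: build the newest kwargs, then step
# the version down and drop (or down-convert) arguments while the peer cannot accept
# them, e.g. from migrate_server() above:
#     version = '1.13'
#     if not self.client.can_send_version(version):
#         del kw['request_spec']
#         version = '1.11'
# ---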
|
HybridF5/jacket
|
jacket/compute/conductor/rpcapi.py
|
Python
|
apache-2.0
| 16,186
|
"""Event loop using a selector and related classes.
A selector is a "notify-when-ready" multiplexer. For a subclass which
also includes support for signal handling, see the unix_events sub-module.
"""
__all__ = ['BaseSelectorEventLoop']
import collections
import errno
import functools
import socket
import warnings
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import base_events
from . import compat
from . import constants
from . import events
from . import futures
from . import selectors
from . import transports
from . import sslproto
from .coroutines import coroutine
from .log import logger
def _test_selector_event(selector, fd, event):
# Test if the selector is monitoring 'event' events
# for the file descriptor 'fd'.
try:
key = selector.get_key(fd)
except KeyError:
return False
else:
return bool(key.events & event)
class BaseSelectorEventLoop(base_events.BaseEventLoop):
"""Selector event loop.
See events.EventLoop for API specification.
"""
def __init__(self, selector=None):
super().__init__()
if selector is None:
selector = selectors.DefaultSelector()
logger.debug('Using selector: %s', selector.__class__.__name__)
self._selector = selector
self._make_self_pipe()
def _make_socket_transport(self, sock, protocol, waiter=None, *,
extra=None, server=None):
return _SelectorSocketTransport(self, sock, protocol, waiter,
extra, server)
def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter=None,
*, server_side=False, server_hostname=None,
extra=None, server=None):
if not sslproto._is_sslproto_available():
return self._make_legacy_ssl_transport(
rawsock, protocol, sslcontext, waiter,
server_side=server_side, server_hostname=server_hostname,
extra=extra, server=server)
ssl_protocol = sslproto.SSLProtocol(self, protocol, sslcontext, waiter,
server_side, server_hostname)
_SelectorSocketTransport(self, rawsock, ssl_protocol,
extra=extra, server=server)
return ssl_protocol._app_transport
def _make_legacy_ssl_transport(self, rawsock, protocol, sslcontext,
waiter, *,
server_side=False, server_hostname=None,
extra=None, server=None):
# Use the legacy API: SSL_write, SSL_read, etc. The legacy API is used
# on Python 3.4 and older, when ssl.MemoryBIO is not available.
return _SelectorSslTransport(
self, rawsock, protocol, sslcontext, waiter,
server_side, server_hostname, extra, server)
def _make_datagram_transport(self, sock, protocol,
address=None, waiter=None, extra=None):
return _SelectorDatagramTransport(self, sock, protocol,
address, waiter, extra)
def close(self):
if self.is_running():
raise RuntimeError("Cannot close a running event loop")
if self.is_closed():
return
self._close_self_pipe()
super().close()
if self._selector is not None:
self._selector.close()
self._selector = None
def _socketpair(self):
raise NotImplementedError
def _close_self_pipe(self):
self.remove_reader(self._ssock.fileno())
self._ssock.close()
self._ssock = None
self._csock.close()
self._csock = None
self._internal_fds -= 1
def _make_self_pipe(self):
# A self-socket, really. :-)
self._ssock, self._csock = self._socketpair()
self._ssock.setblocking(False)
self._csock.setblocking(False)
self._internal_fds += 1
self.add_reader(self._ssock.fileno(), self._read_from_self)
def _process_self_data(self, data):
pass
def _read_from_self(self):
while True:
try:
data = self._ssock.recv(4096)
if not data:
break
self._process_self_data(data)
except InterruptedError:
continue
except BlockingIOError:
break
def _write_to_self(self):
# This may be called from a different thread, possibly after
# _close_self_pipe() has been called or even while it is
# running. Guard for self._csock being None or closed. When
# a socket is closed, send() raises OSError (with errno set to
# EBADF, but let's not rely on the exact error code).
csock = self._csock
if csock is not None:
try:
csock.send(b'\0')
except OSError:
if self._debug:
logger.debug("Fail to write a null byte into the "
"self-pipe socket",
exc_info=True)
def _start_serving(self, protocol_factory, sock,
sslcontext=None, server=None):
self.add_reader(sock.fileno(), self._accept_connection,
protocol_factory, sock, sslcontext, server)
def _accept_connection(self, protocol_factory, sock,
sslcontext=None, server=None):
try:
conn, addr = sock.accept()
if self._debug:
logger.debug("%r got a new connection from %r: %r",
server, addr, conn)
conn.setblocking(False)
except (BlockingIOError, InterruptedError, ConnectionAbortedError):
pass # False alarm.
except OSError as exc:
# There's nowhere to send the error, so just log it.
if exc.errno in (errno.EMFILE, errno.ENFILE,
errno.ENOBUFS, errno.ENOMEM):
                # Some platforms (e.g. Linux) keep reporting the FD as
# ready, so we remove the read handler temporarily.
# We'll try again in a while.
self.call_exception_handler({
'message': 'socket.accept() out of system resource',
'exception': exc,
'socket': sock,
})
self.remove_reader(sock.fileno())
self.call_later(constants.ACCEPT_RETRY_DELAY,
self._start_serving,
protocol_factory, sock, sslcontext, server)
else:
raise # The event loop will catch, log and ignore it.
else:
extra = {'peername': addr}
accept = self._accept_connection2(protocol_factory, conn, extra,
sslcontext, server)
self.create_task(accept)
@coroutine
def _accept_connection2(self, protocol_factory, conn, extra,
sslcontext=None, server=None):
protocol = None
transport = None
try:
protocol = protocol_factory()
waiter = self.create_future()
if sslcontext:
transport = self._make_ssl_transport(
conn, protocol, sslcontext, waiter=waiter,
server_side=True, extra=extra, server=server)
else:
transport = self._make_socket_transport(
conn, protocol, waiter=waiter, extra=extra,
server=server)
try:
yield from waiter
except:
transport.close()
raise
# It's now up to the protocol to handle the connection.
except Exception as exc:
if self._debug:
context = {
'message': ('Error on transport creation '
'for incoming connection'),
'exception': exc,
}
if protocol is not None:
context['protocol'] = protocol
if transport is not None:
context['transport'] = transport
self.call_exception_handler(context)
def add_reader(self, fd, callback, *args):
"""Add a reader callback."""
self._check_closed()
handle = events.Handle(callback, args, self)
try:
key = self._selector.get_key(fd)
except KeyError:
self._selector.register(fd, selectors.EVENT_READ,
(handle, None))
else:
mask, (reader, writer) = key.events, key.data
self._selector.modify(fd, mask | selectors.EVENT_READ,
(handle, writer))
if reader is not None:
reader.cancel()
def remove_reader(self, fd):
"""Remove a reader callback."""
if self.is_closed():
return False
try:
key = self._selector.get_key(fd)
except KeyError:
return False
else:
mask, (reader, writer) = key.events, key.data
mask &= ~selectors.EVENT_READ
if not mask:
self._selector.unregister(fd)
else:
self._selector.modify(fd, mask, (None, writer))
if reader is not None:
reader.cancel()
return True
else:
return False
def add_writer(self, fd, callback, *args):
"""Add a writer callback.."""
self._check_closed()
handle = events.Handle(callback, args, self)
try:
key = self._selector.get_key(fd)
except KeyError:
self._selector.register(fd, selectors.EVENT_WRITE,
(None, handle))
else:
mask, (reader, writer) = key.events, key.data
self._selector.modify(fd, mask | selectors.EVENT_WRITE,
(reader, handle))
if writer is not None:
writer.cancel()
def remove_writer(self, fd):
"""Remove a writer callback."""
if self.is_closed():
return False
try:
key = self._selector.get_key(fd)
except KeyError:
return False
else:
mask, (reader, writer) = key.events, key.data
# Remove both writer and connector.
mask &= ~selectors.EVENT_WRITE
if not mask:
self._selector.unregister(fd)
else:
self._selector.modify(fd, mask, (reader, None))
if writer is not None:
writer.cancel()
return True
else:
return False
def sock_recv(self, sock, n):
"""Receive data from the socket.
The return value is a bytes object representing the data received.
The maximum amount of data to be received at once is specified by
nbytes.
This method is a coroutine.
"""
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
fut = self.create_future()
self._sock_recv(fut, False, sock, n)
return fut
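    # --- Editor's note: hedged usage sketch, not part of the original module. With the
    # generator-based coroutine style used throughout this file, a caller would do
    # something along the lines of:
    #     sock.setblocking(False)
    #     data = yield from loop.sock_recv(sock, 4096)
    # from inside a coroutine running on the same loop; `loop` and `sock` are placeholder
    # names, not objects defined here. ---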
def _sock_recv(self, fut, registered, sock, n):
# _sock_recv() can add itself as an I/O callback if the operation can't
# be done immediately. Don't use it directly, call sock_recv().
fd = sock.fileno()
if registered:
# Remove the callback early. It should be rare that the
# selector says the fd is ready but the call still returns
# EAGAIN, and I am willing to take a hit in that case in
# order to simplify the common case.
self.remove_reader(fd)
if fut.cancelled():
return
try:
data = sock.recv(n)
except (BlockingIOError, InterruptedError):
self.add_reader(fd, self._sock_recv, fut, True, sock, n)
except Exception as exc:
fut.set_exception(exc)
else:
fut.set_result(data)
def sock_sendall(self, sock, data):
"""Send data to the socket.
The socket must be connected to a remote socket. This method continues
to send data from data until either all data has been sent or an
error occurs. None is returned on success. On error, an exception is
raised, and there is no way to determine how much data, if any, was
successfully processed by the receiving end of the connection.
This method is a coroutine.
"""
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
fut = self.create_future()
if data:
self._sock_sendall(fut, False, sock, data)
else:
fut.set_result(None)
return fut
def _sock_sendall(self, fut, registered, sock, data):
fd = sock.fileno()
if registered:
self.remove_writer(fd)
if fut.cancelled():
return
try:
n = sock.send(data)
except (BlockingIOError, InterruptedError):
n = 0
except Exception as exc:
fut.set_exception(exc)
return
if n == len(data):
fut.set_result(None)
else:
if n:
data = data[n:]
self.add_writer(fd, self._sock_sendall, fut, True, sock, data)
def sock_connect(self, sock, address):
"""Connect to a remote socket at address.
This method is a coroutine.
"""
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
fut = self.create_future()
if hasattr(socket, 'AF_UNIX') and sock.family == socket.AF_UNIX:
self._sock_connect(fut, sock, address)
else:
resolved = base_events._ensure_resolved(
address, family=sock.family, proto=sock.proto, loop=self)
resolved.add_done_callback(
lambda resolved: self._on_resolved(fut, sock, resolved))
return fut
def _on_resolved(self, fut, sock, resolved):
try:
_, _, _, _, address = resolved.result()[0]
except Exception as exc:
fut.set_exception(exc)
else:
self._sock_connect(fut, sock, address)
def _sock_connect(self, fut, sock, address):
fd = sock.fileno()
try:
sock.connect(address)
except (BlockingIOError, InterruptedError):
# Issue #23618: When the C function connect() fails with EINTR, the
# connection runs in background. We have to wait until the socket
# becomes writable to be notified when the connection succeed or
# fails.
fut.add_done_callback(functools.partial(self._sock_connect_done,
fd))
self.add_writer(fd, self._sock_connect_cb, fut, sock, address)
except Exception as exc:
fut.set_exception(exc)
else:
fut.set_result(None)
def _sock_connect_done(self, fd, fut):
self.remove_writer(fd)
def _sock_connect_cb(self, fut, sock, address):
if fut.cancelled():
return
try:
err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
# Jump to any except clause below.
raise OSError(err, 'Connect call failed %s' % (address,))
except (BlockingIOError, InterruptedError):
# socket is still registered, the callback will be retried later
pass
except Exception as exc:
fut.set_exception(exc)
else:
fut.set_result(None)
def sock_accept(self, sock):
"""Accept a connection.
The socket must be bound to an address and listening for connections.
The return value is a pair (conn, address) where conn is a new socket
object usable to send and receive data on the connection, and address
is the address bound to the socket on the other end of the connection.
This method is a coroutine.
"""
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
fut = self.create_future()
self._sock_accept(fut, False, sock)
return fut
def _sock_accept(self, fut, registered, sock):
fd = sock.fileno()
if registered:
self.remove_reader(fd)
if fut.cancelled():
return
try:
conn, address = sock.accept()
conn.setblocking(False)
except (BlockingIOError, InterruptedError):
self.add_reader(fd, self._sock_accept, fut, True, sock)
except Exception as exc:
fut.set_exception(exc)
else:
fut.set_result((conn, address))
def _process_events(self, event_list):
for key, mask in event_list:
fileobj, (reader, writer) = key.fileobj, key.data
if mask & selectors.EVENT_READ and reader is not None:
if reader._cancelled:
self.remove_reader(fileobj)
else:
self._add_callback(reader)
if mask & selectors.EVENT_WRITE and writer is not None:
if writer._cancelled:
self.remove_writer(fileobj)
else:
self._add_callback(writer)
def _stop_serving(self, sock):
self.remove_reader(sock.fileno())
sock.close()
class _SelectorTransport(transports._FlowControlMixin,
transports.Transport):
max_size = 256 * 1024 # Buffer size passed to recv().
_buffer_factory = bytearray # Constructs initial value for self._buffer.
# Attribute used in the destructor: it must be set even if the constructor
# is not called (see _SelectorSslTransport which may start by raising an
# exception)
_sock = None
def __init__(self, loop, sock, protocol, extra=None, server=None):
super().__init__(extra, loop)
self._extra['socket'] = sock
self._extra['sockname'] = sock.getsockname()
if 'peername' not in self._extra:
try:
self._extra['peername'] = sock.getpeername()
except socket.error:
self._extra['peername'] = None
self._sock = sock
self._sock_fd = sock.fileno()
self._protocol = protocol
self._protocol_connected = True
self._server = server
self._buffer = self._buffer_factory()
self._conn_lost = 0 # Set when call to connection_lost scheduled.
self._closing = False # Set when close() called.
if self._server is not None:
self._server._attach()
def __repr__(self):
info = [self.__class__.__name__]
if self._sock is None:
info.append('closed')
elif self._closing:
info.append('closing')
info.append('fd=%s' % self._sock_fd)
# test if the transport was closed
if self._loop is not None and not self._loop.is_closed():
polling = _test_selector_event(self._loop._selector,
self._sock_fd, selectors.EVENT_READ)
if polling:
info.append('read=polling')
else:
info.append('read=idle')
polling = _test_selector_event(self._loop._selector,
self._sock_fd,
selectors.EVENT_WRITE)
if polling:
state = 'polling'
else:
state = 'idle'
bufsize = self.get_write_buffer_size()
info.append('write=<%s, bufsize=%s>' % (state, bufsize))
return '<%s>' % ' '.join(info)
def abort(self):
self._force_close(None)
def is_closing(self):
return self._closing
def close(self):
if self._closing:
return
self._closing = True
self._loop.remove_reader(self._sock_fd)
if not self._buffer:
self._conn_lost += 1
self._loop.remove_writer(self._sock_fd)
self._loop.call_soon(self._call_connection_lost, None)
    # On Python 3.3 and older, objects with a destructor that are part of a
    # reference cycle are never destroyed. That is no longer the case on
    # Python 3.4, thanks to PEP 442.
if compat.PY34:
def __del__(self):
if self._sock is not None:
warnings.warn("unclosed transport %r" % self, ResourceWarning)
self._sock.close()
def _fatal_error(self, exc, message='Fatal error on transport'):
# Should be called from exception handler only.
if isinstance(exc, base_events._FATAL_ERROR_IGNORE):
if self._loop.get_debug():
logger.debug("%r: %s", self, message, exc_info=True)
else:
self._loop.call_exception_handler({
'message': message,
'exception': exc,
'transport': self,
'protocol': self._protocol,
})
self._force_close(exc)
def _force_close(self, exc):
if self._conn_lost:
return
if self._buffer:
self._buffer.clear()
self._loop.remove_writer(self._sock_fd)
if not self._closing:
self._closing = True
self._loop.remove_reader(self._sock_fd)
self._conn_lost += 1
self._loop.call_soon(self._call_connection_lost, exc)
def _call_connection_lost(self, exc):
try:
if self._protocol_connected:
self._protocol.connection_lost(exc)
finally:
self._sock.close()
self._sock = None
self._protocol = None
self._loop = None
server = self._server
if server is not None:
server._detach()
self._server = None
def get_write_buffer_size(self):
return len(self._buffer)
class _SelectorSocketTransport(_SelectorTransport):
def __init__(self, loop, sock, protocol, waiter=None,
extra=None, server=None):
super().__init__(loop, sock, protocol, extra, server)
self._eof = False
self._paused = False
self._loop.call_soon(self._protocol.connection_made, self)
# only start reading when connection_made() has been called
self._loop.call_soon(self._loop.add_reader,
self._sock_fd, self._read_ready)
if waiter is not None:
# only wake up the waiter when connection_made() has been called
self._loop.call_soon(futures._set_result_unless_cancelled,
waiter, None)
def pause_reading(self):
if self._closing:
raise RuntimeError('Cannot pause_reading() when closing')
if self._paused:
raise RuntimeError('Already paused')
self._paused = True
self._loop.remove_reader(self._sock_fd)
if self._loop.get_debug():
logger.debug("%r pauses reading", self)
def resume_reading(self):
if not self._paused:
raise RuntimeError('Not paused')
self._paused = False
if self._closing:
return
self._loop.add_reader(self._sock_fd, self._read_ready)
if self._loop.get_debug():
logger.debug("%r resumes reading", self)
def _read_ready(self):
if self._conn_lost:
return
try:
data = self._sock.recv(self.max_size)
except (BlockingIOError, InterruptedError):
pass
except Exception as exc:
self._fatal_error(exc, 'Fatal read error on socket transport')
else:
if data:
self._protocol.data_received(data)
else:
if self._loop.get_debug():
logger.debug("%r received EOF", self)
keep_open = self._protocol.eof_received()
if keep_open:
# We're keeping the connection open so the
# protocol can write more, but we still can't
# receive more, so remove the reader callback.
self._loop.remove_reader(self._sock_fd)
else:
self.close()
def write(self, data):
if not isinstance(data, (bytes, bytearray, memoryview)):
raise TypeError('data argument must be a bytes-like object, '
'not %r' % type(data).__name__)
if self._eof:
raise RuntimeError('Cannot call write() after write_eof()')
if not data:
return
if self._conn_lost:
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
logger.warning('socket.send() raised exception.')
self._conn_lost += 1
return
if not self._buffer:
# Optimization: try to send now.
try:
n = self._sock.send(data)
except (BlockingIOError, InterruptedError):
pass
except Exception as exc:
self._fatal_error(exc, 'Fatal write error on socket transport')
return
else:
data = data[n:]
if not data:
return
# Not all was written; register write handler.
self._loop.add_writer(self._sock_fd, self._write_ready)
# Add it to the buffer.
self._buffer.extend(data)
self._maybe_pause_protocol()
def _write_ready(self):
assert self._buffer, 'Data should not be empty'
if self._conn_lost:
return
try:
n = self._sock.send(self._buffer)
except (BlockingIOError, InterruptedError):
pass
except Exception as exc:
self._loop.remove_writer(self._sock_fd)
self._buffer.clear()
self._fatal_error(exc, 'Fatal write error on socket transport')
else:
if n:
del self._buffer[:n]
self._maybe_resume_protocol() # May append to buffer.
if not self._buffer:
self._loop.remove_writer(self._sock_fd)
if self._closing:
self._call_connection_lost(None)
elif self._eof:
self._sock.shutdown(socket.SHUT_WR)
def write_eof(self):
if self._eof:
return
self._eof = True
if not self._buffer:
self._sock.shutdown(socket.SHUT_WR)
def can_write_eof(self):
return True
class _SelectorSslTransport(_SelectorTransport):
_buffer_factory = bytearray
def __init__(self, loop, rawsock, protocol, sslcontext, waiter=None,
server_side=False, server_hostname=None,
extra=None, server=None):
if ssl is None:
raise RuntimeError('stdlib ssl module not available')
if not sslcontext:
sslcontext = sslproto._create_transport_context(server_side, server_hostname)
wrap_kwargs = {
'server_side': server_side,
'do_handshake_on_connect': False,
}
if server_hostname and not server_side:
wrap_kwargs['server_hostname'] = server_hostname
sslsock = sslcontext.wrap_socket(rawsock, **wrap_kwargs)
super().__init__(loop, sslsock, protocol, extra, server)
# the protocol connection is only made after the SSL handshake
self._protocol_connected = False
self._server_hostname = server_hostname
self._waiter = waiter
self._sslcontext = sslcontext
self._paused = False
# SSL-specific extra info. (peercert is set later)
self._extra.update(sslcontext=sslcontext)
if self._loop.get_debug():
logger.debug("%r starts SSL handshake", self)
start_time = self._loop.time()
else:
start_time = None
self._on_handshake(start_time)
def _wakeup_waiter(self, exc=None):
if self._waiter is None:
return
if not self._waiter.cancelled():
if exc is not None:
self._waiter.set_exception(exc)
else:
self._waiter.set_result(None)
self._waiter = None
def _on_handshake(self, start_time):
try:
self._sock.do_handshake()
except ssl.SSLWantReadError:
self._loop.add_reader(self._sock_fd,
self._on_handshake, start_time)
return
except ssl.SSLWantWriteError:
self._loop.add_writer(self._sock_fd,
self._on_handshake, start_time)
return
except BaseException as exc:
if self._loop.get_debug():
logger.warning("%r: SSL handshake failed",
self, exc_info=True)
self._loop.remove_reader(self._sock_fd)
self._loop.remove_writer(self._sock_fd)
self._sock.close()
self._wakeup_waiter(exc)
if isinstance(exc, Exception):
return
else:
raise
self._loop.remove_reader(self._sock_fd)
self._loop.remove_writer(self._sock_fd)
peercert = self._sock.getpeercert()
if not hasattr(self._sslcontext, 'check_hostname'):
# Verify hostname if requested, Python 3.4+ uses check_hostname
# and checks the hostname in do_handshake()
if (self._server_hostname and
self._sslcontext.verify_mode != ssl.CERT_NONE):
try:
ssl.match_hostname(peercert, self._server_hostname)
except Exception as exc:
if self._loop.get_debug():
logger.warning("%r: SSL handshake failed "
"on matching the hostname",
self, exc_info=True)
self._sock.close()
self._wakeup_waiter(exc)
return
# Add extra info that becomes available after handshake.
self._extra.update(peercert=peercert,
cipher=self._sock.cipher(),
compression=self._sock.compression(),
ssl_object=self._sock,
)
self._read_wants_write = False
self._write_wants_read = False
self._loop.add_reader(self._sock_fd, self._read_ready)
self._protocol_connected = True
self._loop.call_soon(self._protocol.connection_made, self)
# only wake up the waiter when connection_made() has been called
self._loop.call_soon(self._wakeup_waiter)
if self._loop.get_debug():
dt = self._loop.time() - start_time
logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3)
def pause_reading(self):
# XXX This is a bit icky, given the comment at the top of
# _read_ready(). Is it possible to evoke a deadlock? I don't
# know, although it doesn't look like it; write() will still
# accept more data for the buffer and eventually the app will
# call resume_reading() again, and things will flow again.
if self._closing:
raise RuntimeError('Cannot pause_reading() when closing')
if self._paused:
raise RuntimeError('Already paused')
self._paused = True
self._loop.remove_reader(self._sock_fd)
if self._loop.get_debug():
logger.debug("%r pauses reading", self)
def resume_reading(self):
if not self._paused:
raise RuntimeError('Not paused')
self._paused = False
if self._closing:
return
self._loop.add_reader(self._sock_fd, self._read_ready)
if self._loop.get_debug():
logger.debug("%r resumes reading", self)
def _read_ready(self):
if self._conn_lost:
return
if self._write_wants_read:
self._write_wants_read = False
self._write_ready()
if self._buffer:
self._loop.add_writer(self._sock_fd, self._write_ready)
try:
data = self._sock.recv(self.max_size)
except (BlockingIOError, InterruptedError, ssl.SSLWantReadError):
pass
except ssl.SSLWantWriteError:
self._read_wants_write = True
self._loop.remove_reader(self._sock_fd)
self._loop.add_writer(self._sock_fd, self._write_ready)
except Exception as exc:
self._fatal_error(exc, 'Fatal read error on SSL transport')
else:
if data:
self._protocol.data_received(data)
else:
try:
if self._loop.get_debug():
logger.debug("%r received EOF", self)
keep_open = self._protocol.eof_received()
if keep_open:
logger.warning('returning true from eof_received() '
'has no effect when using ssl')
finally:
self.close()
def _write_ready(self):
if self._conn_lost:
return
if self._read_wants_write:
self._read_wants_write = False
self._read_ready()
if not (self._paused or self._closing):
self._loop.add_reader(self._sock_fd, self._read_ready)
if self._buffer:
try:
n = self._sock.send(self._buffer)
except (BlockingIOError, InterruptedError, ssl.SSLWantWriteError):
n = 0
except ssl.SSLWantReadError:
n = 0
self._loop.remove_writer(self._sock_fd)
self._write_wants_read = True
except Exception as exc:
self._loop.remove_writer(self._sock_fd)
self._buffer.clear()
self._fatal_error(exc, 'Fatal write error on SSL transport')
return
if n:
del self._buffer[:n]
self._maybe_resume_protocol() # May append to buffer.
if not self._buffer:
self._loop.remove_writer(self._sock_fd)
if self._closing:
self._call_connection_lost(None)
def write(self, data):
if not isinstance(data, (bytes, bytearray, memoryview)):
raise TypeError('data argument must be a bytes-like object, '
'not %r' % type(data).__name__)
if not data:
return
if self._conn_lost:
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
logger.warning('socket.send() raised exception.')
self._conn_lost += 1
return
if not self._buffer:
self._loop.add_writer(self._sock_fd, self._write_ready)
# Add it to the buffer.
self._buffer.extend(data)
self._maybe_pause_protocol()
def can_write_eof(self):
return False
class _SelectorDatagramTransport(_SelectorTransport):
_buffer_factory = collections.deque
def __init__(self, loop, sock, protocol, address=None,
waiter=None, extra=None):
super().__init__(loop, sock, protocol, extra)
self._address = address
self._loop.call_soon(self._protocol.connection_made, self)
# only start reading when connection_made() has been called
self._loop.call_soon(self._loop.add_reader,
self._sock_fd, self._read_ready)
if waiter is not None:
# only wake up the waiter when connection_made() has been called
self._loop.call_soon(futures._set_result_unless_cancelled,
waiter, None)
def get_write_buffer_size(self):
return sum(len(data) for data, _ in self._buffer)
def _read_ready(self):
if self._conn_lost:
return
try:
data, addr = self._sock.recvfrom(self.max_size)
except (BlockingIOError, InterruptedError):
pass
except OSError as exc:
self._protocol.error_received(exc)
except Exception as exc:
self._fatal_error(exc, 'Fatal read error on datagram transport')
else:
self._protocol.datagram_received(data, addr)
def sendto(self, data, addr=None):
if not isinstance(data, (bytes, bytearray, memoryview)):
raise TypeError('data argument must be a bytes-like object, '
'not %r' % type(data).__name__)
if not data:
return
if self._address and addr not in (None, self._address):
raise ValueError('Invalid address: must be None or %s' %
(self._address,))
if self._conn_lost and self._address:
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
logger.warning('socket.send() raised exception.')
self._conn_lost += 1
return
if not self._buffer:
# Attempt to send it right away first.
try:
if self._address:
self._sock.send(data)
else:
self._sock.sendto(data, addr)
return
except (BlockingIOError, InterruptedError):
self._loop.add_writer(self._sock_fd, self._sendto_ready)
except OSError as exc:
self._protocol.error_received(exc)
return
except Exception as exc:
self._fatal_error(exc,
'Fatal write error on datagram transport')
return
# Ensure that what we buffer is immutable.
self._buffer.append((bytes(data), addr))
self._maybe_pause_protocol()
def _sendto_ready(self):
while self._buffer:
data, addr = self._buffer.popleft()
try:
if self._address:
self._sock.send(data)
else:
self._sock.sendto(data, addr)
except (BlockingIOError, InterruptedError):
self._buffer.appendleft((data, addr)) # Try again later.
break
except OSError as exc:
self._protocol.error_received(exc)
return
except Exception as exc:
self._fatal_error(exc,
'Fatal write error on datagram transport')
return
self._maybe_resume_protocol() # May append to buffer.
if not self._buffer:
self._loop.remove_writer(self._sock_fd)
if self._closing:
self._call_connection_lost(None)
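# Illustrative sketch (not part of the original module): how the low-level
# socket methods implemented above (sock_connect, sock_sendall, sock_recv) are
# typically driven from a coroutine. The address is a made-up example, and the
# socket must be non-blocking, as the debug checks in those methods enforce.
# Old-style @coroutine/yield from syntax is used to match this module's era.
if __name__ == '__main__':
    import asyncio
    import socket

    @asyncio.coroutine
    def _echo_once(loop, addr=('127.0.0.1', 8888)):
        sock = socket.socket()
        sock.setblocking(False)            # required by the sock_* methods
        yield from loop.sock_connect(sock, addr)
        yield from loop.sock_sendall(sock, b'ping')
        data = yield from loop.sock_recv(sock, 1024)
        sock.close()
        return data

    _loop = asyncio.get_event_loop()
    try:
        print(_loop.run_until_complete(
            asyncio.wait_for(_echo_once(_loop), timeout=5)))
    except (OSError, asyncio.TimeoutError) as exc:
        print('demo connection failed:', exc)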
|
vxgmichel/asyncio
|
asyncio/selector_events.py
|
Python
|
apache-2.0
| 39,781
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
from cairis.core.armid import *
import WidgetFactory
import cairis.core.Goal
from cairis.core.Borg import Borg
from GoalEnvironmentNotebook import GoalEnvironmentNotebook
__author__ = 'Shamal Faily'
class SingleGoalPanel(wx.Panel):
def __init__(self,parent):
wx.Panel.__init__(self,parent,GOAL_ID)
b = Borg()
self.dbProxy = b.dbProxy
mainSizer = wx.BoxSizer(wx.VERTICAL)
mainSizer.Add(WidgetFactory.buildTextSizer(self,'Name',(87,30),GOAL_TEXTNAME_ID),0,wx.EXPAND)
mainSizer.Add(WidgetFactory.buildTextSizer(self,'Originator',(87,30),GOAL_TEXTORIGINATOR_ID),0,wx.EXPAND)
self.nameCtrl = self.FindWindowById(GOAL_TEXTNAME_ID)
self.notebook = GoalEnvironmentNotebook(self,self.dbProxy,True)
mainSizer.Add(self.notebook,1,wx.EXPAND)
mainSizer.Add(WidgetFactory.buildCommitButtonSizer(self,GOAL_BUTTONCOMMIT_ID,True),0,wx.CENTER)
self.definitionCtrl = self.notebook.FindWindowById(GOAL_TEXTDEFINITION_ID)
self.categoryCtrl = self.notebook.FindWindowById(GOAL_COMBOCATEGORY_ID)
self.priorityCtrl = self.notebook.FindWindowById(GOAL_COMBOPRIORITY_ID)
self.fitCriterionCtrl = self.notebook.FindWindowById(GOAL_TEXTFITCRITERION_ID)
self.issueCtrl = self.notebook.FindWindowById(GOAL_TEXTISSUE_ID)
self.goalAssociationCtrl = self.notebook.FindWindowById(GOAL_LISTGOALREFINEMENTS_ID)
self.subGoalAssociationCtrl = self.notebook.FindWindowById(GOAL_LISTSUBGOALREFINEMENTS_ID)
self.cCtrl = self.notebook.FindWindowById(GOAL_LISTCONCERNS_ID)
self.caCtrl = self.notebook.FindWindowById(GOAL_LISTCONCERNASSOCIATIONS_ID)
self.ctCtrl = self.notebook.FindWindowById(GOAL_COMBOCONTRIBUTIONTYPE_ID)
self.SetSizer(mainSizer)
|
nathanbjenx/cairis
|
cairis/gui/SingleGoalPanel.py
|
Python
|
apache-2.0
| 2,502
|
#!/usr/bin/env python
#
# Copyright (C) 2015 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from ycmd.completers.robot.robotfw_completer import RobotFrameworkCompleter
def GetCompleter( user_options ):
return RobotFrameworkCompleter( user_options )
|
istyf/robotframework-ycm
|
robot/hook.py
|
Python
|
apache-2.0
| 898
|
import os
"""
This file contains constants for various things in the app.
Don't string-match against literal values elsewhere in the app:
put constants in here and import them into other files.
Also, don't import other modules into this file; circular dependencies are bad.
"""
STATE_CHOICES = [
("FL", "Florida"),
]
COUNTRY_CHOICES = [
("US", "United States of America! YEAH!"),
]
SESSION_VARS = {
'gatorlink': 'gatorlink',
'email': 'email',
'first_name': 'first_name',
'last_name': 'last_name',
}
ADDRESS_TYPE = {
'business': 'business',
'organization': 'organization',
}
answer_submit_names = {
'question_id': 'question_id',
'choice_id': 'choice_id',
'project_id': 'project_id',
}
answer_response_names = {
'user_id': 'user_id',
'question_id': 'question_id',
'choice_id': 'choice_id',
'project_id': 'project_id',
'response_id': 'response_id',
'newly_created': 'newly_created',
}
answer_submit_names = {
'question_id': 'question_id',
'choice_id': 'choice_id',
'project_id': 'project_id',
}
projects_per_page = 25
description_factor = 25
keyword_factor = 25
title_factor = 10
big_aim_factor = 10
category_factor = 10
clinical_area_factor = 10
clinical_setting_factor = 10
api_username = 'admin_api_user'
gatorlink_header = 'Glid'
registry_host = 'http://' + os.environ['QIPR_APPROVER_REGISTRY_HOST']
registry_endpoints = {
'add_model': '/'.join([registry_host, 'add_model']),
}
app_label = 'approver'
VERSION_NUMBER = '0.4.0'
|
amberlallen/qipr_approver
|
qipr_approver/approver/constants.py
|
Python
|
apache-2.0
| 1,459
|
# Copyright 2006 James Tauber and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import DOM
from pyjamas import Window
from pyjamas.ui import Applier
def setStyleName(element, style, add):
oldStyle = DOM.getAttribute(element, "className")
if oldStyle is None:
oldStyle = ""
idx = oldStyle.find(style)
# Calculate matching index
lastPos = len(oldStyle)
while idx != -1:
if idx == 0 or (oldStyle[idx - 1] == " "):
last = idx + len(style)
if (last == lastPos) or ((last < lastPos) and (oldStyle[last] == " ")):
break
idx = oldStyle.find(style, idx + 1)
if add:
if idx == -1:
DOM.setAttribute(element, "className", oldStyle + " " + style)
    else:
        if idx != -1:
            # Strip the matched token plus the separating space before it.
            if idx == 0:
                begin = ""
            else:
                begin = oldStyle[:idx-1]
            end = oldStyle[idx + len(style):]
            DOM.setAttribute(element, "className", begin + end)
class UIObject(Applier):
def __init__(self, **kwargs):
        # Do not initialise self.element to None here, whatever you do:
        # there are circumstances where UIObject.__init__ is the last
        # thing done in a derived class, and self.setElement will
        # _already_ have been called by then.
Applier.__init__(self, **kwargs)
def getAbsoluteLeft(self):
return DOM.getAbsoluteLeft(self.getElement())
def getAbsoluteTop(self):
return DOM.getAbsoluteTop(self.getElement())
def getElement(self):
"""Get the DOM element associated with the UIObject, if any"""
return self.element
def getOffsetHeight(self):
return DOM.getIntAttribute(self.element, "offsetHeight")
def getOffsetWidth(self):
return DOM.getIntAttribute(self.element, "offsetWidth")
def getStyleName(self):
return DOM.getAttribute(self.element, "className")
def getStylePrimaryName(self):
"""Return with the first className if there are multiples"""
fullClassName = self.getStyleName()
if fullClassName: return fullClassName.split()[0]
def getTitle(self):
return DOM.getAttribute(self.element, "title")
def setElement(self, element):
"""Set the DOM element associated with the UIObject."""
self.element = element
def setHeight(self, height):
"""Set the height of the element associated with this UIObject. The
        value should be given as a CSS value, such as 100px, 30%, or 50pt."""
DOM.setStyleAttribute(self.element, "height", str(height))
def getHeight(self):
return DOM.getStyleAttribute(self.element, "height")
def setPixelSize(self, width, height):
"""Set the width and height of the element associated with this UIObject
in pixels. Width and height should be numbers."""
if width >= 0:
self.setWidth("%dpx" % width)
if height >= 0:
self.setHeight("%dpx" % height)
def setSize(self, width, height):
"""Set the width and height of the element associated with this UIObject. The
        values should be given as a CSS value, such as 100px, 30%, or 50pt."""
self.setWidth(width)
self.setHeight(height)
def addStyleName(self, style):
"""Append a style to the element associated with this UIObject. This is
a CSS class name. It will be added after any already-assigned CSS class for
the element."""
self.setStyleName(self.element, style, True)
def addStyleDependentName(self, styleSuffix):
"""Adds a secondary or dependent style name to this element.
        For example, if the primary style name is gwt-TextBox,
        self.addStyleDependentName("readonly") will add the class gwt-TextBox-readonly."""
self.addStyleName(self.getStylePrimaryName()+"-"+styleSuffix)
def removeStyleName(self, style):
"""Remove a style from the element associated with this UIObject. This is
a CSS class name."""
self.setStyleName(self.element, style, False)
def removeStyleDependentName(self, styleSuffix):
"""Remove a dependent style name by specifying the style name's suffix."""
self.removeStyleName(self.getStylePrimaryName()+"-"+styleSuffix)
# also callable as: setStyleName(self, style)
def setStyleName(self, element, style=None, add=True):
"""When called with a single argument, this replaces all the CSS classes
associated with this UIObject's element with the given parameter. Otherwise,
this is assumed to be a worker function for addStyleName and removeStyleName."""
# emulate setStyleName(self, style)
if style is None:
style = element
DOM.setAttribute(self.element, "className", style)
return
setStyleName(element, style, add)
def setTitle(self, title):
DOM.setAttribute(self.element, "title", title)
def setWidth(self, width):
"""Set the width of the element associated with this UIObject. The
        value should be given as a CSS value, such as 100px, 30%, or 50pt."""
DOM.setStyleAttribute(self.element, "width", str(width))
def getWidth(self):
return DOM.getStyleAttribute(self.element, "width")
def sinkEvents(self, eventBitsToAdd):
"""Request that the given events be delivered to the event handler for this
element. The event bits passed are added (using inclusive OR) to the events
already "sunk" for the element associated with the UIObject. The event bits
are a combination of values from class L{Event}."""
if self.element:
DOM.sinkEvents(self.getElement(), eventBitsToAdd | DOM.getEventsSunk(self.getElement()))
def setzIndex(self, index):
DOM.setIntStyleAttribute(self.element, "zIndex", index)
def isVisible(self, element=None):
"""Determine whether this element is currently visible, by checking
the CSS property 'display'
"""
if not element:
element = self.element
try: # yuk!
return element.style.display != "none"
except AttributeError: # not been set (yet?)
return True
# also callable as: setVisible(visible)
def setVisible(self, element, visible=None):
"""Set whether this element is visible or not. If a single parameter is
given, the self.element is used. This modifies the CSS property 'display',
which means that an invisible element not only is not drawn, but doesn't
occupy any space on the page."""
if visible is None:
visible = element
element = self.element
if visible:
DOM.setStyleAttribute(element, 'display', "")
else:
DOM.setStyleAttribute(element, 'display', "none")
def unsinkEvents(self, eventBitsToRemove):
"""Reverse the operation of sinkEvents. See L{UIObject.sinkevents}."""
DOM.sinkEvents(self.getElement(), ~eventBitsToRemove & DOM.getEventsSunk(self.getElement()))
|
lovelysystems/pyjamas
|
library/pyjamas/ui/UIObject.py
|
Python
|
apache-2.0
| 7,596
|
# this makes ctypes friendlier (for me, anyways)
from ctypes import *
## page permissions
PAGE_EXECUTE = 0x10
PAGE_EXECUTE_READ = 0x20
PAGE_EXECUTE_READWRITE = 0x40
PAGE_EXECUTE_WRITECOPY = 0x80
PAGE_NOACCESS = 0x01
PAGE_READONLY = 0x02
PAGE_READWRITE = 0x04
PAGE_WRITECOPY = 0x08
PAGE_GUARD = 0x100
PAGE_NOCACHE = 0x200
PAGE_WRITECOMBINE = 0x400
## process access permissions from winnt.h
DELETE = 0x00010000L
READ_CONTROL = 0x00020000L
WRITE_DAC = 0x00040000L
WRITE_OWNER = 0x00080000L
SYNCHRONIZE = 0x00100000L
ACCESS_SYSTEM_SECURITY = 0x01000000L
MAXIMUM_ALLOWED = 0x02000000L
GENERIC_READ = 0x80000000L
GENERIC_WRITE = 0x40000000L
GENERIC_EXECUTE = 0x20000000L
GENERIC_ALL = 0x10000000L
STANDARD_RIGHTS_REQUIRED = 0x000F0000L
STANDARD_RIGHTS_READ = READ_CONTROL
STANDARD_RIGHTS_WRITE = READ_CONTROL
STANDARD_RIGHTS_EXECUTE = READ_CONTROL
STANDARD_RIGHTS_ALL = 0x001F0000L
SPECIFIC_RIGHTS_ALL = 0x0000FFFFL
PROCESS_TERMINATE = 0x0001
PROCESS_CREATE_THREAD = 0x0002
PROCESS_SET_SESSIONID = 0x0004
PROCESS_VM_OPERATION = 0x0008
PROCESS_VM_READ = 0x0010
PROCESS_VM_WRITE = 0x0020
PROCESS_DUP_HANDLE = 0x0040
PROCESS_CREATE_PROCESS = 0x0080
PROCESS_SET_QUOTA = 0x0100
PROCESS_SET_INFORMATION = 0x0200
PROCESS_QUERY_INFORMATION = 0x0400
PROCESS_SUSPEND_RESUME = 0x0800
PROCESS_QUERY_LIMITED_INFORMATION = 0x1000
#PROCESS_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0xFFFF
PROCESS_VM_ALL = PROCESS_VM_OPERATION|PROCESS_VM_READ|PROCESS_VM_WRITE
PROCESS_INFO_ALL = PROCESS_QUERY_INFORMATION|PROCESS_SET_INFORMATION
THREAD_TERMINATE = 0x0001
THREAD_SUSPEND_RESUME = 0x0002
THREAD_GET_CONTEXT = 0x0008
THREAD_SET_CONTEXT = 0x0010
THREAD_QUERY_INFORMATION = 0x0040
THREAD_SET_INFORMATION = 0x0020
THREAD_SET_THREAD_TOKEN = 0x0080
THREAD_IMPERSONATE = 0x0100
THREAD_DIRECT_IMPERSONATION = 0x0200
THREAD_SET_LIMITED_INFORMATION = 0x0400 # winnt
THREAD_QUERY_LIMITED_INFORMATION = 0x0800 # winnt
THREAD_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0xFFFF
JOB_OBJECT_ASSIGN_PROCESS = 0x0001
JOB_OBJECT_SET_ATTRIBUTES = 0x0002
JOB_OBJECT_QUERY = 0x0004
JOB_OBJECT_TERMINATE = 0x0008
JOB_OBJECT_SET_SECURITY_ATTRIBUTES = 0x0010
JOB_OBJECT_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x1F
## constants for contexts
CONTEXT_i386 = 0x00010000 # this assumes that i386 and
CONTEXT_i486 = 0x00010000 # i486 have identical context records
CONTEXT_CONTROL = (CONTEXT_i386 | 0x00000001L) # SS:SP, CS:IP, FLAGS, BP
CONTEXT_INTEGER = (CONTEXT_i386 | 0x00000002L) # AX, BX, CX, DX, SI, DI
CONTEXT_SEGMENTS = (CONTEXT_i386 | 0x00000004L) # DS, ES, FS, GS
CONTEXT_FLOATING_POINT = (CONTEXT_i386 | 0x00000008L) # 387 state
CONTEXT_DEBUG_REGISTERS = (CONTEXT_i386 | 0x00000010L) # DB 0-3,6,7
CONTEXT_EXTENDED_REGISTERS = (CONTEXT_i386 | 0x00000020L) # cpu specific extensions
CONTEXT_FULL = (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS)
CONTEXT_ALL = CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS
CONTEXT_ALL |= CONTEXT_FLOATING_POINT | CONTEXT_DEBUG_REGISTERS
CONTEXT_ALL |= CONTEXT_EXTENDED_REGISTERS
## basic types
DWORD64 = c_uint64
DWORD = c_uint32
WORD = c_uint16
BYTE = c_uint8
LONG = c_long
ULONG = c_ulong
INT = c_int
UINT = c_uint
ULONGLONG = c_uint64
LONGLONG = c_int64
## complex structures
class M128A(Structure):
_fields_ = [
('Low', ULONGLONG),
('High', LONGLONG)
]
class MMX(Structure):
_fields_ = [
('Header', ARRAY(M128A, 2)),
('Legacy', ARRAY(M128A, 8)),
('Xmm0', M128A),
('Xmm1', M128A),
('Xmm2', M128A),
('Xmm3', M128A),
('Xmm4', M128A),
('Xmm5', M128A),
('Xmm6', M128A),
('Xmm7', M128A),
('Xmm8', M128A),
('Xmm9', M128A),
('Xmm10', M128A),
('Xmm11', M128A),
('Xmm12', M128A),
('Xmm13', M128A),
('Xmm14', M128A),
('Xmm15', M128A)
]
class XMM_SAVE_AREA32(Structure):
_fields_ = [
('ControlWord', WORD),
('StatusWord', WORD),
('TagWord', BYTE),
('Reserved1', BYTE),
('ErrorOpcode', WORD),
('ErrorOffset', DWORD),
('ErrorSelector', WORD),
('Reserved2', WORD),
('DataOffset', DWORD),
('DataSelector', WORD),
('Reserved3', WORD),
('MxCsr', DWORD),
('MxCsr_Mask', DWORD),
('FloatRegisters', ARRAY(M128A, 8)),
('XmmRegisters', ARRAY(M128A, 16)),
('Reserved4', ARRAY(BYTE, 96))
]
SIZE_OF_80387_REGISTERS = 80
class FLOATING_SAVE_AREA(Structure):
_fields_ = [
('ControlWord', DWORD),
('StatusWord', DWORD),
('TagWord', DWORD),
('ErrorOffset', DWORD),
('ErrorSelector', DWORD),
('DataOffset', DWORD),
('DataSelector', DWORD),
('RegisterArea', ARRAY(BYTE, SIZE_OF_80387_REGISTERS)),
('Cr0NpxState', DWORD)
]
MAXIMUM_SUPPORTED_EXTENSION = 512
class CONTEXT(Structure):
_fields_ = [
('ContextFlags', DWORD),
('Dr0', DWORD),
('Dr1', DWORD),
('Dr2', DWORD),
('Dr3', DWORD),
('Dr6', DWORD),
('Dr7', DWORD),
('FloatSave', FLOATING_SAVE_AREA),
('SegGs', DWORD),
('SegFs', DWORD),
('SegEs', DWORD),
('SegDs', DWORD),
('Edi', DWORD),
('Esi', DWORD),
('Ebx', DWORD),
('Edx', DWORD),
('Ecx', DWORD),
('Eax', DWORD),
('Ebp', DWORD),
('Eip', DWORD),
('SegCs', DWORD),
('EFlags', DWORD),
('Esp', DWORD),
('SegSs', DWORD),
('ExtendedRegisters', ARRAY(BYTE, MAXIMUM_SUPPORTED_EXTENSION))
]
## other win32 stuff
HANDLE = c_voidp
class CLIENT_ID(Structure):
_fields_ = [
('UniqueProcess', HANDLE),
('UniqueThread', HANDLE)
]
ThreadBasicInformation = 0 # _THREADINFOCLASS
KAFFINITY = KPRIORITY = c_ulong
PVOID = c_voidp
NTSTATUS = c_long
class THREAD_BASIC_INFORMATION(Structure):
_fields_ = [
('ExitStatus', NTSTATUS),
('TebBaseAddress', PVOID),
('ClientId', CLIENT_ID),
('AffinityMask', KAFFINITY),
('Priority', KPRIORITY),
('BasePriority', KPRIORITY),
]
## token structures
class LUID(Structure):
_fields_ = [
('LowPart', DWORD),
('HighPart', LONG)
]
class LUID_AND_ATTRIBUTES(Structure):
_fields_ = [
('Luid', LUID),
('Attributes', DWORD)
]
class TOKEN_PRIVILEGES(Structure):
_fields_ = [
('PrivilegeCount', ULONG),
('Privileges', LUID_AND_ATTRIBUTES*1)
]
SE_PRIVILEGE_ENABLED_BY_DEFAULT = 0x00000001
SE_PRIVILEGE_ENABLED = 0x00000002
SE_PRIVILEGE_REMOVED = 0X00000004
SE_PRIVILEGE_USED_FOR_ACCESS = 0x80000000
SE_PRIVILEGE_VALID_ATTRIBUTES = (SE_PRIVILEGE_ENABLED_BY_DEFAULT | SE_PRIVILEGE_ENABLED | SE_PRIVILEGE_REMOVED | SE_PRIVILEGE_USED_FOR_ACCESS)
PRIVILEGE_SET_ALL_NECESSARY = (1)
class PRIVILEGE_SET(Structure):
_fields_ = [
('PrivilegeCount', DWORD),
('Control', DWORD),
('Privilege', LUID_AND_ATTRIBUTES*1)
]
## token constants
TOKEN_ASSIGN_PRIMARY = 0x0001
TOKEN_DUPLICATE = 0x0002
TOKEN_IMPERSONATE = 0x0004
TOKEN_QUERY = 0x0008
TOKEN_QUERY_SOURCE = 0x0010
TOKEN_ADJUST_PRIVILEGES = 0x0020
TOKEN_ADJUST_GROUPS = 0x0040
TOKEN_ADJUST_DEFAULT = 0x0080
TOKEN_ADJUST_SESSIONID = 0x0100
TOKEN_ALL_ACCESS_P = STANDARD_RIGHTS_REQUIRED | TOKEN_ASSIGN_PRIMARY | TOKEN_DUPLICATE | TOKEN_IMPERSONATE | TOKEN_QUERY | TOKEN_QUERY_SOURCE | TOKEN_ADJUST_PRIVILEGES | TOKEN_ADJUST_GROUPS | TOKEN_ADJUST_DEFAULT
TOKEN_ALL_ACCESS = TOKEN_ALL_ACCESS_P | TOKEN_ADJUST_SESSIONID
TOKEN_READ = STANDARD_RIGHTS_READ | TOKEN_QUERY
TOKEN_WRITE = STANDARD_RIGHTS_WRITE | TOKEN_ADJUST_PRIVILEGES | TOKEN_ADJUST_GROUPS | TOKEN_ADJUST_DEFAULT
TOKEN_EXECUTE = STANDARD_RIGHTS_EXECUTE
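# Illustrative sketch (not part of the original module): the structures above
# are designed to be passed to kernel32 calls such as GetThreadContext. A real
# debugger would OpenThread/SuspendThread a target first; this snippet only
# shows how the flags and the CONTEXT structure fit together.
if __name__ == '__main__':
    ctx = CONTEXT()
    ctx.ContextFlags = CONTEXT_FULL | CONTEXT_DEBUG_REGISTERS
    print 'CONTEXT is %d bytes, ContextFlags=%#x' % (sizeof(ctx), ctx.ContextFlags)
    # On Windows, with a real (hypothetical) thread handle `hthread`:
    #     windll.kernel32.GetThreadContext(hthread, byref(ctx))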
|
arizvisa/syringe
|
lib/memorymanager/win32context.py
|
Python
|
bsd-2-clause
| 7,811
|
# ------------------------------------------------------------------------------
# Copyright (c) 2010-2013, EVEthing team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
from django.db import models
from thing.models.item import Item
from thing.models.contractseeding import ContractSeeding
class ContractSeedingItem(models.Model):
id = models.AutoField(primary_key=True)
contractseeding = models.ForeignKey(ContractSeeding, on_delete=models.DO_NOTHING)
item = models.ForeignKey(Item, on_delete=models.DO_NOTHING)
required = models.BooleanField(default=False)
min_qty = models.IntegerField()
class Meta:
app_label = 'thing'
def __unicode__(self):
return self.item.name
|
cmptrgeekken/evething
|
thing/models/contractseedingitem.py
|
Python
|
bsd-2-clause
| 2,069
|
"""
Sample Python code with syntax that's only available in Python 2.
"""
0xDeadBeefCafeL
0xDeadBeefCafel
0123
0177
# All of the print statements
print
print "x"
print "a",
print 1, 2
print 1, 2,
print 1, 2, 93
print >>x
print >>x, 1
print >>x, 1,
print >>x, 9, 8
print >>x, 9, 8, 7
exec "x=1"
exec "x=1" in x
exec "z=1" in z, y
exec "raise" in {}, globals()
# #def spam((a) = c): pass # legal in Python 2.5, not 2.6
# #def spam((((a))) = cc): pass # legal in Python 2.5, not 2.6
# def spam((a,) = c): pass
# def spam((a,b) = c): pass
# def spam((a, (b, c)) = x96): pass
# def spam((a,b,c)=x332): pass
# def spam((a,b,c,d,e,f,g,h)=x12323): pass
# backquotes (alias for repr)
# Terminal "," are not supported
`1`
`1,2`
`1,2,3`
`a,b,c,d,e,f,g,h`
# Does not work in py3k
[x for x in 1,2,3,4]
[x for x in 1,2,3,4,]
|
JohnLunzer/flexx
|
flexx/pyscript/tests/python_sample2.py
|
Python
|
bsd-2-clause
| 818
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Play one round of Tron.
Robert Xiao, Jan 31 2010
Colorization idea cribbed from Jeremy Roman (jbroman).
----------
2014 Jim Mahoney
tested with python 2.7.5
* added FPS option and slowed default animation
* changed "vis" options to "replay"
* modified replay output to match replay.html and replay.js
* added winner to replay file format
"""
import sys, re
import colorize as color
from time import sleep, clock
from player import Player, PlayerFailedException, P0BOARD, P1BOARD
from board import Board, GameBoard, BoardFile
class ErrorInRound(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
# Approximate frames-per-second to run the visualization
FPS = 30
# Speedup factor per frame (set to 0 to disable speedup)
# This can make tedious spacefilling games speed up over time
# to make them shorter.
FPS_SPEEDUP = 0.01
CHARS = { '#': '#',
'.': '.',
'*': '*',
'\x81': '1',
'\x82': '1',
'\x83': '1',
'\x84': '1',
'1': '1',
'\xa1': '2',
'\xa2': '2',
'\xa3': '2',
'\xa4': '2',
'2': '2',
' ': ' ',
'\x88': '!',
'\x89': '@',
'\x8a': '-',
'\xa8': '!',
'\xa9': '@',
'\xaa': '-'}
RST = color.reset
C1T = color.bg(color.RED)
C2T = color.bg(color.BLUE)
C1 = color.bg(color.RED+color.GREEN)
C2 = color.bg(color.BLUE+color.GREEN)
ANSICHARS = { '#': (color.inverse, " "), # game wall
'.': (C1T, " "), # player 1 trail
'*': (C2T, " "), # player 2 trail
'\x81': (C1, '/\\'), # player 1 directions
'\x83': (C1, '\\/'),
'\x84': (C1, '<<'),
'\x82': (C1, '>>'),
'1': (C1, '11'), # player 1 initial
'\xa1': (C2, "/\\"), # player 2 directions
'\xa3': (C2, "\\/"),
'\xa4': (C2, "<<"),
'\xa2': (C2, ">>"),
'2': (C2, "22"), # player 2 initial
' ': (RST, " "), # floor (open)
'\x88': (C1, ':)'), # player 1 wins
'\x89': (C1, ':('), # player 1 loses
'\x8a': (C1, ':S'), # player 1 draws
'\xa8': (C2, ':)'), # player 2 wins
'\xa9': (C2, ':('), # player 2 loses
'\xaa': (C2, ':S'),} # player 2 draws
def clear_line(board):
sys.stdout.write('\r')
sys.stdout.write(' '*board.width)
sys.stdout.write('\r')
def legend(headcolor, tailcolor, status, name):
tailcolor()
sys.stdout.write(' ')
headcolor()
sys.stdout.write(status)
color.reset()
sys.stdout.write(' '+name)
def blank_line():
sys.stdout.write('\r\n')
def print_board(board, name1, name2, ansi=False):
if ansi:
if board.diff is None:
color.clear()
for line in board.getboard():
for match in re.finditer('(.)\\1*', line):
a, b = ANSICHARS[match.group(1)]
a()
sys.stdout.write(b*len(match.group(0)))
color.reset()
blank_line()
else:
for x,y in board.diff:
color.movexy(x*2, y)
a, b = ANSICHARS[board.board[y][x]]
a()
sys.stdout.write(b)
color.reset()
color.movexy(0, board.height)
clear_line(board)
blank_line()
clear_line(board)
x, y = board.p1loc
legend(C1, C1T, ANSICHARS[board.board[y][x]][1], name1)
blank_line()
clear_line(board)
x, y = board.p2loc
legend(C2, C2T, ANSICHARS[board.board[y][x]][1],name2)
blank_line()
clear_line(board)
else:
print
for line in board.getboard():
for c in line:
sys.stdout.write(CHARS[c])
print
print '-'*board.width
def get_replayfile(fn):
if fn is None:
return None
if fn == '-':
return sys.stdout
return open(fn, 'w')
# except:
# raise ErrorInRound("couldn't open output file {}".format(fn))
def run_round(cmd1, cmd2, board,
name1="Contestant 1", name2="Contestant 2",
verbose=False, interactive=False, ansi=False, replay=False,
**kwargs):
delay = 1.0/FPS
try:
p1 = Player(cmd1, name1)
except Exception, e:
raise PlayerFailedException(name1, "Couldn't start process: "+str(e))
try:
p2 = Player(cmd2, name2)
except Exception, e:
raise PlayerFailedException(name2, "Couldn't start process: "+str(e))
gameboard = GameBoard(board)
if replay:
replayfile = get_replayfile(replay)
replayfile.write("+OK|{} {}|".format(gameboard.width, gameboard.height))
for line in gameboard.board:
replayfile.write(''.join(line).translate(P1BOARD))
replayfile.write('\n')
replayfile.write("|{}|{}|".format(name1, name2))
result = None
exception1 = None
exception2 = None
try:
while True:
if verbose:
print_board(gameboard, name1, name2, ansi)
if interactive:
raw_input("Press <enter> to continue with the next move.")
start = clock()
m1 = p1.getmove(gameboard, '1') # may set exception1
m2 = p2.getmove(gameboard, '2') # may set exception2
result = gameboard.move(m1, m2)
if replay:
replayfile.write('{}'.format(' NESW'[m1]))
replayfile.write('{}'.format(' NESW'[m2]))
total_clock = clock()-start
if verbose and total_clock < delay:
sleep(delay - total_clock)
delay *= 1 - FPS_SPEEDUP
# finished with game ?
if result != None:
break
except PlayerFailedException, e:
if str(e)[0:5] == 'bot 1':
exception1 = e
elif str(e)[0:5] == 'bot 2':
exception2 = e
else:
raise Exception("UNEXPECTED : e[0:5] != 'bot n' : " + str(e))
if exception1 != None and exception2 != None:
result = 'D' # both crashed , so game over : draw
elif exception1 != None:
result = '2' # player 1 crashed , so game over : 2 wins
elif exception2 != None:
result = '1' # player 2 crashed , so game over : 1 wins
if verbose:
if result == '1':
print_board(gameboard, name1+' (Winner)', name2, ansi)
elif result == '2':
print_board(gameboard, name1, name2+' (Winner)', ansi)
else:
print_board(gameboard, name1+' (Draw)', name2+' (Draw)', ansi)
try: p1.send_eof()
except: pass
try: p2.send_eof()
except: pass
if verbose:
# Sleep another little while to keep the game board on-screen.
sleep(0.5)
sleep(0.1)
if p1.sigterm() or p2.sigterm():
        # one of the processes hasn't quit yet
sleep(0.25)
p1.sigkill()
p2.sigkill()
if replay:
# result is winner, '1', '2', or 'D'
replayfile.write("|{}|+OK\n".format(result))
replayfile.close()
if exception1:
print "player1 crashed:"
print exception1
if exception2:
print "player2 crashed:"
print exception2
return result
if __name__ == '__main__':
import sys
from optparse import OptionParser
parser = OptionParser(usage="usage: %prog [options] <cmd1> <cmd2>\ncmd1 " +
"and/or cmd2 can be - to indicate a human player.")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
default=False, help="Show each move as it is played.")
parser.add_option("-q", "--quiet", action="store_false", dest="verbose",
help="Print only the game summary without additional information.")
parser.add_option("-i", "--interactive", action="store_true", dest="interactive",
default=False, help="Pause between moves.")
parser.add_option("-b", "--board", action="store", dest="board", default=None,
help="Board specification (default: Board(10,10) for a 10x10 board)")
parser.add_option("-B", "--board-file", action="store", dest="boardfile",
default=None, help="Board filename")
parser.add_option("--no-color", action="store_false", dest="ansi",
default=True, help="Disable colour support.")
parser.add_option("--replay", action="store", dest="replay", default=None,
help="Store data for javascript replay.html to specified " +
"filename (- for stdout)")
parser.add_option("--FPS", action="store", dest="FPS",
default=30, help="animation frames per second (default 30)")
(options, args) = parser.parse_args()
try:
FPS = int(options.FPS)
except:
pass
if options.board and options.boardfile:
parser.error("-b and -B are mutually exclusive.")
if options.board:
options.board = eval(options.board)
elif options.boardfile:
options.board = BoardFile(options.boardfile)
if len(args) == 0:
# Interactive mode selection.
import atexit
def onquit():
raw_input("Press <enter> to exit.")
atexit.register(onquit)
try:
f=open("round_default.txt", "r")
c1=f.readline().strip()
c2=f.readline().strip()
wh=f.readline().strip()
f.close()
except:
c1 = '-'
c2 = '-'
wh = '10,10'
print "Round configuration:"
print "Press <enter> to accept defaults in [brackets]."
print "Use - (a single minus sign) to denote a human player."
def get_input(prompt, default):
inp = raw_input(prompt + ' [%s]? '%default)
if not inp:
return default
return inp
c1 = get_input("Player 1 (red)", c1)
c2 = get_input("Player 2 (blue)", c2)
if not options.board:
wh = get_input("Board size (width, height) or board filename", wh)
args = [c1, c2]
if not options.board:
try:
options.board = eval("Board(%s)"%wh)
except:
options.board = BoardFile(wh)
try:
f=open("round_default.txt", "w")
print >> f, c1
print >> f, c2
print >> f, wh
f.close()
except Exception, e:
print "Warning: defaults weren't saved:", e
raw_input("Press <enter> to continue")
if options.board is None:
options.board = Board(10, 10)
if len(args) > 2:
parser.error("Too many arguments; expected two.")
if len(args) < 2:
parser.error("Too few arguments; expected two.")
cmd1 = args[0]
cmd2 = args[1]
if '-' in args:
# Having a human player implies verbose, to show the board every time.
options.verbose = True
if cmd1 == '-':
name1 = 'Human'
else:
name1 = cmd1
if cmd2 == '-':
name2 = 'Human'
else:
name2 = cmd2
if name1 == name2:
name1 += ' 1'
name2 += ' 2'
outcome = run_round(cmd1, cmd2, name1=name1, name2=name2, **options.__dict__)
print "outcome:",
if outcome == '1':
print "Player 1 wins"
elif outcome == '2':
print "Player 2 wins"
else:
print "Draw"
|
CorySpitzer/tron-engine
|
engines/round.py
|
Python
|
bsd-2-clause
| 11,949
|
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""A tiny Python program to check that Python is working.
Try running this program from the command line like this:
python hello.py
python hello.py Alice
That should print:
Hello World -or- Hello Alice
Try changing the 'Hello' to 'Howdy' and run again.
Once you have that working, you're ready for class -- you can edit
and run Python code; now you just need to learn Python!
"""
import sys
# Define a main() function that prints a little greeting.
def main():
# Get the name from the command line, using 'World' as a fallback.
if len(sys.argv) >= 2:
name = sys.argv[1]
else:
name = 'and prosper!' #student's note: no point in following instructions concerning the contents of strings: better get creative
print 'Live long', name
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
main()
|
askbow/cloaked-octo-ironman
|
google-python-exercises/hello.py
|
Python
|
bsd-2-clause
| 1,089
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, by the Pockets team, see AUTHORS.
# Licensed under the BSD License, see LICENSE for details.
"""Tests for :mod:`pockets.iterators` module."""
from __future__ import absolute_import
from unittest import TestCase
from pockets.iterators import peek_iter, modify_iter
from six import u
class BaseIteratorsTest(TestCase):
def assertEqualTwice(self, expected, func, *args):
self.assertEqual(expected, func(*args))
self.assertEqual(expected, func(*args))
def assertFalseTwice(self, func, *args):
self.assertFalse(func(*args))
self.assertFalse(func(*args))
def assertNext(self, it, expected, is_last):
self.assertTrueTwice(it.has_next)
self.assertEqualTwice(expected, it.peek)
self.assertTrueTwice(it.has_next)
self.assertEqualTwice(expected, it.peek)
self.assertTrueTwice(it.has_next)
self.assertEqual(expected, next(it))
if is_last:
self.assertFalseTwice(it.has_next)
self.assertRaisesTwice(StopIteration, it.next)
else:
self.assertTrueTwice(it.has_next)
def assertRaisesTwice(self, exc, func, *args):
self.assertRaises(exc, func, *args)
self.assertRaises(exc, func, *args)
def assertTrueTwice(self, func, *args):
self.assertTrue(func(*args))
self.assertTrue(func(*args))
class PeekIterTest(BaseIteratorsTest):
def test_init_with_sentinel(self):
a = iter(['1', '2', 'DONE'])
sentinel = 'DONE'
self.assertRaises(TypeError, peek_iter, a, sentinel)
def get_next():
return next(a)
it = peek_iter(get_next, sentinel)
self.assertEqual(it.sentinel, sentinel)
self.assertNext(it, '1', is_last=False)
self.assertNext(it, '2', is_last=True)
def test_iter(self):
a = ['1', '2', '3']
it = peek_iter(a)
self.assertTrue(it is it.__iter__())
a = []
b = [i for i in peek_iter(a)]
self.assertEqual([], b)
a = ['1']
b = [i for i in peek_iter(a)]
self.assertEqual(['1'], b)
a = ['1', '2']
b = [i for i in peek_iter(a)]
self.assertEqual(['1', '2'], b)
a = ['1', '2', '3']
b = [i for i in peek_iter(a)]
self.assertEqual(['1', '2', '3'], b)
def test_next_with_multi(self):
a = []
it = peek_iter(a)
self.assertFalseTwice(it.has_next)
self.assertRaisesTwice(StopIteration, it.next, 2)
a = ['1']
it = peek_iter(a)
self.assertTrueTwice(it.has_next)
self.assertRaisesTwice(StopIteration, it.next, 2)
self.assertTrueTwice(it.has_next)
a = ['1', '2']
it = peek_iter(a)
self.assertTrueTwice(it.has_next)
self.assertEqual(['1', '2'], it.next(2))
self.assertFalseTwice(it.has_next)
a = ['1', '2', '3']
it = peek_iter(a)
self.assertTrueTwice(it.has_next)
self.assertEqual(['1', '2'], it.next(2))
self.assertTrueTwice(it.has_next)
self.assertRaisesTwice(StopIteration, it.next, 2)
self.assertTrueTwice(it.has_next)
a = ['1', '2', '3', '4']
it = peek_iter(a)
self.assertTrueTwice(it.has_next)
self.assertEqual(['1', '2'], it.next(2))
self.assertTrueTwice(it.has_next)
self.assertEqual(['3', '4'], it.next(2))
self.assertFalseTwice(it.has_next)
self.assertRaisesTwice(StopIteration, it.next, 2)
self.assertFalseTwice(it.has_next)
def test_next_with_none(self):
a = []
it = peek_iter(a)
self.assertFalseTwice(it.has_next)
self.assertRaisesTwice(StopIteration, it.next)
self.assertFalseTwice(it.has_next)
a = ['1']
it = peek_iter(a)
self.assertEqual('1', it.__next__())
a = ['1']
it = peek_iter(a)
self.assertNext(it, '1', is_last=True)
a = ['1', '2']
it = peek_iter(a)
self.assertNext(it, '1', is_last=False)
self.assertNext(it, '2', is_last=True)
a = ['1', '2', '3']
it = peek_iter(a)
self.assertNext(it, '1', is_last=False)
self.assertNext(it, '2', is_last=False)
self.assertNext(it, '3', is_last=True)
def test_next_with_one(self):
a = []
it = peek_iter(a)
self.assertFalseTwice(it.has_next)
self.assertRaisesTwice(StopIteration, it.next, 1)
a = ['1']
it = peek_iter(a)
self.assertTrueTwice(it.has_next)
self.assertEqual(['1'], it.next(1))
self.assertFalseTwice(it.has_next)
self.assertRaisesTwice(StopIteration, it.next, 1)
a = ['1', '2']
it = peek_iter(a)
self.assertTrueTwice(it.has_next)
self.assertEqual(['1'], it.next(1))
self.assertTrueTwice(it.has_next)
self.assertEqual(['2'], it.next(1))
self.assertFalseTwice(it.has_next)
self.assertRaisesTwice(StopIteration, it.next, 1)
def test_next_with_zero(self):
a = []
it = peek_iter(a)
self.assertFalseTwice(it.has_next)
self.assertRaisesTwice(StopIteration, it.next, 0)
a = ['1']
it = peek_iter(a)
self.assertTrueTwice(it.has_next)
self.assertEqualTwice([], it.next, 0)
self.assertTrueTwice(it.has_next)
self.assertEqualTwice([], it.next, 0)
a = ['1', '2']
it = peek_iter(a)
self.assertTrueTwice(it.has_next)
self.assertEqualTwice([], it.next, 0)
self.assertTrueTwice(it.has_next)
self.assertEqualTwice([], it.next, 0)
def test_peek_with_multi(self):
a = []
it = peek_iter(a)
self.assertFalseTwice(it.has_next)
self.assertEqualTwice([it.sentinel, it.sentinel], it.peek, 2)
self.assertFalseTwice(it.has_next)
a = ['1']
it = peek_iter(a)
self.assertTrueTwice(it.has_next)
self.assertEqualTwice(['1', it.sentinel], it.peek, 2)
self.assertTrueTwice(it.has_next)
self.assertEqualTwice(['1', it.sentinel, it.sentinel], it.peek, 3)
self.assertTrueTwice(it.has_next)
a = ['1', '2']
it = peek_iter(a)
self.assertTrueTwice(it.has_next)
self.assertEqualTwice(['1', '2'], it.peek, 2)
self.assertTrueTwice(it.has_next)
self.assertEqualTwice(['1', '2', it.sentinel], it.peek, 3)
self.assertTrueTwice(it.has_next)
self.assertEqualTwice(['1', '2', it.sentinel, it.sentinel], it.peek, 4)
self.assertTrueTwice(it.has_next)
a = ['1', '2', '3']
it = peek_iter(a)
self.assertTrueTwice(it.has_next)
self.assertEqualTwice(['1', '2'], it.peek, 2)
self.assertTrueTwice(it.has_next)
self.assertEqualTwice(['1', '2', '3'], it.peek, 3)
self.assertTrueTwice(it.has_next)
self.assertEqualTwice(['1', '2', '3', it.sentinel], it.peek, 4)
self.assertTrueTwice(it.has_next)
self.assertEqual('1', next(it))
self.assertTrueTwice(it.has_next)
self.assertEqualTwice(['2', '3'], it.peek, 2)
self.assertTrueTwice(it.has_next)
self.assertEqualTwice(['2', '3', it.sentinel], it.peek, 3)
self.assertTrueTwice(it.has_next)
self.assertEqualTwice(['2', '3', it.sentinel, it.sentinel], it.peek, 4)
self.assertTrueTwice(it.has_next)
def test_peek_with_none(self):
a = []
it = peek_iter(a)
self.assertFalseTwice(it.has_next)
self.assertEqualTwice(it.sentinel, it.peek)
self.assertFalseTwice(it.has_next)
a = ['1']
it = peek_iter(a)
self.assertTrueTwice(it.has_next)
self.assertEqualTwice('1', it.peek)
self.assertEqual('1', next(it))
self.assertFalseTwice(it.has_next)
self.assertEqualTwice(it.sentinel, it.peek)
self.assertFalseTwice(it.has_next)
a = ['1', '2']
it = peek_iter(a)
self.assertTrueTwice(it.has_next)
self.assertEqualTwice('1', it.peek)
self.assertEqual('1', next(it))
self.assertTrueTwice(it.has_next)
self.assertEqualTwice('2', it.peek)
self.assertEqual('2', next(it))
self.assertFalseTwice(it.has_next)
self.assertEqualTwice(it.sentinel, it.peek)
self.assertFalseTwice(it.has_next)
def test_peek_with_one(self):
a = []
it = peek_iter(a)
self.assertFalseTwice(it.has_next)
self.assertEqualTwice([it.sentinel], it.peek, 1)
self.assertFalseTwice(it.has_next)
a = ['1']
it = peek_iter(a)
self.assertTrueTwice(it.has_next)
self.assertEqualTwice(['1'], it.peek, 1)
self.assertEqual('1', next(it))
self.assertFalseTwice(it.has_next)
self.assertEqualTwice([it.sentinel], it.peek, 1)
self.assertFalseTwice(it.has_next)
a = ['1', '2']
it = peek_iter(a)
self.assertTrueTwice(it.has_next)
self.assertEqualTwice(['1'], it.peek, 1)
self.assertEqual('1', next(it))
self.assertTrueTwice(it.has_next)
self.assertEqualTwice(['2'], it.peek, 1)
self.assertEqual('2', next(it))
self.assertFalseTwice(it.has_next)
self.assertEqualTwice([it.sentinel], it.peek, 1)
self.assertFalseTwice(it.has_next)
def test_peek_with_zero(self):
a = []
it = peek_iter(a)
self.assertFalseTwice(it.has_next)
self.assertEqualTwice([], it.peek, 0)
a = ['1']
it = peek_iter(a)
self.assertTrueTwice(it.has_next)
self.assertEqualTwice([], it.peek, 0)
self.assertTrueTwice(it.has_next)
self.assertEqualTwice([], it.peek, 0)
a = ['1', '2']
it = peek_iter(a)
self.assertTrueTwice(it.has_next)
self.assertEqualTwice([], it.peek, 0)
self.assertTrueTwice(it.has_next)
self.assertEqualTwice([], it.peek, 0)
class ModifyIterTest(BaseIteratorsTest):
def test_init_with_sentinel_args(self):
a = iter(['1', '2', '3', 'DONE'])
sentinel = 'DONE'
def get_next():
return next(a)
it = modify_iter(get_next, sentinel, int)
expected = [1, 2, 3]
self.assertEqual(expected, [i for i in it])
def test_init_with_sentinel_kwargs(self):
a = iter([1, 2, 3, 4])
sentinel = 4
def get_next():
return next(a)
it = modify_iter(get_next, sentinel, modifier=str)
expected = ['1', '2', '3']
self.assertEqual(expected, [i for i in it])
def test_modifier_default(self):
a = ['', ' ', ' a ', 'b ', ' c', ' ', '']
it = modify_iter(a)
expected = ['', ' ', ' a ', 'b ', ' c', ' ', '']
self.assertEqual(expected, [i for i in it])
def test_modifier_not_callable(self):
self.assertRaises(TypeError, modify_iter, [1], modifier='not_callable')
def test_modifier_rstrip(self):
a = ['', ' ', ' a ', 'b ', ' c', ' ', '']
it = modify_iter(a, modifier=lambda s: s.rstrip())
expected = ['', '', ' a', 'b', ' c', '', '']
self.assertEqual(expected, [i for i in it])
def test_modifier_rstrip_unicode(self):
a = [u(''), u(' '), u(' a '), u('b '), u(' c'), u(' '), u('')]
it = modify_iter(a, modifier=lambda s: s.rstrip())
expected = [u(''), u(''), u(' a'), u('b'), u(' c'), u(''), u('')]
self.assertEqual(expected, [i for i in it])
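# ---------------------------------------------------------------------------
# Behaviour summarised by the tests above (illustrative sketch only, inferred
# from the assertions; not an authoritative reference):
#
#   it = peek_iter(['1', '2'])
#   it.has_next()    # -> True   (non-destructive check)
#   it.peek()        # -> '1'    (single item, nothing consumed)
#   it.peek(3)       # -> ['1', '2', it.sentinel]   (padded with the sentinel)
#   next(it)         # -> '1'    (consumes the item)
#
#   # modify_iter applies a modifier to every item; with the callable/sentinel
#   # form it keeps pulling values until the sentinel is returned:
#   list(modify_iter(['', ' a '], modifier=lambda s: s.rstrip()))  # ['', ' a']
# ---------------------------------------------------------------------------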
|
tlatzko/spmcluster
|
.tox/docs/lib/python2.7/site-packages/tests/test_iterators.py
|
Python
|
bsd-2-clause
| 11,599
|
# -*- coding: utf-8 -*-
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright Holders: Rene Milk, Stephan Rave, Felix Schindler
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
from numbers import Number
import numpy as np
from scipy.sparse import issparse
from pymor.core import NUMPY_INDEX_QUIRK
from pymor.la.interfaces import VectorArrayInterface, VectorSpace
from pymor.tools.floatcmp import float_cmp
class NumpyVectorArray(VectorArrayInterface):
"""|VectorArray| implementation via |NumPy arrays|.
This is the default |VectorArray| type used by all |Operators|
implemented directly in pyMOR. Reduced |Operators| will also
expect |NumpyVectorArrays|.
Note that this class is just a thin wrapper around the underlying
|NumPy array|. Thus, while operations like
:meth:`~VectorArrayInterface.axpy` or :meth:`VectorArrayInterface.dot`
will be quite efficient, removing or appending vectors will
be costly.
"""
def __init__(self, instance, dtype=None, copy=False, order=None, subok=False):
assert not isinstance(instance, np.matrixlib.defmatrix.matrix)
if isinstance(instance, np.ndarray):
if copy:
self._array = instance.copy()
else:
self._array = instance
elif issparse(instance):
self._array = np.array(instance.todense(), copy=False)
elif hasattr(instance, 'data'):
self._array = instance.data
if copy:
self._array = self._array.copy()
else:
self._array = np.array(instance, dtype=dtype, copy=copy, order=order, subok=subok, ndmin=2)
if self._array.ndim != 2:
assert self._array.ndim == 1
self._array = np.reshape(self._array, (1, -1))
self._len = len(self._array)
@classmethod
def make_array(cls, subtype=None, count=0, reserve=0):
assert isinstance(subtype, Number)
assert count >= 0
assert reserve >= 0
va = NumpyVectorArray(np.empty((0, 0)))
va._array = np.zeros((max(count, reserve), subtype))
va._len = count
return va
@property
def data(self):
return self._array[:self._len]
def __len__(self):
return self._len
@property
def subtype(self):
return self._array.shape[1]
@property
def dim(self):
return self._array.shape[1]
def copy(self, ind=None):
assert self.check_ind(ind)
if NUMPY_INDEX_QUIRK and self._len == 0:
return NumpyVectorArray(self._array[:0], copy=True)
if ind is None:
return NumpyVectorArray(self._array[:self._len], copy=True)
else:
C = NumpyVectorArray(self._array[ind], copy=False)
if not C._array.flags['OWNDATA']:
C._array = np.array(C._array)
return C
def append(self, other, o_ind=None, remove_from_other=False):
assert other.check_ind(o_ind)
assert self.dim == other.dim
assert other is not self or not remove_from_other
if NUMPY_INDEX_QUIRK and other._len == 0:
o_ind = None
if o_ind is None:
len_other = other._len
if len_other <= self._array.shape[0] - self._len:
self._array[self._len:self._len + len_other] = other._array
else:
self._array = np.vstack((self._array[:self._len], other._array[:len_other]))
self._len += len_other
else:
if not hasattr(o_ind, '__len__'):
len_other = 1
o_ind = [o_ind]
else:
len_other = len(o_ind)
if len_other <= self._array.shape[0] - self._len:
other._array.take(o_ind, axis=0, out=self._array[self._len:self._len + len_other])
else:
self._array = np.append(self._array[:self._len], other._array[o_ind], axis=0)
self._len += len_other
if remove_from_other:
other.remove(o_ind)
def remove(self, ind=None):
assert self.check_ind(ind)
if ind is None:
self._array = np.zeros((0, self.dim))
self._len = 0
else:
if hasattr(ind, '__len__'):
if len(ind) == 0:
return
remaining = sorted(set(xrange(len(self))) - set(ind))
self._array = self._array[remaining]
else:
assert -self._len < ind < self._len
self._array = self._array[range(ind) + range(ind + 1, self._len)]
self._len = self._array.shape[0]
if not self._array.flags['OWNDATA']:
self._array = self._array.copy()
def replace(self, other, ind=None, o_ind=None, remove_from_other=False):
assert self.check_ind_unique(ind)
assert other.check_ind(o_ind)
assert self.dim == other.dim
assert other is not self or not remove_from_other
if NUMPY_INDEX_QUIRK:
if self._len == 0 and hasattr(ind, '__len__'):
ind = None
if other._len == 0 and hasattr(o_ind, '__len__'):
o_ind = None
if ind is None:
if o_ind is None:
if other is self:
return
assert other._len == self._len
self._array = other._array[:other._len].copy()
else:
if not hasattr(o_ind, '__len__'):
o_ind = [o_ind]
assert self._len == len(o_ind)
self._array = other._array[o_ind]
self._len = self._array.shape[0]
else:
len_ind = self.len_ind(ind)
other_array = np.array(self._array) if other is self else other._array
if o_ind is None:
assert len_ind == other._len
self._array[ind] = other_array[:other._len]
else:
len_oind = other.len_ind(o_ind)
assert len_ind == len_oind
self._array[ind] = other_array[o_ind]
assert self._array.flags['OWNDATA']
if remove_from_other:
other.remove(o_ind)
def almost_equal(self, other, ind=None, o_ind=None, rtol=None, atol=None):
assert self.check_ind(ind)
assert other.check_ind(o_ind)
assert self.dim == other.dim
if NUMPY_INDEX_QUIRK:
if self._len == 0 and hasattr(ind, '__len__'):
ind = None
if other._len == 0 and hasattr(o_ind, '__len__'):
o_ind = None
A = self._array[:self._len] if ind is None else \
self._array[ind] if hasattr(ind, '__len__') else self._array[ind:ind + 1]
B = other._array[:other._len] if o_ind is None else \
other._array[o_ind] if hasattr(o_ind, '__len__') else other._array[o_ind:o_ind + 1]
R = np.all(float_cmp(A, B, rtol=rtol, atol=atol), axis=1).squeeze()
if R.ndim == 0:
R = R[np.newaxis, ...]
return R
def scal(self, alpha, ind=None):
assert self.check_ind_unique(ind)
assert isinstance(alpha, Number) \
or isinstance(alpha, np.ndarray) and alpha.shape == (self.len_ind(ind),)
if NUMPY_INDEX_QUIRK and self._len == 0:
return
if isinstance(alpha, np.ndarray) and not isinstance(ind, Number):
alpha = alpha[:, np.newaxis]
if ind is None:
self._array[:self._len] *= alpha
else:
self._array[ind] *= alpha
def axpy(self, alpha, x, ind=None, x_ind=None):
assert self.check_ind_unique(ind)
assert x.check_ind(x_ind)
assert self.dim == x.dim
assert self.len_ind(ind) == x.len_ind(x_ind) or x.len_ind(x_ind) == 1
assert isinstance(alpha, Number) \
or isinstance(alpha, np.ndarray) and alpha.shape == (self.len_ind(ind),)
if NUMPY_INDEX_QUIRK:
if self._len == 0 and hasattr(ind, '__len__'):
ind = None
if x._len == 0 and hasattr(x_ind, '__len__'):
x_ind = None
if np.all(alpha == 0):
return
B = x._array[:x._len] if x_ind is None else x._array[x_ind]
if np.all(alpha == 1):
if ind is None:
self._array[:self._len] += B
elif isinstance(ind, Number) and B.ndim == 2:
self._array[ind] += B.reshape((B.shape[1],))
else:
self._array[ind] += B
elif np.all(alpha == -1):
if ind is None:
self._array[:self._len] -= B
elif isinstance(ind, Number) and B.ndim == 2:
self._array[ind] -= B.reshape((B.shape[1],))
else:
self._array[ind] -= B
else:
if isinstance(alpha, np.ndarray):
alpha = alpha[:, np.newaxis]
if ind is None:
self._array[:self._len] += (B * alpha)
elif isinstance(ind, Number):
self._array[ind] += (B * alpha).reshape((-1,))
else:
self._array[ind] += (B * alpha)
def dot(self, other, pairwise, ind=None, o_ind=None):
assert self.check_ind(ind)
assert other.check_ind(o_ind)
assert self.dim == other.dim
if NUMPY_INDEX_QUIRK:
if self._len == 0 and hasattr(ind, '__len__'):
ind = None
if other._len == 0 and hasattr(o_ind, '__len__'):
o_ind = None
A = self._array[:self._len] if ind is None else \
self._array[ind] if hasattr(ind, '__len__') else self._array[ind:ind + 1]
B = other._array[:other._len] if o_ind is None else \
other._array[o_ind] if hasattr(o_ind, '__len__') else other._array[o_ind:o_ind + 1]
if pairwise:
assert self.len_ind(ind) == other.len_ind(o_ind)
return np.sum(A * B, axis=1)
else:
return A.dot(B.T)
def lincomb(self, coefficients, ind=None):
assert self.check_ind(ind)
assert 1 <= coefficients.ndim <= 2
if NUMPY_INDEX_QUIRK and self._len == 0:
ind = None
if coefficients.ndim == 1:
coefficients = coefficients[np.newaxis, ...]
assert ind is None and coefficients.shape[1] == len(self) \
or not hasattr(ind, '__len__') and coefficients.shape[1] == 1 \
or hasattr(ind, '__len__') and coefficients.shape[1] == len(ind)
if ind is None:
return NumpyVectorArray(coefficients.dot(self._array[:self._len]), copy=False)
elif hasattr(ind, '__len__'):
return NumpyVectorArray(coefficients.dot(self._array[ind]), copy=False)
else:
return NumpyVectorArray(coefficients.dot(self._array[ind:ind + 1]), copy=False)
def l1_norm(self, ind=None):
assert self.check_ind(ind)
if NUMPY_INDEX_QUIRK and self._len == 0:
ind = None
A = self._array[:self._len] if ind is None else \
self._array[ind] if hasattr(ind, '__len__') else self._array[ind:ind + 1]
return np.sum(np.abs(A), axis=1)
def l2_norm(self, ind=None):
assert self.check_ind(ind)
if NUMPY_INDEX_QUIRK and self._len == 0:
ind = None
A = self._array[:self._len] if ind is None else \
self._array[ind] if hasattr(ind, '__len__') else self._array[ind:ind + 1]
return np.sum(np.power(A, 2), axis=1)**(1/2)
def components(self, component_indices, ind=None):
assert self.check_ind(ind)
assert isinstance(component_indices, list) and (len(component_indices) == 0 or min(component_indices) >= 0) \
or (isinstance(component_indices, np.ndarray) and component_indices.ndim == 1
and (len(component_indices) == 0 or np.min(component_indices) >= 0))
# NumPy 1.9 is quite permissive when indexing arrays of size 0, so we have to add the
# following check:
assert self._len > 0 \
or (isinstance(component_indices, list)
and (len(component_indices) == 0 or max(component_indices) < self.dim)) \
or (isinstance(component_indices, np.ndarray) and component_indices.ndim == 1
and (len(component_indices) == 0 or np.max(component_indices) < self.dim))
if NUMPY_INDEX_QUIRK and (self._len == 0 or self.dim == 0):
assert isinstance(component_indices, list) \
and (len(component_indices) == 0 or max(component_indices) < self.dim) \
or isinstance(component_indices, np.ndarray) \
and component_indices.ndim == 1 \
and (len(component_indices) == 0 or np.max(component_indices) < self.dim)
return np.zeros((self.len_ind(ind), len(component_indices)))
if ind is None:
return self._array[:self._len, component_indices]
else:
if not hasattr(ind, '__len__'):
ind = [ind]
return self._array[:, component_indices][ind, :]
def amax(self, ind=None):
assert self.dim > 0
assert self.check_ind(ind)
if NUMPY_INDEX_QUIRK and self._len == 0:
ind = None
if self._array.shape[1] == 0:
l = self.len_ind(ind)
return np.ones(l) * -1, np.zeros(l)
A = self._array[:self._len] if ind is None else \
self._array[ind] if hasattr(ind, '__len__') else self._array[ind:ind + 1]
A = np.abs(A)
max_ind = np.argmax(A, axis=1)
max_val = A[np.arange(len(A)), max_ind]
return max_ind, max_val
def __str__(self):
return self._array[:self._len].__str__()
def __repr__(self):
return 'NumpyVectorArray({})'.format(self._array[:self._len].__str__())
def NumpyVectorSpace(dim):
"""Shorthand for |VectorSpace| `(NumpyVectorArray, dim)`."""
return VectorSpace(NumpyVectorArray, dim)
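# ---------------------------------------------------------------------------
# Minimal usage sketch of the wrapper defined above (illustrative only; the
# input array and the coefficients are made up for the example):
#
#   U = NumpyVectorArray(np.arange(6, dtype=float).reshape((2, 3)))
#   len(U)                          # 2 vectors ...
#   U.dim                           # ... of dimension 3
#   U.l2_norm()                     # Euclidean norm of each vector
#   U.scal(2.0)                     # scale every vector in place
#   U.axpy(1.0, U.copy())           # U <- U + 1.0 * (copy of U)
#   mean = U.lincomb(np.array([0.5, 0.5]))   # single vector: mean of the rows
#
# As noted in the class docstring, such entrywise operations are cheap, while
# append/remove may reallocate the underlying array.
# ---------------------------------------------------------------------------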
|
michaellaier/pymor
|
src/pymor/la/numpyvectorarray.py
|
Python
|
bsd-2-clause
| 14,173
|
# Copyright (c) 2012-2017, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
import collections
import json
import re
import sys
import types
from . import validators
__version__ = "1.9.3"
# constants for DeletionPolicy
Delete = 'Delete'
Retain = 'Retain'
Snapshot = 'Snapshot'
# Pseudo Parameters
AWS_ACCOUNT_ID = 'AWS::AccountId'
AWS_NOTIFICATION_ARNS = 'AWS::NotificationARNs'
AWS_NO_VALUE = 'AWS::NoValue'
AWS_REGION = 'AWS::Region'
AWS_STACK_ID = 'AWS::StackId'
AWS_STACK_NAME = 'AWS::StackName'
# Template Limits
MAX_PARAMETERS = 60
MAX_RESOURCES = 200
PARAMETER_TITLE_MAX = 255
valid_names = re.compile(r'^[a-zA-Z0-9]+$')
def is_aws_object_subclass(cls):
is_aws_object = False
try:
is_aws_object = issubclass(cls, BaseAWSObject)
# prop_type isn't a class
except TypeError:
pass
return is_aws_object
def encode_to_dict(obj):
if hasattr(obj, 'to_dict'):
# Calling encode_to_dict to ensure object is
        # normalized to a base dictionary all the way down.
return encode_to_dict(obj.to_dict())
elif isinstance(obj, (list, tuple)):
new_lst = []
for o in list(obj):
new_lst.append(encode_to_dict(o))
return new_lst
elif isinstance(obj, dict):
props = {}
for name, prop in obj.items():
props[name] = encode_to_dict(prop)
return props
# This is useful when dealing with external libs using
# this format. Specifically awacs.
elif hasattr(obj, 'JSONrepr'):
return encode_to_dict(obj.JSONrepr())
return obj
class BaseAWSObject(object):
def __init__(self, title, template=None, **kwargs):
self.title = title
self.template = template
# Cache the keys for validity checks
self.propnames = self.props.keys()
self.attributes = ['DependsOn', 'DeletionPolicy',
'Metadata', 'UpdatePolicy',
'Condition', 'CreationPolicy']
        # try to validate the title if it's there
if self.title:
self.validate_title()
        # Create the dict of properties set on this object by the user
self.properties = {}
dictname = getattr(self, 'dictname', None)
if dictname:
self.resource = {
dictname: self.properties,
}
else:
self.resource = self.properties
if hasattr(self, 'resource_type') and self.resource_type is not None:
self.resource['Type'] = self.resource_type
self.__initialized = True
# Check for properties defined in the class
for k, (_, required) in self.props.items():
v = getattr(type(self), k, None)
if v is not None and k not in kwargs:
self.__setattr__(k, v)
# Now that it is initialized, populate it with the kwargs
for k, v in kwargs.items():
self.__setattr__(k, v)
        # Bind it to the template if we know it
if self.template is not None:
self.template.add_resource(self)
def __getattr__(self, name):
try:
return self.properties.__getitem__(name)
except KeyError:
# Fall back to the name attribute in the object rather than
# in the properties dict. This is for non-OpenStack backwards
# compatibility since OpenStack objects use a "name" property.
if name == 'name':
return self.__getattribute__('title')
raise AttributeError(name)
def __setattr__(self, name, value):
if name in self.__dict__.keys() \
or '_BaseAWSObject__initialized' not in self.__dict__:
return dict.__setattr__(self, name, value)
elif name in self.attributes:
self.resource[name] = value
return None
elif name in self.propnames:
# Check the type of the object and compare against what we were
# expecting.
expected_type = self.props[name][0]
            # If the value is an AWSHelperFn we can't do much validation
# we'll have to leave that to Amazon. Maybe there's another way
# to deal with this that we'll come up with eventually
if isinstance(value, AWSHelperFn):
return self.properties.__setitem__(name, value)
# If it's a function, call it...
elif isinstance(expected_type, types.FunctionType):
try:
value = expected_type(value)
except Exception:
sys.stderr.write(
"%s: %s.%s function validator '%s' threw "
"exception:\n" % (self.__class__,
self.title,
name,
expected_type.__name__))
raise
return self.properties.__setitem__(name, value)
# If it's a list of types, check against those types...
elif isinstance(expected_type, list):
# If we're expecting a list, then make sure it is a list
if not isinstance(value, list):
self._raise_type(name, value, expected_type)
# Iterate over the list and make sure it matches our
                # type checks (as above, except AWSHelperFn, because
# we can't do the validation ourselves)
for v in value:
if not isinstance(v, tuple(expected_type)) \
and not isinstance(v, AWSHelperFn):
self._raise_type(name, v, expected_type)
# Validated so assign it
return self.properties.__setitem__(name, value)
# Final validity check, compare the type of value against
# expected_type which should now be either a single type or
# a tuple of types.
elif isinstance(value, expected_type):
return self.properties.__setitem__(name, value)
else:
self._raise_type(name, value, expected_type)
type_name = getattr(self, 'resource_type', self.__class__.__name__)
if type_name == 'AWS::CloudFormation::CustomResource' or \
type_name.startswith('Custom::'):
# Add custom resource arguments to the dict without any further
            # validation. The properties of a CustomResource are not known.
return self.properties.__setitem__(name, value)
raise AttributeError("%s object does not support attribute %s" %
(type_name, name))
def _raise_type(self, name, value, expected_type):
raise TypeError('%s: %s.%s is %s, expected %s' % (self.__class__,
self.title,
name,
type(value),
expected_type))
def validate_title(self):
if not valid_names.match(self.title):
raise ValueError('Name "%s" not alphanumeric' % self.title)
def validate(self):
pass
def to_dict(self):
self._validate_props()
self.validate()
if self.properties:
return encode_to_dict(self.resource)
elif hasattr(self, 'resource_type'):
d = {}
for k, v in self.resource.items():
if k != 'Properties':
d[k] = v
return d
else:
return {}
@classmethod
def _from_dict(cls, title=None, **kwargs):
props = {}
for prop_name, value in kwargs.items():
try:
prop_attrs = cls.props[prop_name]
except KeyError:
raise AttributeError("Object type %s does not have a "
"%s property." % (cls.__name__,
prop_name))
prop_type = prop_attrs[0]
value = kwargs[prop_name]
is_aws_object = is_aws_object_subclass(prop_type)
if is_aws_object:
if not isinstance(value, collections.Mapping):
raise ValueError("Property definition for %s must be "
"a Mapping type" % prop_name)
value = prop_type._from_dict(**value)
if isinstance(prop_type, list):
if not isinstance(value, list):
raise TypeError("Attribute %s must be a "
"list." % prop_name)
new_value = []
for v in value:
new_v = v
if is_aws_object_subclass(prop_type[0]):
if not isinstance(v, collections.Mapping):
raise ValueError(
"Property definition for %s must be "
"a list of Mapping types" % prop_name)
new_v = prop_type[0]._from_dict(**v)
new_value.append(new_v)
value = new_value
props[prop_name] = value
if title:
return cls(title, **props)
return cls(**props)
@classmethod
def from_dict(cls, title, d):
return cls._from_dict(title, **d)
def _validate_props(self):
for k, (_, required) in self.props.items():
if required and k not in self.properties:
rtype = getattr(self, 'resource_type', "<unknown type>")
title = getattr(self, 'title')
msg = "Resource %s required in type %s" % (k, rtype)
if title:
msg += " (title: %s)" % title
raise ValueError(msg)
class AWSObject(BaseAWSObject):
dictname = 'Properties'
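# ---------------------------------------------------------------------------
# Sketch of the property-validation mechanics implemented in
# BaseAWSObject.__setattr__ above. The Widget class and the "Custom::Widget"
# type name below are hypothetical and only serve as an illustration:
#
#   class Widget(AWSObject):
#       resource_type = "Custom::Widget"                    # hypothetical
#       props = {
#           'Size': (validators.positive_integer, True),    # required
#           'Name': (basestring, False),                     # optional
#       }
#
#   w = Widget("MyWidget", Size=3)   # 'Size' is run through its validator
#   w.to_dict()                      # {'Type': 'Custom::Widget',
#                                    #  'Properties': {'Size': 3}}
#   Widget("Bad", Size="big")        # fails validation; the validator's
#                                    # exception is reported and re-raised
# ---------------------------------------------------------------------------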
class AWSDeclaration(BaseAWSObject):
"""
Used for CloudFormation Resource Property objects
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/
aws-product-property-reference.html
"""
def __init__(self, title, **kwargs):
super(AWSDeclaration, self).__init__(title, **kwargs)
class AWSProperty(BaseAWSObject):
"""
Used for CloudFormation Resource Property objects
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/
aws-product-property-reference.html
"""
dictname = None
def __init__(self, title=None, **kwargs):
super(AWSProperty, self).__init__(title, **kwargs)
class AWSAttribute(BaseAWSObject):
    """
    Used for CloudFormation Resource Attribute objects
    http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/
    aws-product-attribute-reference.html
    """
    dictname = None
def __init__(self, title=None, **kwargs):
super(AWSAttribute, self).__init__(title, **kwargs)
def validate_delimiter(delimiter):
if not isinstance(delimiter, basestring):
raise ValueError(
"Delimiter must be a String, %s provided" % type(delimiter)
)
def validate_pausetime(pausetime):
if not pausetime.startswith('PT'):
raise ValueError('PauseTime should look like PT#H#M#S')
return pausetime
class UpdatePolicy(BaseAWSObject):
def __init__(self, title, **kwargs):
raise DeprecationWarning(
"This UpdatePolicy class is deprecated, please switch to using "
"the more general UpdatePolicy in troposphere.policies.\n"
)
class AWSHelperFn(object):
def getdata(self, data):
if isinstance(data, BaseAWSObject):
return data.title
else:
return data
def to_dict(self):
return encode_to_dict(self.data)
class GenericHelperFn(AWSHelperFn):
""" Used as a fallback for the template generator """
def __init__(self, data):
self.data = self.getdata(data)
def to_dict(self):
return encode_to_dict(self.data)
class Base64(AWSHelperFn):
def __init__(self, data):
self.data = {'Fn::Base64': data}
class FindInMap(AWSHelperFn):
def __init__(self, mapname, key, value):
self.data = {'Fn::FindInMap': [self.getdata(mapname), key, value]}
class GetAtt(AWSHelperFn):
def __init__(self, logicalName, attrName):
self.data = {'Fn::GetAtt': [self.getdata(logicalName), attrName]}
class GetAZs(AWSHelperFn):
def __init__(self, region=""):
self.data = {'Fn::GetAZs': region}
class If(AWSHelperFn):
def __init__(self, cond, true, false):
self.data = {'Fn::If': [self.getdata(cond), true, false]}
class Equals(AWSHelperFn):
def __init__(self, value_one, value_two):
self.data = {'Fn::Equals': [value_one, value_two]}
class And(AWSHelperFn):
def __init__(self, cond_one, cond_two, *conds):
self.data = {'Fn::And': [cond_one, cond_two] + list(conds)}
class Or(AWSHelperFn):
def __init__(self, cond_one, cond_two, *conds):
self.data = {'Fn::Or': [cond_one, cond_two] + list(conds)}
class Not(AWSHelperFn):
def __init__(self, cond):
self.data = {'Fn::Not': [self.getdata(cond)]}
class Join(AWSHelperFn):
def __init__(self, delimiter, values):
validate_delimiter(delimiter)
self.data = {'Fn::Join': [delimiter, values]}
class Split(AWSHelperFn):
def __init__(self, delimiter, values):
validate_delimiter(delimiter)
self.data = {'Fn::Split': [delimiter, values]}
class Sub(AWSHelperFn):
def __init__(self, input_str, **values):
self.data = {'Fn::Sub': [input_str, values] if values else input_str}
class Name(AWSHelperFn):
def __init__(self, data):
self.data = self.getdata(data)
class Select(AWSHelperFn):
def __init__(self, indx, objects):
self.data = {'Fn::Select': [indx, objects]}
class Ref(AWSHelperFn):
def __init__(self, data):
self.data = {'Ref': self.getdata(data)}
class Condition(AWSHelperFn):
def __init__(self, data):
self.data = {'Condition': self.getdata(data)}
class ImportValue(AWSHelperFn):
def __init__(self, data):
self.data = {'Fn::ImportValue': data}
class Tags(AWSHelperFn):
def __init__(self, *args, **kwargs):
if not args:
# Assume kwargs variant
tag_dict = kwargs
else:
if len(args) != 1:
                raise TypeError("Multiple non-kwargs passed to Tags")
# Validate single argument passed in is a dict
if not isinstance(args[0], dict):
                raise TypeError("Tags needs to be either kwargs or dict")
tag_dict = args[0]
self.tags = []
for k, v in sorted(tag_dict.iteritems()):
self.tags.append({
'Key': k,
'Value': v,
})
# allow concatenation of the Tags object via '+' operator
def __add__(self, newtags):
newtags.tags = self.tags + newtags.tags
return newtags
def to_dict(self):
return [encode_to_dict(tag) for tag in self.tags]
class Template(object):
props = {
'AWSTemplateFormatVersion': (basestring, False),
'Description': (basestring, False),
'Parameters': (dict, False),
'Mappings': (dict, False),
'Resources': (dict, False),
'Outputs': (dict, False),
}
def __init__(self, Description=None, Metadata=None):
self.description = Description
self.metadata = {} if Metadata is None else Metadata
self.conditions = {}
self.mappings = {}
self.outputs = {}
self.parameters = {}
self.resources = {}
self.version = None
def add_description(self, description):
self.description = description
def add_metadata(self, metadata):
self.metadata = metadata
def add_condition(self, name, condition):
self.conditions[name] = condition
def handle_duplicate_key(self, key):
raise ValueError('duplicate key "%s" detected' % key)
def _update(self, d, values):
if isinstance(values, list):
for v in values:
if v.title in d:
self.handle_duplicate_key(v.title)
d[v.title] = v
else:
if values.title in d:
self.handle_duplicate_key(values.title)
d[values.title] = values
return values
def add_output(self, output):
return self._update(self.outputs, output)
def add_mapping(self, name, mapping):
self.mappings[name] = mapping
def add_parameter(self, parameter):
if len(self.parameters) >= MAX_PARAMETERS:
raise ValueError('Maximum parameters %d reached' % MAX_PARAMETERS)
return self._update(self.parameters, parameter)
def add_resource(self, resource):
if len(self.resources) >= MAX_RESOURCES:
raise ValueError('Maximum number of resources %d reached'
% MAX_RESOURCES)
return self._update(self.resources, resource)
def add_version(self, version=None):
if version:
self.version = version
else:
self.version = "2010-09-09"
def to_dict(self):
t = {}
if self.description:
t['Description'] = self.description
if self.metadata:
t['Metadata'] = self.metadata
if self.conditions:
t['Conditions'] = self.conditions
if self.mappings:
t['Mappings'] = self.mappings
if self.outputs:
t['Outputs'] = self.outputs
if self.parameters:
t['Parameters'] = self.parameters
if self.version:
t['AWSTemplateFormatVersion'] = self.version
t['Resources'] = self.resources
return encode_to_dict(t)
def to_json(self, indent=4, sort_keys=True, separators=(',', ': ')):
return json.dumps(self.to_dict(), indent=indent,
sort_keys=sort_keys, separators=separators)
class Export(AWSHelperFn):
def __init__(self, name):
self.data = {
'Name': name,
}
class Output(AWSDeclaration):
props = {
'Description': (basestring, False),
'Export': (Export, False),
'Value': (basestring, True),
}
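# ---------------------------------------------------------------------------
# Minimal end-to-end sketch combining Template, Output, Ref and Parameter
# (defined just below); the titles and values are illustrative only:
#
#   t = Template()
#   t.add_description("Minimal template")
#   t.add_version()                                   # 2010-09-09
#   key = t.add_parameter(Parameter("KeyName", Type="String"))
#   t.add_output(Output("KeyNameEcho", Value=Ref(key)))
#   print(t.to_json())
#
# Ref(key) serialises to {"Ref": "KeyName"}, and to_json() emits Description,
# Parameters, Outputs, Resources and AWSTemplateFormatVersion.
# ---------------------------------------------------------------------------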
class Parameter(AWSDeclaration):
STRING_PROPERTIES = ['AllowedPattern', 'MaxLength', 'MinLength']
NUMBER_PROPERTIES = ['MaxValue', 'MinValue']
props = {
'Type': (basestring, True),
'Default': (basestring, False),
'NoEcho': (bool, False),
'AllowedValues': (list, False),
'AllowedPattern': (basestring, False),
'MaxLength': (validators.positive_integer, False),
'MinLength': (validators.positive_integer, False),
'MaxValue': (validators.integer, False),
'MinValue': (validators.integer, False),
'Description': (basestring, False),
'ConstraintDescription': (basestring, False),
}
def validate_title(self):
if len(self.title) > PARAMETER_TITLE_MAX:
raise ValueError("Parameter title can be no longer than "
"%d characters" % PARAMETER_TITLE_MAX)
super(Parameter, self).validate_title()
def validate(self):
if self.properties['Type'] != 'String':
for p in self.STRING_PROPERTIES:
if p in self.properties:
raise ValueError("%s can only be used with parameters of "
"the String type." % p)
if self.properties['Type'] != 'Number':
for p in self.NUMBER_PROPERTIES:
if p in self.properties:
raise ValueError("%s can only be used with parameters of "
"the Number type." % p)
|
horacio3/troposphere
|
troposphere/__init__.py
|
Python
|
bsd-2-clause
| 20,073
|
import sys
import unittest
from assertEquals.cli.reporters import detail, _Summarize
from assertEquals.tests.utils import reportersTestCase
OUTPUT_START="""\
-------------------------------<| assertEquals |>-------------------------------
.EF..
======================================================================
ERROR: test_errs (assertEqualsTests.TestCase)
----------------------------------------------------------------------
Traceback (most recent call last):"""; """
<snip>
StandardError: heck
======================================================================
FAIL: test_fails (assertEqualsTests.TestCase)
----------------------------------------------------------------------
Traceback (most recent call last):
<snip>
AssertionError
----------------------------------------------------------------------
Ran 5 tests in 0.002s"""; OUTPUT_END="""
FAILED (failures=1, errors=1)
"""
REPORT_SUCCESS = """\
-------------------------------<| assertEquals |>-------------------------------
..
----------------------------------------------------------------------
Ran 2 tests in 0.000s
OK
"""
class Detail(reportersTestCase):
def testOnlyModuleNoTestCaseTriggersNameError(self):
self.assertRaises(TypeError, detail, 'needsADot')
def testBadModuleTriggersImportError(self):
self.assertRaises(ImportError, detail, 'probablyDoesntExist', 'TestCase')
def testBadTestCaseNameAlsoTriggersImportError(self):
self.assertRaises(ImportError, detail, 'assertEqualsTests', 'ToastCase')
def testBadTestCaseTriggersTypeError(self):
self.assertRaises(TypeError, detail, 'assertEqualsTests', 'itDoesExist')
def testReturnsNormalUnitTestOutputWithOurBanner(self):
actual = detail('assertEqualsTests', 'TestCase')
start = actual[:len(OUTPUT_START)]
end = actual[-len(OUTPUT_END):]
self.assertEqual(start, OUTPUT_START)
self.assertEqual(end, OUTPUT_END)
def testDoesntContainProgramOutput(self):
actual = detail('assertEqualsTests', 'TestCase')
start = actual[:len(OUTPUT_START)]
end = actual[-len(OUTPUT_END):]
self.assertEqual(start, OUTPUT_START)
self.assertEqual(end, OUTPUT_END)
def testTestCaseInSubmodulesWorks(self):
expected = REPORT_SUCCESS
actual = detail('assertEqualsTests.itDoesExist', 'TestCase')
self.assertEqual(expected, actual)
HEADER = """\
-------------------------------<| assertEquals |>-------------------------------
MODULE PASS FAIL ERR ALL
--------------------------------------------------------------------------------
"""
BODY = """\
assertEqualsTests.TestCase 60% 1 1 5
assertEqualsTests.itDoesExist.TestCase 100% 0 0 2
assertEqualsTests.itDoesExist.TestCase2 100% 0 0 1
assertEqualsTests.subpkg.TestCase 100% 0 0 2
"""
BODY_FIND = """\
assertEqualsTests.TestCase - - - 5
assertEqualsTests.itDoesExist.TestCase - - - 2
assertEqualsTests.itDoesExist.TestCase2 - - - 1
assertEqualsTests.subpkg.TestCase - - - 2
"""
BODY_DOTTED_RUN_VERBOSE = """\
assertEqualsTests.itDoesExist.TestCase 100% 0 0 2
assertEqualsTests.itDoesExist.TestCase2 100% 0 0 1
"""
TOTALS_BASIC = """\
--------------------------------------------------------------------------------
TOTALS 50% 4 5 18
"""
TOTALS_BASIC_NO_RUN = """\
--------------------------------------------------------------------------------
TOTALS - - - 18
"""
TOTALS_ZERO = """\
--------------------------------------------------------------------------------
TOTALS 0% 0 0 0
"""
TOTALS_ZERO_NO_RUN = """\
--------------------------------------------------------------------------------
TOTALS - - - 0
"""
TOTALS_ZERO_PERCENT = """\
--------------------------------------------------------------------------------
TOTALS 0% 5 5 10
"""
TOTALS_ZERO_PERCENT_NO_RUN = """\
--------------------------------------------------------------------------------
TOTALS - - - 10
"""
TOTALS_ALL_PASSING = """\
--------------------------------------------------------------------------------
TOTALS 100% 0 0 10
"""
TOTALS_ALL_PASSING_NO_RUN = """\
--------------------------------------------------------------------------------
TOTALS - - - 10
"""
TOTALS_SUMMARIZE = """\
--------------------------------------------------------------------------------
TOTALS 80% 1 1 10
"""
SUMMARIZE = HEADER + BODY + TOTALS_SUMMARIZE
class Summary(reportersTestCase):
def setUpUp(self):
self.summarize = _Summarize()
self.summarize.module = 'assertEqualsTests'
self.summarize.find_only = False
self.summarize.stopwords = ()
# __call__
# ========
def testSummarize(self):
expected = SUMMARIZE
actual = self.summarize('assertEqualsTests')
self.assertEqual(expected, actual)
def testTestCaseTriggersImportError(self):
self.assertRaises(ImportError, self.summarize, 'assertEqualsTests.TestCase')
# load_testcases
# ==============
def testLoadTestCases(self):
mod = __import__('assertEqualsTests')
expected = [('assertEqualsTests.TestCase', mod.TestCase)]
actual = self.summarize.load_testcases(mod)
self.assertEqual(expected, actual)
def testLoadTestCasesDottedAndMultiple(self):
mod = __import__('assertEqualsTests.itDoesExist')
expected = [ ( 'assertEqualsTests.itDoesExist.TestCase'
, mod.itDoesExist.TestCase
)
, ( 'assertEqualsTests.itDoesExist.TestCase2'
, mod.itDoesExist.TestCase2
)
]
actual = self.summarize.load_testcases(mod.itDoesExist)
self.assertEqual(expected, actual)
def testLoadTestCasesOnlyIfTheyHaveTests(self):
mod = __import__('assertEqualsTests.subpkg')
reload(mod.subpkg)
expected = [ ( 'assertEqualsTests.subpkg.TestCase'
, mod.subpkg.TestCase
)
]
actual = self.summarize.load_testcases(mod.subpkg)
self.assertEqual(expected, actual)
self.setUp()
# find_testcases
# ==============
def testFindTestCases(self):
self.summarize.module = 'assertEqualsTests'
self.summarize.find_testcases()
mod = __import__('assertEqualsTests')
expected = [ ( 'assertEqualsTests.TestCase'
, mod.TestCase
)
, ( 'assertEqualsTests.itDoesExist.TestCase'
, mod.itDoesExist.TestCase
)
, ( 'assertEqualsTests.itDoesExist.TestCase2'
, mod.itDoesExist.TestCase2
)
, ( 'assertEqualsTests.subpkg.TestCase'
, mod.subpkg.TestCase
)
]
actual = self.summarize._Summarize__testcases
self.assertEqual(expected, actual)
def testFindTestCasesStopWords(self):
self.summarize.module = 'assertEqualsTests'
self.summarize.stopwords = ('Does',)
self.summarize.find_testcases()
mod = __import__('assertEqualsTests')
expected = [ ('assertEqualsTests.TestCase', mod.TestCase)
, ('assertEqualsTests.subpkg.TestCase', mod.subpkg.TestCase)]
actual = self.summarize._Summarize__testcases
self.assertEqual(expected, actual)
def testFindTestCasesEmptyStopWordsOk(self):
self.summarize.module = 'assertEqualsTests'
self.summarize.stopwords = ('',)
self.summarize.find_testcases()
mod = __import__('assertEqualsTests')
expected = [ ( 'assertEqualsTests.TestCase'
, mod.TestCase
)
, ( 'assertEqualsTests.itDoesExist.TestCase'
, mod.itDoesExist.TestCase
)
, ( 'assertEqualsTests.itDoesExist.TestCase2'
, mod.itDoesExist.TestCase2
)
, ( 'assertEqualsTests.subpkg.TestCase'
, mod.subpkg.TestCase
)
]
actual = self.summarize._Summarize__testcases
self.assertEqual(expected, actual)
# print_header
# ============
def testPrintHeader(self):
self.summarize.print_header()
actual = self.summarize.report.getvalue()
expected = HEADER
self.assertEqual(expected, actual)
# print_body
# ==========
def testPrintBody(self):
self.summarize.module = 'assertEqualsTests'
self.summarize.find_testcases()
self.summarize.print_body()
expected = BODY
actual = self.summarize.report.getvalue()
self.assertEqual(expected, actual)
expected = (1, 1, 10)
actual = self.summarize._Summarize__totals
self.assertEqual(expected, actual)
def testPrintBodyNoRun(self):
self.summarize.module = 'assertEqualsTests'
self.summarize.find_only = True
self.summarize.find_testcases()
self.summarize.print_body()
expected = BODY_FIND
actual = self.summarize.report.getvalue()
self.assertEqual(expected, actual)
expected = (0, 0, 10)
actual = self.summarize._Summarize__totals
self.assertEqual(expected, actual)
def testPrintBodyBaseIsDotted(self):
self.summarize.module = 'assertEqualsTests.itDoesExist'
self.summarize.find_testcases()
self.summarize.quiet = False
self.summarize.print_body()
expected = BODY_DOTTED_RUN_VERBOSE
actual = self.summarize.report.getvalue()
self.assertEqual(expected, actual)
expected = (0, 0, 3)
actual = self.summarize._Summarize__totals
self.assertEqual(expected, actual)
# print_footer
# ============
def testPrintFooterBasicTotalsWithRun(self):
self.summarize._Summarize__totals = (4, 5, 18)
self.summarize.print_footer()
actual = self.summarize.report.getvalue()
expected = TOTALS_BASIC
self.assertEqual(expected, actual)
def testPrintFooterBasicTotalsNoRun(self):
self.summarize._Summarize__totals = (4, 5, 18)
self.summarize.find_only = True
self.summarize.print_footer()
actual = self.summarize.report.getvalue()
expected = TOTALS_BASIC_NO_RUN
self.assertEqual(expected, actual)
def testPrintFooterZeroTotalsWithRun(self):
self.summarize._Summarize__totals = (0, 0, 0)
self.summarize.print_footer()
actual = self.summarize.report.getvalue()
expected = TOTALS_ZERO
self.assertEqual(expected, actual)
def testPrintFooterZeroTotalsNoRun(self):
self.summarize._Summarize__totals = (0, 0, 0)
self.summarize.find_only = True
self.summarize.print_footer()
actual = self.summarize.report.getvalue()
expected = TOTALS_ZERO_NO_RUN
self.assertEqual(expected, actual)
def testPrintFooterZeroPercentWithRun(self):
self.summarize._Summarize__totals = (5, 5, 10)
self.summarize.print_footer()
actual = self.summarize.report.getvalue()
expected = TOTALS_ZERO_PERCENT
self.assertEqual(expected, actual)
def testPrintFooterZeroPercentNoRun(self):
self.summarize._Summarize__totals = (5, 5, 10)
self.summarize.tfail = 5
self.summarize.terr = 5
self.summarize.tall = 10
self.summarize.find_only = True
self.summarize.print_footer()
actual = self.summarize.report.getvalue()
expected = TOTALS_ZERO_PERCENT_NO_RUN
self.assertEqual(expected, actual)
def testPrintFooterAllPassing(self):
self.summarize._Summarize__totals = (0, 0, 10)
self.summarize.tfail = 0
self.summarize.terr = 0
self.summarize.tall = 10
self.summarize.print_footer()
actual = self.summarize.report.getvalue()
expected = TOTALS_ALL_PASSING
self.assertEqual(expected, actual)
def testPrintFooterAllPassingNoRun(self):
self.summarize._Summarize__totals = (0, 0, 10)
self.summarize.tfail = 0
self.summarize.terr = 0
self.summarize.tall = 10
self.summarize.find_only = True
self.summarize.print_footer()
actual = self.summarize.report.getvalue()
expected = TOTALS_ALL_PASSING_NO_RUN
self.assertEqual(expected, actual)
|
whit537/assertEquals
|
assertEquals/tests/cli.py
|
Python
|
bsd-2-clause
| 13,568
|
import logging
from math import log, sqrt
from src.ranked_posting import RankedPosting
class WeightCalculator(object):
def set_posting_weights(self, index, numdocs):
N = numdocs
logging.info('calculating tf-idf weights ({} docs)'.format(N))
for term in index:
df = len(index[term].postings)
#newlist = SingleList()
newlist = []
for posting in index[term].postings:
tf = len(posting.positions)
rank = self._calc_wt_f_d(tf, df, N)
#logging.debug('term {} doc {} df {} tf {} rank {}'.format(term,posting.docID,df,tf,rank))
newlist.append(RankedPosting(posting.docID, rank, posting.positions))
index[term].postings = newlist
def normalize_posting_weights(self, index, numdocs):
logging.info('normalizing tf-idf weights ({} docs)'.format(numdocs))
doc_norm_factors = [0] * numdocs
for term_postings in index.values():
for posting in term_postings.postings:
doc_norm_factors[posting.docID] += pow(posting.rank, 2)
doc_norm_factors = [sqrt(factor) for factor in doc_norm_factors]
for term_postings in index.values():
for posting in term_postings.postings:
posting.rank /= doc_norm_factors[posting.docID]
def _calc_wt_f_d(self, term_freq, doc_freq, numdocs):
return 0 if term_freq == 0 else (1 + log(term_freq, 10)) * log(numdocs / doc_freq, 10)
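# ---------------------------------------------------------------------------
# Worked example for the weighting above (illustrative numbers):
#
#   _calc_wt_f_d implements   w(t, d) = (1 + log10(tf)) * log10(N / df)
#
#   tf = 3 occurrences in the document, df = 10 documents containing the term,
#   N = 1000 documents in total:
#
#       (1 + log10(3)) * log10(1000 / 10) = 1.477 * 2 = 2.954
#
# normalize_posting_weights then divides every weight of a document by the
# Euclidean norm of that document's weight vector (cosine normalization).
# Note that N / df relies on true (float) division.
# ---------------------------------------------------------------------------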
|
foobar999/Suchmaschine
|
src/vector/weight_calculator.py
|
Python
|
bsd-2-clause
| 1,578
|
from __future__ import print_function, absolute_import
import ctypes
from ctypes import *
from ctypes.util import find_library
import subprocess
import sys
import unittest
import platform
from llvmlite import six
from llvmlite import binding as llvm
from llvmlite.binding import ffi
from . import TestCase
asm_sum = r"""
; ModuleID = '<string>'
target triple = "{triple}"
@glob = global i32 0
@glob_b = global i8 0
@glob_f = global float 1.5
@glob_struct = global {{ i64, [2 x i64]}} {{i64 0, [2 x i64] [i64 0, i64 0]}}
define i32 @sum(i32 %.1, i32 %.2) {{
%.3 = add i32 %.1, %.2
%.4 = add i32 0, %.3
ret i32 %.4
}}
"""
asm_sum2 = r"""
; ModuleID = '<string>'
target triple = "{triple}"
define i32 @sum(i32 %.1, i32 %.2) {{
%.3 = add i32 %.1, %.2
ret i32 %.3
}}
"""
asm_mul = r"""
; ModuleID = '<string>'
target triple = "{triple}"
define i32 @mul(i32 %.1, i32 %.2) {{
%.3 = mul i32 %.1, %.2
ret i32 %.3
}}
"""
# `fadd` used on integer inputs
asm_parse_error = r"""
; ModuleID = '<string>'
target triple = "{triple}"
define i32 @sum(i32 %.1, i32 %.2) {{
%.3 = fadd i32 %.1, %.2
ret i32 %.3
}}
"""
# "%.bug" definition references itself
asm_verification_fail = r"""
; ModuleID = '<string>'
target triple = "{triple}"
define void @sum() {{
%.bug = add i32 1, %.bug
ret void
}}
"""
asm_sum_declare = r"""
; ModuleID = '<string>'
target triple = "{triple}"
declare i32 @sum(i32 %.1, i32 %.2)
"""
class BaseTest(TestCase):
def setUp(self):
llvm.initialize()
llvm.initialize_native_target()
llvm.initialize_native_asmprinter()
def module(self, asm=asm_sum):
asm = asm.format(triple=llvm.get_default_triple())
mod = llvm.parse_assembly(asm)
return mod
def glob(self, name='glob', mod=None):
if mod is None:
mod = self.module()
return mod.get_global_variable(name)
def target_machine(self):
target = llvm.Target.from_default_triple()
return target.create_target_machine()
class TestMisc(BaseTest):
"""
Test miscellaneous functions in llvm.binding.
"""
def test_parse_assembly(self):
self.module(asm_sum)
def test_parse_assembly_error(self):
with self.assertRaises(RuntimeError) as cm:
self.module(asm_parse_error)
s = str(cm.exception)
self.assertIn("parsing error", s)
self.assertIn("invalid operand type", s)
def test_dylib_symbols(self):
llvm.add_symbol("__xyzzy", 1234)
llvm.add_symbol("__xyzzy", 5678)
addr = llvm.address_of_symbol("__xyzzy")
self.assertEqual(addr, 5678)
addr = llvm.address_of_symbol("__foobar")
self.assertIs(addr, None)
def test_get_default_triple(self):
triple = llvm.get_default_triple()
self.assertIsInstance(triple, str)
self.assertTrue(triple)
def test_get_host_cpu_name(self):
cpu = llvm.get_host_cpu_name()
self.assertIsInstance(cpu, str)
self.assertTrue(cpu)
def test_initfini(self):
code = """if 1:
from llvmlite import binding as llvm
llvm.initialize()
llvm.initialize_native_target()
llvm.initialize_native_asmprinter()
llvm.shutdown()
"""
subprocess.check_call([sys.executable, "-c", code])
def test_set_option(self):
# We cannot set an option multiple times (LLVM would exit() the
# process), so run the code in a subprocess.
code = """if 1:
from llvmlite import binding as llvm
llvm.set_option("progname", "-debug-pass=Disabled")
"""
subprocess.check_call([sys.executable, "-c", code])
def test_version(self):
major, minor, patch = llvm.llvm_version_info
self.assertIn((major, minor), [(3, 5), (3, 6)])
self.assertIn(patch, range(10))
class TestModuleRef(BaseTest):
def test_str(self):
mod = self.module()
s = str(mod).strip()
self.assertTrue(s.startswith('; ModuleID ='), s)
def test_close(self):
mod = self.module()
str(mod)
mod.close()
with self.assertRaises(ctypes.ArgumentError):
str(mod)
mod.close()
def test_with(self):
mod = self.module()
str(mod)
with mod:
str(mod)
with self.assertRaises(ctypes.ArgumentError):
str(mod)
with self.assertRaises(RuntimeError):
with mod:
pass
def test_data_layout(self):
mod = self.module()
s = mod.data_layout
self.assertIsInstance(s, str)
mod.data_layout = s
self.assertEqual(s, mod.data_layout)
def test_triple(self):
mod = self.module()
s = mod.triple
self.assertEqual(s, llvm.get_default_triple())
mod.triple = ''
self.assertEqual(mod.triple, '')
def test_verify(self):
# Verify successful
mod = self.module()
self.assertIs(mod.verify(), None)
# Verify failed
mod = self.module(asm_verification_fail)
with self.assertRaises(RuntimeError) as cm:
mod.verify()
s = str(cm.exception)
self.assertIn("%.bug = add i32 1, %.bug", s)
def test_get_function(self):
mod = self.module()
fn = mod.get_function("sum")
self.assertIsInstance(fn, llvm.ValueRef)
self.assertEqual(fn.name, "sum")
with self.assertRaises(NameError):
mod.get_function("foo")
# Check that fn keeps the module instance alive
del mod
str(fn.module)
def test_get_global_variable(self):
mod = self.module()
gv = mod.get_global_variable("glob")
self.assertIsInstance(gv, llvm.ValueRef)
self.assertEqual(gv.name, "glob")
with self.assertRaises(NameError):
mod.get_global_variable("bar")
# Check that gv keeps the module instance alive
del mod
str(gv.module)
def test_global_variables(self):
mod = self.module()
it = mod.global_variables
del mod
globs = sorted(it, key=lambda value: value.name)
self.assertEqual(len(globs), 4)
self.assertEqual([g.name for g in globs],
["glob", "glob_b", "glob_f", "glob_struct"])
def test_functions(self):
mod = self.module()
it = mod.functions
del mod
funcs = list(it)
self.assertEqual(len(funcs), 1)
self.assertEqual(funcs[0].name, "sum")
def test_link_in(self):
dest = self.module()
src = self.module(asm_mul)
dest.link_in(src)
self.assertEqual(sorted(f.name for f in dest.functions), ["mul", "sum"])
dest.get_function("mul")
dest.close()
with self.assertRaises(ctypes.ArgumentError):
src.get_function("mul")
def test_link_in_preserve(self):
dest = self.module()
src2 = self.module(asm_mul)
dest.link_in(src2, preserve=True)
self.assertEqual(sorted(f.name for f in dest.functions), ["mul", "sum"])
dest.close()
self.assertEqual(sorted(f.name for f in src2.functions), ["mul"])
src2.get_function("mul")
def test_link_in_error(self):
# Raise an error by trying to link two modules with the same global
# definition "sum".
dest = self.module()
src = self.module(asm_sum2)
with self.assertRaises(RuntimeError) as cm:
dest.link_in(src)
self.assertIn("symbol multiply defined", str(cm.exception))
def test_as_bitcode(self):
mod = self.module()
bc = mod.as_bitcode()
# Refer to http://llvm.org/docs/doxygen/html/ReaderWriter_8h_source.html#l00064
# and http://llvm.org/docs/doxygen/html/ReaderWriter_8h_source.html#l00092
bitcode_wrapper_magic = b'\xde\xc0\x17\x0b'
bitcode_magic = b'BC'
self.assertTrue(bc.startswith(bitcode_magic) or
bc.startswith(bitcode_wrapper_magic))
def test_parse_bitcode_error(self):
with self.assertRaises(RuntimeError) as cm:
llvm.parse_bitcode(b"")
self.assertIn("LLVM bitcode parsing error", str(cm.exception))
self.assertIn("Invalid bitcode signature", str(cm.exception))
def test_bitcode_roundtrip(self):
bc = self.module().as_bitcode()
mod = llvm.parse_bitcode(bc)
self.assertEqual(mod.as_bitcode(), bc)
mod.get_function("sum")
mod.get_global_variable("glob")
def test_cloning(self):
m = self.module()
cloned = m.clone()
self.assertIsNot(cloned, m)
self.assertEqual(cloned.as_bitcode(), m.as_bitcode())
class JITTestMixin(object):
"""
Mixin for ExecutionEngine tests.
"""
def test_run_code(self):
mod = self.module()
with self.jit(mod) as ee:
ee.finalize_object()
cfptr = ee.get_pointer_to_global(mod.get_function('sum'))
cfunc = CFUNCTYPE(c_int, c_int, c_int)(cfptr)
res = cfunc(2, -5)
self.assertEqual(-3, res)
def test_close(self):
ee = self.jit(self.module())
ee.close()
ee.close()
with self.assertRaises(ctypes.ArgumentError):
ee.finalize_object()
def test_with(self):
ee = self.jit(self.module())
with ee:
pass
with self.assertRaises(RuntimeError):
with ee:
pass
with self.assertRaises(ctypes.ArgumentError):
ee.finalize_object()
def test_module_lifetime(self):
mod = self.module()
ee = self.jit(mod)
ee.close()
mod.close()
def test_module_lifetime2(self):
mod = self.module()
ee = self.jit(mod)
mod.close()
ee.close()
def test_add_module(self):
ee = self.jit(self.module())
mod = self.module(asm_mul)
ee.add_module(mod)
with self.assertRaises(KeyError):
ee.add_module(mod)
self.assertFalse(mod.closed)
ee.close()
self.assertTrue(mod.closed)
def test_add_module_lifetime(self):
ee = self.jit(self.module())
mod = self.module(asm_mul)
ee.add_module(mod)
mod.close()
ee.close()
def test_add_module_lifetime2(self):
ee = self.jit(self.module())
mod = self.module(asm_mul)
ee.add_module(mod)
ee.close()
mod.close()
def test_remove_module(self):
ee = self.jit(self.module())
mod = self.module(asm_mul)
ee.add_module(mod)
ee.remove_module(mod)
with self.assertRaises(KeyError):
ee.remove_module(mod)
self.assertFalse(mod.closed)
ee.close()
self.assertFalse(mod.closed)
def test_target_data(self):
mod = self.module()
ee = self.jit(mod)
td = ee.target_data
# A singleton is returned
self.assertIs(ee.target_data, td)
str(td)
del mod, ee
str(td)
def test_target_data_abi_enquiries(self):
mod = self.module()
ee = self.jit(mod)
td = ee.target_data
gv_i32 = mod.get_global_variable("glob")
gv_i8 = mod.get_global_variable("glob_b")
gv_struct = mod.get_global_variable("glob_struct")
# A global is a pointer, it has the ABI size of a pointer
pointer_size = 4 if sys.maxsize < 2 ** 32 else 8
for g in (gv_i32, gv_i8, gv_struct):
self.assertEqual(td.get_abi_size(g.type), pointer_size)
self.assertEqual(td.get_pointee_abi_size(gv_i32.type), 4)
self.assertEqual(td.get_pointee_abi_alignment(gv_i32.type), 4)
self.assertEqual(td.get_pointee_abi_size(gv_i8.type), 1)
self.assertIn(td.get_pointee_abi_alignment(gv_i8.type), (1, 2, 4))
self.assertEqual(td.get_pointee_abi_size(gv_struct.type), 24)
self.assertIn(td.get_pointee_abi_alignment(gv_struct.type), (4, 8))
class JITWithTMTestMixin(JITTestMixin):
def test_emit_assembly(self):
"""Test TargetMachineRef.emit_assembly()"""
target_machine = self.target_machine()
mod = self.module()
ee = self.jit(mod, target_machine)
raw_asm = target_machine.emit_assembly(mod)
self.assertIn("sum", raw_asm)
def test_emit_object(self):
"""Test TargetMachineRef.emit_object()"""
target_machine = self.target_machine()
mod = self.module()
ee = self.jit(mod, target_machine)
code_object = target_machine.emit_object(mod)
self.assertIsInstance(code_object, six.binary_type)
if sys.platform.startswith('linux'):
# Sanity check
self.assertIn(b"ELF", code_object[:10])
class TestMCJit(BaseTest, JITWithTMTestMixin):
"""
Test JIT engines created with create_mcjit_compiler().
"""
def jit(self, mod, target_machine=None):
if target_machine is None:
target_machine = self.target_machine()
return llvm.create_mcjit_compiler(mod, target_machine)
class TestValueRef(BaseTest):
def test_str(self):
mod = self.module()
glob = mod.get_global_variable("glob")
self.assertEqual(str(glob), "@glob = global i32 0")
def test_name(self):
mod = self.module()
glob = mod.get_global_variable("glob")
self.assertEqual(glob.name, "glob")
glob.name = "foobar"
self.assertEqual(glob.name, "foobar")
def test_linkage(self):
mod = self.module()
glob = mod.get_global_variable("glob")
linkage = glob.linkage
self.assertIsInstance(glob.linkage, llvm.Linkage)
glob.linkage = linkage
self.assertEqual(glob.linkage, linkage)
for linkage in ("internal", "external"):
glob.linkage = linkage
self.assertIsInstance(glob.linkage, llvm.Linkage)
self.assertEqual(glob.linkage.name, linkage)
def test_add_function_attribute(self):
mod = self.module()
fn = mod.get_function("sum")
fn.add_function_attribute("zext")
def test_module(self):
mod = self.module()
glob = mod.get_global_variable("glob")
self.assertIs(glob.module, mod)
def test_type(self):
mod = self.module()
glob = mod.get_global_variable("glob")
tp = glob.type
self.assertIsInstance(tp, ffi.LLVMTypeRef)
def test_close(self):
glob = self.glob()
glob.close()
glob.close()
def test_is_declaration(self):
defined = self.module().get_function('sum')
declared = self.module(asm_sum_declare).get_function('sum')
self.assertFalse(defined.is_declaration)
self.assertTrue(declared.is_declaration)
class TestTarget(BaseTest):
def test_from_triple(self):
f = llvm.Target.from_triple
with self.assertRaises(RuntimeError) as cm:
f("foobar")
self.assertIn("No available targets are compatible with this triple",
str(cm.exception))
triple = llvm.get_default_triple()
target = f(triple)
self.assertEqual(target.triple, triple)
target.close()
def test_create_target_machine(self):
target = llvm.Target.from_triple(llvm.get_default_triple())
# With the default settings
target.create_target_machine('', '', 1, 'default', 'default')
# With the host's CPU
cpu = llvm.get_host_cpu_name()
target.create_target_machine(cpu, '', 1, 'default', 'default')
def test_name(self):
t = llvm.Target.from_triple(llvm.get_default_triple())
u = llvm.Target.from_default_triple()
self.assertIsInstance(t.name, str)
self.assertEqual(t.name, u.name)
def test_description(self):
t = llvm.Target.from_triple(llvm.get_default_triple())
u = llvm.Target.from_default_triple()
self.assertIsInstance(t.description, str)
self.assertEqual(t.description, u.description)
def test_str(self):
target = llvm.Target.from_triple(llvm.get_default_triple())
s = str(target)
self.assertIn(target.name, s)
self.assertIn(target.description, s)
class TestTargetData(BaseTest):
def target_data(self):
return llvm.create_target_data("e-m:e-i64:64-f80:128-n8:16:32:64-S128")
def test_get_abi_size(self):
td = self.target_data()
glob = self.glob()
self.assertEqual(td.get_abi_size(glob.type), 8)
def test_add_pass(self):
td = self.target_data()
pm = llvm.create_module_pass_manager()
td.add_pass(pm)
class TestTargetMachine(BaseTest):
def test_add_analysis_passes(self):
tm = self.target_machine()
pm = llvm.create_module_pass_manager()
tm.add_analysis_passes(pm)
def test_target_data_from_tm(self):
tm = self.target_machine()
td = tm.target_data
mod = self.module()
gv_i32 = mod.get_global_variable("glob")
# A global is a pointer, it has the ABI size of a pointer
pointer_size = 4 if sys.maxsize < 2 ** 32 else 8
self.assertEqual(td.get_abi_size(gv_i32.type), pointer_size)
class TestTargetLibraryInfo(BaseTest):
def tli(self):
return llvm.create_target_library_info(llvm.get_default_triple())
def test_create_target_library_info(self):
tli = llvm.create_target_library_info(llvm.get_default_triple())
with tli:
pass
tli.close()
def test_get_libfunc(self):
tli = self.tli()
with self.assertRaises(NameError):
tli.get_libfunc("xyzzy")
fmin = tli.get_libfunc("fmin")
self.assertEqual(fmin.name, "fmin")
self.assertIsInstance(fmin.identity, int)
fmax = tli.get_libfunc("fmax")
self.assertNotEqual(fmax.identity, fmin.identity)
def test_set_unavailable(self):
tli = self.tli()
fmin = tli.get_libfunc("fmin")
tli.set_unavailable(fmin)
def test_disable_all(self):
tli = self.tli()
tli.disable_all()
def test_add_pass(self):
tli = self.tli()
pm = llvm.create_module_pass_manager()
tli.add_pass(pm)
class TestPassManagerBuilder(BaseTest):
def pmb(self):
return llvm.PassManagerBuilder()
def test_old_api(self):
# Test the create_pass_manager_builder() factory function
pmb = llvm.create_pass_manager_builder()
pmb.inlining_threshold = 2
pmb.opt_level = 3
def test_close(self):
pmb = self.pmb()
pmb.close()
pmb.close()
def test_opt_level(self):
pmb = self.pmb()
self.assertIsInstance(pmb.opt_level, six.integer_types)
for i in range(4):
pmb.opt_level = i
self.assertEqual(pmb.opt_level, i)
def test_size_level(self):
pmb = self.pmb()
self.assertIsInstance(pmb.size_level, six.integer_types)
for i in range(4):
pmb.size_level = i
self.assertEqual(pmb.size_level, i)
def test_inlining_threshold(self):
pmb = self.pmb()
with self.assertRaises(NotImplementedError):
pmb.inlining_threshold
for i in (25, 80, 350):
pmb.inlining_threshold = i
def test_disable_unit_at_a_time(self):
pmb = self.pmb()
self.assertIsInstance(pmb.disable_unit_at_a_time, bool)
for b in (True, False):
pmb.disable_unit_at_a_time = b
self.assertEqual(pmb.disable_unit_at_a_time, b)
def test_disable_unroll_loops(self):
pmb = self.pmb()
self.assertIsInstance(pmb.disable_unroll_loops, bool)
for b in (True, False):
pmb.disable_unroll_loops = b
self.assertEqual(pmb.disable_unroll_loops, b)
def test_populate_module_pass_manager(self):
pmb = self.pmb()
pm = llvm.create_module_pass_manager()
pmb.populate(pm)
pmb.close()
pm.close()
def test_populate_function_pass_manager(self):
mod = self.module()
pmb = self.pmb()
pm = llvm.create_function_pass_manager(mod)
pmb.populate(pm)
pmb.close()
pm.close()
class PassManagerTestMixin(object):
def pmb(self):
pmb = llvm.create_pass_manager_builder()
pmb.opt_level = 2
return pmb
def test_close(self):
pm = self.pm()
pm.close()
pm.close()
class TestModulePassManager(BaseTest, PassManagerTestMixin):
def pm(self):
return llvm.create_module_pass_manager()
def test_run(self):
pm = self.pm()
self.pmb().populate(pm)
mod = self.module()
orig_asm = str(mod)
pm.run(mod)
opt_asm = str(mod)
# Quick check that optimizations were run
self.assertIn("%.3", orig_asm)
self.assertNotIn("%.3", opt_asm)
class TestFunctionPassManager(BaseTest, PassManagerTestMixin):
def pm(self, mod=None):
mod = mod or self.module()
return llvm.create_function_pass_manager(mod)
def test_initfini(self):
pm = self.pm()
pm.initialize()
pm.finalize()
def test_run(self):
mod = self.module()
fn = mod.get_function("sum")
pm = self.pm(mod)
self.pmb().populate(pm)
mod.close()
orig_asm = str(fn)
pm.initialize()
pm.run(fn)
pm.finalize()
opt_asm = str(fn)
# Quick check that optimizations were run
self.assertIn("%.4", orig_asm)
self.assertNotIn("%.4", opt_asm)
class TestDylib(BaseTest):
def test_bad_library(self):
with self.assertRaises(RuntimeError):
llvm.load_library_permanently("zzzasdkf;jasd;l")
@unittest.skipUnless(platform.system() in ["Linux", "Darwin"],
"test only works on Linux and Darwin")
def test_libm(self):
system = platform.system()
if system == "Linux":
libm = find_library("m")
elif system == "Darwin":
libm = find_library("libm")
llvm.load_library_permanently(libm)
if __name__ == "__main__":
unittest.main()
|
squisher/llvmlite
|
llvmlite/tests/test_binding.py
|
Python
|
bsd-2-clause
| 22,542
|
from pipedream.dispatcher import Dispatcher
from pipedream.exceptions import *
|
tgecho/pipedream
|
pipedream/__init__.py
|
Python
|
bsd-2-clause
| 79
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2015, Anima Istanbul
#
# This module is part of anima-tools and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
"""Fix Bound Joint
v0.1.0
Description
-----------
Fixes bound joints to the correct worldOrientation in connected skinClusters
Usage
-----
Select a joint which is an influence for a skinCluster, open up the script UI,
select "freeze transformations" if you want to zero rotation values, select
"Apply to children" if you want to apply the same fix to child joints...
Change Log
----------
0.1.0
-----
- Moved the script to Python
- It also fixes any joint with input connection, thus it can fix joints which
are in character set or have an input connection
"""
__version__ = "0.1.0"
import pymel.core as pm
def UI():
"""The UI of the script
"""
window_width = 153
window_height = 80
window_name = "oyFixBoundJoint_Window"
if pm.window(window_name, ex=True):
pm.deleteUI(window_name, window=True)
window = pm.window(
window_name,
tlb=True,
title="fixBoundJoint " + __version__,
widthHeight=(window_width, window_height)
)
pm.columnLayout("FBJ_columnLayout1", adj=True)
pm.checkBox(
"FBJ_checkBox1",
l="Freeze transformations",
al="left",
v=1
)
pm.checkBox(
"FBJ_checkBox2",
l="Apply to children",
al="left"
)
pm.button(
"FBJ_button1",
l="Apply",
c=get_check_box_states_and_run
)
pm.setParent()
window.show()
window.setWidthHeight(val=(window_width, window_height))
def get_check_box_states_and_run(*args, **kwargs):
"""Gets the data from UI and runs the script
"""
freeze = pm.checkBox("FBJ_checkBox1", q=True, v=True)
apply_to_children = pm.checkBox("FBJ_checkBox2", q=True, v=True)
selection_list = pm.ls(sl=1, type="joint")
do_fix(selection_list, freeze, apply_to_children)
pm.select(selection_list)
def do_fix(joints, freeze=True, apply_to_children=False):
"""Fixes the given list of bound joints by copying the current worldMatrix
information to the related skinClusters.
:param joints: the list of bound joints to fix
:param freeze: If freeze is True (the default) the rotations of the joint
are also set to (0, 0, 0).
:param apply_to_children: If given as True it will also apply the operation
to the children of the given joints
"""
new_selection_list = joints
if apply_to_children:
pm.select(joints, hi=True)
new_selection_list = pm.ls(sl=1, type="joint")
for joint in new_selection_list:
connections = joint.worldMatrix.outputs(
c=1,
p=1,
t="skinCluster",
et=True
)
if freeze:
freeze_joint(joint)
matrix = joint.worldInverseMatrix.get()
for attribute_data in connections:
skinCluster_attribute = attribute_data[1]
skinCluster_node = skinCluster_attribute.node()
index = skinCluster_attribute.index()
skinCluster_node.bindPreMatrix[index].set(matrix)
def freeze_joint(joint):
"""Freezes the given joint by duplicating it and applying the freeze to the
duplicate and then copy the joint orientation values to the original joint.
:param joint: The joint which wanted to be frozen
"""
dup_joint = pm.duplicate(joint, rc=1)[0]
# if the duplicate has any children delete them
pm.delete(dup_joint.getChildren())
# unlock rotate channels
dup_joint.rotateX.unlock()
dup_joint.rotateY.unlock()
dup_joint.rotateZ.unlock()
# freeze the joint
pm.makeIdentity(dup_joint, apply=1, r=1)
# set rotation to zero
if not joint.rotateX.isLocked():
joint.rotateX.set(0)
else:
# unlock and lock it again
joint.rotateX.unlock()
joint.rotateX.set(0)
joint.rotateX.lock()
if not joint.rotateY.isLocked():
joint.rotateY.set(0)
else:
# unlock and lock it again
joint.rotateY.unlock()
joint.rotateY.set(0)
joint.rotateY.lock()
if not joint.rotateZ.isLocked():
joint.rotateZ.set(0)
else:
# unlock and lock it again
joint.rotateZ.unlock()
joint.rotateZ.set(0)
joint.rotateZ.lock()
# get the joint orient
joint.jointOrient.set(dup_joint.jointOrient.get())
# delete the duplicate joint
pm.delete(dup_joint)
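# Editor's sketch (not part of the original script): calling do_fix() directly
# on the current selection instead of going through the UI. Assumes Maya is
# running with pymel available and that joints bound to a skinCluster are
# selected.
def example_fix_selected_joints():
    joints = pm.ls(sl=1, type="joint")
    do_fix(joints, freeze=True, apply_to_children=False)
    pm.select(joints)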
|
sergeneren/anima
|
anima/env/mayaEnv/fix_bound_joint.py
|
Python
|
bsd-2-clause
| 4,560
|
import logging
import time
import sys
import numpy as np
from sklearn.utils.extmath import safe_sparse_dot
from chain_opt import optimize_chain_fast
from graph_utils import decompose_graph, decompose_grid_graph
def optimize_chain(chain, unary_cost, pairwise_cost, edge_index):
n_nodes = chain.shape[0]
n_states = unary_cost.shape[1]
p = np.zeros((n_states, n_nodes))
track = np.zeros((n_states, n_nodes), dtype=np.int32)
p[:,0] = unary_cost[0,:]
track[:,0] = -1
for i in xrange(1, n_nodes):
p[:,i] = unary_cost[i,:]
p_cost = pairwise_cost[edge_index[(chain[i - 1], chain[i])]]
for k in xrange(n_states):
p[k,i] += np.min(p[:,i - 1] + p_cost[:,k])
track[k,i] = np.argmin(p[:,i - 1] + p_cost[:,k])
x = np.zeros(n_nodes, dtype=np.int32)
current = np.argmin(p[:,n_nodes - 1])
for i in xrange(n_nodes - 1, -1, -1):
x[i] = current
current = track[current,i]
return x, np.min(p[:,n_nodes - 1])
class Over(object):
def __init__(self, n_states, n_features, n_edge_features,
C=1, verbose=0, max_iter=200, check_every=1):
self.n_states = n_states
self.n_features = n_features
self.n_edge_features = n_edge_features
self.C = C
self.verbose = verbose
self.max_iter = max_iter
self.size_w = (self.n_states * self.n_features +
self.n_states * self.n_edge_features)
self.logger = logging.getLogger(__name__)
self.check_every = check_every
def _get_edges(self, x):
return x[1]
def _get_features(self, x):
return x[0]
def _get_edge_features(self, x):
return x[2]
def _get_pairwise_potentials(self, x, w):
edge_features = self._get_edge_features(x)
pairwise = np.asarray(w[self.n_states * self.n_features:])
pairwise = pairwise.reshape(self.n_edge_features, -1)
pairwise = np.dot(edge_features, pairwise)
res = np.zeros((edge_features.shape[0], self.n_states, self.n_states))
for i in range(edge_features.shape[0]):
res[i, :, :] = np.diag(pairwise[i, :])
return res
def _get_unary_potentials(self, x, w):
features = self._get_features(x)
unary_params = w[:self.n_states * self.n_features].reshape(self.n_states, self.n_features)
return safe_sparse_dot(features, unary_params.T, dense_output=True)
def _loss_augment_unaries(self, unaries, y, weights):
unaries = unaries.copy()
for label in xrange(self.n_states):
mask = y != label
unaries[mask, label] -= weights[mask]
return unaries
def _joint_features(self, chain, x, y, edge_index, multiplier):
features = self._get_features(x)[chain,:]
n_nodes = features.shape[0]
features *= multiplier[chain,:]
e_ind = []
edges = []
for i in xrange(chain.shape[0] - 1):
edges.append((i, i + 1))
e_ind.append(edge_index[(chain[i], chain[i + 1])])
edges = np.array(edges)
edge_features = self._get_edge_features(x)[e_ind,:]
unary_marginals = np.zeros((n_nodes, self.n_states), dtype=np.float64)
unary_marginals[np.ogrid[:n_nodes], y] = 1
unaries_acc = safe_sparse_dot(unary_marginals.T, features,
dense_output=True)
pw = np.zeros((self.n_edge_features, self.n_states))
for label in xrange(self.n_states):
mask = (y[edges[:, 0]] == label) & (y[edges[:, 1]] == label)
pw[:, label] = np.sum(edge_features[mask], axis=0)
return np.hstack([unaries_acc.ravel(), pw.ravel()])
def _joint_features_full(self, x, y):
features, edges, edge_features = \
self._get_features(x), self._get_edges(x), self._get_edge_features(x)
n_nodes = features.shape[0]
y = y.reshape(n_nodes)
unary_marginals = np.zeros((n_nodes, self.n_states), dtype=np.float64)
unary_marginals[np.ogrid[:n_nodes], y] = 1
unaries_acc = safe_sparse_dot(unary_marginals.T, features,
dense_output=True)
pw = np.zeros((self.n_edge_features, self.n_states))
for label in xrange(self.n_states):
mask = (y[edges[:, 0]] == label) & (y[edges[:, 1]] == label)
pw[:, label] = np.sum(edge_features[mask], axis=0)
return np.hstack([unaries_acc.ravel(), pw.ravel()])
def fit(self, X, Y, train_scorer, test_scorer, decompose='general'):
self.logger.info('Initialization')
if decompose == 'general':
contains_node, chains, edge_index = decompose_graph(X)
elif decompose == 'grid':
contains_node, chains, edge_index = decompose_grid_graph(X)
else:
raise ValueError("decompose must be 'general' or 'grid'")
y_hat = []
lambdas = []
multiplier = []
for k in xrange(len(X)):
n_nodes = X[k][0].shape[0]
_lambdas = []
_y_hat = []
_multiplier = []
for p in xrange(n_nodes):
_multiplier.append(1.0 / len(contains_node[k][p]))
for chain in chains[k]:
_lambdas.append(np.zeros((len(chain), self.n_states)))
_y_hat.append(np.zeros(len(chain)))
lambdas.append(_lambdas)
y_hat.append(_y_hat)
_multiplier = np.array(_multiplier)
_multiplier.shape = (n_nodes, 1)
multiplier.append(_multiplier)
w = np.zeros(self.size_w)
self.w = w.copy()
self.start_time = time.time()
self.timestamps = [0]
self.objective_curve = []
self.train_score = []
self.test_score = []
self.w_history = []
learning_rate = 0.1
for iteration in xrange(self.max_iter):
self.logger.info('Iteration %d', iteration)
self.logger.info('Optimize slave MRF and update w')
objective = 0
dw = np.zeros(w.shape)
for k in xrange(len(X)):
self.logger.info('object %d', k)
x, y = X[k], Y[k]
n_nodes = x[0].shape[0]
unaries = self._loss_augment_unaries(self._get_unary_potentials(x, w), y.full, y.weights)
unaries *= multiplier[k]
pairwise = self._get_pairwise_potentials(x, w)
objective += np.dot(w, self._joint_features_full(x, y.full))
dw -= self._joint_features_full(x, y.full)
for i in xrange(len(chains[k])):
y_hat[k][i], energy = optimize_chain(chains[k][i],
lambdas[k][i] + unaries[chains[k][i],:],
pairwise,
edge_index[k])
dw += self._joint_features(chains[k][i], x, y_hat[k][i], edge_index[k], multiplier[k])
objective -= energy
dw -= w / self.C
w += learning_rate * dw
objective = self.C * objective + np.sum(w ** 2) / 2
if iteration and (iteration % self.check_every == 0):
self.logger.info('Compute train and test scores')
self.train_score.append(train_scorer(w))
self.logger.info('Train SCORE: %f', self.train_score[-1])
self.test_score.append(test_scorer(w))
self.logger.info('Test SCORE: %f', self.test_score[-1])
self.logger.info('Update lambda')
for k in xrange(len(X)):
n_nodes = X[k][0].shape[0]
lambda_sum = np.zeros((n_nodes, self.n_states), dtype=np.float64)
for p in xrange(n_nodes):
for i in contains_node[k][p]:
pos = np.where(chains[k][i] == p)[0][0]
lambda_sum[p, y_hat[k][i][pos]] += multiplier[k][p]
for i in xrange(len(chains[k])):
N = lambdas[k][i].shape[0]
lambdas[k][i][np.ogrid[:N], y_hat[k][i]] += learning_rate
lambdas[k][i] -= learning_rate * lambda_sum[chains[k][i],:]
self.logger.info('diff: %f', np.sum((w-self.w)**2))
if iteration:
learning_rate = 1.0 / iteration
self.timestamps.append(time.time() - self.start_time)
self.objective_curve.append(objective)
self.logger.info('Objective: %f', objective)
self.w = w.copy()
self.w_history.append(self.w)
self.w = w
self.timestamps = np.array(self.timestamps)
self.objective_curve = np.array(self.objective_curve)
self.train_score = np.array(self.train_score)
self.test_score = np.array(self.test_score)
self.w_history = np.vstack(self.w_history)
|
kondra/latent_ssvm
|
over.py
|
Python
|
bsd-2-clause
| 9,015
|
#!/usr/bin/env python
import sys
import pickle
# Test zip, and format in print
names = ["xxx", "yyy", "zzz"]
ages = [18, 19, 20]
persons = zip(names, ages)
for name, age in persons:
print "{0}'s age is {1}".format(name, age)
# Check argument
if len(sys.argv) != 2:
print("%s filename" % sys.argv[0])
raise SystemExit(1)
# Write tuples
file = open(sys.argv[1], "wb");
line = []
while True:
print("Enter name, age, score (ex: zzz, 16, 90) or quit");
line = sys.stdin.readline()
if line == "quit\n":
break
raws = line.split(",")
name = raws[0]
age = int(raws[1])
score = int(raws[2])
record = (name, age, score)
pickle.dump(record, file)
file.close()
# Read back
file = open(sys.argv[1], "rb");
while True:
try:
record = pickle.load(file)
print record
name, age, score= record
print("name = %s" % name)
print("name = %d" % age)
print("name = %d" % score)
except (EOFError):
break
file.close()
|
zzz0072/Python_Exercises
|
01_Built-in_Types/tuple.py
|
Python
|
bsd-2-clause
| 1,022
|
# -*- python -*-
#
# This file is part of the CNO package
#
# Copyright (c) 2012-2013 - EMBL-EBI
#
# File author(s): Thomas Cokelaer (cokelaer@ebi.ac.uk)
#
# Distributed under the GLPv3 License.
# See accompanying file LICENSE.txt or copy at
# http://www.gnu.org/licenses/gpl-3.0.html
#
# website: github.com/cellnopt/cellnopt
#
##############################################################################
import sys
import numpy as np
import pylab
import pandas as pd
from easydev import precision
and_symbol = "^"
__all = ["Models", "BooleanModels", "ContinousModels",
"DTModels", "FuzzyModels", "CompareModels"]
class Models(object):
"""Data structure to store models.
Models are stored in dataframes. Columns will hold the reactions.
"""
def __init__(self, data, reacID=None, index_col=None, verbose=True):
""".. rubric:: constructor
:param data: can be a filename of a CSV file to read. The CSV header
should contain the reaction names. No index column is expected;
note, however, that :param:`index_col` may be used.
The input can also be a dataframe (column names being the reactions)
set to 0/1. The input can also be an instance of :class:`Models`.
:param list reacID: if provided, columns are renamed using this list
:param index_col:
:param bool verbose:
Reaction names may contain a symbol indicating the logical ANDs. This
should be "^" character.
"""
self.verbose = verbose
# FIXME interpret the first columns automatically ?
if isinstance(data, str):
self.filename = data
self.df = pd.read_csv(self.filename, index_col=index_col)
# FIXME: what is this?
if reacID:
reacID = pd.read_csv(reacID)
self.df.columns = reacID.ix[:,0]
if 'Score' in self.df.columns:
self.scores = self.df.Score
del self.df['Score']
if 'score' in self.df.columns:
self.scores = self.df.score
del self.df['score']
elif isinstance(data, pd.DataFrame):
self.df = data.copy()
if 'Score' in self.df.columns:
self.scores = self.df.Score
del self.df['Score']
if 'score' in self.df.columns:
self.scores = self.df.score
del self.df['score']
elif isinstance(data, Models):
self.df = data.df.copy()
else:
from cno import CNOError
raise CNOError("input data not understood. Could be a filename, a dataframe or a Models instance")
if hasattr(data, 'scores'):
self.scores = getattr(data, 'scores')
# TODO: In a reaction from cnograph, there should be no ORs, just simple
# reactions and ANDs (e.g., A^B=C). If "A+B=C" is found, this is coming
# from CellNOptR, which has a different convention. So, we replace
# all + by "^". Do we want a warning ?
count = 0
for reaction in self.df.columns:
if "+" in reaction:
# todo: use logging
if self.verbose and count == 0:
sys.stdout.write("Warning in Models. found a + sign... in %s. Interepreted as ^" % reaction)
count = 1
def convert(x):
from cno import Reaction
r = Reaction(x)
r.sort()
name = r.name
name = name.replace("+", "^")
return name
self.df.columns = [convert(x) for x in self.df.columns]
# we also reorder alphabetically the species in the and reactions
# keep this import here to avoid cycling imports
from cno.io.cnograph import CNOGraph
from cno.io import Reaction
self.cnograph = CNOGraph()
non_reactions = []
for this in self.df.columns:
try:
reac = Reaction(str(this))
self.cnograph.add_reaction(str(this))
except:
if self.verbose:
sys.stdout.write('Skipping column %s (not valid reaction ?)' % this)
non_reactions.append(this)
#self.non_reactions = non_reactions
#self.df_non_reactions = self.df[non_reactions].copy()
def drop_scores_above(self, tolerance=None):
max_score = self.scores.min() * (1+tolerance)
index = self.df.ix[self.scores<=max_score].index
self.df = self.df.ix[index]
self.scores = self.scores.ix[index]
def get_average_model(self, max_score=None):
"""Returns the average model (on each reaction)"""
if max_score is None:
return self.df.mean(axis=0)
else:
#filter scores below some value
N = float(sum(self.scores<=max_score))
sys.stdout.write('Keeping %s percent of the models' % str( N /len(self.scores)*100.))
return self.df.ix[self.scores<=max_score].mean(axis=0)
def to_csv(self, filename, index=False):
"""Exports the dataframe to a CSV file"""
try:
self.df['score'] = self.scores.values
except:
self.df['score'] = self.scores
self.df.to_csv(filename, index=False)
del self.df['score']
def to_sif(self, filename=None):
"""Exports 2 SIF using the "and" convention
can read the results with CellNOptR for instance::
library(CellNOptR)
plotModel(readSIF("test.sif"))
"""
return self.cnograph.to_sif(filename)
def __eq__(self, other):
if len(self.df) != len(other.df):
return False
df1 = self.df.copy()
df2 = other.df.copy()
if all(df1.columns != df2.columns):
return False
# make sure the columns are ordered similarly
df2 = df2[df1.columns]
return all(df1.sort() == df2.sort())
def __len__(self):
return len(self.df)
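# Editor's sketch (not part of the original module): building a Models
# instance from an in-memory dataframe. Columns are reaction names, values
# are 0/1 and an optional 'score' column is picked up as the .scores
# attribute. The reaction names below are made up for illustration only.
def _example_models_from_dataframe():
    df = pd.DataFrame({
        "A=B": [1, 0, 1],
        "A^C=D": [1, 1, 0],
        "score": [0.1, 0.2, 0.3],
    })
    return Models(df)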
class ContinousModels(Models):
def __init__(self, data, reacID=None, index_col=None):
super(ContinousModels, self).__init__(data, reacID, index_col)
def drop_duplicates(self):
self.df['score'] = self.scores
self.df.drop_duplicates(inplace=True)
self.scores = self.df['score']
del self.df['score']
class FuzzyModels(Models):
def __init__(self, data, reacID=None, index_col=None):
super(FuzzyModels, self).__init__(data, reacID, index_col)
def copy(self):
return FuzzyModels(self)
class BooleanModels(Models):
"""Class to read and plot models as exported by CASPO or CellNOptR
Models contains a dataframe with reactions as columns and models as rows.
For each reaction, we can then obtain its average parameters.
In the boolean case, a model stores a value made of 0/1.
Scores may be available. No sizes are stored; sizes can be extracted easily
as the sum over rows.
::
>>> from cno.core.models import Models
>>> m = Models()
>>> m.plot() # average model, which can be obtained with m.get_average_model()
>>> m.plot(model_number=0) # indices are m.df.index
.. note:: One difficulty is the way ANDs are coded in different software. In CASPO,
the AND gate is coded as "A+B=C". Note that internally we use ^ especially
in CNOGraph. Then, an AND edge is splitted in sub edges. so, A+B=C is made
of 3 edges A -> A+B=C , B -> A+B=C and A+B=C -> C. This explains the wierd
code in :meth:`cno.io.cnograph.plot`.
- plots average models with edges on/off
- plot of errobars on edges sorted by average presence
- plots heatmap of the models
"""
def __init__(self, data, reacID=None, index_col=None):
"""
If you have a first column which is not a reaction, set index_col to 0.
.. todo:: values are 0/1 since we have bit strings but could be anything in other
formalisms (e.g., ODEs); how to handle those cases ?
:param data: a filename with columns as the reactions and rows as
parameters for each reaction. Each row is therefore a model.
"""
super(BooleanModels, self).__init__(data, reacID, index_col)
def get_cv_model(self):
"""Returns the average coefficient of variation on each reaction"""
res = self.df.std(axis=0)/self.df.mean(axis=0)
res = res.fillna(0)
return res
def compute_average(self, model_number=None, tolerance=None):
"""Compute the average and update the cnograph accordingly
:param int model_number: model_number as shown by :attr:`df.index`
if not provided, the average is taken
"""
if model_number is None and tolerance is None:
model = self.get_average_model()
elif model_number == 'cv':
model = self.get_cv_model()
elif tolerance is not None:
model = self.get_average_model(max_score = self.scores.min() * (1.+tolerance))
if len(model) == 0:
raise ValueError('No model found within that tolerance')
else:
model = self.df.ix[model_number]
# This is to set the average and label and penwidth
# TODO: could be simplified using Reaction ?
for edge in self.cnograph.edges(data=True):
link = edge[2]['link']
if and_symbol not in edge[0] and and_symbol not in edge[1]:
if link == "-" :
name = "!" + edge[0] + "=" + edge[1]
else:
name = edge[0] + "=" + edge[1]
value = model[name]
elif and_symbol in edge[0]:
value = model[edge[0]]
elif and_symbol in edge[1]:
value = model[edge[1]]
else:
raise ValueError()
self.cnograph.edge[edge[0]][edge[1]]["label"] = precision(value)
self.cnograph.edge[edge[0]][edge[1]]["average"] = precision(value)
# if values are between 0 and 1
M = float(model.max())
self.cnograph.edge[edge[0]][edge[1]]["penwidth"] = precision(value, 2) * 5/M
def plot(self, model_number=None, cmap='gist_heat_r',
colorbar=True, tolerance=None, filename=None, **kargs):
"""Plot the average model"""
self.compute_average(model_number=model_number, tolerance=tolerance)
self.cnograph.plot(edge_attribute="average", cmap=cmap,
colorbar=colorbar, filename=filename, **kargs)
def errorbar(self, tolerance=1e8, errorbar=True):
"""Plot the average presence of reactions over all models"""
try:
df = self.df.ix[self.scores<=self.scores.min()*(1+tolerance)]
except:
df = self.df[(self.scores<=self.scores.min()*(1+tolerance)).values]
mu = df.mean()
mu.sort(inplace=True)
sigma = df.std()
pylab.clf()
X = range(0,len(mu.index))
if errorbar is True:
errorbar = 1
else:
errorbar = 0
pylab.errorbar(X, mu.values, yerr=sigma.ix[mu.index].values*errorbar,
marker='x', color='r', lw=0, elinewidth=2, ecolor='b')
pylab.xticks(X, mu.index, rotation=90)
pylab.title('')
pylab.grid()
pylab.ylim([-0.1, 1.1])
#pylab.xlim([-0.5, len(X)+.5])
pylab.tight_layout()
return df
def heatmap(self, num=1, transpose=False, cmap='gist_heat_r', heatmap_attr={}):
""" """
#df = self.get_average_models()
from biokit.viz.heatmap import Heatmap
if transpose:
df = self.df.transpose()
else:
df = self.df
h = Heatmap(df)
h.plot(cmap=cmap,num=num, **heatmap_attr)
return h
def __add__(self, other):
import pandas as pd
df = pd.concat([self.df, other.df])
df.drop_duplicates(inplace=True)
return Models(df)
def __str__(self):
txt = "Models contains {0} rows".format(len(self))
return txt
def copy(self):
return BooleanModels(self)
def _get_sizes(self):
return self.df.sum(axis=1)
sizes = property(_get_sizes)
def drop_duplicates(self):
self.df['score'] = self.scores
self.df.drop_duplicates(inplace=True)
self.scores = self.df['score']
del self.df['score']
def get_main_reactions(self, threshold=0.5):
reactions = list(self.df.columns[self.df.mean() > threshold])
reactions = [x.replace('+','^') for x in reactions]
return reactions
def get_consensus_model(self, threshold=0.5):
df = self.df.ix[self.scores<=self.scores.min()*(1.)]
reactions = list(df.mean()[df.mean() > threshold].index)
return reactions
def get_jaccard(self, progress=True):
import sklearn.metrics
N = len(self.df)
J = np.zeros((N,N))
from easydev import progress_bar
pb = progress_bar(N)
for ic, i in enumerate(self.df.index):
for jc, j in enumerate(self.df.index):
J[ic][jc] = sklearn.metrics.jaccard_similarity_score(self.df.ix[i], self.df.ix[j])
pb.animate(1+ic)
return J
class DTModels(BooleanModels):
def __init__(self, data, reacID=None, index_col=None):
super(DTModels, self).__init__(data, reacID, index_col)
def copy(self):
return DTModels(self)
class CompareTwoModels(object):
"""
"""
def __init__(self, m1, m2):
"""
:param m1: first model as a Pandas time series e.g. row of BooleanModels
:param m2: second model as a Pandas time series e.g. row of BooleanModels
:return:
From a Models instance: m1 = pd.TimeSeries(models.df.ix[0], dtype=int)
m2 = pd.TimeSeries(models.df.ix[1], dtype=int)
"""
self.m1 = m1
self.m2 = m2
assert all(self.m1.index == self.m2.index) == True
self.midas = None
def get_intersection(self):
return self.m1[np.logical_and(self.m1, self.m2)]
def get_union(self):
return self.m1[np.logical_or(self.m1 , self.m2)]
def get_both(self):
return self.get_intersection()
def get_m1_only(self):
return self.m1[np.logical_and(self.m1==1, self.m2==0)]
def get_m2_only(self):
return self.m2[np.logical_and(self.m1==0, self.m2==1)]
def get_both_off(self):
return self.m2[np.logical_and(self.m1==0, self.m2==0)]
def plot_multigraph(self, cmap='jet'):
sys.stdout.write('plot_multigraph may not work as expected. Experimental')
from cno.io.multigraph import CNOGraphMultiEdges
#from cno import CNOGraph
from cno import Reaction
c = CNOGraphMultiEdges()
c.midas = self.midas
for reaction in self.get_both().index:
sys.stdout.write(str(reaction))
r = Reaction(reaction)
r.sort()
sys.stdout.write(str(c.reac2edges(r.name)))
for this in c.reac2edges(r.name):
try:
edge1, edge2, link = this
except:
edge1,edge2 = this
link = "+"
c.add_edge(edge1, edge2, link=link, edgecolor=.1,
color='black', penwidth=6, label='both')
for reaction in self.get_m1_only().index:
r = Reaction(reaction)
r.sort()
for this in c.reac2edges(r.name):
try:
edge1, edge2, link = this
except:
edge1, edge2 = this
link = "+"
c.add_edge(edge1, edge2, link=link, edgecolor=.3,
label='m1', color='red', penwidth=3)
for reaction in self.get_m2_only().index:
r = Reaction(reaction)
r.sort()
for this in c.reac2edges(r.name):
try:
edge1, edge2, link = this
except:
edge1, edge2 = this
link = "+"
c.add_edge(edge1, edge2, link=link, edgecolor=.5,
label='m2', color='green', penwidth=3)
for reaction in self.get_both_off().index:
r = Reaction(reaction)
r.sort()
for this in c.reac2edges(r.name):
try:
edge1, edge2, link = this
except:
edge1, edge2 = this
link = "+"
c.add_edge(edge1, edge2, link=link, edgecolor=.9,
label='', arrowsize=0, color='gray', penwidth=0)
#c.plot(edge_attribute='edgecolor', cmap=cmap)
c.plot()
return c
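# Editor's sketch (not part of the original module): comparing two rows of a
# BooleanModels instance, following the recipe given in the CompareTwoModels
# docstring above (old-style pandas .ix indexing, as used throughout this
# module).
def _example_compare_two_models(models):
    m1 = pd.Series(models.df.ix[0], dtype=int)
    m2 = pd.Series(models.df.ix[1], dtype=int)
    comparison = CompareTwoModels(m1, m2)
    return comparison.get_both(), comparison.get_m1_only(), comparison.get_m2_only()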
class MultiModels(object):
pass
|
cellnopt/cellnopt
|
cno/core/models.py
|
Python
|
bsd-2-clause
| 17,116
|
import logging
l = logging.getLogger('simuvex.s_action')
import claripy
import functools
#pylint:disable=unidiomatic-typecheck
_noneset = frozenset()
def _raw_ast(a):
if type(a) is SimActionObject:
return a.ast
elif type(a) is dict:
return { k:_raw_ast(a[k]) for k in a }
elif type(a) in (tuple, list, set, frozenset):
return type(a)((_raw_ast(b) for b in a))
else:
return a
def _all_objects(a):
if type(a) is SimActionObject:
yield a
elif type(a) is dict:
for b in a.itervalues():
for o in _all_objects(b):
yield o
elif type(a) in (tuple, list, set, frozenset):
for b in a:
for o in _all_objects(b):
yield o
def ast_stripping_op(f, *args, **kwargs):
new_args = _raw_ast(args)
new_kwargs = _raw_ast(kwargs)
return f(*new_args, **new_kwargs)
def ast_preserving_op(f, *args, **kwargs):
tmp_deps = frozenset.union(_noneset, *(a.tmp_deps for a in _all_objects(args)))
reg_deps = frozenset.union(_noneset, *(a.reg_deps for a in _all_objects(args)))
a = ast_stripping_op(f, *args, **kwargs)
if isinstance(a, claripy.ast.Base):
return SimActionObject(a, reg_deps=reg_deps, tmp_deps=tmp_deps)
else:
return a
def ast_stripping_decorator(f):
@functools.wraps(f)
def ast_stripper(*args, **kwargs):
new_args = _raw_ast(args)
new_kwargs = _raw_ast(kwargs)
return f(*new_args, **new_kwargs)
return ast_stripper
class SimActionObject(object):
"""
A SimActionObject tracks an AST and its dependencies.
"""
def __init__(self, ast, reg_deps=None, tmp_deps=None):
if type(ast) is SimActionObject:
raise SimActionError("SimActionObject inception!!!")
self.ast = ast
self.reg_deps = _noneset if reg_deps is None else reg_deps
self.tmp_deps = _noneset if tmp_deps is None else tmp_deps
def __repr__(self):
return '<SAO {}>'.format(self.ast)
def __getstate__(self):
return self.ast, self.reg_deps, self.tmp_deps
def __setstate__(self, data):
self.ast, self.reg_deps, self.tmp_deps = data
def _preserving_unbound(self, f, *args, **kwargs):
return ast_preserving_op(f, *((self,) + tuple(args)), **kwargs)
def _preserving_bound(self, f, *args, **kwargs): #pylint:disable=no-self-use
return ast_preserving_op(f, *args, **kwargs)
def __getattr__(self, attr):
if attr == '__slots__':
raise AttributeError("not forwarding __slots__ to AST")
f = getattr(self.ast, attr)
if hasattr(f, '__call__'):
return functools.partial(self._preserving_bound, f)
elif isinstance(f, claripy.ast.Base):
return SimActionObject(f, reg_deps=self.reg_deps, tmp_deps=self.tmp_deps)
else:
return f
def __len__(self):
return len(self.ast)
def to_claripy(self):
return self.ast
def copy(self):
return SimActionObject(self.ast, self.reg_deps, self.tmp_deps)
#
# Overload the operators
#
def _operator(cls, op_name):
def wrapper(self, *args, **kwargs):
# TODO: don't always get from BV, we'll run into issues...
return self._preserving_unbound(getattr(claripy.ast.BV, op_name), *args, **kwargs)
wrapper.__name__ = op_name
setattr(cls, op_name, wrapper)
def make_methods():
for name in claripy.operations.expression_operations | { '__getitem__' } :
_operator(SimActionObject, name)
make_methods()
from .s_errors import SimActionError
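# Editor's sketch (not part of the original module): wrapping claripy ASTs in
# SimActionObjects and combining them. Operators are forwarded to claripy by
# the wrappers installed in make_methods(), and the register/temp dependencies
# of every operand are merged into the result. The dependency values used
# below are arbitrary.
def _example_sim_action_objects():
    a = SimActionObject(claripy.BVV(1, 32), reg_deps=frozenset([16]))
    b = SimActionObject(claripy.BVV(2, 32), tmp_deps=frozenset([3]))
    c = a + b  # ast_preserving_op merges reg_deps and tmp_deps
    return c.ast, c.reg_deps, c.tmp_deps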
|
chubbymaggie/simuvex
|
simuvex/s_action_object.py
|
Python
|
bsd-2-clause
| 3,613
|
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from app.myblog.models import Article
from django.db.models import Count, StdDev
from dss import Serializer
from app.blog_lab.proxy.huiyuan import play
class Command(BaseCommand):
def handle(self, *args, **options):
# art_list = Article.objects.all()
# # for itm in art_list:
# # print itm.caption__max
# print Serializer.serializer(art_list, except_attr=('content', 'caption', 'classification', 'publish'))
play()
|
madarou/angular-django
|
app/blog_lab/management/commands/play.py
|
Python
|
bsd-2-clause
| 541
|
from importlib import import_module
from ..exceptions import UnknownSource, SourceNotSupported
JSON_SCHEMA_NS = 'http://json-schema.org/draft-04/schema#'
SOURCES = {
'sqlite': 'prov_extractor.sources.sqlite',
'postgresql': 'prov_extractor.sources.postgresql',
'delimited': 'prov_extractor.sources.delimited',
'filesystem': 'prov_extractor.sources.filesystem',
'excel': 'prov_extractor.sources.excel',
'mongodb': 'prov_extractor.sources.mongodb',
'mysql': 'prov_extractor.sources.mysql',
'oracle': 'prov_extractor.sources.oracle',
'vcf': 'prov_extractor.sources.vcf',
'redcap-mysql': 'prov_extractor.sources.redcap_mysql',
'redcap-api': 'prov_extractor.sources.redcap_api',
'redcap-csv': 'prov_extractor.sources.redcap_csv',
'harvest': 'prov_extractor.sources.harvest',
'datadict': 'prov_extractor.sources.datadict',
'github-issues': 'prov_extractor.sources.github_issues',
'git': 'prov_extractor.sources.git',
}
SOURCE_ALIASES = {
'redcap': {
'source': 'redcap-mysql',
},
'csv': {
'source': 'delimited',
'options': {
'delimiter': ',',
}
},
'tab': {
'source': 'delimited',
'options': {
'delimiter': '\t',
}
}
}
_modules = {}
def get_module(name):
"Attempts to import a source by name."
if name in SOURCE_ALIASES:
name = SOURCE_ALIASES[name]['source']
if name in _modules:
return _modules[name]
module = SOURCES.get(name)
if not module:
raise UnknownSource(name)
try:
module = import_module(module)
except ImportError as e:
raise SourceNotSupported(str(e))
_modules[name] = module
return module
def get(name):
"Returns the client class of a source."
return get_module(name).Client
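# Editor's sketch (not part of the original module): resolving a client class
# through an alias. 'csv' is an alias for the 'delimited' source; get() then
# imports that module and returns its Client class, raising UnknownSource for
# unregistered names or SourceNotSupported if the backing driver is missing.
def _example_get_csv_client():
    try:
        return get('csv')
    except (UnknownSource, SourceNotSupported):
        return None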
|
chop-dbhi/prov-extractor
|
prov_extractor/sources/__init__.py
|
Python
|
bsd-2-clause
| 1,847
|
from classifierBase import *
from PyQt4.QtGui import QInputDialog
#*******************************************************************************
# C l a s s i f i e r R a n d o m F o r e s t *
#*******************************************************************************
class ClassifierRandomForest(ClassifierBase):
#human readable information
name = "Random forest classifier"
description = "Random forest classifier with extensions"
author = "HCI, University of Heidelberg"
homepage = "http://hci.iwr.uni-heidelberg.de"
#minimum required isotropic context
#0 means pixel based classification
#-1 means whole dataset
minContext = 0
#The _total_ number of trees.
#For performance reason, the classifier is split up into self.numWorkers
#parts.
treeCount = 100
def __init__(self):
ClassifierBase.__init__(self)
@classmethod
def settings(cls):
(number, ok) = QInputDialog.getInt(None, "Random Forest parameters", "Number of trees", cls.treeCount, 20, 255)
if ok:
cls.treeCount = number
print "setting number of trees to", cls.treeCount
def train(self, features, labels, isInteractive):
assert self.numWorkers > 0, "Need at least one worker. Use setWorker() method..."
thisTreeCount = int(self.treeCount/self.numWorkers)
if(self.workerNumber == self.numWorkers-1):
thisTreeCount += int(self.treeCount % self.numWorkers)
#print "RandomForest training [%d of %d] with %d trees" % (self.workerNumber, self.numWorkers, thisTreeCount)
self.RF = None
if features is None:
return
if features.shape[0] != labels.shape[0]:
# #features != # labels"
return
if not labels.dtype == numpy.uint32:
labels = labels.astype(numpy.uint32)
if not features.dtype == numpy.float32:
features = features.astype(numpy.float32)
if labels.ndim == 1:
labels.shape = labels.shape + (1,)
self.unique_vals = numpy.unique(labels)
# Have to set this because the new RF doesn't set mtry properly by default
mtry = max(1,int(numpy.sqrt(features.shape[1]))+1)
self.RF = vigra.learning.RandomForest(treeCount=thisTreeCount)
oob = self.RF.learnRF(features, labels)
ClassifierBase.printLock.acquire()
print "Out-of-bag error %4.3f" % oob
ClassifierBase.printLock.release()
def predict(self, features):
#3d: check that only 1D data arrives here
if self.RF is not None and features is not None and len(self.unique_vals) > 1:
if not features.dtype == numpy.float32:
features = numpy.array(features, dtype=numpy.float32)
return self.RF.predictProbabilities(features)
else:
return None
def serialize(self, fileName, pathInFile, overwriteFlag=False):
# cannot serialize into grp because we cannot pass an h5py handle to vigra yet
# works only with new RF version
self.RF.writeHDF5(fileName, pathInFile, overwriteFlag)
@classmethod
def deserialize(cls, h5G):
"""FIXME: we do not load the complete random forest here, but require the user to re-train
after loading the project file. The only thing we do load is the total number of
trees (in a very hackish way)."""
thisForestPath = h5G.name
allForestsPath = thisForestPath[0:thisForestPath.rfind("/")]
if allForestsPath in h5G.file:
trees = h5G.file[allForestsPath].keys()
treeCount = 0
for t in trees:
#print "before"
#print allForestsPath+"/"+t+"/_options/tree_count_"
#print "after"
treeCount += h5G.file[allForestsPath+"/"+t+"/_options/tree_count_"][0]
print "total number of RandomForest trees = ", treeCount
ClassifierRandomForest.treeCount = treeCount
return 0
@classmethod
def loadRFfromFile(cls, fileName, pathInFile):
classifier = cls()
classifier.RF = vigra.learning.RandomForest(fileName, pathInFile)
classifier.treeCount = classifier.RF.treeCount()
classifier.unique_vals = range(classifier.RF.labelCount())
return classifier
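# Editor's sketch (not part of the original module): how the total tree count
# is divided across workers, mirroring the arithmetic in train() above. Each
# worker gets an equal share and the last worker absorbs the remainder, so the
# per-worker counts always sum to treeCount.
def _example_tree_split(tree_count=100, num_workers=3):
    counts = [int(tree_count / num_workers)] * num_workers
    counts[-1] += int(tree_count % num_workers)
    return counts  # e.g. [33, 33, 34] for 100 trees on 3 workers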
|
ilastik/ilastik-0.5
|
ilastik/modules/classification/core/classifiers/classifierRandomForest.py
|
Python
|
bsd-2-clause
| 4,475
|
#!/usr/bin/env python
# Command line arguments
# 1: taxon.txt
# 2: directory in which to put taxonomy.tsv and synonyms.tsv
col = {"taxonID": 0,
"parentNameUsageID": 1,
"acceptedNameUsageID": 2,
"canonicalName": 3,
"taxonRank": 4,
"taxonomicStatus": 5,
"nameAccordingTo": 6}
not_doubtful = {
8407745: "Hierococcyx"
}
import sys, os, json
from collections import Counter
"""
ignore.txt should include a list of ids to ignore, all of their children
should also be ignored but do not need to be listed
"""
incertae_sedis_kingdom = 0
def process_gbif(inpath, outdir):
col_acceptedNameUsageID = col['acceptedNameUsageID']
col_taxonID = col['taxonID']
col_canonicalName = col['canonicalName']
col_nameAccordingTo = col['nameAccordingTo']
col_taxonomicStatus = col['taxonomicStatus']
col_taxonRank = col['taxonRank']
col_parentNameUsageID = col['parentNameUsageID']
to_ignore = [] # stack
to_ignore.append(incertae_sedis_kingdom) #kingdom incertae sedis
infile = open(inpath,"r")
outfile = open(os.path.join(outdir, "taxonomy.tsv"), "w")
outfilesy = open(os.path.join(outdir, "synonyms.tsv"), "w")
infile_taxon_count = 0
infile_synonym_count = 0
count = 0
bad_id = 0
no_parent = 0
parent ={} #key is taxon id, value is the parent
children ={} #key is taxon id, value is list of children (ids)
nm_storage = {} #key is taxon id, value is the name
nrank = {} #key is taxon id, value is rank
synnames = {} #key is synonym id, value is name
syntargets = {} #key is synonym id, value is taxon id of target
syntypes = {} #key is synonym id, value is synonym type
to_remove = [] #list of ids
paleos = [] #ids that come from paleodb
flushed_because_source = 0
print "taxa synonyms no_parent"
for row in infile:
fields = row.split('\t')
# For information on what information is in each column see
# meta.xml in the gbif distribution.
if fields[0] == 'taxonID': continue # header row (2013 format)
# acceptedNameUsageID
syn_target_id_string = fields[col_acceptedNameUsageID].strip()
synonymp = syn_target_id_string.isdigit()
if synonymp:
infile_synonym_count += 1
else:
infile_taxon_count += 1
id_string = fields[col_taxonID].strip()
if len(id_string) == 0 or not id_string.isdigit():
# Header line has "taxonID" here
bad_id += 1
continue
id = int(id_string)
name = fields[col_canonicalName].strip()
if name == '':
bad_id += 1
continue
source = fields[col_nameAccordingTo].strip()
tstatus = fields[col_taxonomicStatus].strip() # taxonomicStatus
# Filter out IRMNG and IPNI tips
if (("IRMNG Homonym" in source) or
("Interim Register of Marine" in source) or
("International Plant Names Index" in source) or
# Blah. See http://www.gbif.org/dataset/d9a4eedb-e985-4456-ad46-3df8472e00e8
(source == "d9a4eedb-e985-4456-ad46-3df8472e00e8")):
flushed_because_source += 1
if synonymp:
continue
else:
to_remove.append(id)
elif synonymp:
synnames[id] = name
syntargets[id] = int(syn_target_id_string)
syntypes[id] = tstatus # heterotypic synonym, etc.
continue
elif ("Paleobiology Database" in source) or (source == "c33ce2f2-c3cc-43a5-a380-fe4526d63650"):
paleos.append(id)
if tstatus == 'synonym' or (tstatus == 'doubtful' and not id in not_doubtful):
to_remove.append(id)
continue
if tstatus != 'accepted': # doesn't happen
print id, name, tstatus, source
rank = fields[col_taxonRank].strip()
if rank == "form" or rank == "variety" or rank == "subspecies" or rank == "infraspecificname":
to_ignore.append(id)
parent_id_string = fields[col_parentNameUsageID].strip()
if len(parent_id_string) == 0 and rank != 'kingdom':
no_parent += 1
continue
# Past all the filters, time to store
nm_storage[id] = name
nrank[id] = rank
if len(parent_id_string) > 0:
parent_id = int(parent_id_string)
parent[id] = parent_id
if parent_id not in children:
children[parent_id] = [id]
else:
children[parent_id].append(id)
count += 1
if count % 100000 == 0:
print count, len(synnames), no_parent
infile.close()
print ('%s taxa, %s synonyms\n' % (infile_taxon_count, infile_synonym_count))
print ('%s bad id; %s no parent id; %s synonyms; %s bad source' %
(bad_id, no_parent, len(synnames), flushed_because_source))
# Parent/child homonyms now get fixed by smasher
# Flush terminal taxa from IRMNG and IPNI (OTT picks up IRMNG separately)
count = 0
for id in to_remove:
if (not id in children): # and id in nrank and nrank[id] != "species":
if id in nm_storage:
del nm_storage[id]
# should remove from children[parent[id]] too
count += 1
print "tips removed (IRMNG and IPNI):", count
# Put parentless taxa into the ignore list.
# This isn't really needed any more; smasher can cope with multiple roots.
count = 0
for id in nm_storage:
if id in parent and parent[id] not in nm_storage:
count += 1
if parent[id] != 0:
to_ignore.append(id)
if count % 1000 == 0:
print "example orphan ",id,nm_storage[id]
print "orphans to be pruned: ", count
# Now delete the taxa-to-be-ignored and all of their descendants.
if len(to_ignore) > 0:
print 'pruning %s taxa' % len(to_ignore)
seen = {}
stack = to_ignore
while len(stack) != 0:
curid = stack.pop()
if curid in seen:
continue
seen[curid] = True
if curid in children:
for id in children[curid]:
stack.append(id)
for id in seen:
if id in nm_storage:
del nm_storage[id]
"""
output the id parentid name rank
"""
print "writing %s taxa" % len(nm_storage)
outfile.write("uid\t|\tparent_uid\t|\tname\t|\trank\t|\t\n")
count = 0
for id in nm_storage:
parent_id = ""
if id == incertae_sedis_kingdom:
print "kingdom incertae sedis should have been deleted by now"
elif id in parent:
parent_id = str(parent[id])
elif nrank[id] == 'kingdom':
parent_id = "0"
outfile.write("%s\t|\t%s\t|\t%s\t|\t%s\t|\t\n" %
(id, parent_id, nm_storage[id], nrank[id]))
count += 1
if count % 100000 == 0:
print count
outfile.write("0\t|\t\t|\tlife\t|\t\t|\t\n")
outfile.close()
print "writing %s synonyms" % len(synnames)
outfilesy.write('uid\t|\tname\t|\ttype\t|\t\n')
for id in synnames:
target = syntargets[id] # taxon id of target (int)
if target in nm_storage:
outfilesy.write('%s\t|\t%s\t|\t%s\t|\t\n' %
(target, synnames[id], syntypes[id]))
outfilesy.close()
print 'writing %s paleodb ids' % len(paleos)
paleofile = open(os.path.join(outdir, 'paleo.tsv'), 'w')
for id in paleos:
paleofile.write(('%s\n' % id))
paleofile.close()
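# Editor's sketch (not part of the original script): the descendant-pruning
# technique used above, in isolation. Given a children map ({parent_id:
# [child_ids]}) and a list of root ids to ignore, collect every descendant
# with an explicit stack so that deep trees do not hit the recursion limit.
def _example_prune(children, to_ignore):
    seen = {}
    stack = list(to_ignore)
    while stack:
        curid = stack.pop()
        if curid in seen:
            continue
        seen[curid] = True
        stack.extend(children.get(curid, []))
    return seen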
if __name__ == "__main__":
if len(sys.argv) != 3:
print "** Arg count"
print "python process_ottol_taxonomy.py taxa.txt ignore.txt outfile"
sys.exit(0)
inpath = sys.argv[1]
outdir = sys.argv[2]
process_gbif(inpath, outdir)
|
OpenTreeOfLife/reference-taxonomy
|
import_scripts/gbif/process_gbif_taxonomy.py
|
Python
|
bsd-2-clause
| 8,023
|
# -*- coding: UTF-8 -*-
# Copyright 2012 Luc Saffre
# License: BSD (see file COPYING for details)
from lino.utils.instantiator import Instantiator, i2d
from lino.core.utils import resolve_model
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from lino.api import dd
def objects():
mailType = Instantiator('notes.NoteType').build
yield mailType(**dd.babel_values('name',
en="Enrolment",
fr=u'Inscription', de=u"Einschreibeformular"))
yield mailType(**dd.babel_values('name',
en="Timetable",
fr=u'Horaire', de=u"Stundenplan"))
yield mailType(**dd.babel_values('name',
en="Letter",
fr=u'Lettre', de=u"Brief"))
|
khchine5/book
|
lino_book/projects/homeworkschool/fixtures/std.py
|
Python
|
bsd-2-clause
| 876
|
__author__ = 'aj'
|
cfrs2005/flask_blog
|
lib/__init__.py
|
Python
|
bsd-2-clause
| 18
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division
import astropy.wcs.utils
from astropy.wcs import WCSSUB_CELESTIAL
from .frames import *
__all__ = ['solar_wcs_frame_mapping']
def solar_wcs_frame_mapping(wcs):
"""
This function registers the coordinates frames to their FITS-WCS coordinate
type values in the `astropy.wcs.utils.wcs_to_celestial_frame` registry.
"""
# SunPy Map adds some extra attributes to the WCS object.
# We check for them here, and default to None.
dateobs = wcs.wcs.dateobs if wcs.wcs.dateobs else None
hglon = None
hglat = None
dsun = None
if hasattr(wcs, 'heliographic_longitude'):
hglon = wcs.heliographic_longitude
if hasattr(wcs, 'heliographic_latitude'):
hglat = wcs.heliographic_latitude
if hasattr(wcs, 'dsun'):
dsun = wcs.dsun
# First we try the Celestial sub, which rectifies the order.
# It will return anything matching ??LN*, ??LT*
wcss = wcs.sub([WCSSUB_CELESTIAL])
# If the SUB works, use it.
if wcss.naxis == 2:
wcs = wcss
xcoord = wcs.wcs.ctype[0][0:4]
ycoord = wcs.wcs.ctype[1][0:4]
if xcoord == 'HPLN' and ycoord == 'HPLT':
return Helioprojective(dateobs=dateobs, L0=hglon, B0=hglat, D0=dsun)
if xcoord == 'HGLN' and ycoord == 'HGLT':
return HeliographicStonyhurst(dateobs=dateobs)
if xcoord == 'CRLN' and ycoord == 'CRLT':
return HeliographicCarrington(dateobs=dateobs)
if xcoord == 'SOLX' and ycoord == 'SOLY':
return Heliocentric(dateobs=dateobs, L0=hglon, B0=hglat, D0=dsun)
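# Editor's sketch (not part of the original module, assuming a plain 2D
# helioprojective header): feed a minimal WCS through the mapping and expect
# a Helioprojective frame back. The extra SunPy attributes (heliographic
# longitude/latitude, dsun) are absent here, so the frame defaults apply.
def _example_helioprojective_frame():
    from astropy.wcs import WCS
    w = WCS(naxis=2)
    w.wcs.ctype = ['HPLN-TAN', 'HPLT-TAN']
    return solar_wcs_frame_mapping(w)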
astropy.wcs.utils.WCS_FRAME_MAPPINGS.append([solar_wcs_frame_mapping])
# The following is a patch for wcsaxes 0.6 and lower:
try:
import wcsaxes.wcs_utils
if hasattr(wcsaxes.wcs_utils, 'WCS_FRAME_MAPPINGS'):
wcsaxes.wcs_utils.WCS_FRAME_MAPPINGS.append([solar_wcs_frame_mapping])
except ImportError:
pass
# Now we try for heliocentric without the sub.
|
Alex-Ian-Hamilton/sunpy
|
sunpy/coordinates/wcs_utils.py
|
Python
|
bsd-2-clause
| 2,000
|
#!/usr/bin/env python
"""Test reading GO IDs from a file."""
import os
import sys
from goatools.base import get_godag
from goatools.gosubdag.gosubdag import GoSubDag
from goatools.grouper.grprdflts import GrouperDflts
from goatools.grouper.hdrgos import HdrgosSections
from goatools.grouper.grprobj import Grouper
from goatools.grouper.read_goids import read_sections
from goatools.grouper.wr_sections import WrSectionsTxt
from goatools.grouper.wr_sections import WrSectionsPy
# from goatools.cli.wr_sections import
from goatools.test_data.gjoneska_goea_consistent_increase import goea_results
REPO = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
def test_wr_sections_all():
"""Test that all sections files generated by wr_sections have the same content."""
f_sec_rd = os.path.join(REPO, "data/gjoneska_pfenning/sections_in.txt")
f_sec_wr = os.path.join(REPO, "tmp_test_sections_out.txt")
# Travis-CI path is cwd
f_sec_py = os.path.join(REPO, "tmp_test_sections.py")
# f_sec_mod = "tmp_test_sections"
# Read user GO IDs. Setup to write sections text file and Python file
usrgos = [getattr(nt, 'GO') for nt in goea_results]
sec_rd = _read_sections(f_sec_rd)
# Do preliminaries
godag = _get_godag()
gosubdag = GoSubDag(usrgos, godag, relationships=True, tcntobj=None)
grprdflt = _get_grprdflt(gosubdag)
# Exclude ungrouped "Misc." section of sections var(sec_rd)
hdrobj = HdrgosSections(gosubdag, grprdflt.hdrgos_dflt, sec_rd[:-1])
assert sec_rd[-1][0] == hdrobj.secdflt, sec_rd[-1][0]
grprobj = Grouper("test", usrgos, hdrobj, gosubdag)
# Create text and Python sections files
objsecwr = WrSectionsTxt(grprobj)
objsecwr.wr_txt_section_hdrgos(os.path.join(REPO, f_sec_wr))
objsecpy = WrSectionsPy(grprobj)
objsecpy.wr_py_sections(os.path.join(REPO, f_sec_py), sec_rd, doc=godag.version)
# Read text and Python sections files
sec_wr = _read_sections(f_sec_wr)
sec_py = _read_sections(f_sec_py)
# sec_mod = _read_sections(f_sec_mod)
# _chk_sections(sec_py, sec_mod)
# _chk_sections(sec_wr, sec_mod, hdrobj.secdflt)
def _get_godag():
"""Get GO DAG."""
fin = os.path.join(REPO, 'go-basic.obo')
return get_godag(fin, prt=None, loading_bar=False, optional_attrs=['relationship'])
def _get_grprdflt(gosubdag=None):
"""Get Grouper defaults."""
fin_slim = os.path.join(REPO, 'goslim_generic.obo')
return GrouperDflts(gosubdag, fin_slim)
def test_wr_sections_txt():
"""Group depth-02 GO terms under their most specific depth-01 GO parent(s)."""
# Get GOs to be grouped
usrgos = [getattr(nt, 'GO') for nt in goea_results]
# Read OBO files once to save time
grprdflt = _get_grprdflt()
# ------------------------------------------------------------------
# Print usrgos in txt (Do not use sections containing hdrgos)
# ------------------------------------------------------------------
# Show GO grouping hdrgos and usrgos to show how usrgos are grouped
_wr_sections_txt("a_sec0_hdr1.txt", usrgos, sections=None, grprdflt=grprdflt)
# ------------------------------------------------------------------
# Print usrgos in txt using sections containing hdrgos
# ------------------------------------------------------------------
sec1 = _read_sections("./data/gjoneska_pfenning/sections_in.txt")
_wr_sections_txt("a_sec1_hdr1.txt", usrgos, sec1, grprdflt=grprdflt)
# ------------------------------------------------------------------
sec2a = _read_sections("goatools/test_data/sections/gjoneska_pfenning.py")
_wr_sections_txt("b_sec1_hdr1.txt", usrgos, sec2a, grprdflt=grprdflt)
sec2b = _read_sections("goatools.test_data.sections.gjoneska_pfenning")
_wr_sections_txt("c_sec1_hdr1.txt", usrgos, sec2b, grprdflt=grprdflt)
_chk_sections(sec2a, sec2b)
def _chk_sections(sec_a, sec_b, hdrgos_dflt=None):
"""Do the two sections variables contain the same data?"""
if hdrgos_dflt:
assert sec_a[-1][0] == hdrgos_dflt, "EXP({}) ACT({})".format(hdrgos_dflt, sec_a[-1][0])
sec_a = sec_a[:-1]
print("EXP({}) ACT({})".format(hdrgos_dflt, sec_b[-1][0]))
# sec_b = sec_b[:-1]
assert len(sec_a) == len(sec_b), "LENGTH MISMATCH: {A} != {B}".format(
A=len(sec_a), B=len(sec_b))
for (name_a, gos_a), (name_b, gos_b) in zip(sec_a, sec_b):
assert name_a == name_b, "NAME MISMATCH: {A} != {B}".format(A=name_a, B=name_b)
assert gos_a == gos_b, "{NM} GO IDs MISMATCH: {A} != {B}".format(
NM=name_a, A=gos_a, B=gos_b)
def _read_sections(sec):
"""Get sections variable from file."""
if '/' in sec:
sec = os.path.join(REPO, sec)
var = read_sections(sec)
assert var, "EMPTY SECTIONS FILE({})".format(sec)
return var
def _wr_sections_txt(fout_txt, usrgos, sections, grprdflt):
"""Given a list of usrgos and sections, write text file."""
try:
hdrobj = HdrgosSections(grprdflt.gosubdag, grprdflt.hdrgos_dflt, sections=sections)
grprobj = Grouper(fout_txt, usrgos, hdrobj, grprdflt.gosubdag, go2nt=None)
full_txt = os.path.join(REPO, fout_txt)
WrSectionsTxt(grprobj).wr_txt_section_hdrgos(full_txt, sortby=None, prt_section=True)
assert os.path.exists(full_txt)
except RuntimeError as inst:
sys.stdout.write("\n **FATAL: {MSG}\n\n".format(MSG=str(inst)))
if __name__ == '__main__':
test_wr_sections_all()
test_wr_sections_txt()
|
tanghaibao/goatools
|
tests/test_wr_sections_txt.py
|
Python
|
bsd-2-clause
| 5,514
|
"""
Todo: I don't think this DHT knits data storage around broken nodes, but the fallback on the server should be a temporary work-around.
"""
import sys, os
import json
import random
import socket
import select
import hashlib
import re
try:
import socketserver
except:
import SocketServer
socketserver = SocketServer
import threading
import time
import uuid
import hashlib
from .bucketset import BucketSet
from .hashing import hash_function, random_id, id_from_addr
from .peer import Peer
from .shortlist import Shortlist
from .proof_of_work import ProofOfWork
k = 20
alpha = 3
id_bits = 256
iteration_sleep = 1
debug = 1
class DHTRequestHandler(socketserver.BaseRequestHandler):
def handle(self):
try:
with self.server.send_lock:
#Test alive for nodes in routing table.
main = self.server.dht
elapsed = time.time() - main.last_ping
if elapsed >= main.ping_interval:
for freshness in main.buckets.node_freshness:
#Check freshness.
elapsed = time.time() - freshness["timestamp"]
if elapsed < main.ping_interval:
break
#Store pending ping.
bucket_no = freshness["bucket_no"]
bucket = main.buckets.buckets[bucket_no]
node = freshness["node"]
magic = hashlib.sha256(str(uuid.uuid4()).encode("ascii")).hexdigest()
freshness["timestamp"] = time.time()
main.ping_ids[magic] = {
"node": node,
"timestamp": time.time(),
"bucket_no": bucket_no,
"freshness": freshness
}
#Send ping.
message = {
"message_type": "ping",
"magic": magic
}
peer = Peer(node[0], node[1], node[2])
peer._sendmessage(message, self.server.socket, peer_id=peer.id)
#Indicate freshness in ordering.
del main.buckets.node_freshness[0]
main.buckets.node_freshness.append(freshness)
break
#Refresh last ping.
main.last_ping = time.time()
#Record expired pings.
expired = []
for magic in list(main.ping_ids):
ping = main.ping_ids[magic]
elapsed = time.time() - ping["timestamp"]
if elapsed >= main.ping_expiry:
expired.append(magic)
#Timeout pending pings and remove old routing entries.
for magic in list(set(expired)):
try:
bucket_no = main.ping_ids[magic]["bucket_no"]
node = main.ping_ids[magic]["node"]
"""
Todo: there was an error here where the node wasn't found in the bucket. Not sure what could be causing this but this is a simple work-around.
After investigation: I think the problem is that when buckets get full the end node gets popped to make room. This could cause a recent node entry to no longer be available in the table when the line below is executed to try to remove it.
Another explanation is multiple outstanding pings for the same node?
"""
if node in main.buckets.buckets[bucket_no]:
main.buckets.buckets[bucket_no].remove(node)
#More cleanup stuff so new nodes can be added.
host, port, id = node
if host in main.buckets.seen_ips:
if port in main.buckets.seen_ips[host]:
main.buckets.seen_ips[host].remove(port)
if not len(main.buckets.seen_ips[host]):
del main.buckets.seen_ips[host]
if id in main.buckets.seen_ids:
del main.buckets.seen_ids[id]
#Added a check here just in case.
freshness = main.ping_ids[magic]["freshness"]
if freshness in main.buckets.node_freshness:
main.buckets.node_freshness.remove(freshness)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno, str(e))
finally:
del main.ping_ids[magic]
#Check for expired keys.
if main.store_expiry:
#Time to run check again?
elapsed = time.time() - main.last_store_check
if elapsed >= main.store_check_interval:
#Record expired keys.
expired = []
for key in list(main.data):
value = main.data[key]
elapsed = time.time() - value["timestamp"]
if elapsed >= main.store_expiry:
expired.append(key)
#Timeout expired keys.
for key in list(set(expired)):
del main.data[key]
#Reset last_store_check.
main.last_store_check = time.time()
#Handle replies and requests.
message = json.loads(self.request[0].decode("utf-8").strip())
message_type = message["message_type"]
if debug:
print(message)
if message_type == "ping":
self.handle_ping(message)
elif message_type == "pong":
self.handle_pong(message)
elif message_type == "find_node":
self.handle_find(message)
elif message_type == "find_value":
self.handle_find(message, find_value=True)
elif message_type == "found_nodes":
self.handle_found_nodes(message)
elif message_type == "found_value":
self.handle_found_value(message)
elif message_type == "store":
self.handle_store(message)
elif message_type == "push":
self.handle_push(message)
client_host, client_port = self.client_address
peer_id = message["peer_id"]
new_peer = Peer(client_host, client_port, peer_id)
self.server.dht.buckets.insert(new_peer)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno, str(e))
return
def handle_ping(self, message):
client_host, client_port = self.client_address
id = message["peer_id"]
peer = Peer(client_host, client_port, id)
peer.pong(magic=message["magic"], socket=self.server.socket, peer_id=self.server.dht.peer.id, lock=self.server.send_lock)
def handle_pong(self, message):
#Is this a valid nonce?
main = self.server.dht
magic = message["magic"]
if magic not in main.ping_ids:
return
#Has the right node replied?
client_host, client_port = self.client_address
id = message["peer_id"]
peer = Peer(client_host, client_port, id)
astriple = peer.astriple()
if main.ping_ids[magic]["node"] != astriple:
return
#Remove pending ping.
del main.ping_ids[magic]
def handle_find(self, message, find_value=False):
key = message["id"]
id = message["peer_id"]
client_host, client_port = self.client_address
peer = Peer(client_host, client_port, id)
response_socket = self.request[1]
if find_value and (key in self.server.dht.data):
value = self.server.dht.data[key]
peer.found_value(id, value, message["rpc_id"], socket=response_socket, peer_id=self.server.dht.peer.id, lock=self.server.send_lock)
else:
nearest_nodes = self.server.dht.buckets.nearest_nodes(id)
if not nearest_nodes:
nearest_nodes.append(self.server.dht.peer)
nearest_nodes = [nearest_peer.astriple() for nearest_peer in nearest_nodes]
peer.found_nodes(id, nearest_nodes, message["rpc_id"], socket=response_socket, peer_id=self.server.dht.peer.id, lock=self.server.send_lock)
def handle_found_nodes(self, message):
rpc_id = message["rpc_id"]
shortlist = self.server.dht.rpc_ids[rpc_id]
del self.server.dht.rpc_ids[rpc_id]
nearest_nodes = [Peer(*peer) for peer in message["nearest_nodes"]]
shortlist.update(nearest_nodes)
def handle_found_value(self, message):
rpc_id = message["rpc_id"]
shortlist = self.server.dht.rpc_ids[rpc_id]
#Verify key is correct.
expected_key = hash_function(message["value"]["id"].encode("ascii") + message["value"]["content"].encode("ascii"))
if shortlist.key != expected_key:
return
del self.server.dht.rpc_ids[rpc_id]
shortlist.set_complete(message["value"])
def handle_store(self, message):
key = message["id"]
#Check message hasn't expired.
if self.server.dht.store_expiry:
elapsed = time.time() - message["value"]["timestamp"]
if elapsed >= self.server.dht.store_expiry:
return
#Future timestamps are invalid.
if elapsed < 0:
return
        #Don't overwrite an existing entry for this key.
        if key in self.server.dht.data:
            return
        #Verify the key matches the hash of the value's id and content.
        expected_key = hash_function(message["value"]["id"].encode("ascii") + message["value"]["content"].encode("ascii"))
        if key != expected_key:
            return
#Content is too big.
if len(message["value"]["content"]) > self.server.dht.max_store_size:
return
#Check proof of work.
nonce = message["value"]["pow"]
message["value"]["pow"] = self.server.dht.pow_placeholder
if not self.server.dht.proof_of_work.is_valid(self.server.dht.value_to_str(message["value"]), nonce, self.server.dht.store_expiry):
return
message["value"]["pow"] = nonce
self.server.dht.data[key] = message["value"]
def handle_push(self, message):
pass
class DHTServer(socketserver.ThreadingMixIn, socketserver.UDPServer):
def __init__(self, host_address, handler_cls):
socketserver.UDPServer.__init__(self, host_address, handler_cls)
self.send_lock = threading.Lock()
class DHT(object):
def __init__(self, host, port, key, id=None, boot_host=None, boot_port=None, wan_ip=None):
#Send node pings to least fresh node every n seconds.
self.ping_interval = 10
#Time to reply to a ping in seconds.
self.ping_expiry = 10
#How long to store keys for in seconds.
#Zero for no limit.
self.store_expiry = (60 * 60) * 12
#How often in seconds to check for expired keys.
self.store_check_interval = 1 * 60
#How often to broadcast which bind port we've taken.
self.broadcast_interval = 1
#Max amount of data per key - important field.
self.max_store_size = 1024 * 5
#Survey network for active DHT instances.
self.broadcast_port = 31337
broadcast = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
broadcast.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
broadcast.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
broadcast.bind(('', self.broadcast_port))
broadcast.setblocking(0)
#Wait for reply.
waited = 0
sleep_interval = 1
observed_ports = []
while waited < (self.broadcast_interval * 2) + 1:
waited += sleep_interval
r, w, e = select.select([broadcast], [], [], 0)
for s in r:
msg = s.recv(1024)
msg = msg.decode("utf-8")
ret = re.findall("^SPYDHT BIND ([0-9]+)$", msg)
if ret:
observed_port, = ret
observed_port = int(observed_port)
if observed_port not in observed_ports:
observed_ports.append(observed_port)
time.sleep(sleep_interval)
#Are there any valid ports left?
self.valid_bind_ports = [31000, 31001] #Per LAN.
allowed_ports = self.valid_bind_ports.copy()
for observed_port in observed_ports:
allowed_ports.remove(observed_port)
if not len(allowed_ports):
raise Exception("Maximum SPYDHT instances for this LAN exceeded! Try closing some instances of this software.")
#Indicate to LAN that this port is now reserved.
self.port = allowed_ports[0]
def broadcast_loop():
while 1:
msg = "SPYDHT BIND %s" % (str(self.port))
msg = msg.encode("ascii")
broadcast.sendto(msg, ('255.255.255.255', self.broadcast_port))
time.sleep(self.broadcast_interval)
self.broadcast_thread = threading.Thread(target=broadcast_loop)
self.broadcast_thread.start()
#Generic init.
self.wan_ip = wan_ip
if self.wan_ip == None:
raise Exception("WAN IP required.")
self.my_key = key
if not id:
id = id_from_addr(self.wan_ip, self.port)
self.last_ping = time.time()
self.ping_ids = {}
self.proof_of_work = ProofOfWork()
self.pow_placeholder = "ProleteR"
self.last_store_check = time.time()
self.peer = Peer(str(host), self.port, id)
self.data = {}
self.buckets = BucketSet(k, id_bits, self.peer.id, self.valid_bind_ports)
self.rpc_ids = {} # should probably have a lock for this
self.server = DHTServer(self.peer.address(), DHTRequestHandler)
self.server.dht = self
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
self.boot_peer = None
self.bootstrap(str(boot_host), boot_port)
def value_to_str(self, value):
s = str(value["id"])
s += str(value["content"])
s += str(value["timestamp"])
s += str(value["pow"])
return s
def iterative_find_nodes(self, key, boot_peer=None):
shortlist = Shortlist(k, key, self)
shortlist.update(self.buckets.nearest_nodes(key, limit=alpha))
if boot_peer:
rpc_id = random.getrandbits(id_bits)
self.rpc_ids[rpc_id] = shortlist
#Because UDP is unreliable and we really need a response.
for i in range(0, 2):
boot_peer.find_node(key, rpc_id, socket=self.server.socket, peer_id=self.peer.id)
time.sleep(1)
max_iterations = k * id_bits
iterations = 0
while ((not shortlist.complete()) or boot_peer) and iterations < max_iterations:
nearest_nodes = shortlist.get_next_iteration(alpha)
for peer in nearest_nodes:
shortlist.mark(peer)
rpc_id = random.getrandbits(id_bits)
self.rpc_ids[rpc_id] = shortlist
peer.find_node(key, rpc_id, socket=self.server.socket, peer_id=self.peer.id) ######
iterations += 1
time.sleep(iteration_sleep)
boot_peer = None
return shortlist.results()
def iterative_find_value(self, key):
shortlist = Shortlist(k, key, self)
shortlist.update(self.buckets.nearest_nodes(key, limit=alpha))
max_iterations = k * id_bits
iterations = 0
while not shortlist.complete() and iterations < max_iterations:
nearest_nodes = shortlist.get_next_iteration(alpha)
for peer in nearest_nodes:
shortlist.mark(peer)
rpc_id = random.getrandbits(id_bits)
self.rpc_ids[rpc_id] = shortlist
peer.find_value(key, rpc_id, socket=self.server.socket, peer_id=self.peer.id) #####
iterations += 1
time.sleep(iteration_sleep)
return shortlist.completion_result()
def bootstrap(self, boot_host, boot_port):
if boot_host and boot_port:
self.boot_peer = Peer(boot_host, boot_port, 0)
self.iterative_find_nodes(self.peer.id, boot_peer=self.boot_peer)
def __getitem__(self, key, bypass=5):
hashed_key = int(key, 16)
if hashed_key in self.data:
return self.data[hashed_key]["content"]
result = self.iterative_find_value(hashed_key)
if result:
return result["content"]
#Needed because UDP is unreliable and order isn't guaranteed.
if bypass != 0:
time.sleep(1)
return self.__getitem__(key, bypass - 1)
else:
#Fallback on asking the boot node.
shortlist = Shortlist(k, key, self)
shortlist.update([self.boot_peer])
shortlist.mark(self.boot_peer)
rpc_id = random.getrandbits(id_bits)
self.rpc_ids[rpc_id] = shortlist
self.boot_peer.find_value(key, rpc_id, socket=self.server.socket, peer_id=self.peer.id)
result = shortlist.completion_result()
if result:
return result["content"]
raise KeyError
def __setitem__(self, key, content):
content = str(content)
if type(key) != dict:
expected_key = hash_function(key.encode("ascii") + content.encode("ascii"))
hashed_key = expected_key
old_key = 0
else:
temp = key
key = temp["id"]
hashed_key = temp["old_key"]
expected_key = hash_function(key.encode("ascii") + content.encode("ascii"))
old_key = 1
nearest_nodes = self.iterative_find_nodes(hashed_key)
value = {
"id": key,
"timestamp": time.time(),
"content": content,
"pow": self.pow_placeholder
}
#Calculate proof of work.
nonce = self.proof_of_work.calculate(self.value_to_str(value), self.store_expiry)
value["pow"] = nonce
if not nearest_nodes:
#Update and delete.
if old_key:
if hashed_key in self.data:
del self.data[hashed_key]
hashed_key = expected_key
self.data[hashed_key] = value
"""
There's always at least one copy on the boot node so the data is always available. This isn't exactly distributed or in the spirit of a DHT but it will at least ensure more reliability in the face of churn.
"""
if self.boot_peer != None:
self.boot_peer.store(hashed_key, value, socket=self.server.socket, peer_id=self.peer.id)
for node in nearest_nodes:
node.store(hashed_key, value, socket=self.server.socket, peer_id=self.peer.id)
def tick():
pass
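# Illustrative note (not part of the original file): __setitem__ hashes the string
# key together with the content, while __getitem__ expects that digest as a hex
# string. A minimal sketch, assuming hash_function returns an int (implied by the
# int(key, 16) conversion in __getitem__):
#
#   stored_key = hash_function(b"my-id" + b"my content")  # what __setitem__ computes
#   dht["my-id"] = "my content"                           # stored/replicated under stored_key
#   content = dht["%x" % stored_key]                      # retrieval is by hex digest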
| robertsdotpm/spydht | spydht/spydht.py | Python | bsd-2-clause | 20,123 |
"""Compilation database module
"""
import clang.cindex
import logging
import os
import sys
# generator
def compile_commands(dirname):
assert(dirname and os.path.isdir(dirname))
compdb = clang.cindex.CompilationDatabase.fromDirectory(dirname)
yield compdb.getAllCompileCommands()
def get_all_compile_commands(dirname):
"""
Get an iterable object of each compile command having (dirname, arguments)
"""
assert(dirname and os.path.isdir(dirname))
for cmds in compile_commands(dirname):
for cmd in cmds:
yield (cmd.directory, ' '.join([arg for arg in cmd.arguments]))
if __name__ == '__main__':
    libclang_set_library_file()  # assumed to be provided elsewhere to point clang.cindex at the libclang shared library
logging.basicConfig(level=logging.INFO)
dirname = sys.argv[1]
assert(dirname)
import pprint
pp = pprint.PrettyPrinter(indent=4)
for direc, arg in get_all_compile_commands(dirname):
print('{} => {}'.format(direc, arg))
| inlinechan/stags | stags/compdb.py | Python | bsd-2-clause | 933 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2013 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from __future__ import with_statement
import doctest
import os.path
import pkg_resources
import random
import re
import sys
import tempfile
import unittest
import trac
import trac.tests.compat
from trac import util
from trac.util.tests import concurrency, datefmt, presentation, text, \
translation, html
class AtomicFileTestCase(unittest.TestCase):
def setUp(self):
self.path = os.path.join(tempfile.gettempdir(), 'trac-tempfile')
def tearDown(self):
try:
os.unlink(self.path)
except OSError:
pass
def test_non_existing(self):
with util.AtomicFile(self.path) as f:
f.write('test content')
self.assertTrue(f.closed)
self.assertEqual('test content', util.read_file(self.path))
def test_existing(self):
util.create_file(self.path, 'Some content')
self.assertEqual('Some content', util.read_file(self.path))
with util.AtomicFile(self.path) as f:
f.write('Some new content')
self.assertTrue(f.closed)
self.assertEqual('Some new content', util.read_file(self.path))
if util.can_rename_open_file:
def test_existing_open_for_reading(self):
util.create_file(self.path, 'Initial file content')
self.assertEqual('Initial file content', util.read_file(self.path))
with open(self.path) as rf:
with util.AtomicFile(self.path) as f:
f.write('Replaced content')
self.assertTrue(rf.closed)
self.assertTrue(f.closed)
self.assertEqual('Replaced content', util.read_file(self.path))
# FIXME: It is currently not possible to make this test pass on all
# platforms and with all locales. Typically, it will fail on Linux with
# LC_ALL=C.
# Python 3 adds sys.setfilesystemencoding(), which could be used here
# to remove the dependency on the locale. So the test is disabled until
# we require Python 3.
def _test_unicode_path(self):
self.path = os.path.join(tempfile.gettempdir(), u'träc-témpfilè')
with util.AtomicFile(self.path) as f:
f.write('test content')
self.assertTrue(f.closed)
self.assertEqual('test content', util.read_file(self.path))
class PathTestCase(unittest.TestCase):
def assert_below(self, path, parent):
self.assertTrue(util.is_path_below(path.replace('/', os.sep),
parent.replace('/', os.sep)))
def assert_not_below(self, path, parent):
self.assertFalse(util.is_path_below(path.replace('/', os.sep),
parent.replace('/', os.sep)))
def test_is_path_below(self):
self.assert_below('/svn/project1', '/svn/project1')
self.assert_below('/svn/project1/repos', '/svn/project1')
self.assert_below('/svn/project1/sub/repos', '/svn/project1')
self.assert_below('/svn/project1/sub/../repos', '/svn/project1')
self.assert_not_below('/svn/project2/repos', '/svn/project1')
self.assert_not_below('/svn/project2/sub/repos', '/svn/project1')
self.assert_not_below('/svn/project1/../project2/repos',
'/svn/project1')
self.assertTrue(util.is_path_below('repos', os.path.join(os.getcwd())))
self.assertFalse(util.is_path_below('../sub/repos',
os.path.join(os.getcwd())))
class RandomTestCase(unittest.TestCase):
def setUp(self):
self.state = random.getstate()
def tearDown(self):
random.setstate(self.state)
def test_urandom(self):
"""urandom() returns random bytes"""
for i in xrange(129):
self.assertEqual(i, len(util.urandom(i)))
# For a large enough sample, each value should appear at least once
entropy = util.urandom(65536)
values = set(ord(c) for c in entropy)
self.assertEqual(256, len(values))
def test_hex_entropy(self):
"""hex_entropy() returns random hex digits"""
hex_digits = set('0123456789abcdef')
for i in xrange(129):
entropy = util.hex_entropy(i)
self.assertEqual(i, len(entropy))
self.assertEqual(set(), set(entropy) - hex_digits)
def test_hex_entropy_global_state(self):
"""hex_entropy() not affected by global random generator state"""
random.seed(0)
data = util.hex_entropy(64)
random.seed(0)
self.assertNotEqual(data, util.hex_entropy(64))
class ContentDispositionTestCase(unittest.TestCase):
def test_filename(self):
self.assertEqual('attachment; filename=myfile.txt',
util.content_disposition('attachment', 'myfile.txt'))
self.assertEqual('attachment; filename=a%20file.txt',
util.content_disposition('attachment', 'a file.txt'))
def test_no_filename(self):
self.assertEqual('inline', util.content_disposition('inline'))
self.assertEqual('attachment', util.content_disposition('attachment'))
def test_no_type(self):
self.assertEqual('filename=myfile.txt',
util.content_disposition(filename='myfile.txt'))
self.assertEqual('filename=a%20file.txt',
util.content_disposition(filename='a file.txt'))
class SafeReprTestCase(unittest.TestCase):
def test_normal_repr(self):
for x in ([1, 2, 3], "été", u"été"):
self.assertEqual(repr(x), util.safe_repr(x))
def test_buggy_repr(self):
class eh_ix(object):
def __repr__(self):
return 1 + "2"
self.assertRaises(Exception, repr, eh_ix())
sr = util.safe_repr(eh_ix())
sr = re.sub('[A-F0-9]{4,}', 'ADDRESS', sr)
sr = re.sub(r'__main__|trac\.util\.tests', 'MODULE', sr)
self.assertEqual("<MODULE.eh_ix object at 0xADDRESS "
"(repr() error: TypeError: unsupported operand "
"type(s) for +: 'int' and 'str')>", sr)
class SetuptoolsUtilsTestCase(unittest.TestCase):
def test_get_module_path(self):
self.assertEqual(util.get_module_path(trac),
util.get_module_path(util))
def test_get_pkginfo_trac(self):
pkginfo = util.get_pkginfo(trac)
self.assertEqual(trac.__version__, pkginfo.get('version'))
self.assertNotEqual({}, pkginfo)
def test_get_pkginfo_non_toplevel(self):
from trac import core
import tracopt
pkginfo = util.get_pkginfo(trac)
self.assertEqual(pkginfo, util.get_pkginfo(util))
self.assertEqual(pkginfo, util.get_pkginfo(core))
self.assertEqual(pkginfo, util.get_pkginfo(tracopt))
def test_get_pkginfo_genshi(self):
try:
import genshi
import genshi.core
dist = pkg_resources.get_distribution('Genshi')
except:
pass
else:
pkginfo = util.get_pkginfo(genshi)
self.assertNotEqual({}, pkginfo)
self.assertEqual(pkginfo, util.get_pkginfo(genshi.core))
def test_get_pkginfo_babel(self):
try:
import babel
import babel.core
dist = pkg_resources.get_distribution('Babel')
except:
pass
else:
pkginfo = util.get_pkginfo(babel)
self.assertNotEqual({}, pkginfo)
self.assertEqual(pkginfo, util.get_pkginfo(babel.core))
def test_get_pkginfo_mysqldb(self):
# MySQLdb's package name is "MySQL-Python"
try:
import MySQLdb
import MySQLdb.cursors
dist = pkg_resources.get_distribution('MySQL-Python')
dist.get_metadata('top_level.txt')
except:
pass
else:
pkginfo = util.get_pkginfo(MySQLdb)
self.assertNotEqual({}, pkginfo)
self.assertEqual(pkginfo, util.get_pkginfo(MySQLdb.cursors))
def test_get_pkginfo_psycopg2(self):
# python-psycopg2 deb package doesn't provide SOURCES.txt and
# top_level.txt
try:
import psycopg2
import psycopg2.extensions
dist = pkg_resources.get_distribution('psycopg2')
except:
pass
else:
pkginfo = util.get_pkginfo(psycopg2)
self.assertNotEqual({}, pkginfo)
self.assertEqual(pkginfo, util.get_pkginfo(psycopg2.extensions))
class LazyClass(object):
@util.lazy
def f(self):
return object()
class LazyTestCase(unittest.TestCase):
def setUp(self):
self.obj = LazyClass()
def test_lazy_get(self):
f = self.obj.f
self.assertTrue(self.obj.f is f)
def test_lazy_set(self):
self.obj.f = 2
self.assertEqual(2, self.obj.f)
def test_lazy_del(self):
f = self.obj.f
del self.obj.f
self.assertFalse(self.obj.f is f)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(AtomicFileTestCase))
suite.addTest(unittest.makeSuite(PathTestCase))
suite.addTest(unittest.makeSuite(RandomTestCase))
suite.addTest(unittest.makeSuite(ContentDispositionTestCase))
suite.addTest(unittest.makeSuite(SafeReprTestCase))
suite.addTest(unittest.makeSuite(SetuptoolsUtilsTestCase))
suite.addTest(unittest.makeSuite(LazyTestCase))
suite.addTest(concurrency.suite())
suite.addTest(datefmt.suite())
suite.addTest(presentation.suite())
suite.addTest(doctest.DocTestSuite(util))
suite.addTest(text.suite())
suite.addTest(translation.suite())
suite.addTest(html.suite())
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| jun66j5/trac-ja | trac/util/tests/__init__.py | Python | bsd-3-clause | 10,347 |
#!/usr/bin/env python
from getpass import getpass
import sys
from flask.ext.security.utils import encrypt_password
from app import create_app
from app.models import AppUser, db, Service, Role
def main():
app = create_app()
with app.app_context():
db.metadata.create_all(db.engine)
services = Service.query.all()
staff_role = Role.query.filter_by(name='Staff').first()
print 'Enter full name: '
full_name = raw_input()
print 'Enter email address: '
email = raw_input()
print 'Enter phone number: '
phone_number = raw_input()
print 'Current organizations:'
for service in services:
print '%d %s' % (service.id, service.name)
print 'Enter the id of the organization to associate with this user: '
service_id = raw_input()
print 'Is this a staff user? Enter y or n: '
staff_user_yn = raw_input()
password = getpass()
assert password == getpass('Password (again):')
user = AppUser(
email=email,
password=encrypt_password(password),
service_id=service_id,
full_name=full_name,
phone_number=phone_number
)
db.session.add(user)
if staff_user_yn == 'y':
user.roles.append(staff_role)
db.session.commit()
print 'User added.'
if __name__ == '__main__':
sys.exit(main())
| codeforamerica/rva-screening | add_data/add_user.py | Python | bsd-3-clause | 1,445 |
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from .name_sanitization_utils import NameSanitizer, sanitize_block
from coremltools.converters.mil.mil.passes.pass_registry import register_pass
from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
@register_pass(namespace="common")
class sanitize_input_output_names(AbstractGraphPass):
"""
Sanitize the names of model input and output vars to make sure
that they are of the format as described in the NameSanitizer class, i.e.
of the format [a-zA-Z_][a-zA-Z0-9_]*
"""
def apply(self, prog):
sanitizer_vars = NameSanitizer(prefix="var_")
sanitizer_ops = NameSanitizer(prefix="op_")
# sanitize the input/output of the main block
sanitize_block(prog.functions["main"],
sanitizer_vars,
sanitizer_ops,
prog.main_input_types,
sanitize_model_inputs_outputs_only=True)
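    # Illustrative note (names below are hypothetical): an input called "input:0"
    # or "1_conv" does not match [a-zA-Z_][a-zA-Z0-9_]*, so this pass would rename
    # it; the exact replacement string is chosen by NameSanitizer, which is defined
    # in name_sanitization_utils and not shown here.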
| apple/coremltools | coremltools/converters/mil/mil/passes/sanitize_input_output_names.py | Python | bsd-3-clause | 1,158 |
import WebMirror.OutputFilters.FilterBase
import WebMirror.OutputFilters.util.MessageConstructors as msgpackers
from WebMirror.OutputFilters.util.TitleParsers import extractTitle
import bs4
import re
import markdown
import time
import datetime
import calendar
import WebRequest
from settings import WATTPAD_REQUIRED_TAGS
from settings import WATTPAD_MASKED_TAGS
MIN_RATING = 5
########################################################################################################################
#
# ## ## ### #### ## ## ###### ## ### ###### ######
# ### ### ## ## ## ### ## ## ## ## ## ## ## ## ## ##
# #### #### ## ## ## #### ## ## ## ## ## ## ##
# ## ### ## ## ## ## ## ## ## ## ## ## ## ###### ######
# ## ## ######### ## ## #### ## ## ######### ## ##
# ## ## ## ## ## ## ### ## ## ## ## ## ## ## ## ##
# ## ## ## ## #### ## ## ###### ######## ## ## ###### ######
#
########################################################################################################################
BLOCK_IDS = {
}
IS_BETA = True
class WattPadSeriesPageFilter(WebMirror.OutputFilters.FilterBase.FilterBase):
wanted_mimetypes = [
'text/html',
]
want_priority = 50
loggerPath = "Main.Filter.WattPad"
@staticmethod
def wantsUrl(url):
if re.search(r"^https://www.wattpad.com/story/\d+.+$", url):
# print("WattPad Processor Wants url: '%s'" % url)
return True
return False
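    # Illustrative example (URL is made up): a link such as
    # https://www.wattpad.com/story/12345678-the-example-title matches the pattern
    # above, while URLs without the /story/<digits> prefix are rejected.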
def __init__(self, **kwargs):
self.kwargs = kwargs
self.pageUrl = kwargs['pageUrl']
self.content = kwargs['pgContent']
self.type = kwargs['type']
self.log.info("Processing WattPad Item")
super().__init__()
self.wg = WebRequest.WebGetRobust(logPath=self.loggerPath+".Web")
##################################################################################################################################
##################################################################################################################################
##################################################################################################################################
def extractSeriesReleases(self, seriesPageUrl, metadata, soup):
title = metadata['title']
author = metadata['user']['name']
desc = metadata['description']
tags = metadata['tags']
# Apparently the description is rendered in a <pre> tag.
# Huh?
desc = markdown.markdown(desc, extensions=["mdx_linkify"])
title = title.strip()
# Siiiiiigh. Really?
title = title.replace("[#wattys2015]", "")
title = title.replace("(Wattys2015) ", "")
title = title.replace("#Wattys2015", "")
title = title.replace("Wattys2015", "")
title = title.strip()
if metadata['numParts'] < 3:
return []
if metadata['voteCount'] < 100:
return []
# Language ID 1 is english.
if metadata['language']['id'] != 1:
return []
# Allow blocking of item by ID
if metadata['id'] in BLOCK_IDS:
return []
# for some particularly stupid reasons, the item category tag is
# not included in the metadata.
# therefore, we parse it out from the page manually.
tagdiv = soup.find("div", class_="tags")
if tagdiv:
for tag in tagdiv.find_all("a", class_='tag'):
tags.append(tag.get_text())
tags = list(set([item.lower().strip().replace(" ", " ").replace(" ", "-") for item in tags]))
# Mask any content with any of the blocked tags.
if any([item in tags for item in WATTPAD_MASKED_TAGS]):
self.log.warning("Item has a masked tag. Not emitting any releases.")
self.log.warning("Tags: '%s'", tags)
return
# And check that at least one of the target tags is present.
if not any([item in tags for item in WATTPAD_REQUIRED_TAGS]):
self.log.warning("Item missing required tag. Not emitting any releases.")
self.log.warning("Tags: '%s'", tags)
return
seriesmeta = {}
extra = {}
extra['tags'] = tags[:]
extra['homepage'] = seriesPageUrl
extra['sourcesite'] = 'WattPad'
retval = []
index = 1
valid = 1
for release in metadata['parts']:
chp_title = release['title']
dt = datetime.datetime.strptime(release['modifyDate'], "%Y-%m-%dT%H:%M:%SZ" )
reldate = calendar.timegm(dt.timetuple())
raw_item = {}
raw_item['srcname'] = "WattPad"
raw_item['published'] = reldate
raw_item['linkUrl'] = release['url']
msg = msgpackers._buildReleaseMessage(raw_item, title, None, index, None, author=author, postfix=chp_title, tl_type='oel', extraData=extra, matchAuthor=True)
retval.append(msg)
# Check if there was substantive structure in the chapter
# name. Used as a crude heuristic for chapter validity.
# vol, chp, frag, post = extractTitle(chp_title)
# if any((vol, chp, frag)):
# # print("Valid: ", (vol, chp, frag))
# valid += 1
index += 1
# if valid < (index/2):
# print("Half the present chapters are have no numeric content?")
# return []
# Don't send the series metadata if we didn't find any chapters.
if not retval:
print("No chapters!")
return []
seriesmeta['title'] = title
seriesmeta['author'] = author
seriesmeta['tags'] = tags
seriesmeta['homepage'] = seriesPageUrl
seriesmeta['desc'] = desc
seriesmeta['tl_type'] = 'oel'
seriesmeta['sourcesite'] = 'WattPad'
pkt = msgpackers.createSeriesInfoPacket(seriesmeta, beta=IS_BETA, matchAuthor=True)
self.log.info("Wattpad scraper generated %s amqp messages!", len(retval) + 1)
self.amqp_put_item(pkt)
self.put_measurement(
measurement_name = 'chapter_releases',
measurement = len(retval),
fields = {},
extra_tags = {"site" : "WattPad"},
)
return retval
def sendReleases(self, releases):
self.log.info("Total releases found on page: %s", len(releases))
for release in releases:
pkt = msgpackers.createReleasePacket(release, beta=IS_BETA)
self.amqp_put_item(pkt)
def getJsonMetadata(self, soup):
# There are a couple of tags with the data-attr "story-id"
# Grab them all, and while we're at it, check they all match (they should)
story_id = soup.find_all(True, {'data-story-id' : True})
assert story_id, "No story ID tag found on page?"
pre = story_id.pop()['data-story-id']
for remaining in story_id:
if not pre == remaining['data-story-id']:
self.log.warning("Mismatched story-ids when fetching wattpad item metadata: '%s' - '%s'.", pre, remaining['data-story-id'])
return pre
def processPage(self, url, content):
soup = WebRequest.as_soup(self.content)
sid = self.getJsonMetadata(soup)
# The GET request url is somewhat ridiculous. Build
# it up in segments so we don't have a 500 char line
segments = [
"https://www.wattpad.com/api/v3/stories/{num}?include_deleted=0&".format(num=sid),
"fields=id%2Ctitle%2CvoteCount%2CmodifyDate%2CreadCount%2CcommentCount%2Cdescription",
"%2Curl%2Ccover%2Clanguage%2CisAdExempt%2Cuser(name%2Cusername%2Cavatar%2C"
"description%2Clocation%2Chighlight_colour%2CbackgroundUrl%2CnumLists%2C",
"numStoriesPublished%2CnumFollowing%2CnumFollowers%2Ctwitter)%2Ccompleted",
"%2CnumParts%2Cparts(id%2Ctitle%2Clength%2Curl%2Cdeleted%2Cdraft%2CmodifyDate)%2Ctags%2Ccategories",
"%2Crating%2Crankings%2Clanguage%2Ccopyright%2CsourceLink%2CfirstPartId%2Cdeleted%2Cdraft",
]
surl = "".join(segments)
print(url)
metadata = self.wg.getJson(surl, addlHeaders={'Referer': url})
releases = self.extractSeriesReleases(self.pageUrl, metadata, soup)
if releases:
self.sendReleases(releases)
##################################################################################################################################
##################################################################################################################################
##################################################################################################################################
def extractContent(self):
# print("Call to extract!")
# print(self.amqpint)
self.processPage(self.pageUrl, self.content)
def test():
print("Test mode!")
import logSetup
import WebMirror.rules
import WebMirror.Engine
import multiprocessing
logSetup.initLogging()
c_lok = cookie_lock = multiprocessing.Lock()
engine = WebMirror.Engine.SiteArchiver(cookie_lock=c_lok)
engine.dispatchRequest(testJobFromUrl('http://www.royalroadl.com/fiction/3021'))
# engine.dispatchRequest(testJobFromUrl('http://www.royalroadl.com/fictions/latest-updates'))
if __name__ == "__main__":
test()
| fake-name/ReadableWebProxy | WebMirror/OutputFilters/WattPad/WattPadSeriesPageFilter.py | Python | bsd-3-clause | 8,763 |
from django import http
from django.db.models import Q
from django.db.transaction import non_atomic_requests
from django.utils.encoding import force_bytes
from django.utils.translation import ugettext
from django.views.decorators.vary import vary_on_headers
import olympia.core.logger
from olympia import amo
from olympia.addons.models import Addon, Category
from olympia.amo.decorators import json_view
from olympia.amo.templatetags.jinja_helpers import locale_url, urlparams
from olympia.amo.utils import render, sorted_groupby
from olympia.bandwagon.models import Collection
from olympia.bandwagon.views import get_filter as get_filter_view
from olympia.browse.views import personas_listing as personas_listing_view
from olympia.versions.compare import dict_from_int, version_dict, version_int
from .forms import ESSearchForm, SecondarySearchForm
DEFAULT_NUM_COLLECTIONS = 20
DEFAULT_NUM_PERSONAS = 21 # Results appear in a grid of 3 personas x 7 rows.
log = olympia.core.logger.getLogger('z.search')
def _personas(request):
"""Handle the request for persona searches."""
initial = dict(request.GET.items())
    # Ignore these filters since they return the same results for Firefox
    # as for Thunderbird, etc.
initial.update(appver=None, platform=None)
form = ESSearchForm(initial, type=amo.ADDON_PERSONA)
form.is_valid()
qs = Addon.search_public()
filters = ['sort']
mapping = {
'downloads': '-weekly_downloads',
'users': '-average_daily_users',
'rating': '-bayesian_rating',
'created': '-created',
'name': 'name.raw',
'updated': '-last_updated',
'hotness': '-hotness'}
results = _filter_search(request, qs, form.cleaned_data, filters,
sorting=mapping,
sorting_default='-average_daily_users',
types=[amo.ADDON_PERSONA])
form_data = form.cleaned_data.get('q', '')
search_opts = {}
search_opts['limit'] = form.cleaned_data.get('pp', DEFAULT_NUM_PERSONAS)
page = form.cleaned_data.get('page') or 1
search_opts['offset'] = (page - 1) * search_opts['limit']
pager = amo.utils.paginate(request, results, per_page=search_opts['limit'])
categories, filter, base, category = personas_listing_view(request)
context = {
'pager': pager,
'form': form,
'categories': categories,
'query': form_data,
'filter': filter,
'search_placeholder': 'themes'}
return render(request, 'search/personas.html', context)
def _collections(request):
"""Handle the request for collections."""
# Sorting by relevance isn't an option. Instead the default is `weekly`.
initial = {'sort': 'weekly'}
# Update with GET variables.
initial.update(request.GET.items())
# Ignore appver/platform and set default number of collections per page.
initial.update(appver=None, platform=None, pp=DEFAULT_NUM_COLLECTIONS)
form = SecondarySearchForm(initial)
form.is_valid()
qs = Collection.search().filter(listed=True, app=request.APP.id)
filters = ['sort']
mapping = {
'weekly': '-weekly_subscribers',
'monthly': '-monthly_subscribers',
'all': '-subscribers',
'rating': '-rating',
'created': '-created',
'name': 'name_sort',
'updated': '-modified'}
results = _filter_search(request, qs, form.cleaned_data, filters,
sorting=mapping,
sorting_default='-weekly_subscribers',
types=amo.COLLECTION_SEARCH_CHOICES)
form_data = form.cleaned_data.get('q', '')
search_opts = {}
search_opts['limit'] = form.cleaned_data.get('pp', DEFAULT_NUM_COLLECTIONS)
page = form.cleaned_data.get('page') or 1
search_opts['offset'] = (page - 1) * search_opts['limit']
search_opts['sort'] = form.cleaned_data.get('sort')
pager = amo.utils.paginate(request, results, per_page=search_opts['limit'])
context = {
'pager': pager,
'form': form,
'query': form_data,
'opts': search_opts,
'filter': get_filter_view(request),
'search_placeholder': 'collections'}
return render(request, 'search/collections.html', context)
class BaseAjaxSearch(object):
"""Generates a list of dictionaries of add-on objects based on
ID or name matches. Safe to be served to a JSON-friendly view.
Sample output:
[
{
"id": 1865,
"name": "Adblock Plus",
"url": "http://path/to/details/page",
"icons": {
"32": "http://path/to/icon-32",
"64": "http://path/to/icon-64"
}
},
...
]
"""
def __init__(self, request, excluded_ids=(), ratings=False):
self.request = request
self.excluded_ids = excluded_ids
self.src = getattr(self, 'src', None)
self.types = getattr(self, 'types', amo.ADDON_TYPES.keys())
self.limit = 10
self.key = 'q' # Name of search field.
self.ratings = ratings
# Mapping of JSON key => add-on property.
default_fields = {
'id': 'id',
'name': 'name',
'url': 'get_url_path',
'icons': {
'32': ('get_icon_url', 32),
'64': ('get_icon_url', 64)
}
}
self.fields = getattr(self, 'fields', default_fields)
if self.ratings:
self.fields['rating'] = 'average_rating'
def queryset(self):
"""Get items based on ID or search by name."""
results = Addon.objects.none()
q = self.request.GET.get(self.key)
if q:
try:
pk = int(q)
except ValueError:
pk = None
qs = None
if pk:
qs = Addon.objects.public().filter(id=int(q))
elif len(q) > 2:
qs = Addon.search_public().filter_query_string(q.lower())
if qs:
results = qs.filter(type__in=self.types)
return results
def _build_fields(self, item, fields):
data = {}
for key, prop in fields.iteritems():
if isinstance(prop, dict):
data[key] = self._build_fields(item, prop)
else:
                # prop is a tuple like: ('method', 'arg1', ..., 'argN').
if isinstance(prop, tuple):
val = getattr(item, prop[0])(*prop[1:])
else:
val = getattr(item, prop, '')
if callable(val):
val = val()
data[key] = unicode(val)
return data
def build_list(self):
"""Populate a list of dictionaries based on label => property."""
results = []
for item in self.queryset()[:self.limit]:
if item.id in self.excluded_ids:
continue
d = self._build_fields(item, self.fields)
if self.src and 'url' in d:
d['url'] = urlparams(d['url'], src=self.src)
results.append(d)
return results
@property
def items(self):
return self.build_list()
class SearchSuggestionsAjax(BaseAjaxSearch):
src = 'ss'
class AddonSuggestionsAjax(SearchSuggestionsAjax):
# No personas.
types = [amo.ADDON_EXTENSION, amo.ADDON_THEME, amo.ADDON_DICT,
amo.ADDON_SEARCH, amo.ADDON_LPAPP]
class PersonaSuggestionsAjax(SearchSuggestionsAjax):
types = [amo.ADDON_PERSONA]
@json_view
@non_atomic_requests
def ajax_search(request):
"""This is currently used only to return add-ons for populating a
new collection. Themes (formerly Personas) are included by default, so
this can be used elsewhere.
"""
search_obj = BaseAjaxSearch(request)
search_obj.types = amo.ADDON_SEARCH_TYPES
return search_obj.items
@json_view
@non_atomic_requests
def ajax_search_suggestions(request):
cat = request.GET.get('cat', 'all')
suggesterClass = {
'all': AddonSuggestionsAjax,
'themes': PersonaSuggestionsAjax,
}.get(cat, AddonSuggestionsAjax)
suggester = suggesterClass(request, ratings=False)
return _build_suggestions(
request,
cat,
suggester)
def _build_suggestions(request, cat, suggester):
results = []
q = request.GET.get('q')
if q and (q.isdigit() or len(q) > 2):
q_ = q.lower()
if cat != 'apps':
# Applications.
for a in amo.APP_USAGE:
name_ = unicode(a.pretty).lower()
word_matches = [w for w in q_.split() if name_ in w]
if q_ in name_ or word_matches:
results.append({
'id': a.id,
'name': ugettext(u'{0} Add-ons').format(a.pretty),
'url': locale_url(a.short),
'cls': 'app ' + a.short
})
# Categories.
cats = Category.objects
cats = cats.filter(Q(application=request.APP.id) |
Q(type=amo.ADDON_SEARCH))
if cat == 'themes':
cats = cats.filter(type=amo.ADDON_PERSONA)
else:
cats = cats.exclude(type=amo.ADDON_PERSONA)
for c in cats:
if not c.name:
continue
name_ = unicode(c.name).lower()
word_matches = [w for w in q_.split() if name_ in w]
if q_ in name_ or word_matches:
results.append({
'id': c.id,
'name': unicode(c.name),
'url': c.get_url_path(),
'cls': 'cat'
})
results += suggester.items
return results
def _filter_search(request, qs, query, filters, sorting,
sorting_default='-weekly_downloads', types=None):
"""Filter an ES queryset based on a list of filters."""
if types is None:
types = []
APP = request.APP
# Intersection of the form fields present and the filters we want to apply.
show = [f for f in filters if query.get(f)]
if query.get('q'):
qs = qs.filter_query_string(query['q'])
if 'platform' in show and query['platform'] in amo.PLATFORM_DICT:
ps = (amo.PLATFORM_DICT[query['platform']].id, amo.PLATFORM_ALL.id)
# If we've selected "All Systems" don't filter by platform.
if ps[0] != ps[1]:
qs = qs.filter(platforms__in=ps)
if 'appver' in show:
# Get a min version less than X.0.
low = version_int(query['appver'])
# Get a max version greater than X.0a.
high = version_int(query['appver'] + 'a')
# Note: when strict compatibility is not enabled on add-ons, we
# fake the max version we index in compatible_apps.
qs = qs.filter(**{
'current_version.compatible_apps.%s.max__gte' % APP.id: high,
'current_version.compatible_apps.%s.min__lte' % APP.id: low
})
if 'atype' in show and query['atype'] in amo.ADDON_TYPES:
qs = qs.filter(type=query['atype'])
else:
qs = qs.filter(type__in=types)
if 'cat' in show:
cat = (Category.objects.filter(id=query['cat'])
.filter(Q(application=APP.id) | Q(type=amo.ADDON_SEARCH)))
if not cat.exists():
show.remove('cat')
if 'cat' in show:
qs = qs.filter(category=query['cat'])
if 'tag' in show:
qs = qs.filter(tags=query['tag'])
if 'sort' in show:
qs = qs.order_by(sorting[query['sort']])
elif not query.get('q'):
# Sort by a default if there was no query so results are predictable.
qs = qs.order_by(sorting_default)
return qs
@vary_on_headers('X-PJAX')
@non_atomic_requests
def search(request, tag_name=None):
APP = request.APP
types = (amo.ADDON_EXTENSION, amo.ADDON_THEME, amo.ADDON_DICT,
amo.ADDON_SEARCH, amo.ADDON_LPAPP)
category = request.GET.get('cat')
if category == 'collections':
extra_params = {'sort': {'newest': 'created'}}
else:
extra_params = None
fixed = fix_search_query(request.GET, extra_params=extra_params)
if fixed is not request.GET:
# We generally want a 301, except if it's a "type", because that's only
# here to support the new frontend, so a permanent redirect could mess
# things up when the user is going back and forth between the old and
# new frontend. https://github.com/mozilla/addons-server/issues/6846
status = 302 if 'type' in request.GET else 301
return http.HttpResponseRedirect(
urlparams(request.path, **fixed), status=status)
facets = request.GET.copy()
# In order to differentiate between "all versions" and an undefined value,
# we use "any" instead of "" in the frontend.
if 'appver' in facets and facets['appver'] == 'any':
facets['appver'] = ''
form = ESSearchForm(facets or {})
form.is_valid() # Let the form try to clean data.
form_data = form.cleaned_data
if tag_name:
form_data['tag'] = tag_name
if category == 'collections':
return _collections(request)
elif category == 'themes' or form_data.get('atype') == amo.ADDON_PERSONA:
return _personas(request)
sort, extra_sort = split_choices(form.sort_choices, 'created')
if form_data.get('atype') == amo.ADDON_SEARCH:
# Search add-ons should not be searched by ADU, so replace 'Users'
# sort with 'Weekly Downloads'.
sort, extra_sort = list(sort), list(extra_sort)
sort[1] = extra_sort[1]
del extra_sort[1]
# Perform search, using aggregation so that we can build the facets UI.
# Note that we don't need to aggregate on platforms, that facet it built
# from our constants directly, using the current application for this
# request (request.APP).
appversion_field = 'current_version.compatible_apps.%s.max' % APP.id
qs = (Addon.search_public().filter(app=APP.id)
.aggregate(tags={'terms': {'field': 'tags'}},
appversions={'terms': {'field': appversion_field}},
categories={'terms': {'field': 'category', 'size': 200}})
)
filters = ['atype', 'appver', 'cat', 'sort', 'tag', 'platform']
mapping = {'users': '-average_daily_users',
'rating': '-bayesian_rating',
'created': '-created',
'name': 'name.raw',
'downloads': '-weekly_downloads',
'updated': '-last_updated',
'hotness': '-hotness'}
qs = _filter_search(request, qs, form_data, filters, mapping, types=types)
pager = amo.utils.paginate(request, qs)
ctx = {
'is_pjax': request.META.get('HTTP_X_PJAX'),
'pager': pager,
'query': form_data,
'form': form,
'sort_opts': sort,
'extra_sort_opts': extra_sort,
'sorting': sort_sidebar(request, form_data, form),
'sort': form_data.get('sort'),
}
if not ctx['is_pjax']:
aggregations = pager.object_list.aggregations
ctx.update({
'tag': tag_name,
'categories': category_sidebar(request, form_data, aggregations),
'platforms': platform_sidebar(request, form_data),
'versions': version_sidebar(request, form_data, aggregations),
'tags': tag_sidebar(request, form_data, aggregations),
})
return render(request, 'search/results.html', ctx)
class FacetLink(object):
def __init__(self, text, urlparams, selected=False, children=None):
self.text = text
self.urlparams = urlparams
self.selected = selected
self.children = children or []
def sort_sidebar(request, form_data, form):
sort = form_data.get('sort')
return [FacetLink(text, {'sort': key}, key == sort)
for key, text in form.sort_choices]
def category_sidebar(request, form_data, aggregations):
APP = request.APP
qatype, qcat = form_data.get('atype'), form_data.get('cat')
cats = [f['key'] for f in aggregations['categories']]
categories = Category.objects.filter(id__in=cats)
if qatype in amo.ADDON_TYPES:
categories = categories.filter(type=qatype)
# Search categories don't have an application.
categories = categories.filter(Q(application=APP.id) |
Q(type=amo.ADDON_SEARCH))
# If category is listed as a facet but type is not, then show All.
if qcat in cats and not qatype:
qatype = True
# If category is not listed as a facet NOR available for this application,
# then show All.
if qcat not in categories.values_list('id', flat=True):
qatype = qcat = None
categories = [(_atype, sorted(_cats, key=lambda x: x.name))
for _atype, _cats in sorted_groupby(categories, 'type')]
rv = []
cat_params = {'cat': None}
all_label = ugettext(u'All Add-ons')
rv = [FacetLink(all_label, {'atype': None, 'cat': None}, not qatype)]
for addon_type, cats in categories:
selected = addon_type == qatype and not qcat
# Build the linkparams.
cat_params = cat_params.copy()
cat_params.update(atype=addon_type)
link = FacetLink(amo.ADDON_TYPES[addon_type],
cat_params, selected)
link.children = [
FacetLink(c.name, dict(cat_params, cat=c.id), c.id == qcat)
for c in cats]
rv.append(link)
return rv
def version_sidebar(request, form_data, aggregations):
appver = ''
# If appver is in the request, we read it cleaned via form_data.
if 'appver' in request.GET or form_data.get('appver'):
appver = form_data.get('appver')
app = unicode(request.APP.pretty)
exclude_versions = getattr(request.APP, 'exclude_versions', [])
# L10n: {0} is an application, such as Firefox. This means "any version of
# Firefox."
rv = [FacetLink(
ugettext(u'Any {0}').format(app), {'appver': 'any'}, not appver)]
vs = [dict_from_int(f['key']) for f in aggregations['appversions']]
# Insert the filtered app version even if it's not a facet.
av_dict = version_dict(appver)
if av_dict and av_dict not in vs and av_dict['major']:
vs.append(av_dict)
# Valid versions must be in the form of `major.minor`.
vs = set((v['major'], v['minor1'] if v['minor1'] not in (None, 99) else 0)
for v in vs)
versions = ['%s.%s' % v for v in sorted(vs, reverse=True)]
for version, floated in zip(versions, map(float, versions)):
if (floated not in exclude_versions and
floated > request.APP.min_display_version):
rv.append(FacetLink('%s %s' % (app, version), {'appver': version},
appver == version))
return rv
def platform_sidebar(request, form_data):
qplatform = form_data.get('platform')
app_platforms = request.APP.platforms.values()
ALL = app_platforms.pop(0)
# The default is to show "All Systems."
selected = amo.PLATFORM_DICT.get(qplatform, ALL)
if selected != ALL and selected not in app_platforms:
# Insert the filtered platform even if it's not a facet.
app_platforms.append(selected)
# L10n: "All Systems" means show everything regardless of platform.
rv = [FacetLink(ugettext(u'All Systems'), {'platform': ALL.shortname},
selected == ALL)]
for platform in app_platforms:
rv.append(FacetLink(platform.name, {'platform': platform.shortname},
platform == selected))
return rv
def tag_sidebar(request, form_data, aggregations):
qtag = form_data.get('tag')
tags = [facet['key'] for facet in aggregations['tags']]
rv = [FacetLink(ugettext(u'All Tags'), {'tag': None}, not qtag)]
rv += [FacetLink(tag, {'tag': tag}, tag == qtag) for tag in tags]
if qtag and qtag not in tags:
rv += [FacetLink(qtag, {'tag': qtag}, True)]
return rv
def fix_search_query(query, extra_params=None):
rv = {force_bytes(k): v for k, v in query.items()}
changed = False
# Change old keys to new names.
keys = {
'lver': 'appver',
'pid': 'platform',
'type': 'atype',
}
for old, new in keys.items():
if old in query:
rv[new] = rv.pop(old)
changed = True
# Change old parameter values to new values.
params = {
'sort': {
'newest': 'updated',
'popularity': 'downloads',
'weeklydownloads': 'users',
'averagerating': 'rating',
'sortby': 'sort',
},
'platform': {
str(p.id): p.shortname
for p in amo.PLATFORMS.values()
},
'atype': {k: str(v) for k, v in amo.ADDON_SEARCH_SLUGS.items()},
}
if extra_params:
params.update(extra_params)
for key, fixes in params.items():
if key in rv and rv[key] in fixes:
rv[key] = fixes[rv[key]]
changed = True
return rv if changed else query
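# Illustrative example (query values are made up): fix_search_query(
#     {'lver': '57.0', 'sort': 'newest'})
# renames the legacy 'lver' key to 'appver' and maps the old sort value, returning
# {'appver': '57.0', 'sort': 'updated'} (keys are passed through force_bytes first).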
def split_choices(choices, split):
"""Split a list of [(key, title)] pairs after key == split."""
index = [idx for idx, (key, title) in enumerate(choices)
if key == split]
if index:
index = index[0] + 1
return choices[:index], choices[index:]
else:
return choices, []
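# Worked example for split_choices (values are illustrative):
#   split_choices([('a', 'A'), ('b', 'B'), ('c', 'C')], 'b')
# returns ([('a', 'A'), ('b', 'B')], [('c', 'C')]): the split key stays in the first
# list, and a key that never appears leaves the second list empty.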
| harry-7/addons-server | src/olympia/search/views.py | Python | bsd-3-clause | 21,633 |
import functools
import logbook
import werkzeug.exceptions
from flask import make_response, request
from sqlalchemy.orm import class_mapper
from ._compat import httplib, iteritems, PY2
_logger = logbook.Logger(__name__)
class Parameter(object):
optional = False
def __init__(self, parameter_types):
super(Parameter, self).__init__()
if parameter_types is str and PY2:
parameter_types = (str, unicode)
if not isinstance(parameter_types, tuple):
parameter_types = (parameter_types,)
self.types = parameter_types
def coerce(self, value):
return self.types[0](value)
class Optional(Parameter):
optional = True
def get_request_input(schema):
data = request.json
convert = False
if data is None:
data = dict(iteritems(request.form))
convert = True
returned = {}
missing = set()
for param_name, param in iteritems(schema):
if not isinstance(param, Parameter):
param = Parameter(param)
if param_name not in data:
if not param.optional:
missing.add(param_name)
continue
param_value = data[param_name]
if convert:
try:
param_value = param.coerce(param_value)
except ValueError:
_logger.debug("Cannot coerce parameter {0} ({1!r})", param_name, param_value)
error_abort_invalid_type(param_name, param_value)
if not isinstance(param_value, param.types):
_logger.debug("Parameter {0} is of invalid type {1!r}", param_name, param_value)
error_abort_invalid_type(param_name, param_value)
returned[param_name] = param_value
if missing:
_logger.debug("The following parameters are missing: {0}", missing)
error_abort(httplib.BAD_REQUEST, "The following parameters are missing: {0}".format(", ".join(sorted(missing))))
return returned
def error_abort_invalid_type(param_name, param_value):
error_abort(httplib.BAD_REQUEST, "Invalid parameter value for {0}: {1!r}".format(param_name, param_value))
def error_abort(code, message):
raise HTTPException(code, message)
class HTTPException(werkzeug.exceptions.HTTPException):
def __init__(self, code, message):
super(HTTPException, self).__init__()
self.code = code
self.message = message
def get_response(self, _): # pragma: nocover
return make_response((self.message, self.code, {}))
def takes_schema_args(**schema):
def decorator(func):
@functools.wraps(func)
def new_func():
return func(**get_request_input(schema))
return new_func
return decorator
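# Minimal usage sketch (the route and field names are hypothetical, not part of
# this module): the decorator pulls the declared parameters out of the request,
# coerces/validates them via get_request_input, and passes them as keyword args.
#
#   @app.route("/users", methods=["POST"])
#   @takes_schema_args(name=str, age=Optional(int))
#   def create_user(name, age=None):
#       ...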
def dictify_model(obj):
"""
Turns an SQLAlchemy object to a JSON dict
"""
return dict((column.key, getattr(obj, column.key)) for column in class_mapper(obj.__class__).columns)
| vmalloc/weber-utils | weber_utils/request_utils.py | Python | bsd-3-clause | 2,920 |
# Future Imports for py2/3 backwards compat.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from lxml import etree
from io import StringIO
from future import standard_library
standard_library.install_aliases()
def create_element(tag, optional_attributes=None):
request = etree.Element(tag)
if optional_attributes:
for tag, value in iter(optional_attributes.items()):
request.attrib[tag] = "{0}".format(value)
return request
def get_attribute(xml_data, attribute_name, default_value=None):
if xml_data is None:
return default_value
return xml_data.attrib.get(attribute_name, default_value)
def get_children_of(xml_data, element_name):
element = get_element(xml_data, element_name, default_value=None)
return element.getchildren() if element is not None else ()
def get_element(xml_data, element_name, default_value=None):
if xml_data is None:
return default_value
return xml_data.find(element_name)
def get_content_of(xml_data, element_name, default_value=None):
if xml_data is None:
return default_value
element = xml_data.find(element_name)
if element is None:
return default_value
if element.text is None:
return default_value
return element.text
def as_string(xml_data):
return etree.tostring(xml_data)
def from_large_string(s):
parser = etree.XMLParser(huge_tree=True)
return etree.parse(StringIO(s), parser).getroot()
# TODO: when downloading reports we shouldn't use XML, the from_large_string should only be needed in rare cases
# this is actually a problem of GenerateScanReport as it shouldn't be using Execute_APIv1d1
def as_xml(s):
# Note:
# There is a bug in the StartUpdateResponse, in case of a failure (no internet connection),
# two StartUpdateResponse XML objects are returned, one indicating failure, one indicating success.
# We handle this bug here (wrong place?!), by embedding the returned XML in a single object
# and returning the first element after conversion.
if s.startswith('<?'):
return from_large_string(s)
s = '<_>' + s + '</_>'
return from_large_string(s).getchildren()[0]
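# Illustrative behaviour of the workaround above (input is made up): a payload such
# as '<A/><B/>' does not start with '<?', so it is wrapped as '<_><A/><B/></_>' and
# the first child element (<A/>) is returned; a string starting with '<?' is parsed
# directly via from_large_string.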
| derpadoo/nexpose-client-python | nexpose/xml_utils.py | Python | bsd-3-clause | 2,258 |
"""
UI for controlling how api classes and mel commands are combined into pymel classes.
This UI modifies factories.apiToMelData which is pickled out to apiMelBridge.
It controls:
which mel methods correspond to api methods
disabling of api methods
preference for overloaded methods (since currently only one overloaded method is supported)
renaming of apiMethod
"""
from __future__ import with_statement
import inspect, re, os
from pymel.all import *
import pymel.internal.factories as factories
import logging
logger = logging.getLogger(__name__)
if logger.level == logging.NOTSET:
logger.setLevel(logging.INFO)
FRAME_WIDTH = 800
VERBOSE = True
class PymelControlPanel(object):
def __init__(self):
# key is a tuple of (class, method)
self.classList = sorted( list( set( [ key[0] for key in factories.apiToMelData.keys()] ) ) )
self.classFrames={}
self.processClassFrames()
self.buildUI()
def buildUI(self):
self.win = window(title='Pymel Control Panel')
self.win.show()
with paneLayout(configuration='vertical3', paneSize=([1,20,100], [3,20,100]) ) as self.pane:
            # Left Column: Api Classes
self.classScrollList = textScrollList('apiClassList')
# Center Column: Api Methods
# Would LIKE to do it like this, but there is currently a bug with
# objectType UI, such that even if
# layout('window4|paneLayout5', q=1, exists=1) == True
# when you run:
# objectTypeUI('window4|paneLayout5')
# you will get an error:
# RuntimeError: objectTypeUI: Object 'window4|paneLayout5' not found.
# with formLayout() as apiForm:
# #with scrollLayout() as scroll:
# with tabLayout('apiMethodCol') as self.apiMethodCol:
# pass
# status = helpLine(h=60)
# So, instead, we do it old-school...
apiForm = formLayout()
self.apiMethodCol = tabLayout('apiMethodCol')
setParent(apiForm)
status = cmds.helpLine(h=60)
setParent(self.pane)
apiForm.attachForm( self.apiMethodCol, 'top', 5 )
apiForm.attachForm( self.apiMethodCol, 'left', 5 )
apiForm.attachForm( self.apiMethodCol, 'right', 5 )
apiForm.attachControl( self.apiMethodCol, 'bottom', 5, status )
apiForm.attachPosition( status, 'bottom', 5, 20 )
apiForm.attachForm( status, 'bottom', 5 )
apiForm.attachForm( status, 'left', 5 )
apiForm.attachForm( status, 'right', 5 )
# Right Column: Mel Methods
melForm = formLayout()
label1 = text( label='Unassigned Mel Methods' )
self.unassignedMelMethodLister = textScrollList()
label2 = text( label='Assigned Mel Methods' )
self.assignedMelMethodLister = textScrollList()
label3 = text( label='Disabled Mel Methods' )
self.disabledMelMethodLister = textScrollList()
setParent(self.pane)
melForm.attachForm( label1, 'top', 5 )
melForm.attachForm( label1, 'left', 5 )
melForm.attachForm( label1, 'right', 5 )
melForm.attachControl( self.unassignedMelMethodLister, 'top', 0, label1 )
melForm.attachForm( self.unassignedMelMethodLister, 'left', 5 )
melForm.attachForm( self.unassignedMelMethodLister, 'right', 5 )
melForm.attachPosition( self.unassignedMelMethodLister, 'bottom', 5, 33 )
melForm.attachControl( label2, 'top', 5, self.unassignedMelMethodLister)
melForm.attachForm( label2, 'left', 5 )
melForm.attachForm( label2, 'right', 5 )
melForm.attachControl( self.assignedMelMethodLister, 'top', 0, label2 )
melForm.attachForm( self.assignedMelMethodLister, 'left', 5 )
melForm.attachForm( self.assignedMelMethodLister, 'right', 5 )
melForm.attachPosition( self.assignedMelMethodLister, 'bottom', 5, 66 )
melForm.attachControl( label3, 'top', 5, self.assignedMelMethodLister)
melForm.attachForm( label3, 'left', 5 )
melForm.attachForm( label3, 'right', 5 )
melForm.attachControl( self.disabledMelMethodLister, 'top', 0, label3 )
melForm.attachForm( self.disabledMelMethodLister, 'left', 5 )
melForm.attachForm( self.disabledMelMethodLister, 'right', 5 )
melForm.attachForm( self.disabledMelMethodLister, 'bottom', 5 )
setParent('..')
popupMenu(parent=self.unassignedMelMethodLister, button=3 )
menuItem(l='disable', c=Callback( PymelControlPanel.disableMelMethod, self, self.unassignedMelMethodLister ) )
popupMenu(parent=self.assignedMelMethodLister, button=3 )
menuItem(l='disable', c=Callback( PymelControlPanel.disableMelMethod, self, self.assignedMelMethodLister ) )
popupMenu(parent=self.disabledMelMethodLister, button=3 )
menuItem(l='enable', c=Callback( PymelControlPanel.enableMelMethod))
self.classScrollList.extend( self.classList )
self.classScrollList.selectCommand( lambda: self.apiClassList_selectCB() )
scriptJob(uiDeleted=[str(self.win),cacheResults])
self.win.show()
def disableMelMethod(self, menu):
msel = menu.getSelectItem()
csel = self.classScrollList.getSelectItem()
if msel and csel:
method = msel[0]
clsname = csel[0]
menu.removeItem(method)
self.disabledMelMethodLister.append( method )
#print clsname, method, factories.apiToMelData[ (clsname, method) ]
factories.apiToMelData[ (clsname, method) ]['melEnabled'] = False
def enableMelMethod(self):
menu = self.disabledMelMethodLister
msel = menu.getSelectItem()
csel = self.classScrollList.getSelectItem()
if msel and csel:
method = msel[0]
clsname = csel[0]
menu.removeItem(method)
self.unassignedMelMethodLister.append( method )
#print clsname, method, factories.apiToMelData[ (clsname, method) ]
factories.apiToMelData[ (clsname, method) ].pop('melEnabled')
@staticmethod
def getMelMethods(className):
"""get all mel-derived methods for this class"""
reg = re.compile('(.*[a-z])([XYZ])$')
newlist = []
origlist = factories.classToMelMap[className]
for method in origlist:
m = reg.search(method)
if m:
# strip off the XYZ component and replace with *
newname = m.group(1) + '*'
if newname not in newlist:
newlist.append(newname)
else:
newlist.append(method)
return sorted(newlist)
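    # Illustrative example (method names are hypothetical): with the regex above,
    # 'translateX', 'translateY' and 'translateZ' collapse into the single entry
    # 'translate*', while a name such as 'getMatrix' is returned unchanged.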
def apiClassList_selectCB(self, *args):
sel = self.classScrollList.getSelectItem()
if sel:
self.buildClassColumn(sel[0])
def assignMelMethod(self, method):
#print "method %s is now assigned" % method
if method in util.listForNone( self.unassignedMelMethodLister.getAllItems() ):
self.unassignedMelMethodLister.removeItem(method)
self.assignedMelMethodLister.append( method )
def unassignMelMethod(self, method):
#print "method %s is now unassigned" % method
if method in util.listForNone( self.assignedMelMethodLister.getAllItems() ):
self.assignedMelMethodLister.removeItem(method)
self.unassignedMelMethodLister.append( method )
def processClassFrames(self):
"""
This triggers the generation of all the defaults for `factories.apiToMelData`, but it does
not create any UI elements. It creates `ClassFrame` instances, which in turn create
`MethodRow` instances, but the creation of UI elements is delayed until a particular
configuration is requested via `buildClassColumn`.
"""
logger.info( 'processing all classes...' )
for className in self.classList:
melMethods = self.getMelMethods(className)
logger.debug( '%s: mel methods: %s' % (className, melMethods) )
for clsName, apiClsName in getClassHierarchy(className):
if apiClsName and apiClsName not in ['list']:
if clsName not in self.classFrames:
frame = ClassFrame( self, clsName, apiClsName)
self.classFrames[clsName] = frame
# temporarily disable the melName updating until we figure out how to deal
# with base classes that are the parents of many others, and which therefore end up with
# methods derived from many different mel commands, which are only applicable for the inherited classes
# not for the base class on its own. ( see ObjectSet and Character, for an example, specifically 'getIntersection' method )
#self.classFrames[clsName].updateMelNames( melMethods )
logger.info( 'done processing classes' )
def buildClassColumn(self, className ):
"""
Build an info column for a class. This column will include processed `ClassFrame`s for it and its parent classes
"""
setParent(self.apiMethodCol)
self.apiMethodCol.clear()
self.unassignedMelMethodLister.removeAll()
self.assignedMelMethodLister.removeAll()
self.disabledMelMethodLister.removeAll()
melMethods = self.getMelMethods(className)
for method in melMethods:
# fix
if (className, method) in factories.apiToMelData and factories.apiToMelData[ (className, method) ] == {'enabled':False}:
d = factories.apiToMelData.pop( (className, method) )
d.pop('enabled')
d['melEnabled'] = False
if (className, method) in factories.apiToMelData and factories.apiToMelData[(className, method)].get('melEnabled',True) == False:
self.disabledMelMethodLister.append( method )
else:
self.unassignedMelMethodLister.append( method )
#filter = set( ['double', 'MVector'] )
filter = []
count = 0
for clsName, apiClsName in getClassHierarchy(className):
if apiClsName:
#print cls
if clsName in self.classFrames:
logger.debug( "building UI for %s", clsName )
frame = self.classFrames[clsName].buildUI(filter)
self.apiMethodCol.setTabLabel( [frame, clsName] )
count+=1
#frame.setVisible(False)
#if i != len(mro)-1:
# frame.setCollapse(True)
else:
logger.debug( "skipping %s", clsName )
self.apiMethodCol.setSelectTabIndex(count)
#self.classFrames[className].frame.setCollapse(False)
class ClassFrame(object):
def __init__(self, parent, className, apiClassName ):
self.parent = parent
self.className = className
self.apiClassName = apiClassName
self.rows = {}
self.classInfo = factories.apiClassInfo[apiClassName]['methods']
for method in self.classInfo.keys():
row = MethodRow( self, self.className, self.apiClassName, method, self.classInfo[method] )
self.rows[method] = row
def updateMelNames(self, melMethods):
logger.debug( '%s: updating melNames' % self.className )
for rowName, row in self.rows.items():
row.updateMelNames( melMethods )
def buildUI(self, filter=None):
count = 0
#self.form = formLayout()
with frameLayout(collapsable=False, label='%s (%s)' % (self.className, self.apiClassName),
width = FRAME_WIDTH) as self.frame:
#labelAlign='top')
with tabLayout() as tab:
invertibles = factories.apiClassInfo[self.apiClassName]['invertibles']
usedMethods = []
with formLayout() as pairdForm:
tab.setTabLabel( [pairdForm, 'Paired'] )
with scrollLayout() as pairedScroll:
with columnLayout(visible=False, adjustableColumn=True) as pairedCol:
for setMethod, getMethod in invertibles:
setParent(pairedCol) # column
frame = frameLayout(label = '%s / %s' % (setMethod, getMethod),
labelVisible=True, collapsable=True,
collapse=True, width = FRAME_WIDTH)
col2 = columnLayout()
pairCount = 0
pairCount += self.rows[setMethod].buildUI(filter)
pairCount += self.rows[getMethod].buildUI(filter)
usedMethods += [setMethod, getMethod]
if pairCount == 0:
#deleteUI(col2)
frame.setVisible(False)
frame.setHeight(1)
count += pairCount
pairedCol.setVisible(True)
pairdForm.attachForm( pairedScroll, 'top', 5 )
pairdForm.attachForm( pairedScroll, 'left', 5 )
pairdForm.attachForm( pairedScroll, 'right', 5 )
pairdForm.attachForm( pairedScroll, 'bottom', 5 )
with formLayout() as unpairedForm:
tab.setTabLabel( [unpairedForm, 'Unpaired'] )
with scrollLayout() as unpairedScroll:
with columnLayout(visible=False ) as unpairedCol:
# For some reason, on linux, the unpairedCol height is wrong...
# track + set it ourselves
unpairedHeight = 10 # a little extra buffer...
#rowSpace = unpairedCol.getRowSpacing()
for methodName in sorted( self.classInfo.keys() ):
setParent(unpairedCol)
if methodName not in usedMethods:
frame = frameLayout(label = methodName,
labelVisible=True, collapsable=True,
collapse=True, width = FRAME_WIDTH)
col2 = columnLayout()
count += self.rows[methodName].buildUI(filter)
unpairedHeight += self.rows[methodName].frame.getHeight()# + rowSpace
unpairedCol.setHeight(unpairedHeight)
#self.form.attachForm( self.frame, 'left', 2)
#self.form.attachForm( self.frame, 'right', 2)
#self.form.attachForm( self.frame, 'top', 2)
#self.form.attachForm( self.frame, 'bottom', 2)
unpairedCol.setVisible(True)
unpairedForm.attachForm( unpairedScroll, 'top', 5 )
unpairedForm.attachForm( unpairedScroll, 'left', 5 )
unpairedForm.attachForm( unpairedScroll, 'right', 5 )
unpairedForm.attachForm( unpairedScroll, 'bottom', 5 )
return self.frame
class MethodRow(object):
def __init__(self, parent, className, apiClassName, apiMethodName, methodInfoList):
self.parent = parent
self.className = className
self.methodName = methodInfoList[0].get('pymelName', apiMethodName)
self.apiClassName = apiClassName
self.apiMethodName = apiMethodName
self.methodInfoList = methodInfoList
self.data = factories._getApiOverrideNameAndData(self.className, self.methodName)[1]
self.classInfo = factories.apiClassInfo[self.apiClassName]['methods'][self.apiMethodName]
try:
enabledArray = self.getEnabledArray()
except:
print self.apiClassName, self.apiMethodName
raise
# DEFAULT VALUES
# correct old values
# we no longer store positive values, only negative -- meaning methods will be enabled by default
# if 'enabled' in self.data and ( self.data['enabled'] == True or sum(enabledArray) == 0 ):
# logger.debug( '%s.%s: enabled array: %s' % ( self.className, self.methodName, enabledArray ) )
# logger.debug( '%s.%s: removing enabled entry' % ( self.className, self.methodName) )
# self.data.pop('enabled', None)
# enabled
# if not self.data.has_key( 'enabled' ):
# self.data['enabled'] = True
if self.methodName in factories.EXCLUDE_METHODS : # or sum(enabledArray) == 0:
self.data['enabled'] = False
# useName mode
if not self.data.has_key( 'useName' ):
self.data['useName'] = 'API'
else:
# correct old values
useNameVal = self.data['useName']
if useNameVal == True:
self.data['useName'] = 'API'
elif useNameVal == False:
self.data['useName'] = 'MEL'
elif useNameVal not in ['MEL', 'API']:
self.data['useName'] = str(useNameVal)
# correct old values
if self.data.has_key('overloadPrecedence'):
self.data['overloadIndex'] = self.data.pop('overloadPrecedence')
# correct old values
if self.data.has_key('melName'):
#logger.debug( "correcting melName %s %s %s" % (self.className, self.methodName, str(self.data['melName']) ) )
self.data['melName'] = str(self.data['melName'])
overloadId = self.data.get('overloadIndex', 0)
if overloadId is None:
# in a previous test, it was determined there were no wrappable overload methods,
# but there may be now. try again.
overloadId = 0
# ensure we don't use a value that is not valid
for i in range(overloadId, len(enabledArray)+1):
try:
if enabledArray[i]:
break
except IndexError: # went too far, so none are valid
overloadId = None
# if val is None:
# # nothing valid
# self.data.pop('overloadIndex', None)
# else:
self.data['overloadIndex'] = overloadId
def crossReference(self, melName):
""" create an entry for the melName which points to the data being tracked for the api name"""
factories.apiToMelData[ (self.className, melName ) ] = self.data
def uncrossReference(self, melName):
factories.apiToMelData.pop( (self.className, melName ) )
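# Illustrative note: crossReference stores the *same* dict object under the
# (className, melName) key that is already tracked for the API name, so a change made
# through either key is visible through both. Hypothetical usage (names invented):
#   row.data['useName'] = 'MEL'
#   row.crossReference('polyCube')
#   factories.apiToMelData[(row.className, 'polyCube')]['useName']   # -> 'MEL'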
def updateMelNames(self, melMethods):
# melName
if not self.data.has_key( 'melName' ):
match = None
for method in melMethods:
methreg = re.compile(method.replace('*', '.{0,1}') + '$')
#print self.methodName, methreg
if methreg.match( self.methodName ):
match = str(method)
break
if match:
logger.debug( "%s.%s: adding melName %s" % ( self.className, self.methodName, match ) )
self.data['melName'] = match
self.crossReference( match )
def buildUI(self, filter=None):
if filter:
match = False
for i, info in enumerate( self.methodInfoList):
argUtil = factories.ApiArgUtil( self.apiClassName, self.apiMethodName, i )
if filter.intersection( argUtil.getInputTypes() + argUtil.getOutputTypes() ):
match = True
break
if match == False:
return False
self.layout = { 'columnAlign' : [1,'right'],
'columnAttach' : [1,'right',8] }
#print className, self.methodName, melMethods
isOverloaded = len(self.methodInfoList)>1
self.frame = frameLayout( w=FRAME_WIDTH, labelVisible=False, collapsable=False)
logger.debug("building row for %s - %s" % (self.methodName, self.frame))
col = columnLayout()
enabledArray = []
self.rows = []
self.overloadPrecedenceColl = None
self.enabledChBx = checkBox(label=self.methodName,
changeCommand=CallbackWithArgs( MethodRow.enableCB, self ) )
if isOverloaded:
self.overloadPrecedenceColl = radioCollection()
for i in range( len(self.methodInfoList) ) :
self.createMethodInstance(i)
else:
#row = rowLayout( self.methodName + '_rowMain', nc=2, cw2=[200, 400] )
#self.enabledChBx = checkBox(label=self.methodName, changeCommand=CallbackWithArgs( MethodRow.enableCB, self ) )
#text(label='')
self.createMethodInstance(0)
#setParent('..')
setParent(col)
separator(w=800, h=6)
#self.row = rowLayout( self.methodName + '_rowSettings', nc=4, cw4=[200, 160, 180, 160] )
#self.rows.append(row)
self.row = rowLayout( self.methodName + '_rowSettings', nc=2, cw2=[200, 220], **self.layout )
self.rows.append(self.row)
# create ui elements
text(label='Mel Equivalent')
self.melNameTextField = textField(w=170, editable=False)
self.melNameOptMenu = popupMenu(parent=self.melNameTextField,
button=1,
postMenuCommand=Callback( MethodRow.populateMelNameMenu, self ) )
setParent('..')
self.row2 = rowLayout( self.methodName + '_rowSettings2', nc=3, cw3=[200, 180, 240], **self.layout )
self.rows.append(self.row2)
text(label='Use Name')
self.nameMode = radioButtonGrp(label='', nrb=3, cw4=[1,50,50,50], labelArray3=['api', 'mel', 'other'] )
self.altNameText = textField(w=170, enable=False)
self.altNameText.changeCommand( CallbackWithArgs( MethodRow.alternateNameCB, self ) )
self.nameMode.onCommand( Callback( MethodRow.nameTypeCB, self ) )
isEnabled = self.data.get('enabled', True)
# UI SETUP
melName = self.data.get('melName', '')
try:
#self.melNameOptMenu.setValue( melName )
self.melNameTextField.setText(melName)
if melName != '':
self.parent.parent.assignMelMethod( melName )
except RuntimeError:
# it is possible for a method name to be listed here that was set from a different view,
# where this class was a super class and more mel commands were available. expand the option list,
# and make this frame read-only
menuItem( label=melName, parent=self.melNameOptMenu )
self.melNameOptMenu.setValue( melName )
logger.debug( "making %s frame read-only" % self.methodName )
self.frame.setEnable(False)
self.enabledChBx.setValue( isEnabled )
self.row.setEnable( isEnabled )
self.row2.setEnable( isEnabled )
name = self.data['useName']
if name == 'API' :
self.nameMode.setSelect( 1 )
self.altNameText.setEnable(False)
elif name == 'MEL' :
self.nameMode.setSelect( 2 )
self.altNameText.setEnable(False)
else :
self.nameMode.setSelect( 3 )
self.altNameText.setText(name)
self.altNameText.setEnable(True)
if self.overloadPrecedenceColl:
items = self.overloadPrecedenceColl.getCollectionItemArray()
try:
val = self.data['overloadIndex']
if val is None:
logger.info( "no wrappable options for method %s" % self.methodName )
self.frame.setEnable( False )
else:
self.overloadPrecedenceColl.setSelect( items[ val ] )
except:
pass
# # ensure we don't use a value that is not valid
# for val in range(val, len(enabledArray)+1):
# try:
# if enabledArray[val]:
# break
# except IndexError:
# val = None
# if val is not None:
# self.overloadPrecedenceColl.setSelect( items[ val ] )
setParent('..')
setParent('..') # frame
setParent('..') # column
return True
def enableCB(self, *args ):
logger.debug( 'setting enabled to %s' % args[0] )
if args[0] == False:
self.data['enabled'] = False
else:
self.data.pop('enabled', None)
self.row.setEnable( args[0] )
def nameTypeCB(self ):
logger.info( 'setting name type' )
selected = self.nameMode.getSelect()
if selected == 1:
val = 'API'
self.altNameText.setEnable(False)
elif selected == 2:
val = 'MEL'
self.altNameText.setEnable(False)
else:
val = str(self.altNameText.getText())
self.altNameText.setEnable(True)
logger.debug( 'data %s' % self.data )
self.data['useName'] = val
def alternateNameCB(self, *args ):
self.data['useName'] = str(args[0])
# def formatAnnotation(self, apiClassName, methodName ):
# defs = []
# try:
# for methodInfo in factories.apiClassInfo[apiClassName]['methods'][methodName] :
# args = ', '.join( [ '%s %s' % (x[1],x[0]) for x in methodInfo['args'] ] )
# defs.append( '%s( %s )' % ( methodName, args ) )
# return '\n'.join( defs )
# except KeyError:
# print "could not find documentation for", apiClassName, methodName
def overloadPrecedenceCB(self, i):
logger.debug( 'overloadPrecedenceCB' )
self.data['overloadIndex'] = i
def melNameChangedCB(self, newMelName):
oldMelName = str(self.melNameTextField.getText())
if oldMelName:
self.uncrossReference( oldMelName )
if newMelName == '[None]':
print "removing melName"
self.data.pop('melName',None)
self.parent.parent.unassignMelMethod( oldMelName )
self.melNameTextField.setText('')
else:
print "adding melName", newMelName
self.crossReference( newMelName )
self.data['melName'] = newMelName
self.parent.parent.assignMelMethod( newMelName )
self.melNameTextField.setText(newMelName)
def populateMelNameMenu(self):
"""called to populate the popup menu for choosing the mel equivalent to an api method"""
self.melNameOptMenu.deleteAllItems()
menuItem(parent=self.melNameOptMenu, label='[None]', command=Callback( MethodRow.melNameChangedCB, self, '[None]' ))
# need to add a listForNone to this in windows
items = self.parent.parent.unassignedMelMethodLister.getAllItems()
if items:
for method in items:
menuItem(parent=self.melNameOptMenu, label=method, command=Callback( MethodRow.melNameChangedCB, self, str(method) ))
def getEnabledArray(self):
"""returns an array of booleans that correspond to each override method and whether they can be wrapped"""
array = []
for i, info in enumerate( self.methodInfoList ):
argUtil = factories.ApiArgUtil( self.apiClassName, self.apiMethodName, i )
array.append( argUtil.canBeWrapped() )
return array
def createMethodInstance(self, i ):
#setUITemplate('attributeEditorTemplate', pushTemplate=1)
rowSpacing = [30, 20, 400]
defs = []
#try:
argUtil = factories.ApiArgUtil( self.apiClassName, self.apiMethodName, i )
proto = argUtil.getPrototype( className=False, outputs=True, defaults=False )
enable = argUtil.canBeWrapped()
if argUtil.isDeprecated():
text(l='DEPRECATED')
# main info row
row = rowLayout( '%s_rowMain%s' % (self.methodName,i), nc=3, cw3=rowSpacing, enable=enable )
self.rows.append(row)
text(label='')
if self.overloadPrecedenceColl is not None:
# toggle for overloaded methods
radioButton(label='', collection=self.overloadPrecedenceColl,
enable = enable,
onCommand=Callback( MethodRow.overloadPrecedenceCB, self, i ))
text( l='', #l=proto,
annotation = self.methodInfoList[i]['doc'],
enable = enable)
setParent('..')
try:
argList = factories.apiClassOverrides[self.apiClassName]['methods'][self.apiMethodName][i]['args']
except (KeyError, IndexError):
argList = self.methodInfoList[i]['args']
returnType = self.methodInfoList[i]['returnType']
types = self.methodInfoList[i]['types']
args = []
for arg , type, direction in argList:
type = str(types[arg])
assert arg != 'return'
self._makeArgRow( i, type, arg, direction, self.methodInfoList[i]['argInfo'][arg]['doc'] )
if returnType:
self._makeArgRow( i, returnType, 'return', 'return', self.methodInfoList[i]['returnInfo']['doc'] )
separator(w=800, h=14)
return enable
# methodInfo = factories.apiClassInfo[self.apiClassName]['methods'][self.apiMethodName][overloadNum]
# args = ', '.join( [ '%s %s' % (x[1],x[0]) for x in methodInfo['args'] ] )
# return '( %s ) --> ' % ( args )
#except:
# print "could not find documentation for", apiClassName, methodName
def setUnitType(self, methodIndex, argName, unitType ):
if self.apiClassName not in factories.apiClassOverrides:
factories.apiClassOverrides[self.apiClassName] = { 'methods' : {} }
methodOverrides = factories.apiClassOverrides[self.apiClassName]['methods']
if self.apiMethodName not in methodOverrides:
methodOverrides[self.apiMethodName] = {}
if argName == 'return':
if methodIndex not in methodOverrides[self.apiMethodName]:
methodOverrides[self.apiMethodName][methodIndex] = { 'returnInfo' : {} }
methodOverrides[self.apiMethodName][methodIndex]['returnInfo']['unitType'] = unitType
else:
if methodIndex not in methodOverrides[self.apiMethodName]:
methodOverrides[self.apiMethodName][methodIndex] = { 'argInfo' : {} }
if argName not in methodOverrides[self.apiMethodName][methodIndex]['argInfo']:
methodOverrides[self.apiMethodName][methodIndex]['argInfo'][argName] = {}
methodOverrides[self.apiMethodName][methodIndex]['argInfo'][argName]['unitType'] = unitType
def setDirection(self, methodIndex, argName, direction ):
if self.apiClassName not in factories.apiClassOverrides:
factories.apiClassOverrides[self.apiClassName] = { 'methods' : {} }
methodOverrides = factories.apiClassOverrides[self.apiClassName]['methods']
if self.apiMethodName not in methodOverrides:
methodOverrides[self.apiMethodName] = {}
if methodIndex not in methodOverrides[self.apiMethodName]:
methodOverrides[self.apiMethodName][methodIndex] = { }
try:
argList = methodOverrides[self.apiMethodName][methodIndex]['args']
except KeyError:
argList = self.methodInfoList[methodIndex]['args']
newArgList = []
inArgs = []
outArgs = []
for i_argName, i_argType, i_direction in argList:
if i_argName == argName:
argInfo = ( i_argName, i_argType, direction )
else:
argInfo = ( i_argName, i_argType, i_direction )
if argInfo[2] == 'in':
inArgs.append( i_argName )
else:
outArgs.append( i_argName )
newArgList.append( argInfo )
methodOverrides[self.apiMethodName][methodIndex] = { }
methodOverrides[self.apiMethodName][methodIndex]['args'] = newArgList
methodOverrides[self.apiMethodName][methodIndex]['inArgs'] = inArgs
methodOverrides[self.apiMethodName][methodIndex]['outArgs'] = outArgs
def _makeArgRow(self, methodIndex, type, argName, direction, annotation=''):
COL1_WIDTH = 260
COL2_WIDTH = 120
rowLayout( nc=4, cw4=[COL1_WIDTH,COL2_WIDTH, 70, 150], **self.layout )
label = str(type)
text( l=label, ann=annotation )
text( l=argName, ann=annotation )
if direction == 'return':
text( l='(result)' )
else:
direction_om = optionMenu(l='', w=60, ann=annotation, cc=CallbackWithArgs( MethodRow.setDirection, self, methodIndex, argName ) )
for unit in ['in', 'out']:
menuItem(l=unit)
direction_om.setValue(direction)
if self._isPotentialUnitType(type) :
om = optionMenu(l='', ann=annotation, cc=CallbackWithArgs( MethodRow.setUnitType, self, methodIndex, argName ) )
for unit in ['unitless', 'linear', 'angular', 'time']:
menuItem(l=unit)
if argName == 'return':
try:
value = factories.apiClassOverrides[self.apiClassName]['methods'][self.apiMethodName][methodIndex]['returnInfo']['unitType']
except KeyError:
pass
else:
try:
value = factories.apiClassOverrides[self.apiClassName]['methods'][self.apiMethodName][methodIndex]['argInfo'][argName]['unitType']
except KeyError:
pass
try:
om.setValue(value)
except: pass
else:
text( l='', ann=annotation )
setParent('..')
def _isPotentialUnitType(self, type):
type = str(type)
return type == 'MVector' or type.startswith('double')
def _getClass(className):
for module in [core.nodetypes, core.datatypes, core.general]:
try:
pymelClass = getattr(module, className)
return pymelClass
except AttributeError:
pass
def getApiClassName( className ):
pymelClass = _getClass(className)
if pymelClass:
apiClass = None
apiClassName = None
#if cls.__name__ not in ['object']:
try:
apiClass = pymelClass.__dict__[ '__apicls__']
apiClassName = apiClass.__name__
except KeyError:
try:
apiClass = pymelClass.__dict__[ 'apicls']
apiClassName = apiClass.__name__
except KeyError:
#print "could not determine api class for", cls.__name__
apiClassName = None
return apiClassName
else:
logger.warning( "could not find class %s" % (className) )
def getClassHierarchy( className ):
pymelClass = _getClass(className)
if pymelClass:
mro = list( inspect.getmro(pymelClass) )
mro.reverse()
for i, cls in enumerate(mro):
#if cls.__name__ not in ['object']:
try:
apiClass = cls.__dict__[ '__apicls__']
apiClassName = apiClass.__name__
except KeyError:
try:
apiClass = cls.__dict__[ 'apicls']
apiClassName = apiClass.__name__
except KeyError:
#print "could not determine api class for", cls.__name__
apiClassName = None
yield cls.__name__, apiClassName
else:
logger.warning( "could not find class %s" % (className) )
def setManualDefaults():
# set some defaults
# TODO : allow these defaults to be controlled via the UI
util.setCascadingDictItem( factories.apiClassOverrides, ('MFnTransform', 'methods', 'setScalePivot', 0, 'defaults', 'balance' ), True )
util.setCascadingDictItem( factories.apiClassOverrides, ('MFnTransform', 'methods', 'setRotatePivot', 0, 'defaults', 'balance' ), True )
util.setCascadingDictItem( factories.apiClassOverrides, ('MFnTransform', 'methods', 'setRotateOrientation', 0, 'defaults', 'balance' ), True )
util.setCascadingDictItem( factories.apiClassOverrides, ('MFnSet', 'methods', 'getMembers', 0, 'defaults', 'flatten' ), False )
util.setCascadingDictItem( factories.apiClassOverrides, ('MFnDagNode', 'methods', 'instanceCount', 0, 'defaults', 'total' ), True )
util.setCascadingDictItem( factories.apiClassOverrides, ('MFnMesh', 'methods', 'createColorSetWithName', 1, 'defaults', 'modifier' ), None )
# add some manual invertibles: THESE MUST BE THE API NAMES
invertibles = [ ('MPlug', 0, 'setCaching', 'isCachingFlagSet') ,
('MPlug', 0, 'setChannelBox', 'isChannelBoxFlagSet'),
('MFnTransform', 0, 'enableLimit', 'isLimited'),
('MFnTransform', 0, 'setLimit', 'limitValue'),
('MFnTransform', 0, 'set', 'transformation'),
('MFnRadialField', 0, 'setType', 'radialType')
]
for className, methodIndex, setter, getter in invertibles:
# append to the class-level invertibles list
curr = util.getCascadingDictItem( factories.apiClassInfo, (className, 'invertibles' ), [] )
pair = (setter, getter)
if pair not in curr:
curr.append( pair )
util.setCascadingDictItem( factories.apiClassOverrides, (className, 'invertibles'), curr )
# add the individual method entries
util.setCascadingDictItem( factories.apiClassOverrides, (className, 'methods', setter, methodIndex, 'inverse' ), (getter, True) )
util.setCascadingDictItem( factories.apiClassOverrides, (className, 'methods', getter, methodIndex, 'inverse' ), (setter, False) )
nonInvertibles = [ ( 'MFnMesh', 0, 'setFaceVertexNormals', 'getFaceVertexNormals' ),
( 'MFnMesh', 0, 'setFaceVertexNormal', 'getFaceVertexNormal' ) ]
for className, methodIndex, setter, getter in nonInvertibles:
util.setCascadingDictItem( factories.apiClassOverrides, (className, 'methods', setter, methodIndex, 'inverse' ), None )
util.setCascadingDictItem( factories.apiClassOverrides, (className, 'methods', getter, methodIndex, 'inverse' ), None )
fixSpace()
def fixSpace():
"fix the Space enumerator"
enum = util.getCascadingDictItem( factories.apiClassInfo, ('MSpace', 'pymelEnums', 'Space') )
keys = enum._keys.copy()
#print keys
val = keys.pop('postTransform', None)
if val is not None:
keys['object'] = val
newEnum = util.Enum( 'Space', keys )
util.setCascadingDictItem( factories.apiClassOverrides, ('MSpace', 'pymelEnums', 'Space'), newEnum )
else:
logger.warning( "could not fix Space")
def cacheResults():
#return
res = confirmDialog( title='Cache Results?',
message="Would you like to write your changes to disk? If you choose 'No' your changes will be lost when you restart Maya.",
button=['Yes','No'],
cancelButton='No',
defaultButton='Yes')
print res
if res == 'Yes':
doCacheResults()
def doCacheResults():
print "---"
print "adding manual defaults"
setManualDefaults()
print "merging dictionaries"
# update apiClassInfo with the sparse data stored in apiClassOverrides
factories.mergeApiClassOverrides()
print "saving api cache"
factories.saveApiCache()
print "saving bridge"
factories.saveApiMelBridgeCache()
print "---"
|
CountZer0/PipelineConstructionSet
|
python/maya/site-packages/pymel-1.0.3/maintenance/pymelControlPanel.py
|
Python
|
bsd-3-clause
| 41,766
|
from django.conf.urls import patterns, url, include
from . import views
urlpatterns = patterns(
'',
url(r'^accounts/ClientLogin$', views.login, name='login'),
url(r'^reader/api/0/', include('feedhq.reader.api_urls')),
url(r'^reader/atom/(?P<content_id>.+)?$', views.stream_contents,
{'output': 'atom'}, name='atom_contents'),
)
|
vincentbernat/feedhq
|
feedhq/reader/urls.py
|
Python
|
bsd-3-clause
| 356
|
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2008-2009, James Vega
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import os
import re
import pwd
import sys
import crypt
import errno
import random
import select
import struct
import subprocess
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
def progstats():
pw = pwd.getpwuid(os.getuid())
response = format('Process ID %i running as user %q and as group %q '
'from directory %q with the command line %q. '
'Running on Python %s.',
os.getpid(), pw[0], pw[3],
os.getcwd(), ' '.join(sys.argv),
sys.version.translate(utils.str.chars, '\r\n'))
return response
class TimeoutError(IOError):
pass
def pipeReadline(fd, timeout=2):
(r, _, _) = select.select([fd], [], [], timeout)
if r:
return r[0].readline()
else:
raise TimeoutError
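# Illustrative usage sketch (hypothetical caller, not part of this plugin): pipeReadline
# waits at most `timeout` seconds for the descriptor to become readable and then reads a
# single line, otherwise it raises TimeoutError:
#   try:
#       banner = pipeReadline(inst.stdout, timeout=2)
#   except TimeoutError:
#       irc.error('The child process produced no output in time.', Raise=True)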
class Unix(callbacks.Plugin):
def errno(self, irc, msg, args, s):
"""<error number or code>
Returns the number of an errno code, or the errno code of a number.
"""
try:
i = int(s)
name = errno.errorcode[i]
except ValueError:
name = s.upper()
try:
i = getattr(errno, name)
except AttributeError:
irc.reply('I can\'t find the errno number for that code.')
return
except KeyError:
name = '(unknown)'
irc.reply(format('%s (#%i): %s', name, i, os.strerror(i)))
errno = wrap(errno, ['something'])
def progstats(self, irc, msg, args):
"""takes no arguments
Returns various unix-y information on the running supybot process.
"""
irc.reply(progstats())
def pid(self, irc, msg, args):
"""takes no arguments
Returns the current pid of the process for this Supybot.
"""
irc.reply(format('%i', os.getpid()), private=True)
pid = wrap(pid, [('checkCapability', 'owner')])
_cryptre = re.compile(r'[./0-9A-Za-z]')
def crypt(self, irc, msg, args, password, salt):
"""<password> [<salt>]
Returns the result of doing a crypt() on <password>. If <salt> is
not given, uses a random salt. If running on a glibc2 system,
prepending '$1$' to your salt will cause crypt to return an MD5sum
based crypt rather than the standard DES based crypt.
"""
def makeSalt():
s = '\x00'
while self._cryptre.sub('', s) != '':
s = struct.pack('<h', random.randrange(-(2**15), 2**15))
return s
if not salt:
salt = makeSalt()
irc.reply(crypt.crypt(password, salt))
crypt = wrap(crypt, ['something', additional('something')])
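# Illustrative note: makeSalt above draws two random bytes until both fall inside the
# [./0-9A-Za-z] crypt alphabet. A hypothetical IRC session (output shapes assumed, they
# depend on the platform's crypt(3)):
#   <user> crypt hunter2          -> classic 13-character DES-style hash
#   <user> crypt hunter2 $1$ab    -> '$1$ab$...' MD5-style hash on glibc2 systems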
def spell(self, irc, msg, args, word):
"""<word>
Returns the result of passing <word> to aspell/ispell. The results
shown are sorted from best to worst in terms of being a likely match
for the spelling of <word>.
"""
# We are only checking the first word
spellCmd = self.registryValue('spell.command')
if not spellCmd:
irc.error('The spell checking command is not configured. If one '
'is installed, reconfigure '
'supybot.plugins.Unix.spell.command appropriately.',
Raise=True)
if word and not word[0].isalpha():
irc.error('<word> must begin with an alphabetic character.')
return
if ' ' in word:
irc.error('Spaces aren\'t allowed in the word.')
return
try:
inst = subprocess.Popen([spellCmd, '-a'], close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
except OSError, e:
irc.error(e, Raise=True)
ret = inst.poll()
if ret is not None:
s = inst.stderr.readline()
if not s:
s = inst.stdout.readline()
s = s.rstrip('\r\n')
s = s.lstrip('Error: ')
irc.error(s, Raise=True)
(out, err) = inst.communicate(word)
inst.wait()
lines = filter(None, out.splitlines())
lines.pop(0) # Banner
if not lines:
irc.error('No results found.', Raise=True)
line = lines.pop(0)
line2 = ''
if lines:
line2 = lines.pop(0)
# parse the output
# aspell will sometimes list spelling suggestions after a '*' or '+'
# line for complex words.
if line[0] in '*+' and line2:
line = line2
if line[0] in '*+':
resp = format('%q may be spelled correctly.', word)
elif line[0] == '#':
resp = format('I could not find an alternate spelling for %q',word)
elif line[0] == '&':
matches = line.split(':')[1].strip()
resp = format('Possible spellings for %q: %L.',
word, matches.split(', '))
else:
resp = 'Something unexpected was seen in the [ai]spell output.'
irc.reply(resp)
spell = wrap(spell, ['something'])
def fortune(self, irc, msg, args):
"""takes no arguments
Returns a fortune from the *nix fortune program.
"""
fortuneCmd = self.registryValue('fortune.command')
if fortuneCmd:
args = [fortuneCmd]
if self.registryValue('fortune.short'):
args.append('-s')
if self.registryValue('fortune.equal'):
args.append('-e')
if self.registryValue('fortune.offensive'):
args.append('-a')
args.extend(self.registryValue('fortune.files'))
try:
inst = subprocess.Popen(args, close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=file(os.devnull))
except OSError, e:
irc.error('It seems the configured fortune command was '
'not available.', Raise=True)
(out, err) = inst.communicate()
inst.wait()
lines = out.splitlines()
lines = map(str.rstrip, lines)
lines = filter(None, lines)
irc.replies(lines, joiner=' ')
else:
irc.error('The fortune command is not configured. If fortune is '
'installed on this system, reconfigure the '
'supybot.plugins.Unix.fortune.command configuration '
'variable appropriately.')
def wtf(self, irc, msg, args, _, something):
"""[is] <something>
Returns wtf <something> is. 'wtf' is a *nix command that first
appeared in NetBSD 1.5. In most *nices, it's available in some sort
of 'bsdgames' package.
"""
wtfCmd = self.registryValue('wtf.command')
if wtfCmd:
something = something.rstrip('?')
try:
inst = subprocess.Popen([wtfCmd, something], close_fds=True,
stdout=subprocess.PIPE,
stderr=file(os.devnull),
stdin=file(os.devnull))
except OSError:
irc.error('It seems the configured wtf command was not '
'available.', Raise=True)
(out, _) = inst.communicate()
inst.wait()
if out:
response = out.splitlines()[0].strip()
response = utils.str.normalizeWhitespace(response)
irc.reply(response)
else:
irc.error('The wtf command is not configured. If it is installed '
'on this system, reconfigure the '
'supybot.plugins.Unix.wtf.command configuration '
'variable appropriately.')
wtf = wrap(wtf, [optional(('literal', ['is'])), 'something'])
Class = Unix
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
prashantpawar/supybot-rothbot
|
plugins/Unix/plugin.py
|
Python
|
bsd-3-clause
| 9,991
|
# Register a SIGTERM signal handler in order for 'coverage' tool
# to function properly
import signal
import sys
def signal_handler(signum, frame):
print('Handled Linux signal number:', signum)
sys.exit()
signal.signal(signal.SIGTERM, signal_handler)
# Run the example
import minimaltaxiapp
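# Illustrative note: coverage only writes its data file on a normal interpreter exit, so
# turning SIGTERM into sys.exit() lets a supervised run still flush its results, e.g.
# (command line assumed):
#   coverage run minimaltaxiapp_with_signalhandler.py    # later: kill -TERM <pid>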
|
caran/SecureGateway
|
examples/minimal/minimaltaxiapp_with_signalhandler.py
|
Python
|
bsd-3-clause
| 304
|
from datetime import datetime
from django.db import models
from django.utils import timezone
class Trip(models.Model):
# real fields
scenic = models.BooleanField(blank=True, default=False)
start_ts = models.DateTimeField(blank=True, null=True, default=timezone.now)
status = models.CharField(
blank=True,
default='unconfirmed',
max_length=200)
|
tosfan4ever/hacktheplanet
|
api/models/_trip.py
|
Python
|
bsd-3-clause
| 358
|
"""Common imports for generated clouduseraccounts client library."""
# pylint:disable=wildcard-import
import pkgutil
from googlecloudsdk.third_party.apitools.base.py import *
from googlecloudsdk.third_party.apis.clouduseraccounts.beta.clouduseraccounts_beta_client import *
from googlecloudsdk.third_party.apis.clouduseraccounts.beta.clouduseraccounts_beta_messages import *
__path__ = pkgutil.extend_path(__path__, __name__)
|
flgiordano/netcash
|
+/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/clouduseraccounts/beta/__init__.py
|
Python
|
bsd-3-clause
| 429
|
# encoding: latin2
"""Algorithm utilities
G{packagetree core}
"""
__author__ = "Juan C. Duque"
__credits__ = "Copyright (c) 2009-11 Juan C. Duque"
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "RiSE Group"
__email__ = "contacto@rise-group.org"
import copy
import numpy
import dist2Regions
import objFunctions
import distanceFunctions
import selectionTypeFunctions
class AreaManager:
"""
This class contains operations at areal level, including the generation of
instances of areas, a wide range of area2area and area2region distance
functions.
"""
def __init__(self, w, y, distanceType="EuclideanSquared", variance="false"):
"""
@type w: dictionary
@param w: With B{key} = area Id, and B{value} = list with Ids of neighbours of
each area.
@type y: dictionary
@param y: With B{key} = area Id, and B{value} = list with attribute
values.
@type distanceType: string
@keyword distanceType: Function to calculate the distance between areas. Default value I{distanceType = 'EuclideanSquared'}.
@type variance: boolean
@keyword variance: Boolean indicating if the data have variance matrix. Default value I{variance = 'false'}.
"""
self.y = y
self.areas = {}
self.noNeighs = set([])
self.variance = variance
self.distanceType = distanceType
self.createAreas(w, y)
self.distanceStatDispatcher = dist2Regions.distanceStatDispatcher
def createAreas(self, w, y):
"""
Creates instances of areas based on a sparse weights matrix (w) and a
data array (y).
"""
n = len(self.y)
self.distances = {}
noNeighs = []
for key in range(n):
data = y[key]
try:
neighbours = w[key]
except:
neighbours = {}
w[key] = {}
if len(w[key]) == 0:
self.noNeighs = self.noNeighs | set([key])
a = AreaCl(key, neighbours, data, self.variance)
self.areas[key] = a
if len(self.noNeighs) > 0:
print "Disconnected areas neighs: ", list(self.noNeighs)
def returnDistance2Area(self, area, otherArea):
"""
Returns the distance between two areas
"""
try:
if area.id < otherArea.id:
dist = self.distances[(area.id, otherArea.id)]
else:
dist = self.distances[(otherArea.id, area.id)]
except:
dist = 0.0
return dist
def getDataAverage(self, areaList, dataIndex):
"""
Returns the attribute centroid of a set of areas
"""
dataAvg = len(dataIndex) * [0.0]
for aID in areaList:
i = 0
for index in dataIndex:
dataAvg[i] += self.areas[aID].data[index] /len(areaList)
i += 1
return dataAvg
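# Illustrative sketch: for a hypothetical region of three areas whose attribute at
# index 0 is 2.0, 4.0 and 6.0, getDataAverage(areaList, [0]) returns [4.0] -- each
# area contributes data[index] / len(areaList) to the running sum.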
def getDistance2Region(self, area, areaList, distanceStat="Centroid", weights=[], indexData=[]):
"""
Returns the distance from an area to a region (defined as a list of
area IDs)
"""
if isinstance(distanceStat,str):
if len(indexData) == 0:
indexData = range(len(area.data))
return self.distanceStatDispatcher[distanceStat](self,area, areaList, indexData)
else:
distance = 0.0
i = 0
for dS in distanceStat:
if len(indexData) == 0:
indexDataDS = range(len(area.data))
else:
indexDataDS = indexData[i]
if len(weights) > 0:
distance += weights[i] * self.distanceStatDispatcher[dS](self, area, areaList, indexDataDS)
else:
distance += self.distanceStatDispatcher[dS](self,area, areaList, indexDataDS)
i += 1
return distance
def getDistance2AreaMin(self, area, areaList):
"""
Return the ID of the area whitin a region that is closest to an area
outside the region
"""
areaMin = -1
distanceMin = 1e300
for aID in areaList:
if self.distances[area.id, aID] < distanceMin:
areaMin = aID
distanceMin = self.distances[area.id, aID]
return areaMin
def checkFeasibility(self, solution):
"""
Checks feasibility of a candidate solution
"""
n = len(solution)
regions = {}
for i in range(n):
try:
regions[solution[i]] = regions[solution[i]] + [i]
except:
regions[solution[i]] = [i]
feasible = 1
r = len(regions)
for i in range(r):
newRegion = set([])
areas2Eval = regions[i]
if len(areas2Eval) > 1:
for area in areas2Eval:
newRegion = newRegion | (set(self.areas[area].neighs) & set(areas2Eval))
if set(areas2Eval) -newRegion != set([]):
feasible = 0
break
return feasible
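# Illustrative note: a solution is feasible only when every region is contiguous. For a
# hypothetical map of four areas where 0-1-2 form a chain and area 3 touches none of
# them, the labelling [0, 0, 0, 0] returns 0 (area 3 cannot be reached from its region)
# while [0, 0, 0, 1] returns 1.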
class BasicMemory:
"""
Keeps the minimum amount of information about a given solution. It keeps the
Objective function value (self.objInfo) and the region each area has been assigned to
(self.regions)
"""
def __init__(self, objInfo=99999999E10, regions={}):
"""
@type objInfo: float
@keyword objInfo: Objective function value.
@type regions: list
@keyword regions: list of region IDs.
"""
self.objInfo = objInfo
self.regions = regions
def updateBasicMemory(self, rm):
"""
Updates BasicMemory when a solution is modified.
"""
self.objInfo = rm.objInfo
self.regions = rm.returnRegions()
class ExtendedMemory(BasicMemory):
"""
This memory is designed to allow the algorithm to go back to a given solution
(different from the current solution). It gives RegionMaker all the information that must be
available in order to continue an iteration process.
"""
def __init__(self, objInfo=99999999E10, area2Region={}, region2Area={},
intraBorderingAreas={}):
"""
@type objInfo: float
@keyword objInfo: Objective function value
@type area2region: dictionary
@keyword area2region: region to which each area belongs.
@type region2area: dictionary
@keyword region2area: areas within the region.
@type intraBorderingAreas: dictionary
@keyword intraBorderingAreas: areas in the border of the region.
"""
BasicMemory.__init__(self, objInfo, {})
self.area2Region = area2Region
self.region2Area = region2Area
self.intraBorderingAreas = intraBorderingAreas
def updateExtendedMemory(self, rm):
"""
Updates ExtendedMemory when a solution is modified.
"""
BasicMemory.updateBasicMemory(self, rm)
self.area2Region = rm.area2Region
self.region2Area = rm.region2Area
self.intraBorderingAreas = rm.intraBorderingAreas
class RegionMaker:
"""
This class deals with the large number of methods required during both the
construction and local search phases. It takes the area instances and
coordinates them during the solution process, and sends information to
Memory when needed.
"""
def __init__(self, am, pRegions=2, initialSolution=[],
seedSelection = "kmeans",
distanceType = "EuclideanSquared",
distanceStat = "Centroid",
selectionType = "Minimum",
alpha = 0.2,
numRegionsType = "Exogenous",
objectiveFunctionType = "SS",
threshold = 0.0,
weightsDistanceStat = [],
weightsObjectiveFunctionType = [],
indexDataStat = [],
indexDataOF = []):
"""
@type am: AreaManager
@param am: Area manager object.
@type pRegions: integer
@keyword pRegions: Number of regions in scheme
@type seeds: list
@keyword seeds: List of area IDs for initial seeds.
@type distanceType: string
@keyword distanceType: Type of distance to be used, by default "EuclideanSquared"
@type distanceStat: string
@keyword distanceStat: Type of conversion used for summarizing distance, by default "Centroid"
@type selectionType: string
@keyword selectionType: Type of selection criterion for construction phase, by default "Minimum"
@type alpha: float.
@keyword alpha: float in the interval [0, 1]; used by GRASP selection only.
@type numRegionsType: string
@keyword numRegionsType: Type of constructive method (Exogenous, EndogenousThreshold,
EndogenousRange), by default "Exogenous"
@type objectiveFunctionType: string
@keyword objectiveFunctionType: Method used to calculate the objective function, by default "SS"
@type threshold: float
@keyword threshold: Minimum population threshold to be satisfied for each region
# FIXME: I do not know what these attributes are; please read this class's functions, since their descriptions are not very complete because I was not sure what some of them do.
@type weightsDistanceStat: list
@keyword weightsDistanceStat: weights applied to each distanceStat entry when several are given.
@type weightsObjectiveFunctionType: list
@keyword weightsObjectiveFunctionType: weights applied to each objectiveFunctionType entry when several are given.
@type indexDataStat: list
@keyword indexDataStat: data indices used when summarizing distances.
@type indexDataOF: list
@keyword indexDataOF: data indices used when evaluating the objective function.
"""
self.am = am
self.areas = copy.deepcopy(am.areas)
self.distanceType = distanceType
self.distanceStat = distanceStat
self.weightsDistanceStat = weightsDistanceStat
self.indexDataStat = indexDataStat
self.weightsObjectiveFunctionType = weightsObjectiveFunctionType
self.indexDataOF = indexDataOF
self.selectionType = selectionType
self.objectiveFunctionType = objectiveFunctionType
self.n = len(self.areas)
self.unassignedAreas = self.areas.keys()
self.assignedAreas = []
self.area2Region = {}
self.region2Area = {}
self.potentialRegions4Area = {}
self.intraBorderingAreas = {}
self.candidateInfo = {}
self.externalNeighs = set([])
self.alpha = alpha
self.numRegionsType = numRegionsType
self.objectiveFunctionTypeDispatcher = objFunctions.objectiveFunctionTypeDispatcher
self.selectionTypeDispatcher = selectionTypeFunctions.selectionTypeDispatcher
self.neighSolutions = {(0,0): 9999}
self.regionMoves = set([])
self.distances = {}
self.NRegion = []
self.N = 0
self.data = {}
self.objInfo = -1
self.assignAreasNoNeighs()
# PREDEFINED NUMBER OF REGIONS
if self.numRegionsType == "Exogenous":
if initialSolution == []:
self.pRegions = pRegions
seeds = self.kmeansInit()
self.setSeeds(seeds)
c = 0
while len(self.unassignedAreas) != 0:
self.constructRegions()
c += 1
self.objInfo = self.getObj()
elif initialSolution != []:
self.pRegions = len(numpy.unique(initialSolution))
seeds = []
for s in numpy.unique(initialSolution):
seeds.append(initialSolution.index(s))
self.setSeeds(seeds)
regions2create = dict.fromkeys(range(len(seeds)))
c = 0
for i in initialSolution:
try:
regions2create[i].append(c)
except:
regions2create[i] = [c]
c += 1
c = 0
for i in regions2create.keys():
self.unassignedAreas = regions2create[i][1:]
while len(self.unassignedAreas) != 0:
self.constructRegions(filteredCandidates=self.unassignedAreas,
filteredReg=i)
c += 1
self.objInfo = self.getObj()
# NUMBER OF REGIONS IS ENDOGENOUS WITH A THRESHOLD VALUE
if self.numRegionsType == "EndogenousThreshold":
self.constructionStage = "growing"
try:
self.areas[self.areas.keys()[0]].thresholdVar
except:
self.extractThresholdVar()
self.regionalThreshold = threshold
c = 0
self.feasibleRegions = {}
self.regionValue = {}
seeds = []
for aID in self.areas:
if self.areas[aID].thresholdVar >= self.regionalThreshold:
seed = aID
seeds = seeds + [seed]
self.regionValue[c] = self.areas[seed].thresholdVar
self.feasibleRegions[c] = [seed]
self.removeRegionAsCandidate()
c += 1
self.setSeeds(seeds)
while len(self.unassignedAreas) != 0:
numpy.random.shuffle(self.unassignedAreas)
vals = []
for index in self.unassignedAreas:
vals += [self.areas[index].thresholdVar]
seed = self.unassignedAreas[0]
self.setSeeds([seed], c)
self.regionValue[c] = self.areas[seed].thresholdVar
if self.regionValue[c] >= self.regionalThreshold:
self.feasibleRegions[c] = [seed]
self.removeRegionAsCandidate()
c += 1
else:
feasibleThreshold = 1
while self.regionValue[c] < self.regionalThreshold:
self.addedArea = -1
try:
self.constructRegions()
self.regionValue[c] += self.areas[self.addedArea].thresholdVar
except:
feasibleThreshold = 0
break
if feasibleThreshold == 1:
self.feasibleRegions[c] = self.region2Area[c]
self.removeRegionAsCandidate()
c += 1
# NUMBER OF REGIONS IS ENDOGENOUS WITH A RANGE VALUE
if self.numRegionsType == "EndogenousRange":
self.constructionStage = "growing" # there are two values for constructionStage: "growing" and "enclaves"
try:
self.areas[self.areas.keys()[0]].thresholdVar
except:
self.extractThresholdVar()
self.regionalThreshold = threshold
c = 0
self.feasibleRegions = {}
while len(self.unassignedAreas) != 0:
# select seed
numpy.random.shuffle(self.unassignedAreas)
seed = self.unassignedAreas[0]
self.setSeeds([seed],c)
# regionRange contains the current range per region
# regionalThreshold is the predefined threshold value
self.regionRange = {}
maxValue = self.areas[seed].thresholdVar
minValue = self.areas[seed].thresholdVar
currentRange = maxValue - minValue
self.regionRange[c] = currentRange
# grow region if possible
stop = 0
while stop == 0:
upplim = maxValue + self.regionalThreshold - currentRange
lowlim = minValue - self.regionalThreshold + currentRange
feasibleNeigh = 0
toRemove = []
for ext in self.externalNeighs:
if self.areas[ext].thresholdVar <= upplim and self.areas[ext].thresholdVar >= lowlim:
feasibleNeigh = 1
if self.areas[ext].thresholdVar > upplim or self.areas[ext].thresholdVar < lowlim:
toRemove.append(ext)
self.toRemove = toRemove
if feasibleNeigh == 0:
stop = 1
if feasibleNeigh == 1:
try:
self.constructRegions()
if self.areas[self.addedArea].thresholdVar > maxValue:
maxValue = self.areas[self.addedArea].thresholdVar
if self.areas[self.addedArea].thresholdVar < minValue:
minValue = self.areas[self.addedArea].thresholdVar
currentRange = maxValue - minValue
self.regionRange[c] = currentRange
except:
stop = 1
self.feasibleRegions[c] = self.region2Area[c]
self.removeRegionAsCandidate()
c += 1
self.getIntraBorderingAreas()
def kmeansInit(self):
y = self.am.y
n = len(y)
distances = numpy.ones(n)
total = sum(distances)
probabilities = map(lambda x: x / float(total), distances)
seeds = []
for k in range(self.pRegions):
random = numpy.random.uniform(0, 1)
find = False
acum = 0
cont = 0
while find == False:
inf = acum
sup = acum + probabilities[cont]
if inf <= random <= sup:
find = True
seeds += [cont]
for area in self.am.areas:
distancei = min(map(lambda x: self.am.areas[area].returnDistance2Area(self.am.areas[x], distanceType=self.distanceType), seeds))
distances[area] = distancei
total = sum(distances)
probabilities = map(lambda x: x / float(total), distances)
else:
cont += 1
acum = sup
return seeds
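# Illustrative note: kmeansInit is a k-means++-style seeding. The first seed is drawn
# uniformly; each subsequent seed is drawn with probability proportional to an area's
# distance (squared, under the default EuclideanSquared distanceType) to its closest
# seed so far, which tends to spread the initial regions across the map. With
# pRegions == 3 it returns three area indices, e.g. a hypothetical [17, 4, 52].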
def extractThresholdVar(self):
"""
Separate aggregation variables (data) from the variable selected
to satisfy a threshold value (thresholdVar)
"""
self.totalThresholdVar = 0.0
for areaId in self.areas.keys():
self.areas[areaId].thresholdVar = self.areas[areaId].data[-1]
self.areas[areaId].data = self.areas[areaId].data[0: -1]
self.totalThresholdVar += self.areas[areaId].thresholdVar
def removeRegionAsCandidate(self):
"""
Remove a region from candidates
"""
for i in self.candidateInfo.keys():
a, r = i
if r in self.feasibleRegions:
self.candidateInfo.pop(i)
def returnRegions(self):
"""
Return regions created
"""
areasId = self.area2Region.keys()
areasId = numpy.sort(areasId).tolist()
return [self.area2Region[area] for area in areasId]
def resetNow(self):
"""
Reset all variables
"""
self.unassignedAreas = self.areas.keys()
self.assignedAreas = []
self.area2Region = {}
self.region2Area = {}
self.potentialRegions4Area = {}
self.intraBorderingAreas = {}
self.candidateInfo = {}
self.externalNeighs = set([])
self.neighsMinusAssigned = set([])
def setSeeds(self, seeds, c=0):
"""
Sets the initial seeds for clustering
"""
if self.numRegionsType == "Exogenous" and len(seeds) <= self.pRegions:
idx = range(self.n)
didx = list((set(idx) - set(seeds)) - self.am.noNeighs)
numpy.random.shuffle(didx)
self.seeds = seeds + didx[0:(self.pRegions - len(seeds))]
else:
self.seeds = seeds
for seed in self.seeds:
self.NRegion += [0]
self.assignSeeds(seed, c)
c += 1
def assignAreaStep1(self, areaID, regionID):
"""
Assign an area to a region
"""
a = self.areas[areaID]
neighs = a.neighs
try:
self.region2Area[regionID].append(areaID)
if self.objectiveFunctionType == "GWalt":
try:
self.NRegion[regionID] += a.data[0]
for index in range(1,len(a.data)):
self.data[regionID][index - 1] += a.data[index] * a.data[0]
except:
self.NRegion[regionID] = a.data[0]
for index in range(1, len(a.data)):
self.data[regionID][index - 1] = a.data[index] * a.data[0]
self.N += a.data[0]
except:
self.region2Area[regionID] = [areaID]
if self.objectiveFunctionType == "GWalt":
self.NRegion[regionID] = a.data[0]
for index in range(1, len(a.data)):
if index == 1:
self.data[regionID] = [a.data[index] * a.data[0]]
else:
self.data[regionID] += [a.data[index] * a.data[0]]
self.N += a.data[0]
self.area2Region[areaID] = regionID
try:
aid = self.unassignedAreas.remove(areaID)
except:
pass
self.assignedAreas.append(areaID)
setNeighs = set(neighs)
setAssigned = set(self.assignedAreas)
self.oldExternal = self.externalNeighs
self.externalNeighs = (self.externalNeighs | setNeighs) - setAssigned
self.newExternal = self.externalNeighs - self.oldExternal
self.neighsMinusAssigned = setNeighs - setAssigned
def assignSeeds(self, areaID, regionID):
"""
Assign an area to a region and update the potential regions for its neighbours
"""
self.assignAreaStep1(areaID, regionID)
for neigh in self.neighsMinusAssigned:
try:
self.potentialRegions4Area[neigh] = self.potentialRegions4Area[neigh]|set([regionID])
except:
self.potentialRegions4Area[neigh] = set([regionID])
try:
self.potentialRegions4Area.pop(areaID)
except:
pass
self.changedRegion = 'null'
self.newExternal = self.potentialRegions4Area.keys()
def assignAreasNoNeighs(self):
"""
Assign to the region "-1" for the areas without neighbours
"""
noNeighs = list(self.am.noNeighs)
nr = -1
for areaID in noNeighs:
self.area2Region[areaID] = nr
try:
aid = self.unassignedAreas.remove(areaID)
except:
pass
self.assignedAreas.append(areaID)
setAssigned = set(self.assignedAreas)
nr = nr - 1
def assignArea(self, areaID, regionID):
"""
Assign an area to a region and updates potential regions for neighs
"""
self.changedRegion = regionID
self.addedArea = areaID
self.assignAreaStep1(areaID, regionID)
for neigh in self.neighsMinusAssigned:
try:
self.potentialRegions4Area[neigh] = self.potentialRegions4Area[neigh]|set([regionID])
except:
self.potentialRegions4Area[neigh] = set([regionID])
try:
self.potentialRegions4Area.pop(areaID)
except:
pass
def returnBorderingAreas(self, regionID):
"""
Returns bordering areas of a region
"""
areas2Eval = self.returnRegion2Area(regionID)
borderingAreas = set([])
for area in areas2Eval:
try:
if len(self.intraBorderingAreas[area]) > 0:
borderingAreas = borderingAreas | set([area])
except:
pass
return borderingAreas
def returnIntraBorderingAreas(self):
"""
Returns intrabordering areas
"""
return self.intraBorderingAreas
def getIntraBorderingAreas(self):
"""
Gets the intrabordering areas
"""
self.intraBorderingAreas = {}
if self.numRegionsType == "Exogenous":
nr = range(self.pRegions)
else:
nr = self.feasibleRegions
for regionID in nr:
setNeighsNoRegion = set([])
try:
areas2Eval = self.region2Area[regionID]
except:
areas2Eval = []
for area in areas2Eval:
setNeighsNoRegion = setNeighsNoRegion | (set(self.areas[area].neighs) - set(areas2Eval))
for neigh in list(setNeighsNoRegion):
try:
self.intraBorderingAreas[neigh]=self.intraBorderingAreas[neigh]|set([regionID])
except:
self.intraBorderingAreas[neigh]=set([regionID])
def returnRegion2Area(self, regionID):
"""
Return the areas of a region
"""
return self.region2Area[regionID]
def constructRegions(self, filteredCandidates=-99, filteredReg=-99):
"""
Construct potential regions per area
"""
lastRegion = 0
for areaID in self.potentialRegions4Area.keys():
a = self.areas[areaID]
regionIDs = list(self.potentialRegions4Area[areaID])
for region in regionIDs:
if (self.numRegionsType != "Exogenous" and self.constructionStage == "growing"
and region in self.feasibleRegions):
# once a region reaches the threshold, its growth is rejected until the
# assignment of enclaves
pass
else:
if filteredCandidates == -99:
if areaID not in self.newExternal and region != self.changedRegion:
lastRegion = region
pass
else:
if self.selectionType != "FullRandom":
areasIdsIn = self.region2Area[region]
areasInNow = [ self.areas[aID] for aID in areasIdsIn ]
regionDistance = self.am.getDistance2Region(self.areas[areaID], self.region2Area[region],
distanceStat = self.distanceStat, weights = self.weightsDistanceStat,
indexData = self.indexDataStat)
else:
regionDistance = 0.0
self.candidateInfo[(areaID, region)] = regionDistance
elif filteredCandidates != -99 and areaID in filteredCandidates and region == filteredReg:
areasIdsIn = self.region2Area[region]
areasInNow = [ self.areas[aID] for aID in areasIdsIn ]
regionDistance = self.am.getDistance2Region(self.areas[areaID], self.region2Area[region],
distanceStat = self.distanceStat, weights = self.weightsDistanceStat,
indexData = self.indexDataStat)
self.candidateInfo[(areaID,region)] = regionDistance
else:
pass
if len(self.candidateInfo) == 0:
self.changedRegion = lastRegion
if self.numRegionsType == "EndogenousRange":
self.filterCandidate(self.toRemove)
self.selectionTypeDispatcher[self.selectionType](self)
def filterCandidate(self,removeCandidate=[]):
"""
Filter candidates
"""
if len(removeCandidate) > 0:
toRemove = []
for id in removeCandidate:
for cand,reg in self.candidateInfo.keys():
if cand == id:
toRemove.append((cand,reg))
for remov in toRemove:
self.candidateInfo.pop(remov)
def graspList(self, xList, alpha=0.0):
"""
Return random index of values with specified range.
"""
maxX = max(xList)
minX = min(xList)
xRangeMax = minX + ((maxX - minX) * alpha)
candidates = [i <= xRangeMax for i in xList]
indices = indexMultiple(candidates, 1)
nCandidates = len(indices)
idx = range(nCandidates)
numpy.random.shuffle(idx)
random = idx[0]
index4Grasp = indices[random]
return index4Grasp
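# Illustrative note: graspList builds the GRASP restricted candidate list. With
# alpha == 0.0 only minimum-cost candidates qualify (pure greedy); with alpha == 1.0
# every candidate qualifies (pure random). For a hypothetical xList = [3.0, 1.0, 2.0]
# and alpha = 0.5 the threshold is 1.0 + 0.5 * (3.0 - 1.0) = 2.0, so indices 1 and 2
# are eligible and one of them is returned at random.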
def getObjective(self, region2AreaDict):
"""
Return the value of the objective function from regions to area dictionary
"""
if (type(self.objectiveFunctionType) == type('objectiveFunctionType')):
if len(self.indexDataOF) == 0:
indexData = range(len(self.areas[0].data))
else:
indexData = self.indexDataOF
return self.objectiveFunctionTypeDispatcher[self.objectiveFunctionType](self, region2AreaDict, indexData)
else:
distance = 0.0
i = 0
for oFT in self.objectiveFunctionType:
if len(self.indexDataOF) == 0:
indexData = range(len(self.areas[0].data))
else:
indexData = self.indexDataOF[i]
if len(self.weightsObjectiveFunctionType) > 0:
distance += self.weightsObjectiveFunctionType[i] * self.objectiveFunctionTypeDispatcher[oFT](self, region2AreaDict, indexData)
else:
distance += self.objectiveFunctionTypeDispatcher[oFT](self, region2AreaDict, indexData)
i += 1
return distance
def getObjectiveFast(self, region2AreaDict, modifiedRegions=[]):
"""
Return the value of the objective function from regions to area dictionary
"""
if (type(self.objectiveFunctionType) == type('objectiveFunctionType')):
if len(self.indexDataOF) == 0:
indexData = range(len(self.areas[0].data))
else:
indexData = self.indexDataOF
return self.objectiveFunctionTypeDispatcher[self.objectiveFunctionType+'f'](self, region2AreaDict, modifiedRegions, indexData)
else:
distance = 0.0
i = 0
for oFT in self.objectiveFunctionType:
if len(self.indexDataOF) == 0:
indexData = range(len(self.areas[0].data))
else:
indexData = self.indexDataOF[i]
if len(self.weightsObjectiveFunctionType) > 0:
distance += self.weightsObjectiveFunctionType[i] * self.objectiveFunctionTypeDispatcher[oFT](self, region2AreaDict, indexData)
else:
distance += self.objectiveFunctionTypeDispatcher[oFT](self, region2AreaDict, indexData)
i += 1
return distance
def getLambda(self):
"""
        Return a diagonal matrix whose r-th entry is NRegion[r] / N (each region's share of the total).
"""
L = numpy.matrix(numpy.identity(self.pRegions))
for r in range(self.pRegions):
L[r, r] = 1.0 * self.NRegion[r] / self.N
return L
def getB(self):
"""
Return matrix of parameters of all regions
"""
B = numpy.matrix(numpy.zeros(len(self.data[0]) * self.pRegions)).T
index = 0
for r in range(self.pRegions):
for i in range(len(self.data[0])):
B[index, 0] = self.data[r][i] / self.NRegion[r]
index += 1
return B
def getY(self):
"""
Return matrix of the average variance-covariance of all regions
"""
Y = numpy.matrix(numpy.identity(len(self.data[0])))
centroids = {}
for r in range(self.pRegions):
centroids[r] = calculateCentroid([self.areas[aID] for aID in self.region2Area[r]])
for r in range(self.pRegions):
Y += centroids[r].var * numpy.power(self.NRegion[r] / self.N, 2)
return Y
def getH(self):
"""
Return composite matrix
"""
        E = numpy.matrix(numpy.ones((self.pRegions, self.pRegions)))
L = self.getLambda()
H = L - L * E * L
return H
def getObj(self):
"""
Return the value of the objective function
"""
if self.objInfo < 0:
self.calcObj()
return self.objInfo
def calcObj(self):
"""
Calculate the value of the objective function
"""
self.objInfo = self.getObjective(self.region2Area)
def recalcObj(self, region2AreaDict, modifiedRegions=[]):
"""
Re-calculate the value of the objective function
"""
if "objDict" in dir(self):
obj = self.getObjectiveFast(region2AreaDict, modifiedRegions)
else:
obj = self.getObjective(region2AreaDict)
return obj
def checkFeasibility(self, regionID, areaID, region2AreaDict):
"""
        Check whether a region remains spatially contiguous after removing an area from it
"""
areas2Eval = list(region2AreaDict[regionID])
areas2Eval.remove(areaID)
seedArea = areas2Eval[0]
newRegion = (set([seedArea]) | set(self.areas[seedArea].neighs)) & set(areas2Eval)
areas2Eval.remove(seedArea)
flag = 1
newAdded = newRegion - set([seedArea])
newNeighs = set([])
while flag:
for area in newAdded:
newNeighs = newNeighs | (((set(self.areas[area].neighs) &
set(region2AreaDict[regionID])) -
set([areaID])) - newRegion)
areas2Eval.remove(area)
newNeighs = newNeighs - newAdded
newAdded = newNeighs
newRegion = newRegion | newAdded
if len(areas2Eval) == 0:
feasible = 1
flag = 0
break
elif newNeighs == set([]) and len(areas2Eval) > 0:
feasible = 0
flag = 0
break
return feasible
def calculateRegionValueThreshold(self):
"""
        Compute each region's value as the sum of the threshold variable over its member areas.
"""
if self.numRegionsType == "Exogenous":
nr = range(self.pRegions)
else:
nr = range(len(self.region2Area.keys()))
for regionID in nr:
self.regionValue[regionID] = 0
areas2Eval = self.region2Area[regionID]
for area in areas2Eval:
self.regionValue[regionID] += self.areas[area].thresholdVar
def improvingCandidates(self):
"""
Select solutions that improve the current objective function.
"""
intraCopy = copy.deepcopy(self.intraBorderingAreas)
region2AreaCopy = copy.deepcopy(self.region2Area)
area2RegionCopy = copy.deepcopy(self.area2Region)
self.neighSolutions = {}
for area in intraCopy.keys():
regionIn = self.area2Region[area]
regions4Move = list(self.intraBorderingAreas[area])
if (len(self.region2Area[regionIn]) > 1):
for region in regions4Move:
self.swapArea(area, region, region2AreaCopy, area2RegionCopy)
obj = self.recalcObj(region2AreaCopy)
self.swapArea(area, regionIn, region2AreaCopy, area2RegionCopy)
if obj < self.objInfo:
f = self.checkFeasibility(regionIn, area, self.region2Area)
if f == 1:
if self.numRegionsType == "Exogenous":
self.neighSolutions[(area, region)] = obj
elif self.numRegionsType == "EndogenousThreshold":
if self.regionValue[region] >= self.regionalThreshold and self.regionValue[regionIn] >= self.regionalThreshold:
self.neighSolutions[(area,region)] = obj
def allCandidates(self):
"""
Select neighboring solutions.
"""
intraCopy = copy.deepcopy(self.intraBorderingAreas)
region2AreaCopy = copy.deepcopy(self.region2Area)
area2RegionCopy = copy.deepcopy(self.area2Region)
self.neighSolutions = {}
for area in intraCopy.keys():
regionIn = self.area2Region[area]
regions4Move = list(self.intraBorderingAreas[area])
if (len(self.region2Area[regionIn]) > 1):
for region in regions4Move:
f = self.checkFeasibility(regionIn, area, self.region2Area)
if f == 1:
if self.numRegionsType == "Exogenous":
self.swapArea(area, region, region2AreaCopy, area2RegionCopy)
modifiedRegions = [region,regionIn]
obj = self.recalcObj(region2AreaCopy, modifiedRegions)
self.neighSolutions[(area,region)] = obj
self.swapArea(area, regionIn, region2AreaCopy, area2RegionCopy)
elif self.numRegionsType == "EndogenousThreshold":
self.swapArea(area, region, region2AreaCopy, area2RegionCopy)
if self.regionValue[region] >= self.regionalThreshold and self.regionValue[regionIn] >= self.regionalThreshold:
obj = self.recalcObj(region2AreaCopy)
self.neighSolutions[(area, region)] = obj
self.swapArea(area, regionIn, region2AreaCopy, area2RegionCopy)
def allMoves(self):
"""
        Select all possible moves.
"""
moves = []
for area in self.intraBorderingAreas:
regionIn = self.area2Region[area]
regions4Move = list(self.intraBorderingAreas[area])
if len(self.region2Area[regionIn]) > 1:
for region in regions4Move:
moves = moves + [(area, region)]
return moves
def swapArea(self, area, newRegion, region2AreaDict, area2RegionDict):
"""
        Remove an area from its current region and append it to another one
"""
oldRegion = area2RegionDict[area]
region2AreaDict[oldRegion].remove(area)
region2AreaDict[newRegion].append(area)
area2RegionDict[area] = newRegion
if self.objectiveFunctionType == "GWalt":
a = self.areas[area]
self.NRegion[newRegion] += a.data[0]
self.NRegion[oldRegion] -= a.data[0]
for index in range(1, len(a.data)):
self.data[newRegion][index - 1] += a.data[index] * a.data[0]
for index in range(1, len(a.data)):
self.data[oldRegion][index-1] -= a.data[index] * a.data[0]
if self.numRegionsType == "EndogenousThreshold":
self.regionValue[newRegion] += self.areas[area].thresholdVar
self.regionValue[oldRegion] -= self.areas[area].thresholdVar
def greedyMove(self, typeGreedy="random"):
"""
        Drive the current solution towards a local optimum using greedy moves
"""
flag = 1
self.round = 0
while flag:
self.improvingCandidates()
self.round = 1
if len(self.neighSolutions.keys()) == 0:
flag = 0
else:
if typeGreedy == "exact":
sorted = sortedKeys(self.neighSolutions)
move = sorted[numpy.random.randint(0, len(sorted))]
area, region = move
else:
values = self.neighSolutions.values()
sorted = sortedKeys(self.neighSolutions)
minVal = min(self.neighSolutions.values())
indicesMin = indexMultiple(values, minVal)
nInd = len(indicesMin)
idx = range(nInd)
numpy.random.shuffle(idx)
minIndex = indicesMin[idx[0]]
area,region = self.neighSolutions.keys()[minIndex]
self.moveArea(area, region)
# self.objInfo = minVal
self.regions = self.returnRegions()
def updateTabuList(self,newValue,aList,endInd):
"""
Add a new value to the tabu list.
"""
return [newValue] + aList[0:endInd-1]
def tabuMove(self, tabuLength=5, convTabu=5, typeTabu="exact"):
"""
        Drive the current solution towards a local optimum using tabu search
"""
aspireOBJ = self.objInfo
currentOBJ = self.objInfo
aspireRegions = self.returnRegions()
region2AreaAspire = copy.deepcopy(self.region2Area)
area2RegionAspire = copy.deepcopy(self.area2Region)
currentRegions = aspireRegions
bestAdmisable = 9999999.0
tabuList = numpy.zeros(tabuLength)
tabuList = tabuList.tolist()
cBreak = []
c = 1
self.round = 0
resList = []
epsilon = 1e-10
while c <= convTabu:
# print "regions: ",self.returnRegions(), self.objInfo
if typeTabu == "exact":
self.objDict = objFunctions.makeObjDict(self)
self.allCandidates()
#print "soluciones vecinas",self.neighSolutions
else:
moves = self.allMoves()
if (typeTabu == "exact" and len(self.neighSolutions) == 0) or (typeTabu == "random" and len(moves) == 0):
c += convTabu
else:
if typeTabu == "exact":
sorted = sortedKeys(self.neighSolutions)
end = len(sorted)
else:
end = len(moves)
run = 0
while run < end:
if typeTabu == "exact":
move = sorted[run]
area,region = move
obj4Move = self.neighSolutions[move]
candidate = 1
# print "** selected move (area,region)",move
else:
candidate = 0
region2AreaCopy = copy.deepcopy(self.region2Area)
area2RegionCopy = copy.deepcopy(self.area2Region)
while (candidate == 0 and len(moves) > 0):
move = moves[numpy.random.randint(0, len(moves))]
moves.remove(move)
area, region = move
run += 1
regionIn = self.area2Region[area]
f = self.checkFeasibility(regionIn, area, self.region2Area)
if f == 1:
if self.numRegionsType == "Exogenous":
self.swapArea(area, region, region2AreaCopy, area2RegionCopy)
obj4Move = self.recalcObj(region2AreaCopy)
self.swapArea(area, regionIn, region2AreaCopy, area2RegionCopy)
candidate = 1
elif self.numRegionsType == "EndogenousThreshold":
self.swapArea(area, region, region2AreaCopy, area2RegionCopy)
if self.regionValue[region] >= self.regionalThreshold and self.regionValue[regionIn] >= self.regionalThreshold:
obj4Move = self.recalcObj(region2AreaCopy)
candidate = 1
self.swapArea(area, regionIn, region2AreaCopy, area2RegionCopy)
tabuCount = 0
if candidate == 1:
# print "--- tabu List:", tabuList
if move in tabuList:
# print "move is in tabu list"
if (aspireOBJ-obj4Move) > epsilon:
# print "CASE1: improves aspirational: ",aspireOBJ,obj4Move
oldRegion = self.area2Region[area]
tabuList = self.updateTabuList((area,oldRegion), tabuList, tabuLength)
self.moveArea(area, region)
self.objInfo = obj4Move
aspireOBJ = obj4Move
currentOBJ = obj4Move
aspireRegions = self.returnRegions()
region2AreaAspire = copy.deepcopy(self.region2Area)
area2RegionAspire = copy.deepcopy(self.area2Region)
currentRegions = aspireRegions
bestAdmisable = obj4Move
cBreak.append(c)
c = 1
run = end
resList.append([obj4Move, aspireOBJ])
else:
# print "CASE 2: does not improve aspirational: ",aspireOBJ,obj4Move
run += 1
tabuCount += 1
tabuList = self.updateTabuList((-1, 0), tabuList, tabuLength)
if tabuCount == end:
c = convTabu
else:
# print "move is NOT in tabu list"
if (aspireOBJ-obj4Move) > epsilon:
# print "CASE 3: improves aspirational: ",aspireOBJ,obj4Move
oldRegion = self.area2Region[area]
tabuList = self.updateTabuList((area,oldRegion), tabuList, tabuLength)
self.moveArea(area, region)
self.objInfo = obj4Move
aspireOBJ = obj4Move
currentOBJ = obj4Move
aspireRegions = self.returnRegions()
region2AreaAspire = copy.deepcopy(self.region2Area)
area2RegionAspire = copy.deepcopy(self.area2Region)
currentRegions = aspireRegions
bestAdmisable = obj4Move
cBreak.append(c)
c = 1
run = end
resList.append( [obj4Move, aspireOBJ] )
else:
# print "CASE 4: does not improve aspirational: ",aspireOBJ,obj4Move
oldRegion = self.area2Region[area]
tabuList = self.updateTabuList((area, oldRegion), tabuList, tabuLength)
self.moveArea(area, region)
self.objInfo = obj4Move
currentOBJ = obj4Move
currentRegions = self.returnRegions()
bestAdmisable = obj4Move
# cBreak.append(99)
c += 1
run = end
resList.append([obj4Move, aspireOBJ])
else:
c += convTabu
self.objInfo = aspireOBJ
self.regions = aspireRegions
self.region2Area = copy.deepcopy(region2AreaAspire)
self.area2Region = copy.deepcopy(area2RegionAspire)
# print "FINAL SOLUTION IN TABU",self.objInfo,self.regions
self.resList = resList
self.cBreak = cBreak
def AZPImproving(self):
"""
"""
improve = 1
while improve == 1:
regions = range(0, self.pRegions)
while len(regions) > 0:
# step 3
if len(regions) > 1:
randomRegion = numpy.random.randint(0, len(regions))
else:
randomRegion = 0
region = regions[randomRegion]
regions.remove(region)
# step 4
borderingAreas = list(set(self.returnBorderingAreas(region)) & set(self.returnRegion2Area(region)))
improve = 0
while len(borderingAreas) > 0:
# step 5
randomArea = numpy.random.randint(0, len(borderingAreas))
area = borderingAreas[randomArea]
borderingAreas.remove(area)
posibleMove = list(self.returnIntraBorderingAreas()[area])
if len(self.region2Area[region]) >= 2:
f = self.checkFeasibility(region, area, self.region2Area)
else:
f = 0
if f == 1:
for move in posibleMove:
self.swapArea(area, move, self.region2Area, self.area2Region)
obj = self.recalcObj(self.region2Area)
self.swapArea(area, region, self.region2Area, self.area2Region)
if obj <= self.objInfo:
self.moveArea(area, move)
improve = 1
borderingAreas = list(set(self.returnBorderingAreas(region)) & set(self.returnRegion2Area(region)))
break
def AZPSA(self, alpha, temperature):
"""
Openshaw's Simulated Annealing for AZP algorithm
"""
totalMoves = 0
acceptedMoves = 0
bestOBJ = self.objInfo
currentOBJ = self.objInfo
bestRegions = self.returnRegions()
currentRegions = self.returnRegions()
region2AreaBest = copy.deepcopy(self.region2Area)
area2RegionBest = copy.deepcopy(self.area2Region)
improve = 1
while improve == 1:
regions = range(0,self.pRegions)
while len(regions) > 0:
# step 3
if len(regions) > 1:
randomRegion = numpy.random.randint(0, len(regions) - 1)
else:
randomRegion = 0
region = regions[randomRegion]
regions.remove(region)
# step 4
borderingAreas = list(set(self.returnBorderingAreas(region)) & set(self.returnRegion2Area(region)))
improve = 0
while len(borderingAreas) > 0:
# step 5
randomArea = numpy.random.randint(0,len(borderingAreas))
area = borderingAreas[randomArea]
borderingAreas.remove(area)
posibleMove = list(self.returnIntraBorderingAreas()[area])
if len(self.region2Area[region]) >= 2:
f = self.checkFeasibility(region, area, self.region2Area)
else:
f = 0
if f == 1:
for move in posibleMove:
# if len(region2AreaCopy[area2RegionCopy[area]]) > 1:
self.swapArea(area, move, self.region2Area, self.area2Region)
obj = self.recalcObj(self.region2Area)
self.swapArea(area, region, self.region2Area, self.area2Region)
if obj <= bestOBJ:
self.moveArea(area, move)
improve = 1
self.objInfo = obj
bestOBJ = obj
currentOBJ = obj
bestRegions = self.returnRegions()
currentRegions = self.returnRegions()
region2AreaBest = copy.deepcopy(self.region2Area)
area2RegionBest = copy.deepcopy(self.area2Region)
# print "--- Local improvement (area, region)", area, move
# print "--- New Objective Function value: ", obj
# step 4
borderingAreas = list(set(self.returnBorderingAreas(region)) & set(self.returnRegion2Area(region)))
break
else:
random = numpy.random.rand(1)[0]
totalMoves += 1
if (numpy.exp(-(obj - currentOBJ) / (currentOBJ * temperature))) > random:
acceptedMoves += 1
self.moveArea(area, move)
self.objInfo = obj
currentOBJ = obj
currentRegions = self.returnRegions()
# print "--- NON-improving move (area, region)", area, move
# print "--- New Objective Function value: ", obj
# step 4
borderingAreas = list(set(self.returnBorderingAreas(region)) & set(self.returnRegion2Area(region)))
break
self.objInfo = bestOBJ
self.region2Area = copy.deepcopy(region2AreaBest)
self.area2Region = copy.deepcopy(area2RegionBest)
def AZPTabuMove(self, tabuLength=5, convTabu=5):
"""
        Tabu search algorithm for Openshaw's AZP-tabu (1995)
"""
aspireOBJ = self.objInfo
currentOBJ = self.objInfo
aspireRegions = self.returnRegions()
region2AreaAspire = copy.deepcopy(self.region2Area)
area2RegionAspire = copy.deepcopy(self.area2Region)
currentRegions = copy.deepcopy(aspireRegions)
tabuList = numpy.zeros(tabuLength)
tabuList = tabuList.tolist()
cBreak = []
c = 1
self.round = 0
resList = []
epsilon = 1e-10
while c <= convTabu:
self.objDict = objFunctions.makeObjDict(self)
self.allCandidates()
if len(self.neighSolutions) == 0:
c += convTabu
else:
minFound = 0
neighSolutionsCopy = copy.deepcopy(self.neighSolutions)
c += 1
neighNoTabuKeys = list(set(neighSolutionsCopy.keys()) - set(tabuList))
neighNoTabuDict = dict((key, neighSolutionsCopy[key]) for key in neighNoTabuKeys)
if len(neighNoTabuDict) > 0:
move = min(neighNoTabuDict, key = lambda x: neighNoTabuDict.get(x))
obj4Move = self.neighSolutions[move]
moveNoTabu = move
obj4MoveNoTabu = obj4Move
if (currentOBJ - obj4Move) >= epsilon:
minFound = 1
else:
neighTabuKeys = list(set(neighSolutionsCopy.keys()) & set(tabuList))
neighTabuDict = dict((key, neighSolutionsCopy[key]) for key in neighTabuKeys)
if len(neighTabuDict) > 0:
move = min(neighTabuDict, key = lambda x: neighTabuDict.get(x))
obj4Move = self.neighSolutions[move]
moveTabu = move
obj4MoveTabu = obj4Move
if (aspireOBJ - obj4Move) > epsilon:
minFound = 1
if minFound == 1:
area, region = move
obj4Move = self.neighSolutions[move]
oldRegion = self.area2Region[area]
tabuList = self.updateTabuList((area, oldRegion), tabuList, tabuLength)
self.moveArea(area, region)
self.objInfo = obj4Move
if (aspireOBJ - obj4Move) > epsilon:
aspireOBJ = obj4Move
aspireRegions = self.returnRegions()
region2AreaAspire = copy.deepcopy(self.region2Area)
area2RegionAspire = copy.deepcopy(self.area2Region)
c = 1
currentOBJ = obj4Move
currentRegions = self.returnRegions()
else:
move = moveNoTabu
area, region = move
obj4Move = self.neighSolutions[move]
oldRegion = self.area2Region[area]
tabuList = self.updateTabuList((area, oldRegion), tabuList, tabuLength)
self.moveArea(area, region)
self.objInfo = obj4Move
currentOBJ = obj4Move
currentRegions = self.returnRegions()
self.objInfo = aspireOBJ
self.regions = aspireRegions
self.region2Area = copy.deepcopy(region2AreaAspire)
self.area2Region = copy.deepcopy(area2RegionAspire)
self.resList = resList
def reactiveTabuMove(self, convTabu=99):
"""
AZP
Openshaw's Reactive Tabu algorithm
"""
# step 2
tabuLength = 1
tabuList = numpy.zeros(tabuLength)
tabuList = tabuList.tolist()
rAvg = 1
K1 = 3
K2 = 3
visitedSolutions = []
allVisitedSolutions = {}
self.round = 0
epsilon = 1e-10
aspireOBJ = self.objInfo
aspireRegions = self.returnRegions()
region2AreaAspire = copy.deepcopy(self.region2Area)
area2RegionAspire = copy.deepcopy(self.area2Region)
c = 1
while c <= convTabu:
improved = 0
# step 3
self.objDict = objFunctions.makeObjDict(self)
self.allCandidates()
if len(self.neighSolutions) == 0:
c += convTabu
else:
neighSolutionsCopy = copy.deepcopy(self.neighSolutions)
neighNoTabuKeys = list(set(neighSolutionsCopy.keys()) - set(tabuList))
neighNoTabuDict = dict((key, neighSolutionsCopy[key]) for key in neighNoTabuKeys)
# step 4
if len(neighNoTabuDict) > 0:
move = min(neighNoTabuDict, key = lambda x: neighNoTabuDict.get(x))
obj4Move = self.neighSolutions[move]
else:
c += convTabu
                    break
# step 5
area, region = move
obj4Move = self.neighSolutions[move]
oldRegion = self.area2Region[area]
tabuList = self.updateTabuList((area, oldRegion), tabuList, tabuLength)
self.moveArea(area, region)
self.objInfo = obj4Move
# update aspirational
if (aspireOBJ - obj4Move) > epsilon:
aspireOBJ = obj4Move
aspireRegions = self.returnRegions()
region2AreaAspire = copy.deepcopy(self.region2Area)
area2RegionAspire = copy.deepcopy(self.area2Region)
improved = 1
# step 6
currentSystem = self.returnRegions()
nVisits = visitedSolutions.count(currentSystem)
if nVisits == 0:
# zoning system not found (go to step 10)
# step 10
visitedSolutions.append(currentSystem)
# step 7
elif nVisits > K1:
try:
nVisitsAll = allVisitedSolutions[currentSystem]
except:
nVisitsAll = 0
                    nVisitsAll += 1
allVisitedSolutions[currentSystem] = nVisitsAll
if nVisitsAll >= K2:
# go to step 11
# step 11a
visitedSolutions = []
self.objDict = objFunctions.makeObjDict(self)
self.allCandidates()
                        neighKeys = self.neighSolutions.keys()
                        moveIndex = range(len(neighKeys))
                        numpy.random.shuffle(moveIndex)
                        for move in [neighKeys[idx] for idx in moveIndex[0:int(1 + 0.5 * rAvg)]]:
area, region = move
obj4Move = self.neighSolutions[move]
oldRegion = self.area2Region[area]
tabuList = self.updateTabuList((area,oldRegion), tabuList, tabuLength)
self.moveArea(area, region)
obj4Move = self.neighSolutions[move]
# update aspirational
if (aspireOBJ-obj4Move) > epsilon:
aspireOBJ = obj4Move
aspireRegions = self.returnRegions()
region2AreaAspire = copy.deepcopy(self.region2Area)
area2RegionAspire = copy.deepcopy(self.area2Region)
improved = 1
# step 8
elif nVisits < K1:
rAvg += 1
tabuLength = 1.1*tabuLength
# step 9
if tabuLength > rAvg:
tabuLength = max(0.9 * tabuLength, 1)
tabuLength = int(round(tabuLength))
# step 10
visitedSolutions.append(currentSystem)
if improved == 1:
c = 1
else:
c += 1
self.objInfo = aspireOBJ
self.regions = aspireRegions
self.region2Area = copy.deepcopy(region2AreaAspire)
self.area2Region = copy.deepcopy(area2RegionAspire)
def moveArea(self, areaID, regionID):
"""
Move an area to a region
"""
oldRegion = self.area2Region[areaID]
self.region2Area[oldRegion].remove(areaID)
self.region2Area[regionID].append(areaID)
self.area2Region[areaID] = regionID
a = self.areas[areaID]
toUpdate = [areaID] + a.neighs
if self.objectiveFunctionType == "GWalt":
self.NRegion[regionID] += a.data[0]
self.NRegion[oldRegion] -= a.data[0]
if self.numRegionsType == "EndogenousThreshold":
self.regionValue[regionID] += self.areas[areaID].thresholdVar
self.regionValue[oldRegion] -= self.areas[areaID].thresholdVar
try:
for index in range(1, len(a.data)):
self.data[regionID][index - 1] += a.data[index] * a.data[0]
for index in range(1, len(a.data)):
self.data[oldRegion][index - 1] -= a.data[index] *a.data[0]
except:
pass
for area in toUpdate:
regionIn = self.area2Region[area]
areasIdsIn = self.region2Area[regionIn]
areasInNow = [self.areas[aID] for aID in areasIdsIn]
areasInRegion = set(areasIdsIn)
aNeighs = set(self.areas[area].neighs)
neighsInOther = aNeighs - areasInRegion
if len(neighsInOther) == 0 and area in self.intraBorderingAreas:
self.intraBorderingAreas.pop(area)
else:
borderRegions = set([])
for neigh in neighsInOther:
borderRegions = borderRegions | set([self.area2Region[neigh]])
if area in self.intraBorderingAreas:
self.intraBorderingAreas.pop(area)
self.intraBorderingAreas[area] = borderRegions
self.calcObj()
def recoverFromExtendedMemory(self, extendedMemory):
"""
        Recover a solution from the extended memory
"""
self.objInfo = extendedMemory.objInfo
self.area2Region = extendedMemory.area2Region
self.region2Area = extendedMemory.region2Area
self.intraBorderingAreas = extendedMemory.intraBorderingAreas
def getSeeds(self):
"""
Return the seeds of the solution
"""
        return self.seeds
def indexMultiple(x,value):
"""
    Return the indices of the elements of x that are equal to value.
"""
return [ i[0] for i in enumerate(x) if i[1] == value ]
def calculateGetisG(keyList, dataMean, dataStd, dataDictionary, dataLength):
"""
    This function returns the local G statistic for a given region.
"""
sum = 0
for i in keyList:
sum = sum + numpy.double((dataDictionary[i]))
neighborNumber = len(keyList)
numerator = sum - dataMean * neighborNumber
denominator = dataStd * ((float(dataLength * neighborNumber - (neighborNumber ** 2)) / (dataLength - 1)) ** 0.5)
# denominator = (dataStd*((dataLength*neighborNumber-(neighborNumber**2))/(dataLength-1))**0.5)
G = numerator / denominator
return G
def quickSortIntersection(dataList, keyList, discardList):
"""
    quickSortIntersection recursively sorts the list of values using a
    quick sort algorithm, discarding the keys listed in discardList.
"""
if len(keyList) <= 1:
return keyList
else:
lessData = []
lessKey = []
moreData = []
moreKey = []
pivot = dataList[-1]
kpivot = keyList[-1]
for i in range(len(dataList) - 1):
if keyList[i] not in discardList:
if dataList[i] <= pivot:
lessData.append(dataList[i])
lessKey.append(keyList[i])
else:
moreData.append(dataList[i])
moreKey.append(keyList[i])
return quickSortIntersection(lessData, lessKey, discardList) + [kpivot] + quickSortIntersection(moreData, moreKey, discardList)
def quickSort2(keys, y):
"""
    quickSort2 recursively sorts the list of keys by their values in y using
    a quick sort algorithm.
"""
if len(keys) <= 1:
return keys
else:
lessData = []
lessKey = []
moreData = []
moreKey = []
pivot = y[keys[-1]]
kpivot = keys[-1]
keys=keys[0: -1]
for i in keys:
if y[i] <= pivot:
lessKey.append(i)
else:
moreKey.append(i)
return quickSort2(lessKey, y) + [kpivot] + quickSort2(moreKey, y)
def neighborSort(dictionary, discardList):
"""
    Returns the list of keys of a dictionary sorted by the
    values assigned to them.
"""
dataList = dictionary.values()
keyList = dictionary.keys()
return quickSortIntersection(dataList, keyList, discardList)
def vectorDistance(v1, v2):
"""
    This function calculates the Euclidean distance between two
    vectors.
"""
sum = 0
for i in range(len(v1)):
sum += (v1[i] - v2[i]) ** 2
return sum ** 0.5
# INTERNOS
def calculateCentroid(areaList):
"""
    This function returns the centroid of an area list
"""
pg = 0.0
pk = []
centroid = AreaCl(0, [], [])
for area in areaList:
pg += area.data[0]
pk = pk + [area.data[0]]
pkPg = numpy.matrix(pk).T / pg
data = [0.0] * len(area.data)
var = numpy.matrix(areaList[0].var) * 0.0
j = 0
for area in areaList:
var += area.var * pow(pkPg[j, 0], 2)
for i in range(len(area.data)):
data[i] += area.data[i] * pkPg[j, 0]
j += 1
centroid.data = data
centroid.var = var
return centroid
def factorial(n):
"""
Returns the factorial of a number.
"""
fact = 1.0
if n > 1:
fact = n * factorial(n - 1)
return fact
def comb(n, m):
"""
This function calculates the number of possible combinations of n items
chosen by m.
"""
return factorial(n) / (factorial(m) * factorial(n - m))
def recode(X):
"""
    Transform a list of region labels starting at an arbitrary value into a list of labels starting at 0.
"""
XP = X + []
assigned = []
r = 0
for i in range(len(X)):
if (i not in assigned):
XP[i] = r
for j in range(len(X) - i - 1):
k = i + j + 1
if (k not in assigned):
if X[k] == X[i]:
XP[k] = r
assigned = assigned + [k]
r = r + 1
return XP
def sortedKeys(d):
"""
Return keys of the dictionary d sorted based on their values.
"""
values = d.values()
sortedIndices = numpy.argsort(values)
sortedKeys = [d.keys()[i] for i in sortedIndices]
minVal = min(values)
countMin = values.count(minVal)
if countMin > 1:
minIndices = sortedKeys[0: countMin]
nInd = len(minIndices)
idx = range(nInd)
numpy.random.shuffle(idx)
permMins = idx
c = 0
for i in range(nInd):
place = permMins[c]
sortedKeys[c] = minIndices[place]
c += 1
return sortedKeys
def feasibleRegion(feasDict):
"""
    Return 1 if the areas in feasDict form a connected region, 0 otherwise
"""
areas2Eval = []
areas = {}
for key in feasDict.keys():
try:
neighbours = feasDict[key]
except:
neighbours = {}
a = AreaCl(key, neighbours, [])
areas[key] = a
areas2Eval = areas2Eval + [key]
feasible = 1
newRegion = set([])
for area in areas2Eval:
newRegion = newRegion | (set(areas[area].neighs) & set(areas2Eval))
if set(areas2Eval) - newRegion != set([]):
feasible = 0
return feasible
class AreaCl:
"""
Area Class for Regional Clustering.
"""
def __init__(self, id, neighs, data, variance="false"):
"""
@type id: integer
@param id: Id of the polygon/area
@type neighs: list
@param neighs: Neighborhood ids
@type data: list.
        @param data: Data related to the area.
@type variance: boolean
@keyword variance: Boolean indicating if the data have variance matrix
"""
self.id = id
self.neighs = neighs
if variance == "false":
self.data = data
else:
n = (numpy.sqrt(9 + 8 * (len(data) - 1)) - 3) / 2
self.var = numpy.matrix(numpy.identity(n))
index = n + 1
for i in range(int(n)):
for j in range(i + 1):
self.var[i, j] = data[int(index)]
self.var[j, i] = data[int(index)]
index += 1
self.data = data[0: int(n + 1)]
def returnDistance2Area(self, otherArea, distanceType="EuclideanSquared", indexData=[]):
"""
Return the distance between the area and other area
"""
if len(indexData) == 0:
indexData = range(len(self.data))
y0 = []
y1 = []
for index in indexData:
y0 += [self.data[index]]
y1 += [otherArea.data[index]]
data = numpy.concatenate(([y0], [y1]))
areaDistance = distanceFunctions.distMethods[distanceType](data)
try:
dist = areaDistance[0][0]
except:
dist = areaDistance[0]
return dist
class somManager():
"""SOM Manager object
"""
def __init__(self,
data,
iters,
outputLayer,
alphaType,
initialDistribution,
BMUContiguity):
"""This class control all the SOM neural network structure.
It's the repository of the output layer and the solution
generator
@type data: dictionary
@param data: Input layer data
@type iters: integer
@param iters: Number of iterations
@type outputLayer: Layer
@param outputLayer: Output Layer object
@type alphaType: string
@param alphaType: Type of learning rate
@type initialDistribution: string
@param initialDistribution: Neural units initial distribution
@type BMUContiguity: string
@param BMUContiguity: Contiguity criterion
"""
self.alphaType = alphaType
self.data = data
nv = len(data[0])
self.iters = iters
self.outputLayer = outputLayer
# Initializing neural weights
self.outputLayer.generateData(initialDistribution, 'rook', nv, 0, 1)
dataNames = self.outputLayer.fieldNames[-1 * nv:]
self.actualData = outputLayer.getVars(*dataNames)
# initializing empty clusters
self.emptyClusters = {}
for i in range(len(self.outputLayer.areas)):
self.emptyClusters[i] = []
# initializing feasibles BMU
self.feasibleBMU = {}
for i in self.data.keys():
self.feasibleBMU = outputLayer.Y.keys()
# initializing contiguities
if BMUContiguity == 'rook':
self.outputContiguity = self.outputLayer.Wrook
elif BMUContiguity == 'queen':
self.outputContiguity = self.outputLayer.Wqueen
elif BMUContiguity == 'custom':
self.outputContiguity = self.outputLayer.Wcustom
        elif BMUContiguity == 'all':
            self.outputContiguity = {}
            for i in self.outputLayer.Y.keys():
                self.outputContiguity[i] = self.outputLayer.Y.keys()
else:
raise NameError('Invalid contiguity Type')
# defining areas order
self.order = self.data.keys()
self.solutionsInput = {}
def __alpha(self, value):
"""
        Decreasing scalar-valued function used to update
        the neural network weights at a specific iteration.
"""
if self.alphaType == 'linear':
return (1 - float(value) / self.iters)
elif self.alphaType == 'quadratic':
return -1 * (float(value) / self.iters) ** 2 + 1
else:
            raise NameError('Invalid decreasing function type')
def findBMU(self, areaId):
"""
        Find the most similar neural weight, usually called in the
        literature the Best Matching Unit (BMU)
"""
inputY = self.data[areaId]
min = vectorDistance(inputY,
self.actualData[self.feasibleBMU[0]])
bmu = 0
for i in self.feasibleBMU[1:]:
dist = vectorDistance(inputY, self.actualData[i])
if dist < min:
min = dist
bmu = i
return bmu
def modifyUnits(self, bmu, areaId, iter):
"""
        Updates the BMU neighborhood
"""
inputY = self.data[areaId]
for i in self.outputContiguity[bmu] + [bmu]:
dist = numpy.array(inputY) - numpy.array(self.actualData[i])
alph = self.__alpha(iter)
self.actualData[i] = list(numpy.array(self.actualData[i]) \
+ alph * dist)
def addSolution(self, iter):
"""
Manage the solutions of each iteration
"""
solution = {}
self.outputLayer.fieldNames += ['iter' + str(iter)]
for i in self.clusters:
self.outputLayer.Y[i] += [len(self.clusters[i])]
for j in self.clusters[i]:
if self.solutionsInput.has_key(j):
self.solutionsInput[j] += [i]
else:
self.solutionsInput[j] = [i]
solution[j] = i
return solution.values()
def compressSolution(self, solution):
"""
        Standardize the unsorted solution.
"""
count = 0
order = list(set(solution))
order.sort()
sol = [order.index(x) for x in solution]
return sol
class geoSomManager(somManager):
"""Geo-SOM Manager object
"""
def __init__(self,data,
iters,
outputLayer,
alphaType,
initialDistribution,
BMUContiguity,
iCentroids,
oCentroids):
"""
        This class controls the whole geoSOM neural network structure.
        Additionally, it is the repository of the output layer and the
        solution generator.
@type data: dictionary
@param data: Input layer data
@type iters: integer
@param iters: Number of iterations
@type outputLayer: Layer
@param outputLayer: Output Layer object
@type alphaType: string
@param alphaType: Type of learning rate
@type initialDistribution: string
@param initialDistribution: Neural units initial distribution
@type BMUContiguity: string
@param BMUContiguity: Contiguity criterion
@type iCentroids: dictionary
@param iCentroids: Centroid coordinates for the input Layer areas.
@type oCentroids: dictionary
@param oCentroids: Centroid coordinates for the output Layer areas.
"""
somManager.__init__(self,data,
iters,
outputLayer,
alphaType,
initialDistribution,
BMUContiguity)
self.iCentroids=iCentroids
self.oCentroids=oCentroids
self.geoWinner, self.feasibleBMU=self.defGeoWinnerAttributes()
def defGeoWinnerAttributes(self):
"""
        This function defines the geoWinners for all the input areas
"""
geoWinner = {}
feasibleBMU = {}
for c in self.iCentroids:
bestOIndex = 0
minDistance = vectorDistance(self.iCentroids[c], self.oCentroids[0])
outputContiguity = self.outputContiguity[0]
for o in self.oCentroids:
dis = vectorDistance(self.iCentroids[c], self.oCentroids[o])
if dis < minDistance:
minDistance = dis
bestOIndex = o
outputContiguity = self.outputContiguity[o] + [o]
geoWinner[c] = bestOIndex
feasibleBMU[c] = outputContiguity
return geoWinner, feasibleBMU
def findBMU(self, areaId):
"""
        Finds the most similar neural network weight, usually called in the
        literature the Best Matching Unit (BMU)
"""
inputY = self.data[areaId]
feasibleBMU = self.feasibleBMU[areaId]
min = vectorDistance(inputY,
self.actualData[feasibleBMU[0]])
bmu = feasibleBMU[0]
for i in feasibleBMU:
dist = vectorDistance(inputY, self.actualData[i])
if dist < min:
min = dist
bmu = i
return bmu
|
tectronics/clusterpy
|
clusterpy/core/toolboxes/cluster/componentsAlg/__init__.py
|
Python
|
bsd-3-clause
| 83,109
|
from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import (ascii, bytes, chr, dict, filter, hex, input, int, map, next, oct, open, pow, range, round, str,
super, zip)
from ..._endpoint_base import EndpointBase
from ..._utils import send_session_request
class _PermissionsMixin(EndpointBase):
####################
## PUBLIC METHODS ##
####################
def add_permission(self, principal):
"""
Adds a user/role to the allowed principals list for an endpoint.
"""
r = self._create_operation_request(self._url_full, "permissions/add")
r.data = {"principal": principal, "isAllowed": True}
send_session_request(self._session, r).json()
def get_allowed_principals(self):
"""
Gets a list of the allowed principals on the endpoint.
"""
r = self._create_operation_request(self._url_full, "permissions")
response = send_session_request(self._session, r).json()
return [p["principal"] for p in response["permissions"]]
def remove_permission(self, principal):
"""
Removes a user/role from the allowed principals list for an endpoint.
"""
r = self._create_operation_request(self._url_full, "permissions/add")
r.data = {"principal": principal, "isAllowed": False}
send_session_request(self._session, r).json()
|
DavidWhittingham/agsadmin
|
agsadmin/rest_admin/services/_permissions_mixin.py
|
Python
|
bsd-3-clause
| 1,441
|
import os
import numpy as np
import Bio.Seq
import mirnylib.genome
NUM_READS = 10000
READ_LENGTH = 75
MOLECULE_LENGTH = 400
NOISE = 0.02
RANDOM_JOINT = True
STD_HEADER = 'SRR000000.{0}_{1}_{2}_{3}_{4}_{5}_{6}_{7}'
genome_db = mirnylib.genome.Genome('../../fasta/sacCer3', readChrms=['#'])
if not os.path.exists('tmp'):
os.mkdir('tmp')
def noise_seq(seq, noise):
for i in range(len(seq)):
if np.random.random() < noise:
seq = seq[:i] + 'ATGC'[np.random.randint(4)] + seq[i+1:]
return seq
fastq1 = open('./tmp/insilico_1.fastq', 'w')
fastq2 = open('./tmp/insilico_2.fastq', 'w')
for i in range(NUM_READS):
chr1 = np.random.randint(16)
pos1 = np.random.randint(MOLECULE_LENGTH,
genome_db.chrmLens[chr1] - MOLECULE_LENGTH)
if RANDOM_JOINT:
len1 = np.random.randint(MOLECULE_LENGTH)
else:
len1 = MOLECULE_LENGTH / 2
seq1 = str(genome_db.seqs[chr1][pos1:pos1+len1].seq)
seq1 = noise_seq(seq1, NOISE)
chr2 = np.random.randint(16)
pos2 = np.random.randint(MOLECULE_LENGTH,
genome_db.chrmLens[chr2] - MOLECULE_LENGTH)
len2 = MOLECULE_LENGTH - len1
seq2 = str(genome_db.seqs[chr2][pos2:pos2+len2].seq)
seq2 = noise_seq(seq2, NOISE)
concatenated_seq = seq1+seq2
read_id = STD_HEADER.format(i, chr1, pos1, len1,
chr2, pos2+len2, len2, MOLECULE_LENGTH)
fastq1.writelines(
['@' + read_id + '\n',
concatenated_seq[:READ_LENGTH] + '\n',
'+' + read_id + '\n',
'g' * READ_LENGTH + '\n'])
fastq2.writelines(
['@' + read_id + '\n',
Bio.Seq.reverse_complement(concatenated_seq[-READ_LENGTH:]) + '\n',
'+' + read_id + '\n',
'g' * READ_LENGTH + '\n'])
if (i+1) % 10000 == 0:
print '{0} random sequences generated...'.format((i+1))
|
bxlab/HiFive_Paper
|
Scripts/HiCLib/mirnylab-hiclib-460c3fbc0f72/tests/mapping/01_generate_random_fastq.py
|
Python
|
bsd-3-clause
| 1,940
|
import pandas as pd
def read_airbase_file(filename, station):
"""
Read hourly AirBase data files.
Parameters
----------
filename : string
Path to the data file.
station : string
Name of the station.
Returns
-------
DataFrame
Processed dataframe.
"""
# construct the column names
hours = ["{:02d}".format(i) for i in range(24)]
flags = ['flag' + str(i) for i in range(24)]
colnames = ['date'] + [item for pair in zip(hours, flags) for item in pair]
# read the actual data
data = pd.read_csv(filename, sep='\t', header=None, na_values=[-999, -9999], names=colnames)
# drop the 'flag' columns
data = data.drop([col for col in data.columns if 'flag' in col], axis=1)
# reshape
data_stacked = pd.melt(data, id_vars=['date'], var_name='hour')
# parse to datetime and remove redundant columns
data_stacked.index = pd.to_datetime(data_stacked['date'] + data_stacked['hour'], format="%Y-%m-%d%H")
data_stacked = data_stacked.drop(['date', 'hour'], axis=1)
data_stacked = data_stacked.rename(columns={'value': station})
return data_stacked
|
jorisvandenbossche/DS-python-data-analysis
|
notebooks/_solutions/case4_air_quality_processing10.py
|
Python
|
bsd-3-clause
| 1,177
|
# -*-coding:Utf-8 -*
# Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le paramètre 'liste' de la commande 'décor'."""
from primaires.interpreteur.masque.parametre import Parametre
class PrmListe(Parametre):
"""Commande 'décor liste'.
"""
def __init__(self):
"""Constructeur du paramètre"""
Parametre.__init__(self, "liste", "list")
self.schema = "(<message>)"
self.aide_courte = "liste les décors existants"
self.aide_longue = \
"Cette commande liste les décors existants."
def interpreter(self, personnage, dic_masques):
"""Interprétation du paramètre"""
cherchable = importeur.recherche.cherchables["prdecor"]
if dic_masques["message"]:
chaine = dic_masques["message"].message
else:
chaine = ""
message = cherchable.trouver_depuis_chaine(chaine)
personnage << message
|
stormi/tsunami
|
src/primaires/salle/commandes/decor/liste.py
|
Python
|
bsd-3-clause
| 2,465
|
from __future__ import print_function, division
from sympy.core.basic import C
from sympy.core.expr import Expr
from sympy.sets.sets import Interval
from sympy.core.singleton import S
from sympy.core.symbol import Symbol
from sympy.core.sympify import sympify
from sympy.core.compatibility import is_sequence, xrange
from sympy.core.containers import Tuple
from sympy.functions.elementary.piecewise import piecewise_fold
from sympy.utilities import flatten
from sympy.utilities.iterables import sift
def _process_limits(*symbols):
"""Process the list of symbols and convert them to canonical limits,
storing them as Tuple(symbol, lower, upper). The orientation of
the function is also returned when the upper limit is missing
so (x, 1, None) becomes (x, None, 1) and the orientation is changed.
"""
limits = []
orientation = 1
for V in symbols:
if isinstance(V, Symbol):
limits.append(Tuple(V))
continue
elif is_sequence(V, Tuple):
V = sympify(flatten(V))
if V[0].is_Symbol:
newsymbol = V[0]
if len(V) == 2 and isinstance(V[1], Interval):
V[1:] = [V[1].start, V[1].end]
if len(V) == 3:
if V[1] is None and V[2] is not None:
nlim = [V[2]]
elif V[1] is not None and V[2] is None:
orientation *= -1
nlim = [V[1]]
elif V[1] is None and V[2] is None:
nlim = []
else:
nlim = V[1:]
limits.append(Tuple(newsymbol, *nlim ))
continue
elif len(V) == 1 or (len(V) == 2 and V[1] is None):
limits.append(Tuple(newsymbol))
continue
elif len(V) == 2:
limits.append(Tuple(newsymbol, V[1]))
continue
raise ValueError('Invalid limits given: %s' % str(symbols))
return limits, orientation
class ExprWithLimits(Expr):
__slots__ = ['is_commutative']
def __new__(cls, function, *symbols, **assumptions):
# Any embedded piecewise functions need to be brought out to the
# top level so that integration can go into piecewise mode at the
# earliest possible moment.
function = sympify(function)
if hasattr(function, 'func') and function.func is C.Equality:
lhs = function.lhs
rhs = function.rhs
return C.Equality(cls(lhs, *symbols, **assumptions), \
cls(rhs, *symbols, **assumptions))
function = piecewise_fold(function)
if function is S.NaN:
return S.NaN
if symbols:
limits, orientation = _process_limits(*symbols)
else:
# symbol not provided -- we can still try to compute a general form
free = function.free_symbols
if len(free) != 1:
raise ValueError(
"specify dummy variables for %s" % function)
limits, orientation = [Tuple(s) for s in free], 1
# denest any nested calls
while cls == type(function):
limits = list(function.limits) + limits
function = function.function
# Only limits with lower and upper bounds are supported; the indefinite form
# is not supported
if any(len(l) != 3 or None in l for l in limits):
raise ValueError('ExprWithLimits requires values for lower and upper bounds.')
obj = Expr.__new__(cls, **assumptions)
arglist = [function]
arglist.extend(limits)
obj._args = tuple(arglist)
obj.is_commutative = function.is_commutative # limits already checked
return obj
@property
def function(self):
"""Return the function applied across limits.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x
>>> Integral(x**2, (x,)).function
x**2
See Also
========
limits, variables, free_symbols
"""
return self._args[0]
@property
def limits(self):
"""Return the limits of expression.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, i
>>> Integral(x**i, (i, 1, 3)).limits
((i, 1, 3),)
See Also
========
function, variables, free_symbols
"""
return self._args[1:]
@property
def variables(self):
"""Return a list of the dummy variables
>>> from sympy import Sum
>>> from sympy.abc import x, i
>>> Sum(x**i, (i, 1, 3)).variables
[i]
See Also
========
function, limits, free_symbols
as_dummy : Rename dummy variables
transform : Perform mapping on the dummy variable
"""
return [l[0] for l in self.limits]
@property
def free_symbols(self):
"""
This method returns the symbols in the object, excluding those
that take on a specific value (i.e. the dummy symbols).
Examples
========
>>> from sympy import Sum
>>> from sympy.abc import x, y
>>> Sum(x, (x, y, 1)).free_symbols
set([y])
"""
# don't test for any special values -- nominal free symbols
# should be returned, e.g. don't return set() if the
# function is zero -- treat it like an unevaluated expression.
function, limits = self.function, self.limits
isyms = function.free_symbols
for xab in limits:
if len(xab) == 1:
isyms.add(xab[0])
continue
# take out the target symbol
if xab[0] in isyms:
isyms.remove(xab[0])
# add in the new symbols
for i in xab[1:]:
isyms.update(i.free_symbols)
return isyms
@property
def is_number(self):
"""Return True if the Sum has no free symbols, else False."""
return not self.free_symbols
def as_dummy(self):
"""
Replace instances of the given dummy variables with explicit dummy
counterparts to make clear what are dummy variables and what
are real-world symbols in an object.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> Integral(x, (x, x, y), (y, x, y)).as_dummy()
Integral(_x, (_x, x, _y), (_y, x, y))
        If the object supports the "integral at" limit ``(x,)`` it
is not treated as a dummy, but the explicit form, ``(x, x)``
of length 2 does treat the variable as a dummy.
>>> Integral(x, x).as_dummy()
Integral(x, x)
>>> Integral(x, (x, x)).as_dummy()
Integral(_x, (_x, x))
        If there were no dummies in the original expression, then the
        symbols which cannot be changed by subs() are clearly seen as
those with an underscore prefix.
See Also
========
variables : Lists the integration variables
transform : Perform mapping on the integration variable
"""
reps = {}
f = self.function
limits = list(self.limits)
for i in xrange(-1, -len(limits) - 1, -1):
xab = list(limits[i])
if len(xab) == 1:
continue
x = xab[0]
xab[0] = x.as_dummy()
for j in range(1, len(xab)):
xab[j] = xab[j].subs(reps)
reps[x] = xab[0]
limits[i] = xab
f = f.subs(reps)
return self.func(f, *limits)
def _eval_interval(self, x, a, b):
limits = [( i if i[0] != x else (x,a,b) ) for i in self.limits]
integrand = self.function
return self.func(integrand, *limits)
def _eval_subs(self, old, new):
"""
Perform substitutions over non-dummy variables
of an expression with limits. Also, can be used
to specify point-evaluation of an abstract antiderivative.
Examples
========
>>> from sympy import Sum, oo
>>> from sympy.abc import s,n
>>> Sum(1/n**s, (n, 1, oo)).subs(s, 2)
Sum(n**(-2), (n, 1, oo))
>>> from sympy import Integral
>>> from sympy.abc import x,a
>>> Integral(a*x**2,x).subs(x,4)
Integral(a*x**2, (x, 4))
See Also
========
variables : Lists the integration variables
        transform : Perform mapping on the dummy variable for integrals
change_index : Perform mapping on the sum and product dummy variables
"""
func, limits = self.function, list(self.limits)
# If one of the expressions we are replacing is used as a func index
# one of two things happens.
# - the old variable first appears as a free variable
# so we perform all free substitutions before it becomes
# a func index.
# - the old variable first appears as a func index, in
# which case we ignore. See change_index.
# Reorder limits to match standard mathematical practice for scoping
limits.reverse()
if not isinstance(old, C.Symbol) or \
old.free_symbols.intersection(self.free_symbols):
sub_into_func = True
for i, xab in enumerate(limits):
if 1 == len(xab) and old == xab[0]:
xab = (old, old)
limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]])
if len(xab[0].free_symbols.intersection(old.free_symbols)) != 0:
sub_into_func = False
break
if isinstance(old,C.AppliedUndef) or isinstance(old,C.UndefinedFunction):
sy2 = set(self.variables).intersection(set(new.atoms(Symbol)))
sy1 = set(self.variables).intersection(set(old.args))
if not sy2.issubset(sy1):
raise ValueError(
"substitution can not create dummy dependencies")
sub_into_func = True
if sub_into_func:
func = func.subs(old, new)
else:
# old is a Symbol and a dummy variable of some limit
for i, xab in enumerate(limits):
if len(xab) == 3:
limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]])
if old == xab[0]:
break
# simplify redundant limits (x, x) to (x, )
for i, xab in enumerate(limits):
if len(xab) == 2 and (xab[0] - xab[1]).is_zero:
limits[i] = Tuple(xab[0], )
# Reorder limits back to representation-form
limits.reverse()
return self.func(func, *limits)
class AddWithLimits(ExprWithLimits):
r"""Represents unevaluated oriented additions.
Parent class for Integral and Sum.
"""
def __new__(cls, function, *symbols, **assumptions):
# Any embedded piecewise functions need to be brought out to the
# top level so that integration can go into piecewise mode at the
# earliest possible moment.
#
# This constructor only differs from ExprWithLimits
# in the application of the orientation variable. Perhaps merge?
function = sympify(function)
if hasattr(function, 'func') and function.func is C.Equality:
lhs = function.lhs
rhs = function.rhs
return C.Equality(cls(lhs, *symbols, **assumptions), \
cls(rhs, *symbols, **assumptions))
function = piecewise_fold(function)
if function is S.NaN:
return S.NaN
if symbols:
limits, orientation = _process_limits(*symbols)
else:
# symbol not provided -- we can still try to compute a general form
free = function.free_symbols
if len(free) != 1:
raise ValueError(
" specify dummy variables for %s. If the integrand contains"
" more than one free symbol, an integration variable should"
" be supplied explicitly e.g., integrate(f(x, y), x)"
% function)
limits, orientation = [Tuple(s) for s in free], 1
# denest any nested calls
while cls == type(function):
limits = list(function.limits) + limits
function = function.function
obj = Expr.__new__(cls, **assumptions)
arglist = [orientation*function]
arglist.extend(limits)
obj._args = tuple(arglist)
obj.is_commutative = function.is_commutative # limits already checked
return obj
def _eval_adjoint(self):
if all([x.is_real for x in flatten(self.limits)]):
return self.func(self.function.adjoint(), *self.limits)
return None
def _eval_conjugate(self):
if all([x.is_real for x in flatten(self.limits)]):
return self.func(self.function.conjugate(), *self.limits)
return None
def _eval_transpose(self):
if all([x.is_real for x in flatten(self.limits)]):
return self.func(self.function.transpose(), *self.limits)
return None
def _eval_factor(self, **hints):
if 1 == len(self.limits):
summand = self.function.factor(**hints)
if summand.is_Mul:
out = sift(summand.args, lambda w: w.is_commutative \
and not w.has(*self.variables))
return C.Mul(*out[True])*self.func(C.Mul(*out[False]), \
*self.limits)
else:
summand = self.func(self.function, self.limits[0:-1]).factor()
if not summand.has(self.variables[-1]):
return self.func(1, [self.limits[-1]]).doit()*summand
elif isinstance(summand, C.Mul):
return self.func(summand, self.limits[-1]).factor()
return self
def _eval_expand_basic(self, **hints):
summand = self.function.expand(**hints)
if summand.is_Add and summand.is_commutative:
return C.Add(*[ self.func(i, *self.limits) for i in summand.args ])
elif summand != self.function:
return self.func(summand, *self.limits)
return self
|
AunShiLord/sympy
|
sympy/concrete/expr_with_limits.py
|
Python
|
bsd-3-clause
| 14,599
|
# coding:utf-8
import io
import base64
import socket
from apiclient.http import MediaIoBaseUpload
from apiclient.errors import HttpError
from cactus.deployment.file import BaseFile
from cactus.utils.network import retry
class GCSFile(BaseFile):
def get_metadata(self):
"""
Generate metadata for the upload.
Note: we don't set the etag, since the GCS API does not accept what we set
"""
metadata = {
"acl": [{"entity": "allUsers", "role": "READER"}],
"md5Hash": base64.b64encode(self.payload_checksum.decode('hex')),
"contentType": self.content_type, # Given twice...
"cacheControl": unicode(self.cache_control) # That's what GCS will return
}
if self.content_encoding is not None:
metadata['contentEncoding'] = self.content_encoding
return metadata
def remote_changed(self):
"""
Compare each piece of metadata that we're setting with the one that's stored remotely
If one's different, upload again.
:rtype: bool
"""
resource = self.engine.get_connection().objects()
req = resource.get(bucket=self.engine.bucket_name, object=self.url)
try:
remote_metadata = req.execute()
except HttpError as e:
if e.resp.status == 404:
return True
raise
ignore_metadata = ["acl"] # We can't control what we'll retrieve TODO: do the best we can do!
for k, v in self.get_metadata().items():
if k not in ignore_metadata and remote_metadata.get(k) != v:
return True
return False
@retry((socket.error,), tries=5, delay=3, backoff=2)
def do_upload(self):
resource = self.engine.get_connection().objects()
stream = io.BytesIO(self.payload())
upload = MediaIoBaseUpload(stream, mimetype=self.content_type)
req = resource.insert(
bucket=self.engine.bucket_name,
name=self.url,
body=self.get_metadata(),
media_body=upload,
)
req.execute()
|
dreadatour/Cactus
|
cactus/deployment/gcs/file.py
|
Python
|
bsd-3-clause
| 2,141
|
import os
import pytest
from flask import url_for
from urllib.request import urlopen
from burpui import create_app
@pytest.fixture(scope="session")
def app():
conf = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"../../share/burpui/etc/burpui.sample.cfg",
)
bui = create_app(debug=12, logfile="/dev/null", gunicorn=False, unittest=True)
bui.setup(conf, True)
bui.config["DEBUG"] = False
bui.config["TESTING"] = True
bui.config["LOGIN_DISABLED"] = True
bui.config["LIVESERVER_PORT"] = 5001
bui.config["CFG"] = conf
bui.login_manager.init_app(bui)
return bui
def test_server_is_up_and_running(live_server):
import socket
import errno
try:
url = url_for("view.home", _external=True)
response = urlopen(url)
assert response.code == 200
except socket.error as exp:
if exp.errno != errno.ECONNRESET:
raise
|
ziirish/burp-ui
|
tests/functional/test_live_server.py
|
Python
|
bsd-3-clause
| 941
|
# coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
import warnings
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..base import MultiOutputMixin
from ..utils import check_random_state, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted, _check_sample_weight
from ..utils.validation import _deprecate_positional_args
from ._base import LinearRegression
from ..utils.validation import has_fit_parameter
from ..exceptions import ConvergenceWarning
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
class RANSACRegressor(MetaEstimatorMixin, RegressorMixin,
MultiOutputMixin, BaseEstimator):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set.
Read more in the :ref:`User Guide <ransac_regression>`.
Parameters
----------
base_estimator : object, default=None
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
* `predict(X)`: Returns predicted values using the linear model,
which is used to compute residual error using loss function.
If `base_estimator` is None, then
:class:`~sklearn.linear_model.LinearRegression` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), default=None
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
relative number `ceil(min_samples * X.shape[0]`) for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, default=None
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, default=None
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, default=None
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, default=100
Maximum number of iterations for random sample selection.
max_skips : int, default=np.inf
Maximum number of iterations that can be skipped due to finding zero
inliers or invalid data defined by ``is_data_valid`` or invalid models
defined by ``is_model_valid``.
.. versionadded:: 0.19
stop_n_inliers : int, default=np.inf
Stop iteration if at least this number of inliers are found.
stop_score : float, default=np.inf
        Stop iteration if the score is greater than or equal to this threshold.
stop_probability : float in range [0, 1], default=0.99
RANSAC iteration stops if at least one outlier-free set of the training
        data is sampled in RANSAC. This requires generating at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
where the probability (confidence) is typically set to high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
loss : string, callable, default='absolute_loss'
        String inputs, "absolute_loss" and "squared_loss", are supported,
        which compute the absolute loss and the squared loss per sample,
        respectively.
If ``loss`` is a callable, then it should be a function that takes
two arrays as inputs, the true and predicted value and returns a 1-D
array with the i-th value of the array corresponding to the loss
on ``X[i]``.
If the loss on a sample is greater than the ``residual_threshold``,
then this sample is classified as an outlier.
.. versionadded:: 0.18
random_state : int, RandomState instance, default=None
        The generator used to select the random sub-samples.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
n_skips_no_inliers_ : int
Number of iterations skipped due to finding zero inliers.
.. versionadded:: 0.19
n_skips_invalid_data_ : int
Number of iterations skipped due to invalid data defined by
``is_data_valid``.
.. versionadded:: 0.19
n_skips_invalid_model_ : int
Number of iterations skipped due to an invalid model defined by
``is_model_valid``.
.. versionadded:: 0.19
Examples
--------
>>> from sklearn.linear_model import RANSACRegressor
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(
... n_samples=200, n_features=2, noise=4.0, random_state=0)
>>> reg = RANSACRegressor(random_state=0).fit(X, y)
>>> reg.score(X, y)
0.9885...
>>> reg.predict(X[:1,])
array([-31.9417...])
References
----------
.. [1] https://en.wikipedia.org/wiki/RANSAC
.. [2] https://www.sri.com/sites/default/files/publications/ransac-publication.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
"""
@_deprecate_positional_args
def __init__(self, base_estimator=None, *, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100, max_skips=np.inf,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, loss='absolute_loss',
random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.max_skips = max_skips
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.random_state = random_state
self.loss = loss
def fit(self, X, y, sample_weight=None):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
            Individual weights for each sample. Raises an error if
            sample_weight is passed and the base_estimator's fit method
            does not support it.
.. versionadded:: 0.18
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
# Need to validate separately here.
        # We can't pass multi_output=True because that would allow y to be csr.
check_X_params = dict(accept_sparse='csr')
check_y_params = dict(ensure_2d=False)
X, y = self._validate_data(X, y, validate_separately=(check_X_params,
check_y_params))
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples: n_samples = %d." % (X.shape[0]))
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.loss == "absolute_loss":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: np.abs(y_true - y_pred)
else:
loss_function = lambda \
y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)
elif self.loss == "squared_loss":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: (y_true - y_pred) ** 2
else:
loss_function = lambda \
y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)
elif callable(self.loss):
loss_function = self.loss
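            # For reference (illustrative, not part of the library): a custom
            # callable loss has the same shape as the built-in ones, e.g.
            #   loss=lambda y_true, y_pred: np.abs(y_true - y_pred) ** 1.5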
else:
raise ValueError(
"loss should be 'absolute_loss', 'squared_loss' or a callable."
"Got %s. " % self.loss)
random_state = check_random_state(self.random_state)
        try:  # Not all estimators accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
estimator_fit_has_sample_weight = has_fit_parameter(base_estimator,
"sample_weight")
estimator_name = type(base_estimator).__name__
if (sample_weight is not None and not
estimator_fit_has_sample_weight):
            raise ValueError("%s does not support sample_weight. Sample"
                             " weights can only be used if the base estimator"
                             " supports them." % estimator_name)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
n_inliers_best = 1
score_best = -np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
inlier_best_idxs_subset = None
self.n_skips_no_inliers_ = 0
self.n_skips_invalid_data_ = 0
self.n_skips_invalid_model_ = 0
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
self.n_trials_ = 0
max_trials = self.max_trials
while self.n_trials_ < max_trials:
self.n_trials_ += 1
if (self.n_skips_no_inliers_ + self.n_skips_invalid_data_ +
self.n_skips_invalid_model_) > self.max_skips:
break
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
self.n_skips_invalid_data_ += 1
continue
# fit model for current random sample set
if sample_weight is None:
base_estimator.fit(X_subset, y_subset)
else:
base_estimator.fit(X_subset, y_subset,
sample_weight=sample_weight[subset_idxs])
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
self.n_skips_invalid_model_ += 1
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
residuals_subset = loss_function(y, y_pred)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
            # fewer inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
self.n_skips_no_inliers_ += 1
continue
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
inlier_best_idxs_subset = inlier_idxs_subset
max_trials = min(
max_trials,
_dynamic_max_trials(n_inliers_best, n_samples,
min_samples, self.stop_probability))
# break if sufficient number of inliers or score is reached
if n_inliers_best >= self.stop_n_inliers or \
score_best >= self.stop_score:
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
if ((self.n_skips_no_inliers_ + self.n_skips_invalid_data_ +
self.n_skips_invalid_model_) > self.max_skips):
raise ValueError(
"RANSAC skipped more iterations than `max_skips` without"
" finding a valid consensus set. Iterations were skipped"
" because each randomly chosen sub-sample failed the"
" passing criteria. See estimator attributes for"
" diagnostics (n_skips*).")
else:
raise ValueError(
"RANSAC could not find a valid consensus set. All"
" `max_trials` iterations were skipped because each"
" randomly chosen sub-sample failed the passing criteria."
" See estimator attributes for diagnostics (n_skips*).")
else:
if (self.n_skips_no_inliers_ + self.n_skips_invalid_data_ +
self.n_skips_invalid_model_) > self.max_skips:
warnings.warn("RANSAC found a valid consensus set but exited"
" early due to skipping more iterations than"
" `max_skips`. See estimator attributes for"
" diagnostics (n_skips*).",
ConvergenceWarning)
# estimate final model using all inliers
if sample_weight is None:
base_estimator.fit(X_inlier_best, y_inlier_best)
else:
base_estimator.fit(
X_inlier_best,
y_inlier_best,
sample_weight=sample_weight[inlier_best_idxs_subset])
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self)
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
check_is_fitted(self)
return self.estimator_.score(X, y)
def _more_tags(self):
return {
'_xfail_checks': {
'check_sample_weights_invariance(kind=zeros)':
'zero sample_weight is not equivalent to removing samples',
}
}
|
bnaul/scikit-learn
|
sklearn/linear_model/_ransac.py
|
Python
|
bsd-3-clause
| 19,658
|
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates a TOC file from a Java jar.
The TOC file contains the non-package API of the jar. This includes all
public/protected/package classes/functions/members and the values of static
final variables (members with package access are kept because in some cases we
have multiple libraries with the same package, particularly test+non-test). Some
other information (major/minor javac version) is also included.
This TOC file then can be used to determine if a dependent library should be
rebuilt when this jar changes. I.e. any change to the jar that would require a
rebuild will have a corresponding change in the TOC file.
"""
import optparse
import os
import re
import sys
import zipfile
from util import build_utils
from util import md5_check
def GetClassesInZipFile(zip_file):
classes = []
files = zip_file.namelist()
for f in files:
if f.endswith('.class'):
# f is of the form org/chromium/base/Class$Inner.class
classes.append(f.replace('/', '.')[:-6])
return classes
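# For example (illustrative): an entry 'org/chromium/base/Foo$Inner.class' in
# the zip becomes the class name 'org.chromium.base.Foo$Inner'.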
def CallJavap(classpath, classes):
javap_cmd = [
'javap',
'-package', # Show public/protected/package.
# -verbose is required to get constant values (which can be inlined in
# dependents).
'-verbose',
'-classpath', classpath
] + classes
return build_utils.CheckOutput(javap_cmd)
def ExtractToc(disassembled_classes):
# javap output is structured by indent (2-space) levels.
good_patterns = [
'^[^ ]', # This includes all class/function/member signatures.
'^ SourceFile:',
'^ minor version:',
'^ major version:',
'^ Constant value:',
]
bad_patterns = [
'^const #', # Matches the constant pool (i.e. literals used in the class).
]
def JavapFilter(line):
return (re.match('|'.join(good_patterns), line) and
not re.match('|'.join(bad_patterns), line))
toc = filter(JavapFilter, disassembled_classes.split('\n'))
return '\n'.join(toc)
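# Illustrative behaviour of JavapFilter (example lines, not real javap
# output): a line such as 'public class Foo' is kept because it matches
# '^[^ ]', while a constant-pool line such as 'const #1 = String ...' is
# dropped because it also matches the bad pattern '^const #'.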
def UpdateToc(jar_path, toc_path):
classes = GetClassesInZipFile(zipfile.ZipFile(jar_path))
javap_output = CallJavap(classpath=jar_path, classes=classes)
toc = ExtractToc(javap_output)
with open(toc_path, 'w') as tocfile:
tocfile.write(toc)
def DoJarToc(options):
jar_path = options.jar_path
toc_path = options.toc_path
record_path = '%s.md5.stamp' % toc_path
md5_check.CallAndRecordIfStale(
lambda: UpdateToc(jar_path, toc_path),
record_path=record_path,
input_paths=[jar_path],
)
build_utils.Touch(toc_path)
def main(argv):
parser = optparse.OptionParser()
parser.add_option('--jar-path', help='Input .jar path.')
parser.add_option('--toc-path', help='Output .jar.TOC path.')
parser.add_option('--stamp', help='Path to touch on success.')
# TODO(newt): remove this once http://crbug.com/177552 is fixed in ninja.
parser.add_option('--ignore', help='Ignored.')
options, _ = parser.parse_args()
DoJarToc(options)
if options.stamp:
build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
ChromiumWebApps/chromium
|
build/android/gyp/jar_toc.py
|
Python
|
bsd-3-clause
| 3,249
|
"Pgsql Fts backend"
from django.db import transaction
from fts.backends.base import InvalidFtsBackendError
from fts.backends.base import BaseClass, BaseModel, BaseManager
from django.db import models
LANGUAGES = {
'' : 'simple',
'da' : 'danish',
'nl' : 'dutch',
'en' : 'english',
'fi' : 'finnish',
'fr' : 'french',
'de' : 'german',
'hu' : 'hungarian',
'it' : 'italian',
'no' : 'norwegian',
'pl' : 'polish',
'pt' : 'portuguese',
'ro' : 'romanian',
'ru' : 'russian',
'es' : 'spanish',
'sv' : 'swedish',
'tr' : 'turkish',
}
class _VectorField(models.Field):
def __init__(self, *args, **kwargs):
kwargs['null'] = True
kwargs['editable'] = False
kwargs['serialize'] = False
super(_VectorField, self).__init__(*args, **kwargs)
def db_type(self):
return 'tsvector'
class SearchClass(BaseClass):
def __init__(self, server, params):
from django.conf import settings
        if settings.DATABASE_ENGINE not in ['postgresql', 'postgresql_psycopg2']:
raise InvalidFtsBackendError("PostgreSQL with tsearch2 support is needed to use the pgsql FTS backend")
self.backend = 'pgsql'
class SearchManager(BaseManager):
def __init__(self, **kwargs):
super(SearchManager, self).__init__(**kwargs)
self.language = LANGUAGES[self.language_code]
self._vector_field_cache = None
def _vector_field(self):
"""
Returns the _VectorField defined for this manager's model. There must be exactly one _VectorField defined.
"""
if self._vector_field_cache is not None:
return self._vector_field_cache
vectors = [f for f in self.model._meta.fields if isinstance(f, _VectorField)]
if len(vectors) != 1:
raise ValueError('There must be exactly 1 _VectorField defined for the %s model.' % self.model._meta.object_name)
self._vector_field_cache = vectors[0]
return self._vector_field_cache
vector_field = property(_vector_field)
def _vector_sql(self, field, weight=None):
"""
Returns the SQL used to build a tsvector from the given (django) field name.
"""
if weight is None:
weight = self.default_weight
f = self.model._meta.get_field(field)
return "setweight(to_tsvector('%s', coalesce(\"%s\",'')), '%s')" % (self.language, f.column, weight)
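    # For example (illustrative, assuming a text field whose column is named
    # 'title'), _vector_sql('title', 'A') with the 'english' configuration
    # produces SQL along the lines of:
    #   setweight(to_tsvector('english', coalesce("title",'')), 'A')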
@transaction.commit_on_success
def update_index(self, pk=None):
from django.db import connection
# Build a list of SQL clauses that generate tsvectors for each specified field.
clauses = []
if self.fields is None:
self.fields = self._find_text_fields()
if isinstance(self.fields, (list,tuple)):
for field in self.fields:
clauses.append(self._vector_sql(field))
else:
for field, weight in self.fields.items():
clauses.append(self._vector_sql(field, weight))
vector_sql = ' || '.join(clauses)
where = ''
# If one or more pks are specified, tack a WHERE clause onto the SQL.
if pk is not None:
if isinstance(pk, (list,tuple)):
ids = ','.join([str(v) for v in pk])
where = ' WHERE "%s" IN (%s)' % (self.model._meta.pk.column, ids)
else:
where = ' WHERE "%s" = %s' % (self.model._meta.pk.column, pk)
sql = 'UPDATE "%s" SET "%s" = %s%s' % (self.model._meta.db_table, self.vector_field.column, vector_sql, where)
cursor = connection.cursor()
transaction.set_dirty()
cursor.execute(sql)
def search(self, query, **kwargs):
"""
Returns a queryset after having applied the full-text search query. If rank_field
is specified, it is the name of the field that will be put on each returned instance.
When specifying a rank_field, the results will automatically be ordered by -rank_field.
For possible rank_normalization values, refer to:
http://www.postgresql.org/docs/8.3/static/textsearch-controls.html#TEXTSEARCH-RANKING
"""
rank_field = kwargs.get('rank_field')
rank_normalization = kwargs.get('rank_normalization', 32)
ts_query = "plainto_tsquery('%s','%s')" % (self.language, unicode(query).replace("'", "''"))
where = "\"%s\" @@ %s" % (self.vector_field.column, ts_query)
select = {}
order = []
if rank_field is not None:
select[rank_field] = 'ts_rank("%s", %s, %d)' % (self.vector_field.column, ts_query, rank_normalization)
order = ['-%s' % rank_field]
return self.all().extra(select=select, where=[where], order_by=order)
class SearchableModel(BaseModel):
class Meta:
abstract = True
search_index = _VectorField()
objects = SearchManager()
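# Example usage (illustrative; 'Article' is a hypothetical concrete model
# inheriting from SearchableModel with its text fields registered):
#   Article.objects.update_index()
#   results = Article.objects.search('full text query', rank_field='rank')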
|
ryszard/django-fts
|
fts/backends/pgsql.py
|
Python
|
bsd-3-clause
| 5,071
|
from ..java import (
Method as JavaMethod,
opcodes as JavaOpcodes,
RuntimeVisibleAnnotations,
Annotation,
ConstantElementValue,
)
from .blocks import Block
from .opcodes import (
ALOAD_name, ASTORE_name, free_name,
ILOAD_name, FLOAD_name, DLOAD_name,
Opcode, TRY, CATCH, END_TRY
)
POSITIONAL_OR_KEYWORD = 1
VAR_POSITIONAL = 2
KEYWORD_ONLY = 3
VAR_KEYWORD = 4
CO_VARARGS = 0x0004
CO_VARKEYWORDS = 0x0008
def descriptor(annotation):
if annotation == "bool":
return "Z"
elif annotation == "byte":
return "B"
elif annotation == 'char':
return "C"
elif annotation == "short":
return "S"
elif annotation == "int":
return "I"
elif annotation == "long":
return "J"
elif annotation == "float":
return "F"
elif annotation == "double":
return "D"
elif annotation is None:
return "V"
else:
return "L%s;" % annotation
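# For example: descriptor('int') == 'I', descriptor(None) == 'V', and
# descriptor('org/python/Object') == 'Lorg/python/Object;'.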
class Method(Block):
def __init__(self, parent, name, parameters, returns=None, static=False, commands=None, code=None):
super().__init__(parent, commands=commands)
self.name = name
self.parameters = parameters
if returns is None:
self.returns = {
'annotation': 'org/python/Object'
}
else:
self.returns = returns
# Load args and kwargs, but don't expose those names into the local_vars.
self.add_self()
for i, param in enumerate(self.parameters[self.self_offset:]):
self.local_vars[param['name']] = len(self.local_vars)
self.static = static
self.code_obj = code
def __repr__(self):
return '<Method %s (%s parameters)>' % (self.name, len(self.parameters))
@property
def is_constructor(self):
return False
@property
def is_closuremethod(self):
return False
@property
def globals_module(self):
return self.module
def store_name(self, name, use_locals):
if use_locals:
self.add_opcodes(
ASTORE_name(self, name)
)
else:
self.add_opcodes(
ASTORE_name(self, '#value'),
JavaOpcodes.GETSTATIC('org/python/ImportLib', 'modules', 'Ljava/util/Map;'),
JavaOpcodes.LDC_W(self.globals_module.descriptor),
JavaOpcodes.INVOKEINTERFACE('java/util/Map', 'get', '(Ljava/lang/Object;)Ljava/lang/Object;'),
JavaOpcodes.CHECKCAST('org/python/types/Module'),
JavaOpcodes.LDC_W(name),
ALOAD_name(self, '#value'),
JavaOpcodes.INVOKEINTERFACE('org/python/Object', '__setattr__', '(Ljava/lang/String;Lorg/python/Object;)V'),
)
free_name(self, '#value')
def store_dynamic(self):
raise NotImplementedError('Methods cannot dynamically store variables.')
def load_name(self, name, use_locals):
if use_locals:
try:
self.add_opcodes(
ALOAD_name(self, name)
)
return
except KeyError:
pass
self.add_opcodes(
JavaOpcodes.GETSTATIC('org/python/ImportLib', 'modules', 'Ljava/util/Map;'),
JavaOpcodes.LDC_W(self.globals_module.descriptor),
JavaOpcodes.INVOKEINTERFACE('java/util/Map', 'get', '(Ljava/lang/Object;)Ljava/lang/Object;'),
JavaOpcodes.CHECKCAST('org/python/types/Module'),
JavaOpcodes.LDC_W(name),
JavaOpcodes.INVOKEVIRTUAL('org/python/types/Module', '__getattribute__', '(Ljava/lang/String;)Lorg/python/Object;'),
)
def delete_name(self, name):
try:
            free_name(self, name)
except KeyError:
self.add_opcodes(
JavaOpcodes.GETSTATIC('org/python/ImportLib', 'modules', 'Ljava/util/Map;'),
JavaOpcodes.LDC_W(self.globals_module.descriptor),
JavaOpcodes.INVOKEINTERFACE('java/util/Map', 'get', '(Ljava/lang/Object;)Ljava/lang/Object;'),
JavaOpcodes.CHECKCAST('org/python/types/Module'),
JavaOpcodes.LDC_W(name),
JavaOpcodes.INVOKEVIRTUAL('org/python/types/Module', '__delattr__', '(Ljava/lang/String;)Lorg/python/Object;'),
)
@property
def can_ignore_empty(self):
return False
@property
def signature(self):
return_descriptor = descriptor(self.returns.get('annotation'))
return '(%s)%s' % (
''.join(descriptor(p['annotation']) for p in self.parameters[self.self_offset:]),
return_descriptor
)
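    # For example (illustrative): a method taking an int and an
    # org/python/Object parameter and returning org/python/Object has the
    # JVM signature '(ILorg/python/Object;)Lorg/python/Object;'.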
@property
def method_name(self):
return self.name
@property
def module(self):
return self.parent
@property
def self_offset(self):
return 0
def add_self(self):
pass
def add_method(self, method_name, code, annotations):
# If a method is added to a method, it is added as an anonymous
# inner class.
from .klass import ClosureClass
callable = ClosureClass(
parent=self.parent,
name='%s$%s' % (self.parent.name, method_name.replace('.<locals>.', '$')),
closure_var_names=code.co_names,
extends='org/python/types/Closure',
implements=['org/python/Callable'],
public=True,
final=True,
)
method = ClosureMethod(
callable,
name='invoke',
parameters=extract_parameters(code, annotations),
returns={
'annotation': annotations.get('return', 'org/python/Object').replace('.', '/')
},
code=code
)
method.extract(code)
callable.methods.append(method)
callable.fields = dict(
(name, 'Lorg/python/Object;')
for name in code.co_names
)
self.parent.classes.append(callable)
return method
def transpile_args(self):
for i, param in enumerate(self.parameters[self.self_offset:]):
annotation = param.get('annotation', 'org/python/Object')
if annotation is None:
raise Exception("Arguments can't be void")
elif annotation == "bool":
self.add_opcodes(
JavaOpcodes.NEW('org/python/types/Bool'),
JavaOpcodes.DUP(),
ILOAD_name(self, param['name']),
JavaOpcodes.INVOKESPECIAL('org/python/types/Bool', '<init>', '(Z)V'),
ASTORE_name(self, param['name']),
)
elif annotation == "byte":
self.add_opcodes(
JavaOpcodes.NEW('org/python/types/Int'),
JavaOpcodes.DUP(),
ILOAD_name(self, param['name']),
JavaOpcodes.INVOKESPECIAL('org/python/types/Int', '<init>', '(B)V'),
ASTORE_name(self, param['name']),
)
elif annotation == 'char':
self.add_opcodes(
JavaOpcodes.NEW('org/python/types/Str'),
JavaOpcodes.DUP(),
ILOAD_name(self, param['name']),
JavaOpcodes.INVOKESPECIAL('org/python/types/Str', '<init>', '(C)V'),
ASTORE_name(self, param['name']),
)
elif annotation == "short":
self.add_opcodes(
JavaOpcodes.NEW('org/python/types/Int'),
JavaOpcodes.DUP(),
ILOAD_name(self, param['name']),
JavaOpcodes.INVOKESPECIAL('org/python/types/Int', '<init>', '(S)V'),
ASTORE_name(self, param['name']),
)
elif annotation == "int":
self.add_opcodes(
JavaOpcodes.NEW('org/python/types/Int'),
JavaOpcodes.DUP(),
ILOAD_name(self, param['name']),
JavaOpcodes.INVOKESPECIAL('org/python/types/Int', '<init>', '(I)V'),
ASTORE_name(self, param['name']),
)
elif annotation == "long":
self.add_opcodes(
JavaOpcodes.NEW('org/python/types/Int'),
JavaOpcodes.DUP(),
ILOAD_name(self, param['name']),
JavaOpcodes.INVOKESPECIAL('org/python/types/Int', '<init>', '(J)V'),
ASTORE_name(self, param['name']),
)
elif annotation == "float":
self.add_opcodes(
JavaOpcodes.NEW('org/python/types/Float'),
JavaOpcodes.DUP(),
FLOAD_name(self, param['name']),
JavaOpcodes.INVOKESPECIAL('org/python/types/Float', '<init>', '(F)V'),
ASTORE_name(self, param['name']),
)
elif annotation == "double":
self.add_opcodes(
JavaOpcodes.NEW('org/python/types/Float'),
JavaOpcodes.DUP(),
DLOAD_name(self, param['name']),
JavaOpcodes.INVOKESPECIAL('org/python/types/Float', '<init>', '(D)V'),
ASTORE_name(self, param['name']),
)
elif annotation != 'org/python/Object':
self.add_opcodes(
JavaOpcodes.NEW('org/python/java/Object'),
JavaOpcodes.DUP(),
ALOAD_name(self, param['name']),
                    JavaOpcodes.INVOKESPECIAL('org/python/java/Object', '<init>', '(Ljava/lang/Object;)V'),
ASTORE_name(self, param['name']),
)
def transpile_setup(self):
self.transpile_args()
def transpile_teardown(self):
if len(self.code) == 0 or not isinstance(self.code[-1], (JavaOpcodes.RETURN, JavaOpcodes.ARETURN)):
if self.returns.get('annotation') is None:
self.add_opcodes(JavaOpcodes.RETURN())
else:
self.add_opcodes(JavaOpcodes.ARETURN())
def method_attributes(self):
return [
RuntimeVisibleAnnotations([
Annotation(
'Lorg/python/Method;',
{
'__doc__': ConstantElementValue("Python method (insert docs here)")
}
)
])
]
def transpile(self):
code = super().transpile()
return JavaMethod(
self.method_name,
self.signature,
static=self.static,
attributes=[code] + self.method_attributes()
)
class InitMethod(Method):
def __init__(self, parent):
super().__init__(
parent, '<init>',
parameters=[
{
'name': 'self',
'kind': POSITIONAL_OR_KEYWORD
},
{
'name': 'args',
'kind': POSITIONAL_OR_KEYWORD
},
{
'name': 'kwargs',
'kind': POSITIONAL_OR_KEYWORD
}
],
returns={'annotation': None},
)
def __repr__(self):
return '<Constructor %s (%s parameters)>' % (self.klass.name, len(self.parameters))
@property
def is_constructor(self):
return True
@property
def klass(self):
return self.parent
@property
def module(self):
return self.klass.module
@property
def can_ignore_empty(self):
return False
@property
def signature(self):
return '([Lorg/python/Object;Ljava/util/Map;)V'
@property
def self_offset(self):
return 1
def add_self(self):
self.local_vars['self'] = len(self.local_vars)
def transpile_setup(self):
self.add_opcodes(
JavaOpcodes.ALOAD_0(),
JavaOpcodes.INVOKESPECIAL(self.klass.extends, '<init>', '()V'),
)
self.add_opcodes(
TRY(),
JavaOpcodes.ALOAD_0(),
JavaOpcodes.LDC_W('__init__'),
JavaOpcodes.INVOKEINTERFACE('org/python/Object', '__getattribute__', '(Ljava/lang/String;)Lorg/python/Object;'),
)
for i, param in enumerate(self.parameters[self.self_offset:]):
self.add_opcodes(
ALOAD_name(self, param['name']),
)
self.add_opcodes(
JavaOpcodes.INVOKEINTERFACE('org/python/Callable', 'invoke', '([Lorg/python/Object;Ljava/util/Map;)Lorg/python/Object;'),
CATCH('org/python/exceptions/AttributeError'),
END_TRY(),
)
def transpile_teardown(self):
self.add_opcodes(
JavaOpcodes.RETURN()
)
class InstanceMethod(Method):
def __init__(self, parent, name, parameters, returns=None, static=False, commands=None, code=None):
super().__init__(
parent, name,
parameters=parameters,
returns=returns,
static=static,
commands=commands,
code=code
)
def __repr__(self):
return '<InstanceMethod %s.%s (%s parameters)>' % (self.klass.name, self.name, len(self.parameters))
@property
def klass(self):
return self.parent
@property
def module(self):
return self.klass.module
@property
def self_offset(self):
return 1
def add_self(self):
self.local_vars['self'] = len(self.local_vars)
class MainMethod(Method):
def __init__(self, parent, commands=None, code=None, end_offset=None):
super().__init__(
parent, '__main__',
parameters=[{'name': 'args', 'annotation': 'argv'}],
returns={'annotation': None},
static=True,
commands=commands,
code=code
)
self.end_offset = end_offset
def __repr__(self):
return '<MainMethod %s>' % self.module.name
@property
def method_name(self):
return 'main'
@property
def module(self):
return self.parent
@property
def signature(self):
return '([Ljava/lang/String;)V'
@property
def can_ignore_empty(self):
return True
@property
def globals_module(self):
return self.module
def add_self(self):
pass
def store_name(self, name, use_locals):
self.add_opcodes(
ASTORE_name(self, '#value'),
JavaOpcodes.GETSTATIC('org/python/ImportLib', 'modules', 'Ljava/util/Map;'),
JavaOpcodes.LDC_W(self.module.descriptor),
JavaOpcodes.INVOKEINTERFACE('java/util/Map', 'get', '(Ljava/lang/Object;)Ljava/lang/Object;'),
JavaOpcodes.CHECKCAST('org/python/types/Module'),
JavaOpcodes.LDC_W(name),
ALOAD_name(self, '#value'),
JavaOpcodes.INVOKEINTERFACE('org/python/Object', '__setattr__', '(Ljava/lang/String;Lorg/python/Object;)V'),
)
free_name(self, '#value')
def store_dynamic(self):
self.add_opcodes(
ASTORE_name(self, '#value'),
JavaOpcodes.LDC_W(self.module.descriptor),
JavaOpcodes.INVOKESTATIC('org/python/types/Type', 'pythonType', '(Ljava/lang/String;)Lorg/python/types/Type;'),
JavaOpcodes.GETFIELD('org/python/types/Type', 'attrs', 'Ljava/util/Map;'),
ALOAD_name(self, '#value'),
JavaOpcodes.INVOKEINTERFACE('java/util/Map', 'putAll', '(Ljava/util/Map;)V'),
)
free_name(self, '#value')
def load_name(self, name, use_locals):
self.add_opcodes(
JavaOpcodes.GETSTATIC('org/python/ImportLib', 'modules', 'Ljava/util/Map;'),
JavaOpcodes.LDC_W(self.module.descriptor),
JavaOpcodes.INVOKEINTERFACE('java/util/Map', 'get', '(Ljava/lang/Object;)Ljava/lang/Object;'),
JavaOpcodes.CHECKCAST('org/python/types/Module'),
JavaOpcodes.LDC_W(name),
JavaOpcodes.INVOKEINTERFACE('org/python/Object', '__getattribute__', '(Ljava/lang/String;)Lorg/python/Object;'),
)
def delete_name(self, name, use_locals):
self.add_opcodes(
JavaOpcodes.GETSTATIC('org/python/ImportLib', 'modules', 'Ljava/util/Map;'),
JavaOpcodes.LDC_W(self.module.descriptor),
JavaOpcodes.INVOKEINTERFACE('java/util/Map', 'get', '(Ljava/lang/Object;)Ljava/lang/Object;'),
JavaOpcodes.CHECKCAST('org/python/types/Module'),
JavaOpcodes.LDC_W(name),
JavaOpcodes.INVOKEVIRTUAL('org/python/types/Module', '__delattr__', '(Ljava/lang/String;)Lorg/python/Object;'),
)
def transpile_setup(self):
self.add_opcodes(
# Register this module as being __main__
JavaOpcodes.GETSTATIC('org/python/ImportLib', 'modules', 'Ljava/util/Map;'),
JavaOpcodes.LDC_W('__main__'),
JavaOpcodes.NEW('org/python/types/Module'),
JavaOpcodes.DUP(),
JavaOpcodes.LDC_W(self.module.descriptor.replace('/', '.')),
JavaOpcodes.INVOKESTATIC('java/lang/Class', 'forName', '(Ljava/lang/String;)Ljava/lang/Class;'),
JavaOpcodes.INVOKESPECIAL('org/python/types/Module', '<init>', '(Ljava/lang/Class;)V'),
JavaOpcodes.INVOKEINTERFACE('java/util/Map', 'put', '(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;'),
JavaOpcodes.POP(),
)
# If there are any commands in this main method,
# add a TRY-CATCH for SystemExit
if self.commands:
self.add_opcodes(
TRY()
)
def transpile_teardown(self):
# Main method is a special case - it always returns Null,
# but the code doesn't contain this return, so the jump
# target doesn't exist. Fake a jump target for the return
java_op = JavaOpcodes.RETURN()
if self.end_offset:
python_op = Opcode(self.end_offset, None, True)
python_op.start_op = java_op
self.jump_targets[self.end_offset] = python_op
self.add_opcodes(java_op)
# If there are any commands in this main method,
# finish the TRY-CATCH for SystemExit
if self.commands:
self.add_opcodes(
CATCH('org/python/exceptions/SystemExit'),
JavaOpcodes.GETFIELD('org/python/exceptions/SystemExit', 'return_code', 'I'),
JavaOpcodes.INVOKESTATIC('java/lang/System', 'exit', '(I)V'),
END_TRY(),
JavaOpcodes.RETURN()
)
def method_attributes(self):
return [
]
class ClosureMethod(Method):
def __init__(self, parent, name, parameters, returns=None, static=False, commands=None, code=None):
super().__init__(
parent, name,
parameters=parameters,
returns=returns,
static=static,
commands=commands,
code=code
)
def __repr__(self):
return '<ClosureMethod %s (%s parameters, %s closure vars)>' % (
self.name, len(self.parameters), len(self.parent.closure_var_names)
)
@property
def is_closuremethod(self):
return True
@property
def globals_module(self):
return self.module.parent
def add_self(self):
self.local_vars['self'] = len(self.local_vars)
# def _insert_closure_vars(self):
# # Load all the arguments into locals
# setup = []
# for i, closure_var_name in enumerate(self.parent.closure_var_names):
# setup.extend([
# ALOAD_name(self, 'self'),
# JavaOpcodes.GETFIELD('org/python/types/Function', closure_var_name, 'Lorg/python/Object;'),
# ASTORE_name(self, closure_var_name),
# ])
# self.code = setup + self.code
def extract_parameters(code, annotations):
pos_count = code.co_argcount
arg_names = code.co_varnames
keyword_only_count = code.co_kwonlyargcount
parameters = []
# Non-keyword-only parameters.
for offset, name in enumerate(arg_names[0:pos_count]):
parameters.append({
'name': name,
'annotation': annotations.get(name, 'org/python/Object'),
'kind': POSITIONAL_OR_KEYWORD,
})
# *args
if code.co_flags & CO_VARARGS:
name = arg_names[pos_count + keyword_only_count]
annotation = annotations.get(name, 'org/python/Object')
parameters.append({
'name': name,
'annotation': annotation,
'kind': VAR_POSITIONAL
})
# Keyword-only parameters.
for name in arg_names[pos_count:pos_count + keyword_only_count]:
parameters.append({
'name': name,
'annotation': annotations.get(name, 'org/python/Object'),
'kind': KEYWORD_ONLY,
})
# **kwargs
if code.co_flags & CO_VARKEYWORDS:
index = pos_count + keyword_only_count
if code.co_flags & CO_VARARGS:
index += 1
name = arg_names[index]
parameters.append({
'name': name,
'annotation': annotations.get(name, 'org/python/Object'),
'kind': VAR_KEYWORD
})
return parameters
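# Illustrative example (not part of the module): for a function defined as
#   def f(a, *args, b, **kwargs): ...
# the code object has co_argcount == 1, co_kwonlyargcount == 1 and both
# CO_VARARGS and CO_VARKEYWORDS set, so extract_parameters returns entries for
# 'a' (POSITIONAL_OR_KEYWORD), 'args' (VAR_POSITIONAL), 'b' (KEYWORD_ONLY) and
# 'kwargs' (VAR_KEYWORD), in that order.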
|
rubacalypse/voc
|
voc/python/methods.py
|
Python
|
bsd-3-clause
| 21,563
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model(['Integration'], ['MovingMedian'], ['Seasonal_Hour'], ['SVR'])
|
antoinecarme/pyaf
|
tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_MovingMedian_Seasonal_Hour_SVR.py
|
Python
|
bsd-3-clause
| 161
|
from django.test import SimpleTestCase
from corehq.apps.reports.view_helpers import normalize_date
from datetime import date, datetime
class TestCaseTags(SimpleTestCase):
def test_normalize_date(self):
self.assertIsInstance(normalize_date(date.today()), datetime)
self.assertIsInstance(normalize_date(datetime.utcnow()), datetime)
self.assertEqual(normalize_date('123'), '123')
|
dimagi/commcare-hq
|
corehq/ex-submodules/casexml/apps/case/tests/test_tags.py
|
Python
|
bsd-3-clause
| 409
|
import compileall
import os
from importlib import import_module
from django.db import connection, connections
from django.db.migrations.exceptions import (
AmbiguityError, InconsistentMigrationHistory, NodeNotFoundError,
)
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.recorder import MigrationRecorder
from django.test import TestCase, modify_settings, override_settings
from .test_base import MigrationTestBase
class RecorderTests(TestCase):
"""
Tests recording migrations as applied or not.
"""
databases = {'default', 'other'}
def test_apply(self):
"""
Tests marking migrations as applied/unapplied.
"""
recorder = MigrationRecorder(connection)
self.assertEqual(
{(x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"},
set(),
)
recorder.record_applied("myapp", "0432_ponies")
self.assertEqual(
{(x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"},
{("myapp", "0432_ponies")},
)
# That should not affect records of another database
recorder_other = MigrationRecorder(connections['other'])
self.assertEqual(
{(x, y) for (x, y) in recorder_other.applied_migrations() if x == "myapp"},
set(),
)
recorder.record_unapplied("myapp", "0432_ponies")
self.assertEqual(
{(x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"},
set(),
)
class LoaderTests(TestCase):
"""
Tests the disk and database loader, and running through migrations
in memory.
"""
def setUp(self):
self.applied_records = []
def tearDown(self):
# Unapply records on databases that don't roll back changes after each
# test method.
if not connection.features.supports_transactions:
for recorder, app, name in self.applied_records:
recorder.record_unapplied(app, name)
def record_applied(self, recorder, app, name):
recorder.record_applied(app, name)
self.applied_records.append((recorder, app, name))
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
@modify_settings(INSTALLED_APPS={'append': 'basic'})
def test_load(self):
"""
Makes sure the loader can load the migrations for the test apps,
and then render them out to a new Apps.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0002_second")),
[
("migrations", "0001_initial"),
("migrations", "0002_second"),
],
)
# Now render it out!
project_state = migration_loader.project_state(("migrations", "0002_second"))
self.assertEqual(len(project_state.models), 2)
author_state = project_state.models["migrations", "author"]
self.assertEqual(
list(author_state.fields),
["id", "name", "slug", "age", "rating"]
)
book_state = project_state.models["migrations", "book"]
self.assertEqual(list(book_state.fields), ['id', 'author'])
# Ensure we've included unmigrated apps in there too
self.assertIn("basic", project_state.real_apps)
@override_settings(MIGRATION_MODULES={
'migrations': 'migrations.test_migrations',
'migrations2': 'migrations2.test_migrations_2',
})
@modify_settings(INSTALLED_APPS={'append': 'migrations2'})
def test_plan_handles_repeated_migrations(self):
"""
        _generate_plan() doesn't re-add migrations already in the plan (#29180).
"""
migration_loader = MigrationLoader(connection)
nodes = [('migrations', '0002_second'), ('migrations2', '0001_initial')]
self.assertEqual(
migration_loader.graph._generate_plan(nodes, at_end=True),
[('migrations', '0001_initial'), ('migrations', '0002_second'), ('migrations2', '0001_initial')]
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_unmigdep"})
def test_load_unmigrated_dependency(self):
"""
Makes sure the loader can load migrations with a dependency on an unmigrated app.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0001_initial")),
[
('contenttypes', '0001_initial'),
('auth', '0001_initial'),
("migrations", "0001_initial"),
],
)
# Now render it out!
project_state = migration_loader.project_state(("migrations", "0001_initial"))
self.assertEqual(len([m for a, m in project_state.models if a == "migrations"]), 1)
book_state = project_state.models["migrations", "book"]
self.assertEqual(list(book_state.fields), ['id', 'user'])
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_run_before"})
def test_run_before(self):
"""
Makes sure the loader uses Migration.run_before.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0002_second")),
[
("migrations", "0001_initial"),
("migrations", "0003_third"),
("migrations", "0002_second"),
],
)
@override_settings(MIGRATION_MODULES={
"migrations": "migrations.test_migrations_first",
"migrations2": "migrations2.test_migrations_2_first",
})
@modify_settings(INSTALLED_APPS={'append': 'migrations2'})
def test_first(self):
"""
Makes sure the '__first__' migrations build correctly.
"""
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "second")),
[
("migrations", "thefirst"),
("migrations2", "0001_initial"),
("migrations2", "0002_second"),
("migrations", "second"),
],
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_name_match(self):
"Tests prefix name matching"
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.get_migration_by_prefix("migrations", "0001").name,
"0001_initial",
)
msg = "There is more than one migration for 'migrations' with the prefix '0'"
with self.assertRaisesMessage(AmbiguityError, msg):
migration_loader.get_migration_by_prefix("migrations", "0")
msg = "There is no migration for 'migrations' with the prefix 'blarg'"
with self.assertRaisesMessage(KeyError, msg):
migration_loader.get_migration_by_prefix("migrations", "blarg")
def test_load_import_error(self):
with override_settings(MIGRATION_MODULES={"migrations": "import_error_package"}):
with self.assertRaises(ImportError):
MigrationLoader(connection)
def test_load_module_file(self):
with override_settings(MIGRATION_MODULES={"migrations": "migrations.faulty_migrations.file"}):
loader = MigrationLoader(connection)
self.assertIn(
"migrations", loader.unmigrated_apps,
"App with migrations module file not in unmigrated apps."
)
def test_load_empty_dir(self):
with override_settings(MIGRATION_MODULES={"migrations": "migrations.faulty_migrations.namespace"}):
loader = MigrationLoader(connection)
self.assertIn(
"migrations", loader.unmigrated_apps,
"App missing __init__.py in migrations module not in unmigrated apps."
)
@override_settings(
INSTALLED_APPS=['migrations.migrations_test_apps.migrated_app'],
)
def test_marked_as_migrated(self):
"""
Undefined MIGRATION_MODULES implies default migration module.
"""
migration_loader = MigrationLoader(connection)
self.assertEqual(migration_loader.migrated_apps, {'migrated_app'})
self.assertEqual(migration_loader.unmigrated_apps, set())
@override_settings(
INSTALLED_APPS=['migrations.migrations_test_apps.migrated_app'],
MIGRATION_MODULES={"migrated_app": None},
)
def test_marked_as_unmigrated(self):
"""
MIGRATION_MODULES allows disabling of migrations for a particular app.
"""
migration_loader = MigrationLoader(connection)
self.assertEqual(migration_loader.migrated_apps, set())
self.assertEqual(migration_loader.unmigrated_apps, {'migrated_app'})
@override_settings(
INSTALLED_APPS=['migrations.migrations_test_apps.migrated_app'],
MIGRATION_MODULES={'migrated_app': 'missing-module'},
)
def test_explicit_missing_module(self):
"""
If a MIGRATION_MODULES override points to a missing module, the error
raised during the importation attempt should be propagated unless
`ignore_no_migrations=True`.
"""
with self.assertRaisesMessage(ImportError, 'missing-module'):
migration_loader = MigrationLoader(connection)
migration_loader = MigrationLoader(connection, ignore_no_migrations=True)
self.assertEqual(migration_loader.migrated_apps, set())
self.assertEqual(migration_loader.unmigrated_apps, {'migrated_app'})
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
def test_loading_squashed(self):
"Tests loading a squashed migration"
migration_loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
self.addCleanup(recorder.flush)
# Loading with nothing applied should just give us the one node
self.assertEqual(
len([x for x in migration_loader.graph.nodes if x[0] == "migrations"]),
1,
)
# However, fake-apply one migration and it should now use the old two
self.record_applied(recorder, 'migrations', '0001_initial')
migration_loader.build_graph()
self.assertEqual(
len([x for x in migration_loader.graph.nodes if x[0] == "migrations"]),
2,
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_complex"})
def test_loading_squashed_complex(self):
"Tests loading a complex set of squashed migrations"
loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
self.addCleanup(recorder.flush)
def num_nodes():
plan = set(loader.graph.forwards_plan(('migrations', '7_auto')))
return len(plan - loader.applied_migrations.keys())
# Empty database: use squashed migration
loader.build_graph()
self.assertEqual(num_nodes(), 5)
# Starting at 1 or 2 should use the squashed migration too
self.record_applied(recorder, 'migrations', '1_auto')
loader.build_graph()
self.assertEqual(num_nodes(), 4)
self.record_applied(recorder, 'migrations', '2_auto')
loader.build_graph()
self.assertEqual(num_nodes(), 3)
# However, starting at 3 to 5 cannot use the squashed migration
self.record_applied(recorder, 'migrations', '3_auto')
loader.build_graph()
self.assertEqual(num_nodes(), 4)
self.record_applied(recorder, 'migrations', '4_auto')
loader.build_graph()
self.assertEqual(num_nodes(), 3)
# Starting at 5 to 7 we are past the squashed migrations.
self.record_applied(recorder, 'migrations', '5_auto')
loader.build_graph()
self.assertEqual(num_nodes(), 2)
self.record_applied(recorder, 'migrations', '6_auto')
loader.build_graph()
self.assertEqual(num_nodes(), 1)
self.record_applied(recorder, 'migrations', '7_auto')
loader.build_graph()
self.assertEqual(num_nodes(), 0)
@override_settings(MIGRATION_MODULES={
"app1": "migrations.test_migrations_squashed_complex_multi_apps.app1",
"app2": "migrations.test_migrations_squashed_complex_multi_apps.app2",
})
@modify_settings(INSTALLED_APPS={'append': [
"migrations.test_migrations_squashed_complex_multi_apps.app1",
"migrations.test_migrations_squashed_complex_multi_apps.app2",
]})
def test_loading_squashed_complex_multi_apps(self):
loader = MigrationLoader(connection)
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
expected_plan = {
('app1', '1_auto'),
('app2', '1_squashed_2'),
('app1', '2_squashed_3'),
('app1', '4_auto'),
}
self.assertEqual(plan, expected_plan)
@override_settings(MIGRATION_MODULES={
"app1": "migrations.test_migrations_squashed_complex_multi_apps.app1",
"app2": "migrations.test_migrations_squashed_complex_multi_apps.app2",
})
@modify_settings(INSTALLED_APPS={'append': [
"migrations.test_migrations_squashed_complex_multi_apps.app1",
"migrations.test_migrations_squashed_complex_multi_apps.app2",
]})
def test_loading_squashed_complex_multi_apps_partially_applied(self):
loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
self.record_applied(recorder, 'app1', '1_auto')
self.record_applied(recorder, 'app1', '2_auto')
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
plan = plan - loader.applied_migrations.keys()
expected_plan = {
('app2', '1_squashed_2'),
('app1', '3_auto'),
('app1', '4_auto'),
}
self.assertEqual(plan, expected_plan)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_erroneous"})
def test_loading_squashed_erroneous(self):
"Tests loading a complex but erroneous set of squashed migrations"
loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
self.addCleanup(recorder.flush)
def num_nodes():
plan = set(loader.graph.forwards_plan(('migrations', '7_auto')))
return len(plan - loader.applied_migrations.keys())
# Empty database: use squashed migration
loader.build_graph()
self.assertEqual(num_nodes(), 5)
# Starting at 1 or 2 should use the squashed migration too
self.record_applied(recorder, 'migrations', '1_auto')
loader.build_graph()
self.assertEqual(num_nodes(), 4)
self.record_applied(recorder, 'migrations', '2_auto')
loader.build_graph()
self.assertEqual(num_nodes(), 3)
# However, starting at 3 or 4, nonexistent migrations would be needed.
msg = ("Migration migrations.6_auto depends on nonexistent node ('migrations', '5_auto'). "
"Django tried to replace migration migrations.5_auto with any of "
"[migrations.3_squashed_5] but wasn't able to because some of the replaced "
"migrations are already applied.")
self.record_applied(recorder, 'migrations', '3_auto')
with self.assertRaisesMessage(NodeNotFoundError, msg):
loader.build_graph()
self.record_applied(recorder, 'migrations', '4_auto')
with self.assertRaisesMessage(NodeNotFoundError, msg):
loader.build_graph()
        # Starting at 5 to 7 we are past the squashed migrations
self.record_applied(recorder, 'migrations', '5_auto')
loader.build_graph()
self.assertEqual(num_nodes(), 2)
self.record_applied(recorder, 'migrations', '6_auto')
loader.build_graph()
self.assertEqual(num_nodes(), 1)
self.record_applied(recorder, 'migrations', '7_auto')
loader.build_graph()
self.assertEqual(num_nodes(), 0)
@override_settings(
MIGRATION_MODULES={'migrations': 'migrations.test_migrations'},
INSTALLED_APPS=['migrations'],
)
def test_check_consistent_history(self):
loader = MigrationLoader(connection=None)
loader.check_consistent_history(connection)
recorder = MigrationRecorder(connection)
self.record_applied(recorder, 'migrations', '0002_second')
msg = (
"Migration migrations.0002_second is applied before its dependency "
"migrations.0001_initial on database 'default'."
)
with self.assertRaisesMessage(InconsistentMigrationHistory, msg):
loader.check_consistent_history(connection)
@override_settings(
MIGRATION_MODULES={'migrations': 'migrations.test_migrations_squashed_extra'},
INSTALLED_APPS=['migrations'],
)
def test_check_consistent_history_squashed(self):
"""
MigrationLoader.check_consistent_history() should ignore unapplied
squashed migrations that have all of their `replaces` applied.
"""
loader = MigrationLoader(connection=None)
recorder = MigrationRecorder(connection)
self.record_applied(recorder, 'migrations', '0001_initial')
self.record_applied(recorder, 'migrations', '0002_second')
loader.check_consistent_history(connection)
self.record_applied(recorder, 'migrations', '0003_third')
loader.check_consistent_history(connection)
@override_settings(MIGRATION_MODULES={
"app1": "migrations.test_migrations_squashed_ref_squashed.app1",
"app2": "migrations.test_migrations_squashed_ref_squashed.app2",
})
@modify_settings(INSTALLED_APPS={'append': [
"migrations.test_migrations_squashed_ref_squashed.app1",
"migrations.test_migrations_squashed_ref_squashed.app2",
]})
def test_loading_squashed_ref_squashed(self):
"Tests loading a squashed migration with a new migration referencing it"
r"""
The sample migrations are structured like this:
app_1 1 --> 2 ---------------------*--> 3 *--> 4
\ / /
*-------------------*----/--> 2_sq_3 --*
\ / /
=============== \ ============= / == / ======================
app_2 *--> 1_sq_2 --* /
\ /
*--> 1 --> 2 --*
Where 2_sq_3 is a replacing migration for 2 and 3 in app_1,
as 1_sq_2 is a replacing migration for 1 and 2 in app_2.
"""
loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
self.addCleanup(recorder.flush)
# Load with nothing applied: both migrations squashed.
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
plan = plan - loader.applied_migrations.keys()
expected_plan = {
('app1', '1_auto'),
('app2', '1_squashed_2'),
('app1', '2_squashed_3'),
('app1', '4_auto'),
}
self.assertEqual(plan, expected_plan)
# Fake-apply a few from app1: unsquashes migration in app1.
self.record_applied(recorder, 'app1', '1_auto')
self.record_applied(recorder, 'app1', '2_auto')
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
plan = plan - loader.applied_migrations.keys()
expected_plan = {
('app2', '1_squashed_2'),
('app1', '3_auto'),
('app1', '4_auto'),
}
self.assertEqual(plan, expected_plan)
# Fake-apply one from app2: unsquashes migration in app2 too.
self.record_applied(recorder, 'app2', '1_auto')
loader.build_graph()
plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
plan = plan - loader.applied_migrations.keys()
expected_plan = {
('app2', '2_auto'),
('app1', '3_auto'),
('app1', '4_auto'),
}
self.assertEqual(plan, expected_plan)
@override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations_private'})
def test_ignore_files(self):
"""Files prefixed with underscore, tilde, or dot aren't loaded."""
loader = MigrationLoader(connection)
loader.load_disk()
migrations = [name for app, name in loader.disk_migrations if app == 'migrations']
self.assertEqual(migrations, ['0001_initial'])
@override_settings(
MIGRATION_MODULES={'migrations': 'migrations.test_migrations_namespace_package'},
)
def test_loading_namespace_package(self):
"""Migration directories without an __init__.py file are ignored."""
loader = MigrationLoader(connection)
loader.load_disk()
migrations = [name for app, name in loader.disk_migrations if app == 'migrations']
self.assertEqual(migrations, [])
@override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations'})
def test_loading_package_without__file__(self):
"""
To support frozen environments, MigrationLoader loads migrations from
regular packages with no __file__ attribute.
"""
test_module = import_module('migrations.test_migrations')
loader = MigrationLoader(connection)
        # Either __file__ == __spec__.origin, or the latter is None and the
        # former is undefined.
module_file = test_module.__file__
module_origin = test_module.__spec__.origin
module_has_location = test_module.__spec__.has_location
try:
del test_module.__file__
test_module.__spec__.origin = None
test_module.__spec__.has_location = False
loader.load_disk()
migrations = [
name
for app, name in loader.disk_migrations
if app == 'migrations'
]
self.assertCountEqual(migrations, ['0001_initial', '0002_second'])
finally:
test_module.__file__ = module_file
test_module.__spec__.origin = module_origin
test_module.__spec__.has_location = module_has_location
class PycLoaderTests(MigrationTestBase):
def test_valid(self):
"""
To support frozen environments, MigrationLoader loads .pyc migrations.
"""
with self.temporary_migration_module(module='migrations.test_migrations') as migration_dir:
# Compile .py files to .pyc files and delete .py files.
compileall.compile_dir(migration_dir, force=True, quiet=1, legacy=True)
for name in os.listdir(migration_dir):
if name.endswith('.py'):
os.remove(os.path.join(migration_dir, name))
loader = MigrationLoader(connection)
self.assertIn(('migrations', '0001_initial'), loader.disk_migrations)
def test_invalid(self):
"""
MigrationLoader reraises ImportErrors caused by "bad magic number" pyc
files with a more helpful message.
"""
with self.temporary_migration_module(module='migrations.test_migrations_bad_pyc') as migration_dir:
# The -tpl suffix is to avoid the pyc exclusion in MANIFEST.in.
os.rename(
os.path.join(migration_dir, '0001_initial.pyc-tpl'),
os.path.join(migration_dir, '0001_initial.pyc'),
)
msg = (
r"Couldn't import '\w+.migrations.0001_initial' as it appears "
"to be a stale .pyc file."
)
with self.assertRaisesRegex(ImportError, msg):
MigrationLoader(connection)
|
atul-bhouraskar/django
|
tests/migrations/test_loader.py
|
Python
|
bsd-3-clause
| 24,415
|
# -*- coding: utf-8 -*-
import datetime
from flask import current_app, flash, redirect, url_for, render_template
from purchasing.extensions import db
from purchasing.notifications import Notification
from purchasing.scout.forms import FeedbackForm, SearchForm
from purchasing.users.models import Department, User, Role
from purchasing.data.contracts import ContractBase
from purchasing.data.searches import SearchView
from flask_login import current_user
# build filter and filter form
FILTER_FIELDS = [
('company_name', 'Company Name', SearchView.tsv_company_name),
('line_item', 'Line Item', SearchView.tsv_line_item_description),
('contract_description', 'Contract Description', SearchView.tsv_contract_description),
('contract_detail', 'Contract Detail', SearchView.tsv_detail_value),
('financial_id', 'Controller Number', SearchView.financial_id),
]
def build_filter(req_args, fields, search_for, filter_form, _all):
'''Build the non-exclusive filter conditions for scout search
Along with building the filter for the search, build_filter also modifies
the passed in ``filter_form``, setting the *checked* property on the appropriate
form fields.
Arguments:
req_args: request.args from Flask.request
fields: list of three-tuples. Each three-tuple should contain the following:
* database column name
* desired output display name
* Model property that maps to the specific column name in question.
For build_filter, only the column name and Model property are used. For :func:`build_cases`, all are used.
search_for: string search term
        filter_form: the search filter form; fields matching an active filter get their *checked* property set to True
_all: Boolean -- true if we are searching across all fields, false otherwise
Returns:
List of clauses that can be used in `Sqlalchemy query filters`_
'''
clauses = []
for arg_name, _, filter_column in fields:
if _all or req_args.get(arg_name) == 'y':
if not _all:
filter_form[arg_name].checked = True
clauses.append(filter_column.match(
search_for,
postgresql_regconfig='english')
)
return clauses
def build_cases(req_args, fields, search_for, _all):
'''Build case statements for categorizing search matches in scout search
Arguments:
req_args: request.args from Flask.request
fields: list of three-tuples. Each three-tuple should contain the following:
* database column name
* desired output display name
* Model property that maps to the specific column name in question.
For build_cases, all three parts of the tuple are used
search_for: string search term
_all: Boolean -- true if we are searching across all fields, false otherwise
Returns:
List of clauses that can be used in a
`Sqlalchemy case expressions`_
'''
clauses = []
for arg_name, arg_description, filter_column in fields:
if _all or req_args.get(arg_name) == 'y':
clauses.append(
(filter_column.match(
search_for,
postgresql_regconfig='english'
) == True, arg_description)
)
return clauses
def feedback_handler(contract, search_for=None):
'''Allow user to send feedback on the data present in a specific contract
Arguments:
contract: :py:class:`~purchasing.data.contracts.ContractBase` object
search_for: search term or None.
Returns:
Redirects to or renders the appropriate feedback handling template
'''
form = FeedbackForm()
search_form = SearchForm()
if not current_user.is_anonymous():
form.sender.data = current_user.email
if form.validate_on_submit():
current_app.logger.info('WEXFEEDBACK - Feedback from {email} about {contract}'.format(
email=form.sender.data,
contract=contract.description
))
feedback_sent = Notification(
to_email=db.session.query(User.email).join(Role, User.role_id == Role.id).filter(
Role.name.in_(['admin', 'superadmin'])
).all(),
subject='Scout contract feedback - ID: {id}, Description: {description}'.format(
id=contract.id if contract.id else 'N/A',
description=contract.description
), html_template='scout/feedback_email.html',
contract=contract, sender=form.data.get('sender'),
body=form.data.get('body')
).send()
if feedback_sent:
flash('Thank you for your feedback!', 'alert-success')
else:
flash('Oh no! Something went wrong. We are looking into it.', 'alert-danger')
if contract.id:
return redirect(url_for('scout.contract', contract_id=contract.id))
return redirect(url_for('scout.explore'))
return render_template(
'scout/feedback.html',
search_form=search_form,
contract=contract,
choices=Department.choices(),
feedback_form=form,
search_for=search_for
)
def add_archived_filter(query, archived):
'''Adds exclusionary filters and archived contracts to contract searches.
All searches exclude invalid contract objects, such as ones that have no
financial id or no expiration date. Occasionally, the user will also want
    to search expired contracts. If the flag is passed, "archived" contracts (which
    are either expired or manually flagged as no longer usable) are shown as well.
Arguments:
query: Sqlalchemy contract search query
archived: Boolean to determine if archived contracts should be included in search results
Returns:
Original query with additional exclusionary filters and optionally archived contracts
'''
query = query.filter(
ContractBase.financial_id != None,
ContractBase.expiration_date != None,
SearchView.expiration_date != None
)
if not archived:
query = query.filter(
ContractBase.is_archived == False,
ContractBase.expiration_date >= datetime.date.today(),
)
return query
def find_contract_metadata(search_for, case_statements, filter_or, filter_and, archived=False):
'''
Takes a search term, case statements, and filter clauses and
returns out a list of search results objects to be rendered into
the template.
Arguments:
search_for: User's search term
case_statements: An iterable of `Sqlalchemy case expressions`_
filter_or: An iterable of `Sqlalchemy query filters`_, used for non-exclusionary filtering
filter_and: An iterable of `Sqlalchemy query filters`_, used for exclusionary filtering
archived: Boolean of whether or not to add the ``is_archived`` filter
Returns:
A Sqlalchemy resultset that contains the fields to render the
search results view.
'''
rank = db.func.max(db.func.full_text.ts_rank(
db.func.setweight(db.func.coalesce(SearchView.tsv_company_name, ''), 'A').concat(
db.func.setweight(db.func.coalesce(SearchView.tsv_contract_description, ''), 'A')
).concat(
db.func.setweight(db.func.coalesce(SearchView.tsv_detail_value, ''), 'D')
).concat(
db.func.setweight(db.func.coalesce(SearchView.tsv_line_item_description, ''), 'B')
), db.func.to_tsquery(search_for, postgresql_regconfig='english')
))
contracts = db.session.query(
db.distinct(SearchView.contract_id).label('contract_id'),
SearchView.company_id, SearchView.contract_description,
SearchView.financial_id, SearchView.expiration_date,
SearchView.company_name, db.case(case_statements).label('found_in'),
rank.label('rank')
).join(
ContractBase, ContractBase.id == SearchView.contract_id
).filter(
db.or_(
db.cast(SearchView.financial_id, db.String) == search_for,
*filter_or
),
*filter_and
).group_by(
SearchView.contract_id,
SearchView.company_id,
SearchView.contract_description,
SearchView.financial_id,
SearchView.expiration_date,
SearchView.company_name,
db.case(case_statements)
).order_by(
db.text('rank DESC')
)
contracts = add_archived_filter(contracts, archived)
return contracts.all()
def return_all_contracts(filter_and, archived=False):
'''Return all contracts in the event of an empty search
Arguments:
filter_and: An iterable of `Sqlalchemy query filters`_, used for exclusionary filtering
archived: Boolean of whether or not to add the ``is_archived`` filter
Returns:
A Sqlalchemy resultset that contains the fields to render the
search results view.
'''
contracts = db.session.query(
db.distinct(SearchView.contract_id).label('contract_id'), SearchView.company_id,
SearchView.contract_description, SearchView.financial_id,
SearchView.expiration_date, SearchView.company_name
).join(ContractBase, ContractBase.id == SearchView.contract_id).filter(
*filter_and
)
contracts = add_archived_filter(contracts, archived)
return contracts.all()
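# A minimal end-to-end sketch of how these helpers fit together (illustrative
# only -- it assumes a Flask request context and a form object exposing one
# field per entry in FILTER_FIELDS, which is whatever the calling view builds):
#
#     search_for = 'paper'
#     filter_form = SearchForm()  # hypothetical form with the FILTER_FIELDS names
#     clauses = build_filter(request.args, FILTER_FIELDS, search_for, filter_form, _all=False)
#     cases = build_cases(request.args, FILTER_FIELDS, search_for, _all=False)
#     if search_for:
#         results = find_contract_metadata(search_for, cases, clauses, [], archived=False)
#     else:
#         results = return_all_contracts([], archived=False)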
|
codeforamerica/pittsburgh-purchasing-suite
|
purchasing/scout/util.py
|
Python
|
bsd-3-clause
| 9,422
|
from django.core import mail
from app.models import Item
# It doesn't matter in which order the _again methods are run; we just need
# to check that the environment remains constant.
# This would be possible with some of the testdir magic, but this is a nice
# lazy way to do it.
def test_mail():
assert len(mail.outbox) == 0
mail.send_mail('subject', 'body', 'from@example.com', ['to@example.com'])
assert len(mail.outbox) == 1
m = mail.outbox[0]
assert m.subject == 'subject'
assert m.body == 'body'
assert m.from_email == 'from@example.com'
assert list(m.to) == ['to@example.com']
def test_mail_again():
test_mail()
def test_database_rollback():
assert Item.objects.count() == 0
Item.objects.create(name='blah')
assert Item.objects.count() == 1
def test_database_rollback_again():
test_database_rollback()
|
bfirsh/pytest_django
|
tests/test_environment.py
|
Python
|
bsd-3-clause
| 850
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model(['None'], ['MovingMedian'], ['BestCycle'], ['SVR'])
|
antoinecarme/pyaf
|
tests/model_control/detailed/transf_None/model_control_one_enabled_None_MovingMedian_BestCycle_SVR.py
|
Python
|
bsd-3-clause
| 150
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
with open('requirements.txt', 'r') as fd:
requirements = list(filter(
lambda r: not r.strip().startswith('#'), fd.readlines()))
test_requirements = requirements
setup(
name='vulyk_declaration',
version='0.1.0',
description="Vulyk declarations plugin",
long_description=readme + '\n\n' + history,
author="Volodymyr Hotsyk",
author_email='gotsyk@gmail.com',
url='https://github.com/hotsyk/vulyk_declaration',
packages=[
'vulyk_declaration',
'vulyk_declaration.fixtures',
'vulyk_declaration.models',
'vulyk_declaration.static'
],
package_dir={'vulyk_declaration':
'vulyk_declaration'},
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
keywords='vulyk_declaration',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
test_suite='tests',
scripts=['bin/export_to_xlsx.py'],
tests_require=test_requirements
)
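# A minimal sketch of the standard setuptools workflow for this package (not
# specific to this repo): an editable install followed by the declared test suite.
#
#     pip install -e .
#     python setup.py test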
|
hotsyk/vulyk-declaration
|
setup.py
|
Python
|
bsd-3-clause
| 1,647
|
try:
import unittest2 as unittest
except ImportError:
import unittest
import numpy as np
from math3 import quaternion
class test_quaternion(unittest.TestCase):
# many of these values are taken from searches on wolfram alpha
def test_import(self):
import math3
math3.quaternion
from math3 import quaternion
def test_create(self):
result = quaternion.create()
np.testing.assert_almost_equal(result, [0., 0., 0., 1.], decimal=5)
self.assertTrue(result.dtype == np.float)
def test_create_parameters(self):
result = quaternion.create(1.0, 2.0, 3.0, 4.0)
np.testing.assert_almost_equal(result, [1.0, 2.0, 3.0, 4.0], decimal=5)
self.assertTrue(result.dtype == np.float)
def test_create_from_x_rotation(self):
# 180 degree turn around X axis
q = quaternion.create_from_x_rotation(np.pi)
self.assertTrue(np.allclose(q, [1., 0., 0., 0.]))
# 90 degree rotation around X axis
q = quaternion.create_from_x_rotation(np.pi / 2.)
self.assertTrue(np.allclose(q, [np.sqrt(0.5), 0., 0., np.sqrt(0.5)]))
# -90 degree rotation around X axis
q = quaternion.create_from_x_rotation(-np.pi / 2.)
self.assertTrue(np.allclose(q, [-np.sqrt(0.5), 0., 0., np.sqrt(0.5)]))
def test_create_from_y_rotation(self):
# 180 degree turn around Y axis
q = quaternion.create_from_y_rotation(np.pi)
self.assertTrue(np.allclose(q, [0., 1., 0., 0.]))
# 90 degree rotation around Y axis
q = quaternion.create_from_y_rotation(np.pi / 2.)
self.assertTrue(np.allclose(q, [0., np.sqrt(0.5), 0., np.sqrt(0.5)]))
# -90 degree rotation around Y axis
        q = quaternion.create_from_y_rotation(-np.pi / 2.)
        self.assertTrue(np.allclose(q, [0., -np.sqrt(0.5), 0., np.sqrt(0.5)]))
def test_create_from_z_rotation(self):
# 180 degree turn around Z axis
q = quaternion.create_from_z_rotation(np.pi)
self.assertTrue(np.allclose(q, [0., 0., 1., 0.]))
# 90 degree rotation around Z axis
q = quaternion.create_from_z_rotation(np.pi / 2.)
self.assertTrue(np.allclose(q, [0., 0., np.sqrt(0.5), np.sqrt(0.5)]))
# -90 degree rotation around Z axis
        q = quaternion.create_from_z_rotation(-np.pi / 2.)
        self.assertTrue(np.allclose(q, [0., 0., -np.sqrt(0.5), np.sqrt(0.5)]))
def test_create_from_axis_rotation(self):
# wolfram alpha can be awesome sometimes
result = quaternion.create_from_axis_rotation([0.57735, 0.57735, 0.57735], np.pi)
np.testing.assert_almost_equal(result, [5.77350000e-01, 5.77350000e-01, 5.77350000e-01, 6.12323400e-17], decimal=3)
self.assertTrue(result.dtype == np.float)
def test_create_from_axis_rotation_non_normalised(self):
result = quaternion.create_from_axis_rotation([1., 1., 1.], np.pi)
np.testing.assert_almost_equal(result, [5.77350000e-01, 5.77350000e-01, 5.77350000e-01, 6.12323400e-17], decimal=3)
self.assertTrue(result.dtype == np.float)
def test_create_from_matrix_unit(self):
result = quaternion.create_from_matrix(np.eye(3))
np.testing.assert_almost_equal(result, [0., 0., 0., 1.], decimal=5)
self.assertTrue(result.dtype == np.float)
def test_create_from_matrix_x(self):
result = quaternion.create_from_matrix([
[1., 0., 0.],
[0., -1., 0.],
[0., 0., -1.],
])
np.testing.assert_almost_equal(result, [1., 0., 0., 0.], decimal=5)
self.assertTrue(result.dtype == np.float)
def test_create_from_matrix_y(self):
result = quaternion.create_from_matrix([
[-1., 0., 0.],
[0., 1., 0.],
[0., 0., -1.],
])
np.testing.assert_almost_equal(result, [0., 1., 0., 0.], decimal=5)
self.assertTrue(result.dtype == np.float)
def test_create_from_matrix_z(self):
result = quaternion.create_from_matrix([
[-1., 0., 0.],
[0., -1., 0.],
[0., 0., 1.],
])
np.testing.assert_almost_equal(result, [0., 0., 1., 0.], decimal=5)
self.assertTrue(result.dtype == np.float)
@unittest.skip('Not implemented')
def test_create_from_eulers(self):
pass
@unittest.skip('Not implemented')
def test_create_from_inverse_of_eulers(self):
pass
def test_cross(self):
q1 = quaternion.create_from_x_rotation(np.pi / 2.0)
q2 = quaternion.create_from_x_rotation(-np.pi / 2.0)
result = quaternion.cross(q1, q2)
np.testing.assert_almost_equal(result, quaternion.create(), decimal=5)
def test_is_zero_length(self):
result = quaternion.is_zero_length([1., 0., 0., 0.])
self.assertFalse(result)
def test_is_zero_length_zero(self):
result = quaternion.is_zero_length([0., 0., 0., 0.])
self.assertTrue(result)
def test_is_non_zero_length(self):
result = quaternion.is_non_zero_length([1., 0., 0., 0.])
self.assertTrue(result)
def test_is_non_zero_length_zero(self):
result = quaternion.is_non_zero_length([0., 0., 0., 0.])
self.assertFalse(result)
def test_squared_length_identity(self):
result = quaternion.squared_length([0., 0., 0., 1.])
np.testing.assert_almost_equal(result, 1., decimal=5)
def test_squared_length(self):
result = quaternion.squared_length([1., 1., 1., 1.])
np.testing.assert_almost_equal(result, 4., decimal=5)
def test_squared_length_batch(self):
result = quaternion.squared_length([
[0., 0., 0., 1.],
[1., 1., 1., 1.],
])
np.testing.assert_almost_equal(result, [1., 4.], decimal=5)
def test_length_identity(self):
result = quaternion.length([0., 0., 0., 1.])
np.testing.assert_almost_equal(result, 1., decimal=5)
def test_length(self):
result = quaternion.length([1., 1., 1., 1.])
np.testing.assert_almost_equal(result, 2., decimal=5)
def test_length_batch(self):
result = quaternion.length([
[0., 0., 0., 1.],
[1., 1., 1., 1.],
])
np.testing.assert_almost_equal(result, [1., 2.], decimal=5)
def test_normalise_identity(self):
# normalise an identity quaternion
result = quaternion.normalise([0., 0., 0., 1.])
np.testing.assert_almost_equal(result, [0., 0., 0., 1.], decimal=5)
def test_normalise_non_identity(self):
# normalise an identity quaternion
result = quaternion.normalise([1., 2., 3., 4.])
np.testing.assert_almost_equal(result, [1. / np.sqrt(30.), np.sqrt(2. / 15.), np.sqrt(3. / 10.), 2. * np.sqrt(2. / 15.)], decimal=5)
def test_normalise_batch(self):
# normalise an identity quaternion
result = quaternion.normalise([
[0., 0., 0., 1.],
[1., 2., 3., 4.],
])
expected = [
[0., 0., 0., 1.],
[1. / np.sqrt(30.), np.sqrt(2. / 15.), np.sqrt(3. / 10.), 2. * np.sqrt(2. / 15.)],
]
np.testing.assert_almost_equal(result, expected, decimal=5)
def test_rotation_angle(self):
result = quaternion.rotation_angle([5.77350000e-01, 5.77350000e-01, 5.77350000e-01, 6.12323400e-17])
np.testing.assert_almost_equal(result, np.pi, decimal=5)
def test_rotation_axis(self):
result = quaternion.rotation_axis([5.77350000e-01, 5.77350000e-01, 5.77350000e-01, 6.12323400e-17])
np.testing.assert_almost_equal(result, [0.57735, 0.57735, 0.57735], decimal=5)
def test_dot_adjacent(self):
result = quaternion.dot([1., 0., 0., 0.], [0., 1., 0., 0.])
np.testing.assert_almost_equal(result, 0.0, decimal=5)
def test_dot_parallel(self):
result = quaternion.dot([0., 1., 0., 0.], [0., 1., 0., 0.])
np.testing.assert_almost_equal(result, 1.0, decimal=5)
def test_dot_angle(self):
result = quaternion.dot([.2, .2, 0., 0.], [2., -.2, 0., 0.])
np.testing.assert_almost_equal(result, 0.36, decimal=5)
def test_dot_batch(self):
result = quaternion.dot([
[1., 0., 0., 0.],
[0., 1., 0., 0.],
[.2, .2, 0., 0.]
], [
[0., 1., 0., 0.],
[0., 1., 0., 0.],
[2., -.2, 0., 0.]
])
expected = [0., 1., 0.36]
np.testing.assert_almost_equal(result, expected, decimal=5)
def test_conjugate(self):
#result = quaternion.conjugate([5.77350000e-01, 5.77350000e-01, 5.77350000e-01, 6.12323400e-17])
result = quaternion.conjugate([0., 0., 0., 1.])
np.testing.assert_almost_equal(result, [0., 0., 0., 1.], decimal=5)
def test_conjugate_rotation(self):
result = quaternion.conjugate([5.77350000e-01, 5.77350000e-01, 5.77350000e-01, 6.12323400e-17])
np.testing.assert_almost_equal(result, [-0.57735, -0.57735, -0.57735, 6.12323e-17], decimal=5)
@unittest.skip('Not implemented')
def test_power(self):
pass
def test_inverse(self):
result = quaternion.inverse([0., 0., 0., 1.])
np.testing.assert_almost_equal(result, [0., 0., 0., 1.], decimal=5)
def test_inverse_rotation(self):
result = quaternion.inverse([5.77350000e-01, 5.77350000e-01, 5.77350000e-01, 6.12323400e-17])
np.testing.assert_almost_equal(result, [-0.577351, -0.577351, -0.577351, 6.12324e-17], decimal=5)
def test_inverse_non_unit(self):
q = [1, 2, 3, 4]
result = quaternion.inverse(q)
expected = quaternion.conjugate(q) / quaternion.length(q)
np.testing.assert_almost_equal(result, expected, decimal=5)
def test_negate_unit(self):
result = quaternion.negate([0., 0., 0., 1.])
np.testing.assert_almost_equal(result, [0., 0., 0., -1.], decimal=5)
def test_negate(self):
result = quaternion.negate([1., 2., 3., 4.])
np.testing.assert_almost_equal(result, [-1., -2., -3., -4.], decimal=5)
def test_apply_to_vector_unit_x(self):
result = quaternion.apply_to_vector([0., 0., 0., 1.], [1., 0., 0.])
np.testing.assert_almost_equal(result, [1., 0., 0.], decimal=5)
def test_apply_to_vector_x(self):
# 180 degree turn around X axis
q = quaternion.create_from_x_rotation(np.pi)
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [1., 0., 0.]), [1., 0., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 1., 0.]), [0.,-1., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 0., 1.]), [0., 0.,-1.]))
# 90 degree rotation around X axis
q = quaternion.create_from_x_rotation(np.pi / 2.)
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [1., 0., 0.]), [1., 0., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 1., 0.]), [0., 0., 1.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 0., 1.]), [0.,-1., 0.]))
# -90 degree rotation around X axis
q = quaternion.create_from_x_rotation(-np.pi / 2.)
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [1., 0., 0.]), [1., 0., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 1., 0.]), [0., 0.,-1.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 0., 1.]), [0., 1., 0.]))
def test_apply_to_vector_y(self):
# 180 degree turn around Y axis
q = quaternion.create_from_y_rotation(np.pi)
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [1., 0., 0.]), [-1., 0., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 1., 0.]), [0., 1., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 0., 1.]), [0., 0.,-1.]))
# 90 degree rotation around Y axis
q = quaternion.create_from_y_rotation(np.pi / 2.)
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [1., 0., 0.]), [0., 0.,-1.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 1., 0.]), [0., 1., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 0., 1.]), [1., 0., 0.]))
# -90 degree rotation around Y axis
q = quaternion.create_from_y_rotation(-np.pi / 2.)
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [1., 0., 0.]), [0., 0., 1.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 1., 0.]), [0., 1., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 0., 1.]), [-1., 0., 0.]))
def test_apply_to_vector_z(self):
# 180 degree turn around Z axis
q = quaternion.create_from_z_rotation(np.pi)
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [1., 0., 0.]), [-1., 0., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 1., 0.]), [0.,-1., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 0., 1.]), [0., 0., 1.]))
# 90 degree rotation around Z axis
q = quaternion.create_from_z_rotation(np.pi / 2.)
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [1., 0., 0.]), [0., 1., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 1., 0.]), [-1., 0., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 0., 1.]), [0., 0., 1.]))
# -90 degree rotation around Z axis
q = quaternion.create_from_z_rotation(-np.pi / 2.)
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [1., 0., 0.]), [0.,-1., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 1., 0.]), [1., 0., 0.]))
self.assertTrue(np.allclose(quaternion.apply_to_vector(q, [0., 0., 1.]), [0., 0., 1.]))
def test_identity(self):
# https://en.wikipedia.org/wiki/Quaternion
i = quaternion.create(1., 0., 0., 0.)
j = quaternion.create(0., 1., 0., 0.)
k = quaternion.create(0., 0., 1., 0.)
one = quaternion.create(0., 0., 0., 1.)
# i * 1 = i
# j * 1 = j
# k * 1 = k
# 1 * i = i
# 1 * j = j
# 1 * k = k
i1 = quaternion.cross(i, one)
j1 = quaternion.cross(j, one)
k1 = quaternion.cross(k, one)
_1i = quaternion.cross(one, i)
_1j = quaternion.cross(one, j)
_1k = quaternion.cross(one, k)
        self.assertTrue(np.allclose(i1, _1i) and np.allclose(_1i, i))
        self.assertTrue(np.allclose(j1, _1j) and np.allclose(_1j, j))
        self.assertTrue(np.allclose(k1, _1k) and np.allclose(_1k, k))
# result = -1
ii = quaternion.cross(i, i)
kk = quaternion.cross(k, k)
jj = quaternion.cross(j, j)
ijk = quaternion.cross(quaternion.cross(i, j), k)
self.assertTrue(np.allclose(ii, -one))
self.assertTrue(np.allclose(jj, -one))
self.assertTrue(np.allclose(kk, -one))
self.assertTrue(np.allclose(ijk, -one))
# ij = k
# ji = -k
# jk = i
# kj = -i
# ki = j
# ik = -j
ij = quaternion.cross(i, j)
ji = quaternion.cross(j, i)
jk = quaternion.cross(j, k)
kj = quaternion.cross(k, j)
ki = quaternion.cross(k, i)
ik = quaternion.cross(i, k)
self.assertTrue(np.allclose(ij, k))
self.assertTrue(np.allclose(ji, -k))
self.assertTrue(np.allclose(jk, i))
self.assertTrue(np.allclose(kj, -i))
self.assertTrue(np.allclose(ki, j))
self.assertTrue(np.allclose(ik, -j))
# -k = ijkk = ij(k^2) = ij(-1)
ijkk = quaternion.cross(quaternion.cross(ij, k), k)
ijk2 = quaternion.cross(ij, quaternion.cross(k, k))
ij_m1 = quaternion.cross(ij, -one)
self.assertTrue(np.allclose(ijkk, ijk2))
self.assertTrue(np.allclose(ijk2, ij_m1))
if __name__ == '__main__':
unittest.main()
|
PhloxAR/math3
|
math3/tests/test_quaternion.py
|
Python
|
bsd-3-clause
| 15,824
|
from login.permissions import cache_clear
def mqtt_cache_clear():
# call cache_clear locally
cache_clear()
# and signal through mqtt
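    # (the import below is kept local to the function, presumably to avoid a
    # circular import between the mqtt and login modules at load time)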
from mqtt.publish import SingletonPublishClient
SingletonPublishClient().publish_message('cache_clear')
|
EMSTrack/WebServerAndClient
|
mqtt/cache_clear.py
|
Python
|
bsd-3-clause
| 261
|
##########################################################################
#
# Copyright (c) 2012-2014, John Haddon. All rights reserved.
# Copyright (c) 2013-2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import GafferScene
from _GafferSceneTest import *
from SceneTestCase import SceneTestCase
from ScenePlugTest import ScenePlugTest
from AttributeCacheTest import AttributeCacheTest
from GroupTest import GroupTest
from SceneTimeWarpTest import SceneTimeWarpTest
from SceneProceduralTest import SceneProceduralTest
from CubeTest import CubeTest
from PlaneTest import PlaneTest
from SphereTest import SphereTest
from InstancerTest import InstancerTest
from ObjectToSceneTest import ObjectToSceneTest
from CameraTest import CameraTest
from DisplaysTest import DisplaysTest
from CustomOptionsTest import CustomOptionsTest
from SceneNodeTest import SceneNodeTest
from PathMatcherTest import PathMatcherTest
from PathFilterTest import PathFilterTest
from ShaderAssignmentTest import ShaderAssignmentTest
from CustomAttributesTest import CustomAttributesTest
from AlembicSourceTest import AlembicSourceTest
from DeletePrimitiveVariablesTest import DeletePrimitiveVariablesTest
from SeedsTest import SeedsTest
from SceneContextVariablesTest import SceneContextVariablesTest
from SubTreeTest import SubTreeTest
from OpenGLAttributesTest import OpenGLAttributesTest
from StandardOptionsTest import StandardOptionsTest
from SceneReadWriteTest import SceneReadWriteTest
from ScenePathTest import ScenePathTest
from PathMatcherDataTest import PathMatcherDataTest
from LightTest import LightTest
from TestRender import TestRender
from RenderTest import RenderTest
from OpenGLShaderTest import OpenGLShaderTest
from OpenGLRenderTest import OpenGLRenderTest
from TransformTest import TransformTest
from AimConstraintTest import AimConstraintTest
from PruneTest import PruneTest
from ShaderTest import ShaderTest
from TextTest import TextTest
from MapProjectionTest import MapProjectionTest
from MapOffsetTest import MapOffsetTest
from PointConstraintTest import PointConstraintTest
from SceneReaderTest import SceneReaderTest
from IsolateTest import IsolateTest
from DeleteAttributesTest import DeleteAttributesTest
from UnionFilterTest import UnionFilterTest
from SceneSwitchTest import SceneSwitchTest
from ShaderSwitchTest import ShaderSwitchTest
from ParentConstraintTest import ParentConstraintTest
from ParentTest import ParentTest
from StandardAttributesTest import StandardAttributesTest
from PrimitiveVariablesTest import PrimitiveVariablesTest
from DuplicateTest import DuplicateTest
from ModuleTest import ModuleTest
from GridTest import GridTest
from SetTest import SetTest
from FreezeTransformTest import FreezeTransformTest
if __name__ == "__main__":
import unittest
unittest.main()
|
davidsminor/gaffer
|
python/GafferSceneTest/__init__.py
|
Python
|
bsd-3-clause
| 4,468
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a ``Theme`` class for specifying new default values for Bokeh
:class:`~bokeh.model.Model` properties.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# External imports
import yaml
# Bokeh imports
from ..core.has_props import HasProps
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# whenever we cache that there's nothing themed for a class, we
# use this same dict instance, so we don't have a zillion empty
# dicts in our caches.
_empty_dict = dict()
__all__ = (
'Theme',
)
#-----------------------------------------------------------------------------
# General API
#----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
# Note: in DirectoryHandler and in general we assume this is an
# immutable object, because we share it among sessions and we
# don't monitor it for changes. If you make this mutable by adding
# any kind of setter, you could have to refactor some other code.
class Theme(object):
''' Provide new default values for Bokeh models.
Bokeh Model properties all have some built-in default value. If a property
has not been explicitly set (e.g. ``m.foo = 10``) then accessing the
    property will return the default value. It may be useful for users to be
able to specify a different set of default values than the built-in
default. The ``Theme`` class allows collections of custom default values
to be easily applied to Bokeh documents.
The ``Theme`` class can be constructed either from a YAML file or from a
JSON dict (but not both). Examples of both formats are shown below.
The plotting API's defaults override some theme properties. Namely:
`fill_alpha`, `fill_color`, `line_alpha`, `line_color`, `text_alpha` and
`text_color`. Those properties should therefore be set explicitly when
using the plotting API.
Args:
filename (str, optional) : path to a YAML theme file
json (str, optional) : a JSON dictionary specifying theme values
Raises:
ValueError
            If neither ``filename`` nor ``json`` is supplied.
Examples:
Themes are specified by providing a top-level key ``attrs`` which
has blocks for Model types to be themed. Each block has keys and
values that specify the new property defaults for that type.
Take note of the fact that YAML interprets the value `None` as
a string, which is not usually what you want. To give `None` as a
value in YAML, use `!!null`. To give 'None' as a value in json,
use `null`.
Here is an example theme in YAML format that sets various visual
properties for all figures, grids, and titles:
.. code-block:: yaml
attrs:
Figure:
background_fill_color: '#2F2F2F'
border_fill_color: '#2F2F2F'
outline_line_color: '#444444'
Axis:
axis_line_color: !!null
Grid:
grid_line_dash: [6, 4]
grid_line_alpha: .3
Title:
text_color: "white"
Here is the same theme, in JSON format:
.. code-block:: python
{
'attrs' : {
'Figure' : {
'background_fill_color': '#2F2F2F',
'border_fill_color': '#2F2F2F',
'outline_line_color': '#444444',
},
'Axis': {
'axis_line_color': null,
},
'Grid': {
                        'grid_line_dash': [6, 4],
'grid_line_alpha': .3,
},
'Title': {
'text_color': 'white'
}
}
'''
def __init__(self, filename=None, json=None):
if (filename is not None) and (json is not None):
raise ValueError("Theme should be constructed from a file or from json not both")
if filename is not None:
with open(filename) as f:
json = yaml.safe_load(f)
# empty docs result in None rather than {}, fix it.
if json is None:
json = {}
if json is None:
raise ValueError("Theme requires json or a filename to construct")
self._json = json
if 'attrs' not in self._json:
self._json['attrs'] = {}
if not isinstance(self._json['attrs'], dict):
raise ValueError("theme problem: attrs field should be a dictionary of class names, not %r" % (self._json['attrs']))
for key, value in self._json['attrs'].items():
if not isinstance(value, dict):
raise ValueError("theme problem: attrs.%s should be a dictionary of properties, not %r" % (key, value))
self._line_defaults = self._json.get('line_defaults', _empty_dict)
self._fill_defaults = self._json.get('fill_defaults', _empty_dict)
self._text_defaults = self._json.get('text_defaults', _empty_dict)
# mapping from class name to the full set of properties
# (including those merged in from base classes) for that
# class.
self._by_class_cache = {}
def _add_glyph_defaults(self, cls, props):
from ..models.glyphs import Glyph
if issubclass(cls, Glyph):
if hasattr(cls, "line_alpha"):
props.update(self._line_defaults)
if hasattr(cls, "fill_alpha"):
props.update(self._fill_defaults)
if hasattr(cls, "text_alpha"):
props.update(self._text_defaults)
def _for_class(self, cls):
if cls.__name__ not in self._by_class_cache:
attrs = self._json['attrs']
combined = {}
# we go in reverse order so that subclass props override base class
for base in cls.__mro__[-2::-1]:
if not issubclass(base, HasProps):
continue
self._add_glyph_defaults(base, combined)
combined.update(attrs.get(base.__name__, _empty_dict))
if len(combined) == 0:
combined = _empty_dict
self._by_class_cache[cls.__name__] = combined
return self._by_class_cache[cls.__name__]
def apply_to_model(self, model):
''' Apply this theme to a model.
.. warning::
Typically, don't call this method directly. Instead, set the theme
on the :class:`~bokeh.document.Document` the model is a part of.
'''
model.apply_theme(self._for_class(model.__class__))
# a little paranoia because it would be Bad(tm) to mess
# this up... would be nicer if python had a way to freeze
# the dict.
if len(_empty_dict) > 0:
raise RuntimeError("Somebody put stuff in _empty_dict")
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#----------------------------------------------------------------------------
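# A minimal usage sketch (not part of this module): a Theme built from an
# in-memory dict and attached to the current document, so that subsequently
# added models pick up the themed defaults.
#
#     from bokeh.io import curdoc
#     from bokeh.themes import Theme
#
#     curdoc().theme = Theme(json={'attrs': {'Grid': {'grid_line_alpha': 0.3}}})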
|
ericmjl/bokeh
|
bokeh/themes/theme.py
|
Python
|
bsd-3-clause
| 8,240
|
import numpy as np
from scipy.sparse import csc_matrix
from scipy.optimize._trustregion_constr.qp_subproblem \
import (eqp_kktfact,
projected_cg,
box_intersections,
sphere_intersections,
box_sphere_intersections,
modified_dogleg)
from scipy.optimize._trustregion_constr.projections \
import projections
from numpy.testing import (TestCase, assert_array_almost_equal,
assert_array_equal, assert_array_less,
assert_equal, assert_,
run_module_suite, assert_allclose, assert_warns,
dec)
import pytest
class TestEQPDirectFactorization(TestCase):
# From Example 16.2 Nocedal/Wright "Numerical
# Optimization" p.452.
def test_nocedal_example(self):
H = csc_matrix([[6, 2, 1],
[2, 5, 2],
[1, 2, 4]])
A = csc_matrix([[1, 0, 1],
[0, 1, 1]])
c = np.array([-8, -3, -3])
b = -np.array([3, 0])
x, lagrange_multipliers = eqp_kktfact(H, c, A, b)
assert_array_almost_equal(x, [2, -1, 1])
assert_array_almost_equal(lagrange_multipliers, [3, -2])
class TestSphericalBoundariesIntersections(TestCase):
def test_2d_sphere_constraints(self):
        # Interior initial point
ta, tb, intersect = sphere_intersections([0, 0],
[1, 0], 0.5)
assert_array_almost_equal([ta, tb], [0, 0.5])
assert_equal(intersect, True)
# No intersection between line and circle
ta, tb, intersect = sphere_intersections([2, 0],
[0, 1], 1)
assert_equal(intersect, False)
# Outside initial point pointing toward outside the circle
ta, tb, intersect = sphere_intersections([2, 0],
[1, 0], 1)
assert_equal(intersect, False)
# Outside initial point pointing toward inside the circle
ta, tb, intersect = sphere_intersections([2, 0],
[-1, 0], 1.5)
assert_array_almost_equal([ta, tb], [0.5, 1])
assert_equal(intersect, True)
# Initial point on the boundary
ta, tb, intersect = sphere_intersections([2, 0],
[1, 0], 2)
assert_array_almost_equal([ta, tb], [0, 0])
assert_equal(intersect, True)
def test_2d_sphere_constraints_line_intersections(self):
# Interior initial point
ta, tb, intersect = sphere_intersections([0, 0],
[1, 0], 0.5,
entire_line=True)
assert_array_almost_equal([ta, tb], [-0.5, 0.5])
assert_equal(intersect, True)
# No intersection between line and circle
ta, tb, intersect = sphere_intersections([2, 0],
[0, 1], 1,
entire_line=True)
assert_equal(intersect, False)
# Outside initial point pointing toward outside the circle
ta, tb, intersect = sphere_intersections([2, 0],
[1, 0], 1,
entire_line=True)
assert_array_almost_equal([ta, tb], [-3, -1])
assert_equal(intersect, True)
# Outside initial point pointing toward inside the circle
ta, tb, intersect = sphere_intersections([2, 0],
[-1, 0], 1.5,
entire_line=True)
assert_array_almost_equal([ta, tb], [0.5, 3.5])
assert_equal(intersect, True)
# Initial point on the boundary
ta, tb, intersect = sphere_intersections([2, 0],
[1, 0], 2,
entire_line=True)
assert_array_almost_equal([ta, tb], [-4, 0])
assert_equal(intersect, True)
class TestBoxBoundariesIntersections(TestCase):
def test_2d_box_constraints(self):
# Box constraint in the direction of vector d
ta, tb, intersect = box_intersections([2, 0], [0, 2],
[1, 1], [3, 3])
assert_array_almost_equal([ta, tb], [0.5, 1])
assert_equal(intersect, True)
# Negative direction
ta, tb, intersect = box_intersections([2, 0], [0, 2],
[1, -3], [3, -1])
assert_equal(intersect, False)
# Some constraints are absent (set to +/- inf)
ta, tb, intersect = box_intersections([2, 0], [0, 2],
[-np.inf, 1],
[np.inf, np.inf])
assert_array_almost_equal([ta, tb], [0.5, 1])
assert_equal(intersect, True)
# Intersect on the face of the box
ta, tb, intersect = box_intersections([1, 0], [0, 1],
[1, 1], [3, 3])
assert_array_almost_equal([ta, tb], [1, 1])
assert_equal(intersect, True)
# Interior initial point
ta, tb, intersect = box_intersections([0, 0], [4, 4],
[-2, -3], [3, 2])
assert_array_almost_equal([ta, tb], [0, 0.5])
assert_equal(intersect, True)
# No intersection between line and box constraints
ta, tb, intersect = box_intersections([2, 0], [0, 2],
[-3, -3], [-1, -1])
assert_equal(intersect, False)
ta, tb, intersect = box_intersections([2, 0], [0, 2],
[-3, 3], [-1, 1])
assert_equal(intersect, False)
ta, tb, intersect = box_intersections([2, 0], [0, 2],
[-3, -np.inf],
[-1, np.inf])
assert_equal(intersect, False)
ta, tb, intersect = box_intersections([0, 0], [1, 100],
[1, 1], [3, 3])
assert_equal(intersect, False)
ta, tb, intersect = box_intersections([0.99, 0], [0, 2],
[1, 1], [3, 3])
assert_equal(intersect, False)
# Initial point on the boundary
ta, tb, intersect = box_intersections([2, 2], [0, 1],
[-2, -2], [2, 2])
assert_array_almost_equal([ta, tb], [0, 0])
assert_equal(intersect, True)
def test_2d_box_constraints_entire_line(self):
# Box constraint in the direction of vector d
ta, tb, intersect = box_intersections([2, 0], [0, 2],
[1, 1], [3, 3],
entire_line=True)
assert_array_almost_equal([ta, tb], [0.5, 1.5])
assert_equal(intersect, True)
# Negative direction
ta, tb, intersect = box_intersections([2, 0], [0, 2],
[1, -3], [3, -1],
entire_line=True)
assert_array_almost_equal([ta, tb], [-1.5, -0.5])
assert_equal(intersect, True)
# Some constraints are absent (set to +/- inf)
ta, tb, intersect = box_intersections([2, 0], [0, 2],
[-np.inf, 1],
[np.inf, np.inf],
entire_line=True)
assert_array_almost_equal([ta, tb], [0.5, np.inf])
assert_equal(intersect, True)
# Intersect on the face of the box
ta, tb, intersect = box_intersections([1, 0], [0, 1],
[1, 1], [3, 3],
entire_line=True)
assert_array_almost_equal([ta, tb], [1, 3])
assert_equal(intersect, True)
        # Interior initial point
ta, tb, intersect = box_intersections([0, 0], [4, 4],
[-2, -3], [3, 2],
entire_line=True)
assert_array_almost_equal([ta, tb], [-0.5, 0.5])
assert_equal(intersect, True)
# No intersection between line and box constraints
ta, tb, intersect = box_intersections([2, 0], [0, 2],
[-3, -3], [-1, -1],
entire_line=True)
assert_equal(intersect, False)
ta, tb, intersect = box_intersections([2, 0], [0, 2],
[-3, 3], [-1, 1],
entire_line=True)
assert_equal(intersect, False)
ta, tb, intersect = box_intersections([2, 0], [0, 2],
[-3, -np.inf],
[-1, np.inf],
entire_line=True)
assert_equal(intersect, False)
ta, tb, intersect = box_intersections([0, 0], [1, 100],
[1, 1], [3, 3],
entire_line=True)
assert_equal(intersect, False)
ta, tb, intersect = box_intersections([0.99, 0], [0, 2],
[1, 1], [3, 3],
entire_line=True)
assert_equal(intersect, False)
# Initial point on the boundary
ta, tb, intersect = box_intersections([2, 2], [0, 1],
[-2, -2], [2, 2],
entire_line=True)
assert_array_almost_equal([ta, tb], [-4, 0])
assert_equal(intersect, True)
def test_3d_box_constraints(self):
# Simple case
ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, 1],
[1, 1, 1], [3, 3, 3])
assert_array_almost_equal([ta, tb], [1, 1])
assert_equal(intersect, True)
# Negative direction
ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, -1],
[1, 1, 1], [3, 3, 3])
assert_equal(intersect, False)
# Interior point
ta, tb, intersect = box_intersections([2, 2, 2], [0, -1, 1],
[1, 1, 1], [3, 3, 3])
assert_array_almost_equal([ta, tb], [0, 1])
assert_equal(intersect, True)
def test_3d_box_constraints_entire_line(self):
# Simple case
ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, 1],
[1, 1, 1], [3, 3, 3],
entire_line=True)
assert_array_almost_equal([ta, tb], [1, 3])
assert_equal(intersect, True)
# Negative direction
ta, tb, intersect = box_intersections([1, 1, 0], [0, 0, -1],
[1, 1, 1], [3, 3, 3],
entire_line=True)
assert_array_almost_equal([ta, tb], [-3, -1])
assert_equal(intersect, True)
# Interior point
ta, tb, intersect = box_intersections([2, 2, 2], [0, -1, 1],
[1, 1, 1], [3, 3, 3],
entire_line=True)
assert_array_almost_equal([ta, tb], [-1, 1])
assert_equal(intersect, True)
class TestBoxSphereBoundariesIntersections(TestCase):
def test_2d_box_constraints(self):
# Both constraints are active
ta, tb, intersect = box_sphere_intersections([1, 1], [-2, 2],
[-1, -2], [1, 2], 2,
entire_line=False)
assert_array_almost_equal([ta, tb], [0, 0.5])
assert_equal(intersect, True)
# None of the constraints are active
ta, tb, intersect = box_sphere_intersections([1, 1], [-1, 1],
[-1, -3], [1, 3], 10,
entire_line=False)
assert_array_almost_equal([ta, tb], [0, 1])
assert_equal(intersect, True)
# Box constraints are active
ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
[-1, -3], [1, 3], 10,
entire_line=False)
assert_array_almost_equal([ta, tb], [0, 0.5])
assert_equal(intersect, True)
# Spherical constraints are active
ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
[-1, -3], [1, 3], 2,
entire_line=False)
assert_array_almost_equal([ta, tb], [0, 0.25])
assert_equal(intersect, True)
# Infeasible problems
ta, tb, intersect = box_sphere_intersections([2, 2], [-4, 4],
[-1, -3], [1, 3], 2,
entire_line=False)
assert_equal(intersect, False)
ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
[2, 4], [2, 4], 2,
entire_line=False)
assert_equal(intersect, False)
def test_2d_box_constraints_entire_line(self):
# Both constraints are active
ta, tb, intersect = box_sphere_intersections([1, 1], [-2, 2],
[-1, -2], [1, 2], 2,
entire_line=True)
assert_array_almost_equal([ta, tb], [0, 0.5])
assert_equal(intersect, True)
# None of the constraints are active
ta, tb, intersect = box_sphere_intersections([1, 1], [-1, 1],
[-1, -3], [1, 3], 10,
entire_line=True)
assert_array_almost_equal([ta, tb], [0, 2])
assert_equal(intersect, True)
# Box constraints are active
ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
[-1, -3], [1, 3], 10,
entire_line=True)
assert_array_almost_equal([ta, tb], [0, 0.5])
assert_equal(intersect, True)
# Spherical constraints are active
ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
[-1, -3], [1, 3], 2,
entire_line=True)
assert_array_almost_equal([ta, tb], [0, 0.25])
assert_equal(intersect, True)
# Infeasible problems
ta, tb, intersect = box_sphere_intersections([2, 2], [-4, 4],
[-1, -3], [1, 3], 2,
entire_line=True)
assert_equal(intersect, False)
ta, tb, intersect = box_sphere_intersections([1, 1], [-4, 4],
[2, 4], [2, 4], 2,
entire_line=True)
assert_equal(intersect, False)
class TestModifiedDogleg(TestCase):
def test_cauchypoint_equalsto_newtonpoint(self):
A = np.array([[1, 8]])
b = np.array([-16])
_, _, Y = projections(A)
newton_point = np.array([0.24615385, 1.96923077])
# Newton point inside boundaries
x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf], [np.inf, np.inf])
assert_array_almost_equal(x, newton_point)
# Spherical constraint active
x = modified_dogleg(A, Y, b, 1, [-np.inf, -np.inf], [np.inf, np.inf])
assert_array_almost_equal(x, newton_point/np.linalg.norm(newton_point))
# Box constraints active
x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf], [0.1, np.inf])
assert_array_almost_equal(x, (newton_point/newton_point[0]) * 0.1)
def test_3d_example(self):
A = np.array([[1, 8, 1],
[4, 2, 2]])
b = np.array([-16, 2])
Z, LS, Y = projections(A)
newton_point = np.array([-1.37090909, 2.23272727, -0.49090909])
cauchy_point = np.array([0.11165723, 1.73068711, 0.16748585])
origin = np.zeros_like(newton_point)
# newton_point inside boundaries
x = modified_dogleg(A, Y, b, 3, [-np.inf, -np.inf, -np.inf],
[np.inf, np.inf, np.inf])
assert_array_almost_equal(x, newton_point)
# line between cauchy_point and newton_point contains best point
# (spherical constraint is active).
x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf, -np.inf],
[np.inf, np.inf, np.inf])
z = cauchy_point
d = newton_point-cauchy_point
t = ((x-z)/(d))
assert_array_almost_equal(t, np.full(3, 0.40807330))
assert_array_almost_equal(np.linalg.norm(x), 2)
# line between cauchy_point and newton_point contains best point
# (box constraint is active).
x = modified_dogleg(A, Y, b, 5, [-1, -np.inf, -np.inf],
[np.inf, np.inf, np.inf])
z = cauchy_point
d = newton_point-cauchy_point
t = ((x-z)/(d))
assert_array_almost_equal(t, np.full(3, 0.7498195))
assert_array_almost_equal(x[0], -1)
# line between origin and cauchy_point contains best point
# (spherical constraint is active).
x = modified_dogleg(A, Y, b, 1, [-np.inf, -np.inf, -np.inf],
[np.inf, np.inf, np.inf])
z = origin
d = cauchy_point
t = ((x-z)/(d))
assert_array_almost_equal(t, np.full(3, 0.573936265))
assert_array_almost_equal(np.linalg.norm(x), 1)
# line between origin and newton_point contains best point
# (box constraint is active).
x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf, -np.inf],
[np.inf, 1, np.inf])
z = origin
d = newton_point
t = ((x-z)/(d))
assert_array_almost_equal(t, np.full(3, 0.4478827364))
assert_array_almost_equal(x[1], 1)
class TestProjectCG(TestCase):
# From Example 16.2 Nocedal/Wright "Numerical
# Optimization" p.452.
def test_nocedal_example(self):
H = csc_matrix([[6, 2, 1],
[2, 5, 2],
[1, 2, 4]])
A = csc_matrix([[1, 0, 1],
[0, 1, 1]])
c = np.array([-8, -3, -3])
b = -np.array([3, 0])
Z, _, Y = projections(A)
x, info = projected_cg(H, c, Z, Y, b)
assert_equal(info["stop_cond"], 4)
assert_equal(info["hits_boundary"], False)
assert_array_almost_equal(x, [2, -1, 1])
def test_compare_with_direct_fact(self):
H = csc_matrix([[6, 2, 1, 3],
[2, 5, 2, 4],
[1, 2, 4, 5],
[3, 4, 5, 7]])
A = csc_matrix([[1, 0, 1, 0],
[0, 1, 1, 1]])
c = np.array([-2, -3, -3, 1])
b = -np.array([3, 0])
Z, _, Y = projections(A)
x, info = projected_cg(H, c, Z, Y, b, tol=0)
x_kkt, _ = eqp_kktfact(H, c, A, b)
assert_equal(info["stop_cond"], 1)
assert_equal(info["hits_boundary"], False)
assert_array_almost_equal(x, x_kkt)
def test_trust_region_infeasible(self):
H = csc_matrix([[6, 2, 1, 3],
[2, 5, 2, 4],
[1, 2, 4, 5],
[3, 4, 5, 7]])
A = csc_matrix([[1, 0, 1, 0],
[0, 1, 1, 1]])
c = np.array([-2, -3, -3, 1])
b = -np.array([3, 0])
trust_radius = 1
Z, _, Y = projections(A)
with pytest.raises(ValueError):
projected_cg(H, c, Z, Y, b, trust_radius=trust_radius)
def test_trust_region_barely_feasible(self):
H = csc_matrix([[6, 2, 1, 3],
[2, 5, 2, 4],
[1, 2, 4, 5],
[3, 4, 5, 7]])
A = csc_matrix([[1, 0, 1, 0],
[0, 1, 1, 1]])
c = np.array([-2, -3, -3, 1])
b = -np.array([3, 0])
trust_radius = 2.32379000772445021283
Z, _, Y = projections(A)
x, info = projected_cg(H, c, Z, Y, b,
tol=0,
trust_radius=trust_radius)
assert_equal(info["stop_cond"], 2)
assert_equal(info["hits_boundary"], True)
assert_array_almost_equal(np.linalg.norm(x), trust_radius)
assert_array_almost_equal(x, -Y.dot(b))
def test_hits_boundary(self):
H = csc_matrix([[6, 2, 1, 3],
[2, 5, 2, 4],
[1, 2, 4, 5],
[3, 4, 5, 7]])
A = csc_matrix([[1, 0, 1, 0],
[0, 1, 1, 1]])
c = np.array([-2, -3, -3, 1])
b = -np.array([3, 0])
trust_radius = 3
Z, _, Y = projections(A)
x, info = projected_cg(H, c, Z, Y, b,
tol=0,
trust_radius=trust_radius)
assert_equal(info["stop_cond"], 2)
assert_equal(info["hits_boundary"], True)
assert_array_almost_equal(np.linalg.norm(x), trust_radius)
def test_negative_curvature_unconstrained(self):
H = csc_matrix([[1, 2, 1, 3],
[2, 0, 2, 4],
[1, 2, 0, 2],
[3, 4, 2, 0]])
A = csc_matrix([[1, 0, 1, 0],
[0, 1, 0, 1]])
c = np.array([-2, -3, -3, 1])
b = -np.array([3, 0])
Z, _, Y = projections(A)
with pytest.raises(ValueError):
projected_cg(H, c, Z, Y, b, tol=0)
def test_negative_curvature(self):
H = csc_matrix([[1, 2, 1, 3],
[2, 0, 2, 4],
[1, 2, 0, 2],
[3, 4, 2, 0]])
A = csc_matrix([[1, 0, 1, 0],
[0, 1, 0, 1]])
c = np.array([-2, -3, -3, 1])
b = -np.array([3, 0])
Z, _, Y = projections(A)
trust_radius = 1000
x, info = projected_cg(H, c, Z, Y, b,
tol=0,
trust_radius=trust_radius)
assert_equal(info["stop_cond"], 3)
assert_equal(info["hits_boundary"], True)
assert_array_almost_equal(np.linalg.norm(x), trust_radius)
# The box constraints are inactive at the solution but
# are active during the iterations.
def test_inactive_box_constraints(self):
H = csc_matrix([[6, 2, 1, 3],
[2, 5, 2, 4],
[1, 2, 4, 5],
[3, 4, 5, 7]])
A = csc_matrix([[1, 0, 1, 0],
[0, 1, 1, 1]])
c = np.array([-2, -3, -3, 1])
b = -np.array([3, 0])
Z, _, Y = projections(A)
x, info = projected_cg(H, c, Z, Y, b,
tol=0,
lb=[0.5, -np.inf,
-np.inf, -np.inf],
return_all=True)
x_kkt, _ = eqp_kktfact(H, c, A, b)
assert_equal(info["stop_cond"], 1)
assert_equal(info["hits_boundary"], False)
assert_array_almost_equal(x, x_kkt)
    # The box constraints are active and the termination is
    # by maximum iterations (infeasible iteration).
def test_active_box_constraints_maximum_iterations_reached(self):
H = csc_matrix([[6, 2, 1, 3],
[2, 5, 2, 4],
[1, 2, 4, 5],
[3, 4, 5, 7]])
A = csc_matrix([[1, 0, 1, 0],
[0, 1, 1, 1]])
c = np.array([-2, -3, -3, 1])
b = -np.array([3, 0])
Z, _, Y = projections(A)
x, info = projected_cg(H, c, Z, Y, b,
tol=0,
lb=[0.8, -np.inf,
-np.inf, -np.inf],
return_all=True)
assert_equal(info["stop_cond"], 1)
assert_equal(info["hits_boundary"], True)
assert_array_almost_equal(A.dot(x), -b)
assert_array_almost_equal(x[0], 0.8)
    # The box constraints are active and the termination is
    # because it hits the boundary (without an infeasible iteration).
def test_active_box_constraints_hits_boundaries(self):
H = csc_matrix([[6, 2, 1, 3],
[2, 5, 2, 4],
[1, 2, 4, 5],
[3, 4, 5, 7]])
A = csc_matrix([[1, 0, 1, 0],
[0, 1, 1, 1]])
c = np.array([-2, -3, -3, 1])
b = -np.array([3, 0])
trust_radius = 3
Z, _, Y = projections(A)
x, info = projected_cg(H, c, Z, Y, b,
tol=0,
ub=[np.inf, np.inf, 1.6, np.inf],
trust_radius=trust_radius,
return_all=True)
assert_equal(info["stop_cond"], 2)
assert_equal(info["hits_boundary"], True)
assert_array_almost_equal(x[2], 1.6)
    # The box constraints are active and the termination is
    # because it hits the boundary (infeasible iteration).
def test_active_box_constraints_hits_boundaries_infeasible_iter(self):
H = csc_matrix([[6, 2, 1, 3],
[2, 5, 2, 4],
[1, 2, 4, 5],
[3, 4, 5, 7]])
A = csc_matrix([[1, 0, 1, 0],
[0, 1, 1, 1]])
c = np.array([-2, -3, -3, 1])
b = -np.array([3, 0])
trust_radius = 4
Z, _, Y = projections(A)
x, info = projected_cg(H, c, Z, Y, b,
tol=0,
ub=[np.inf, 0.1, np.inf, np.inf],
trust_radius=trust_radius,
return_all=True)
assert_equal(info["stop_cond"], 2)
assert_equal(info["hits_boundary"], True)
assert_array_almost_equal(x[1], 0.1)
    # The box constraints are active and the termination is
    # because it hits the boundary (no infeasible iteration).
def test_active_box_constraints_negative_curvature(self):
H = csc_matrix([[1, 2, 1, 3],
[2, 0, 2, 4],
[1, 2, 0, 2],
[3, 4, 2, 0]])
A = csc_matrix([[1, 0, 1, 0],
[0, 1, 0, 1]])
c = np.array([-2, -3, -3, 1])
b = -np.array([3, 0])
Z, _, Y = projections(A)
trust_radius = 1000
x, info = projected_cg(H, c, Z, Y, b,
tol=0,
ub=[np.inf, np.inf, 100, np.inf],
trust_radius=trust_radius)
assert_equal(info["stop_cond"], 3)
assert_equal(info["hits_boundary"], True)
assert_array_almost_equal(x[2], 100)
|
jamestwebber/scipy
|
scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py
|
Python
|
bsd-3-clause
| 27,931
|
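# Fibonacci via fast exponentiation of the symmetric 2x2 matrix [[1, 1], [1, 0]]:
# a triple (a, b, c) stands for [[a, b], [b, c]], and since
# [[1, 1], [1, 0]]**n == [[F(n+1), F(n)], [F(n), F(n-1)]], fib(n) is the top-left
# entry of the (n-1)-th power. This takes O(log n) multiplications.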
def mul(A, B):
a, b, c = A
d, e, f = B
return a * d + b * e, a * e + b * f, b * e + c * f
def pow(A, n):
if n == 1:
return A
if n & 1 == 0:
return pow(mul(A, A), n // 2)
else:
return mul(A, pow(mul(A, A), (n - 1) // 2))
def fib(n):
if n < 2:
return n
return pow((1, 1, 0), n - 1)[0]
print(fib(100000))
|
nbari/my-sandbox
|
python/exercices/fibonacci/fib_matrix.py
|
Python
|
bsd-3-clause
| 370
|
import os
####### Database config. This assumes Postgres #######
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'commcarehq',
'USER': 'commcarehq',
'PASSWORD': 'commcarehq',
'HOST': 'localhost',
'PORT': '5432'
}
}
### Reporting database
SQL_REPORTING_DATABASE_URL = "postgresql://%(USER)s:%(PASSWORD)s@%(HOST)s:%(PORT)s/commcarehq_reporting" % DATABASES['default']
####### Couch Config ######
COUCH_HTTPS = False # recommended production value is True if enabling https
COUCH_SERVER_ROOT = '127.0.0.1:5984' #6984 for https couch
COUCH_USERNAME = 'commcarehq'
COUCH_PASSWORD = 'commcarehq'
COUCH_DATABASE_NAME = 'commcarehq'
### Staging Replication Setup for Production/Staging
#Staging domains you want to replicate
STAGING_DOMAINS = []
#COUCHDB URI {http|https}://username:password@host:optionalport/dbname
PRODUCTION_COUCHDB_URI = ""
####### # Email setup ########
# email settings: these ones are the custom hq ones
EMAIL_LOGIN = "notifications@dimagi.com"
EMAIL_PASSWORD = "******"
EMAIL_SMTP_HOST = "smtp.gmail.com"
EMAIL_SMTP_PORT = 587
# Print emails to console so there is no danger of spamming, but you can still get registration URLs
EMAIL_BACKEND='django.core.mail.backends.console.EmailBackend'
ADMINS = (('HQ Dev Team', 'commcarehq-dev+www-notifications@dimagi.com'),)
BUG_REPORT_RECIPIENTS = ['commcarehq-support@dimagi.com']
NEW_DOMAIN_RECIPIENTS = ['commcarehq-dev+newdomain@dimagi.com']
EXCHANGE_NOTIFICATION_RECIPIENTS = ['commcarehq-dev+exchange@dimagi.com']
SERVER_EMAIL = 'commcarehq-noreply@dimagi.com' #the physical server emailing - differentiate if needed
DEFAULT_FROM_EMAIL = 'commcarehq-noreply@dimagi.com'
SUPPORT_EMAIL = "commcarehq-support@dimagi.com"
EMAIL_SUBJECT_PREFIX = '[commcarehq] '
SERVER_ENVIRONMENT = 'changeme' #Modify this value if you are deploying multiple environments of HQ to the same machine. Identify the target type of this running environment
####### Log/debug setup ########
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# log directories must exist and be writeable!
DJANGO_LOG_FILE = "/tmp/commcare-hq.django.log"
LOG_FILE = "/tmp/commcare-hq.log"
SEND_BROKEN_LINK_EMAILS = True
CELERY_SEND_TASK_ERROR_EMAILS = True
CELERY_PERIODIC_QUEUE = 'celery' # change this to something else if you want a different queue for periodic tasks
CELERY_FLOWER_URL = 'http://127.0.0.1:5555'
####### Django Compressor ########
COMPRESS_ENABLED = False # this will allow less.js to compile less files on the client side
####### Bitly ########
BITLY_LOGIN = 'dimagi' # set to None to disable bitly app url shortening (useful offline)
BITLY_APIKEY = '*******'
####### Jar signing config ########
_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
JAR_SIGN = dict(
jad_tool = os.path.join(_ROOT_DIR, "corehq", "apps", "app_manager", "JadTool.jar"),
key_store = os.path.join(os.path.dirname(os.path.dirname(_ROOT_DIR)), "DimagiKeyStore"),
key_alias = "javarosakey",
store_pass = "*******",
key_pass = "*******",
)
####### SMS Config ########
# Mach
SMS_GATEWAY_URL = "http://gw1.promessaging.com/sms.php"
SMS_GATEWAY_PARAMS = "id=******&pw=******&dnr=%(phone_number)s&msg=%(message)s&snr=DIMAGI"
# Unicel
UNICEL_CONFIG = {"username": "Dimagi",
"password": "******",
"sender": "Promo" }
####### Domain sync / de-id ########
DOMAIN_SYNCS = {
"domain_name": {
"target": "target_db_name",
"transform": "corehq.apps.domainsync.transforms.deidentify_domain"
}
}
DOMAIN_SYNC_APP_NAME_MAP = { "app_name": "new_app_name" }
####### Touchforms config - for CloudCare #######
XFORMS_PLAYER_URL = 'http://127.0.0.1:4444'
# email and password for an admin django user, such as one created with
# ./manage.py bootstrap <project-name> <email> <password>
TOUCHFORMS_API_USER = 'admin@example.com'
TOUCHFORMS_API_PASSWORD = 'password'
####### Misc / HQ-specific Config ########
DEFAULT_PROTOCOL = "http" # or https
OVERRIDE_LOCATION="https://www.commcarehq.org"
# Set to something like "192.168.1.5:8000" (with your IP address).
# See corehq/apps/builds/README.md for more information.
BASE_ADDRESS = None
#Set your analytics IDs here for GA and pingdom RUM
ANALYTICS_IDS = {
'GOOGLE_ANALYTICS_ID': '*******',
'PINGDOM_ID': '*****'
}
AXES_LOCK_OUT_AT_FAILURE = False
LUCENE_ENABLED = True
PREVIEWER_RE = r'^.*@dimagi\.com$'
GMAPS_API_KEY = '******'
MAPS_LAYERS = {
'Maps': {
'family': 'mapbox',
'args': {
'apikey': '*****'
}
},
'Satellite': {
'family': 'mapbox',
'args': {
'apikey': '*****'
}
},
}
FORMTRANSLATE_TIMEOUT = 5
LOCAL_APPS = (
# 'django_coverage', # Adds `python manage.py test_coverage` (settings below)
# 'debug_toolbar', # Adds a retractable panel to every page giving profiling & debugging info
# 'couchdebugpanel', # Adds couch info to said toolbar
# 'devserver', # Adds improved dev server that also prints SQL on the console (for AJAX, etc, when you cannot use debug_toolbar)
# 'django_cpserver', # Another choice for a replacement server
# 'dimagi.utils'
)
# list of domains to enable ADM reporting on
ADM_ENABLED_PROJECTS = []
# prod settings
SOIL_DEFAULT_CACHE = "redis"
SOIL_BACKEND = "soil.CachedDownload"
# reports cache
REPORT_CACHE = 'default' # or e.g. 'redis'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': 'localhost:11211',
},
'redis': {
'BACKEND': 'redis_cache.cache.RedisCache',
'LOCATION': 'localhost:6379:0',
'OPTIONS': {},
},
}
ELASTICSEARCH_HOST = 'localhost'  # on both a local and a distributed environment this should be localhost
ELASTICSEARCH_PORT = 9200
# our production logstash aggregation
LOGSTASH_DEVICELOG_PORT = 10777
LOGSTASH_COUCHLOG_PORT = 10888
LOGSTASH_AUDITCARE_PORT = 10999
LOGSTASH_HOST = 'localhost'
LOCAL_PILLOWTOPS = {
# 'my_pillows': ['some.pillow.Class', ],
# 'and_more': []
}
# If there are existing doc_ids and case_ids you want to check directly, reference them
# in your localsettings for more accurate direct checks; otherwise view-based checks are used, which can be inaccurate.
ES_CASE_CHECK_DIRECT_DOC_ID = None
ES_XFORM_CHECK_DIRECT_DOC_ID = None
####### API throttling #####
CCHQ_API_THROTTLE_REQUESTS = 200 # number of requests allowed per timeframe
# Use a lower value in production. This is set
# to 200 to prevent AssertionError: 429 != 200
                                 # test failures in development environments.
CCHQ_API_THROTTLE_TIMEFRAME = 10 # seconds
####### django-coverage config ########
COVERAGE_REPORT_HTML_OUTPUT_DIR='coverage-html'
COVERAGE_MODULE_EXCLUDES= ['tests$', 'settings$', 'urls$', 'locale$',
'common.views.test', '^django', 'management', 'migrations',
'^south', '^djcelery', '^debug_toolbar', '^rosetta']
####### Selenium tests config ########
SELENIUM_SETUP = {
# Firefox, Chrome, Ie, or Remote
'BROWSER': 'Chrome',
# Necessary if using Remote selenium driver
'REMOTE_URL': None,
# If not using Remote, allows you to open browsers in a hidden virtual X Server
'USE_XVFB': True,
'XVFB_DISPLAY_SIZE': (1024, 768),
}
SELENIUM_USERS = {
# 'WEB_USER' is optional; if not set, some tests that want a web user will
# try to use ADMIN instead
'ADMIN': {
'USERNAME': 'foo@example.com',
'PASSWORD': 'password',
'URL': 'http://localhost:8000',
'PROJECT': 'project_name',
'IS_SUPERUSER': False
},
'WEB_USER': {
'USERNAME': 'foo@example.com',
'PASSWORD': 'password',
'URL': 'http://localhost:8000',
'PROJECT': 'mike',
'IS_SUPERUSER': False
},
'MOBILE_WORKER': {
'USERNAME': 'user@project_name.commcarehq.org',
'PASSWORD': 'password',
'URL': 'http://localhost:8000'
}
}
SELENIUM_APP_SETTINGS = {
'reports': {
'MAX_PRELOAD_TIME': 20,
'MAX_LOAD_TIME': 30,
},
}
INTERNAL_DATA = {
"business_unit": [],
"product": ["CommCare", "CommConnect", "CommTrack", "RapidSMS", "Custom"],
"services": [],
"account_types": [],
"initiatives": [],
"contract_type": [],
"area": [
{
"name": "Health",
"sub_areas": ["Maternal, Newborn, & Child Health", "Family Planning", "HIV/AIDS"]
},
{
"name": "Other",
"sub_areas": ["Emergency Response"]
},
],
"country": ["Afghanistan", "Albania", "Algeria", "Andorra", "Angola", "Antigua & Deps", "Argentina", "Armenia",
"Australia", "Austria", "Azerbaijan", "Bahamas", "Bahrain", "Bangladesh", "Barbados", "Belarus",
"Belgium", "Belize", "Benin", "Bhutan", "Bolivia", "Bosnia Herzegovina", "Botswana", "Brazil",
"Brunei", "Bulgaria", "Burkina", "Burundi", "Cambodia", "Cameroon", "Canada", "Cape Verde",
"Central African Rep", "Chad", "Chile", "China", "Colombia", "Comoros", "Congo",
"Congo {Democratic Rep}", "Costa Rica", "Croatia", "Cuba", "Cyprus", "Czech Republic", "Denmark",
"Djibouti", "Dominica", "Dominican Republic", "East Timor", "Ecuador", "Egypt", "El Salvador",
"Equatorial Guinea", "Eritrea", "Estonia", "Ethiopia", "Fiji", "Finland", "France", "Gabon", "Gambia",
"Georgia", "Germany", "Ghana", "Greece", "Grenada", "Guatemala", "Guinea", "Guinea-Bissau", "Guyana",
"Haiti", "Honduras", "Hungary", "Iceland", "India", "Indonesia", "Iran", "Iraq", "Ireland {Republic}",
"Israel", "Italy", "Ivory Coast", "Jamaica", "Japan", "Jordan", "Kazakhstan", "Kenya", "Kiribati",
"Korea North", "Korea South", "Kosovo", "Kuwait", "Kyrgyzstan", "Laos", "Latvia", "Lebanon", "Lesotho",
"Liberia", "Libya", "Liechtenstein", "Lithuania", "Luxembourg", "Macedonia", "Madagascar", "Malawi",
"Malaysia", "Maldives", "Mali", "Malta", "Marshall Islands", "Mauritania", "Mauritius", "Mexico",
"Micronesia", "Moldova", "Monaco", "Mongolia", "Montenegro", "Morocco", "Mozambique", "Myanmar, {Burma}",
"Namibia", "Nauru", "Nepal", "Netherlands", "New Zealand", "Nicaragua", "Niger", "Nigeria", "Norway",
"Oman", "Pakistan", "Palau", "Panama", "Papua New Guinea", "Paraguay", "Peru", "Philippines", "Poland",
"Portugal", "Qatar", "Romania", "Russian Federation", "Rwanda", "St Kitts & Nevis", "St Lucia",
"Saint Vincent & the Grenadines", "Samoa", "San Marino", "Sao Tome & Principe", "Saudi Arabia",
"Senegal", "Serbia", "Seychelles", "Sierra Leone", "Singapore", "Slovakia", "Slovenia",
"Solomon Islands", "Somalia", "South Africa", "South Sudan", "Spain", "Sri Lanka", "Sudan", "Suriname",
"Swaziland", "Sweden", "Switzerland", "Syria", "Taiwan", "Tajikistan", "Tanzania", "Thailand", "Togo",
"Tonga", "Trinidad & Tobago", "Tunisia", "Turkey", "Turkmenistan", "Tuvalu", "Uganda", "Ukraine",
"United Arab Emirates", "United Kingdom", "United States", "Uruguay", "Uzbekistan", "Vanuatu",
"Vatican City", "Venezuela", "Vietnam", "Yemen", "Zambia", "Zimbabwe"]
}
|
SEL-Columbia/commcare-hq
|
localsettings.example.py
|
Python
|
bsd-3-clause
| 11,552
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .utils import load_class
from . import settings
def load_backend_class():
"""
    Load the default backend class.
"""
return load_class(settings.GREENQUEUE_BACKEND)
def load_worker_class():
"""
    Load the default worker manager class.
"""
return load_class(settings.GREENQUEUE_WORKER_MANAGER)
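# A minimal, hypothetical sketch of what a dotted-path loader like load_class()
# typically does; the real greenqueue.utils.load_class may differ in details.
import importlib
def _example_load_class(dotted_path):
    # Split "package.module.ClassName" into module path and attribute name,
    # import the module, and return the named attribute (usually a class).
    module_path, _, attr_name = dotted_path.rpartition('.')
    return getattr(importlib.import_module(module_path), attr_name)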
|
niwinz/django-greenqueue
|
greenqueue/shortcuts.py
|
Python
|
bsd-3-clause
| 399
|
import sys
import logging
from raven.utils.testutils import TestCase
from raven.base import Client
from raven import breadcrumbs
from io import StringIO
class BreadcrumbTestCase(TestCase):
def test_crumb_buffer(self):
for enable in 1, 0:
client = Client('http://foo:bar@example.com/0',
enable_breadcrumbs=enable)
with client.context:
breadcrumbs.record(type='foo', data={'bar': 'baz'},
message='aha', category='huhu')
crumbs = client.context.breadcrumbs.get_buffer()
assert len(crumbs) == enable
def test_log_crumb_reporting(self):
client = Client('http://foo:bar@example.com/0')
with client.context:
log = logging.getLogger('whatever.foo')
log.info('This is a message with %s!', 'foo', blah='baz')
crumbs = client.context.breadcrumbs.get_buffer()
assert len(crumbs) == 1
assert crumbs[0]['type'] == 'default'
assert crumbs[0]['category'] == 'whatever.foo'
assert crumbs[0]['data'] == {'blah': 'baz'}
assert crumbs[0]['message'] == 'This is a message with foo!'
def test_log_location(self):
out = StringIO()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(out)
handler.setFormatter(logging.Formatter(
u'%(name)s|%(filename)s|%(funcName)s|%(lineno)d|'
u'%(levelname)s|%(message)s'))
logger.addHandler(handler)
client = Client('http://foo:bar@example.com/0')
with client.context:
logger.info('Hello World!')
lineno = sys._getframe().f_lineno - 1
items = out.getvalue().strip().split('|')
assert items[0] == 'tests.breadcrumbs.tests'
assert items[1].rstrip('co') == 'tests.py'
assert items[2] == 'test_log_location'
assert int(items[3]) == lineno
assert items[4] == 'INFO'
assert items[5] == 'Hello World!'
def test_broken_logging(self):
client = Client('http://foo:bar@example.com/0')
with client.context:
log = logging.getLogger('whatever.foo')
log.info('This is a message with %s. %s!', 42)
crumbs = client.context.breadcrumbs.get_buffer()
assert len(crumbs) == 1
assert crumbs[0]['type'] == 'default'
assert crumbs[0]['category'] == 'whatever.foo'
assert crumbs[0]['message'] == 'This is a message with %s. %s!'
def test_dedup_logging(self):
client = Client('http://foo:bar@example.com/0')
with client.context:
log = logging.getLogger('whatever.foo')
log.info('This is a message with %s!', 42)
log.info('This is a message with %s!', 42)
log.info('This is a message with %s!', 42)
log.info('This is a message with %s!', 23)
log.info('This is a message with %s!', 23)
log.info('This is a message with %s!', 23)
log.info('This is a message with %s!', 42)
crumbs = client.context.breadcrumbs.get_buffer()
assert len(crumbs) == 3
assert crumbs[0]['type'] == 'default'
assert crumbs[0]['category'] == 'whatever.foo'
assert crumbs[0]['message'] == 'This is a message with 42!'
assert crumbs[1]['type'] == 'default'
assert crumbs[1]['category'] == 'whatever.foo'
assert crumbs[1]['message'] == 'This is a message with 23!'
assert crumbs[2]['type'] == 'default'
assert crumbs[2]['category'] == 'whatever.foo'
assert crumbs[2]['message'] == 'This is a message with 42!'
def test_manual_record(self):
client = Client('http://foo:bar@example.com/0')
with client.context:
def processor(data):
assert data['message'] == 'whatever'
assert data['level'] == 'warning'
assert data['category'] == 'category'
assert data['type'] == 'the_type'
assert data['data'] == {'foo': 'bar'}
data['data']['extra'] = 'something'
breadcrumbs.record(message='whatever',
level='warning',
category='category',
data={'foo': 'bar'},
type='the_type',
processor=processor)
crumbs = client.context.breadcrumbs.get_buffer()
assert len(crumbs) == 1
data = crumbs[0]
assert data['message'] == 'whatever'
assert data['level'] == 'warning'
assert data['category'] == 'category'
assert data['type'] == 'the_type'
assert data['data'] == {'foo': 'bar', 'extra': 'something'}
def test_special_log_handlers(self):
name = __name__ + '.superspecial'
logger = logging.getLogger(name)
def handler(logger, level, msg, args, kwargs):
assert logger.name == name
assert msg == 'aha!'
return True
breadcrumbs.register_special_log_handler(name, handler)
client = Client('http://foo:bar@example.com/0')
with client.context:
logger.debug('aha!')
crumbs = client.context.breadcrumbs.get_buffer()
assert len(crumbs) == 0
def test_logging_handlers(self):
name = __name__ + '.superspecial2'
logger = logging.getLogger(name)
def handler(logger, level, msg, args, kwargs):
if logger.name == name:
assert msg == 'aha!'
return True
breadcrumbs.register_logging_handler(handler)
client = Client('http://foo:bar@example.com/0')
with client.context:
logger.debug('aha!')
crumbs = client.context.breadcrumbs.get_buffer()
assert len(crumbs) == 0
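    # Hedged, illustrative addition (not from the original suite): the minimal
    # record()/get_buffer() round trip that the assertions above build on.
    def test_manual_record_roundtrip_example(self):
        client = Client('http://foo:bar@example.com/0')
        with client.context:
            breadcrumbs.record(type='navigation', data={'to': '/checkout'},
                               message='clicked checkout', category='ui')
            crumbs = client.context.breadcrumbs.get_buffer()
        assert crumbs[-1]['message'] == 'clicked checkout'
        assert crumbs[-1]['category'] == 'ui'
        assert crumbs[-1]['type'] == 'navigation'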
|
johansteffner/raven-python
|
tests/breadcrumbs/tests.py
|
Python
|
bsd-3-clause
| 6,007
|
"""One and two sample permutation tests.
"""
# Third-party imports
import numpy as np
import scipy.misc as sm
# Our own imports
from nipy.neurospin.graph import graph_3d_grid, graph_cc
from nipy.neurospin.graph.field import Field
from onesample import stat as os_stat, stat_mfx as os_stat_mfx
from twosample import stat as ts_stat, stat_mfx as ts_stat_mfx
# Default parameters
DEF_NDRAWS = int(1e5)
DEF_NPERMS = int(1e4)
DEF_NITER = 5
DEF_STAT_ONESAMPLE = 'student'
DEF_STAT_TWOSAMPLE = 'student'
#===========================================
#===========================================
# Cluster extraction functions
#===========================================
#===========================================
def extract_clusters_from_thresh(T,XYZ,th,k=18):
"""
Extract clusters from statistical map
above specified threshold
In: T (p) statistical map
XYZ (3,p) voxels coordinates
th <float> threshold
k <int> the number of neighbours considered. (6,18 or 26)
Out: labels (p) cluster labels
"""
labels = -np.ones(len(T),int)
I = np.where(T >= th)[0]
if len(I)>0:
SupraThreshXYZ = XYZ[:, I]
# Compute graph associated to suprathresh_coords
A, B, D = graph_3d_grid(SupraThreshXYZ.transpose(),k)
# Number of vertices
V = max(A) + 1
# Labels of connected components
CC_label = graph_cc(A,B,D,V)
labels[I] = CC_label
return labels
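# Illustrative example (assumed shapes): for T = np.array([0.5, 2.4, 2.6, 0.1]) and a
# threshold th = 2.0, the two suprathreshold voxels receive cluster labels (the same
# label if their XYZ coordinates are grid neighbours), while the others remain -1.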
def max_dist(XYZ,I,J):
"""
Maximum distance between two set of points
In: XYZ (3,p) voxels coordinates
I (q) index of points
J (r) index of points
Out: d <float>
"""
if min(min(np.shape(I)), min(np.shape(J))) == 0:
return 0
else:
# Square distance matrix
D = np.sum(np.square(XYZ[:,I].reshape(3, len(I), 1) - XYZ[:, J].reshape(3, 1, len(J))), axis=0)
return np.sqrt((D).max())
def extract_clusters_from_diam(T,XYZ,th,diam,k=18):
"""
Extract clusters from a statistical map
under diameter constraint
and above given threshold
In: T (p) statistical map
XYZ (3,p) voxels coordinates
th <float> minimum threshold
diam <int> maximal diameter (in voxels)
k <int> the number of neighbours considered. (6,18 or 26)
Out: labels (p) cluster labels
"""
CClabels = extract_clusters_from_thresh(T,XYZ,th,k)
nCC = CClabels.max() + 1
labels = -np.ones(len(CClabels),int)
clust_label = 0
for i in xrange(nCC):
#print "Searching connected component ", i, " out of ", nCC
I = np.where(CClabels==i)[0]
extCC = len(I)
if extCC <= (diam+1)**3:
diamCC = max_dist(XYZ,I,I)
else:
diamCC = diam+1
if diamCC <= diam:
labels[I] = np.zeros(extCC,int) + clust_label
#print "cluster ", clust_label, ", diam = ", diamCC
#print "ext = ", len(I), ", diam = ", max_dist(XYZ,I,I)
clust_label += 1
else:
# build the field
p = len(T[I])
F = Field(p)
F.from_3d_grid(np.transpose(XYZ[:,I]),k)
F.set_field(np.reshape(T[I],(p,1)))
# compute the blobs
idx, height, parent,label = F.threshold_bifurcations(0,th)
nidx = np.size(idx)
#root = nidx-1
root = np.where(np.arange(nidx)==parent)[0]
# Can constraint be met within current region?
Imin = I[T[I]>=height[root]]
extmin = len(Imin)
if extmin <= (diam+1)**3:
dmin = max_dist(XYZ,Imin,Imin)
else:
dmin = diam+1
if dmin <= diam:# If so, search for the largest cluster meeting the constraint
Iclust = Imin # Smallest cluster
J = I[T[I]<height[root]] # Remaining voxels
argsortTJ = np.argsort(T[J])[::-1] # Sorted by decreasing T values
l = 0
L = np.array([J[argsortTJ[l]]],int)
diameter = dmin
new_diameter = max(dmin,max_dist(XYZ,Iclust,L))
while new_diameter <= diam:
#print "diameter = " + str(new_diameter)
#sys.stdout.flush()
Iclust = np.concatenate((Iclust,L))
diameter = new_diameter
#print "diameter = ", diameter
l += 1
L = np.array([J[argsortTJ[l]]],int)
new_diameter = max(diameter,max_dist(XYZ,Iclust,L))
labels[Iclust] = np.zeros(len(Iclust),int) + clust_label
#print "cluster ", clust_label, ", diam = ", diameter
#print "ext = ", len(Iclust), ", diam = ", max_dist(XYZ,Iclust,Iclust)
clust_label += 1
else:# If not, search inside sub-regions
#print "Searching inside sub-regions "
Irest = I[T[I]>height[root]]
rest_labels = extract_clusters_from_diam(T[Irest],XYZ[:,Irest],th,diam,k)
rest_labels[rest_labels>=0] += clust_label
clust_label = rest_labels.max() + 1
labels[Irest] = rest_labels
return labels
def extract_clusters_from_graph(T, G, th):
"""
This returns a label vector of same size as T,
defining connected components for subgraph of
weighted graph G containing vertices s.t. T >= th
"""
labels = np.zeros(len(T), int) - 1
I = T >= th
nlabels = I.sum()
if nlabels > 0:
labels[I] = G.subgraph(I).cc()
return labels
#======================================
#======================================
# Useful functions
#======================================
#======================================
def sorted_values(a):
"""
    Extract the list of distinct sorted values from an array.
"""
if len(a) == 0:
return []
else:
m = min(a)
L = [m]
L.extend( sorted_values(a[a>m]) )
return L
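# Illustrative example: sorted_values(np.array([3, 1, 2, 1])) returns [1, 2, 3].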
def onesample_stat(Y, V, stat_id, base=0.0, axis=0, Magics=None, niter=DEF_NITER):
"""
Wrapper for os_stat and os_stat_mfx
"""
if stat_id.find('_mfx')<0:
return os_stat(Y, stat_id, base, axis, Magics)
else:
return os_stat_mfx(Y, V, stat_id, base, axis, Magics, niter)
def twosample_stat(Y1, V1, Y2, V2, stat_id, axis=0, Magics=None, niter=DEF_NITER):
"""
Wrapper for ts_stat and ts_stat_mfx
"""
if stat_id.find('_mfx')<0:
return ts_stat(Y1, Y2, stat_id, axis, Magics)
else:
return ts_stat_mfx(Y1, V1, Y2, V2, stat_id, axis, Magics, niter)
#=================================================
#=================================================
# Compute cluster and region summary statistics
#=================================================
#=================================================
def compute_cluster_stats(Tvalues, labels, random_Tvalues, cluster_stats=["size","Fisher"]):
"""
size_values, Fisher_values = compute_cluster_stats(Tvalues, labels, random_Tvalues, cluster_stats=["size","Fisher"])
Compute summary statistics in each cluster
In: see permutation_test_onesample class docstring
Out: size_values Array of size nclust, or None if "size" not in cluster_stats
Fisher_values Array of size nclust, or None if "Fisher" not in cluster_stats
"""
nclust = max(labels)+1
if nclust == 0:
if "size" in cluster_stats:
size_values = np.array([0])
else:
size_values = None
if "Fisher" in cluster_stats:
Fisher_values = np.array([0])
else:
Fisher_values = None
else:
if "size" in cluster_stats:
size_values = np.zeros(nclust,int)
else:
size_values = None
if "Fisher" in cluster_stats:
Fisher_values = np.zeros(nclust,float)
ndraws = len(random_Tvalues)
pseudo_p_values = 1 - np.searchsorted(random_Tvalues,Tvalues)/float(ndraws)
else:
Fisher_values = None
for i in xrange(nclust):
I = np.where(labels==i)[0]
if "size" in cluster_stats:
size_values[i] = len(I)
if "Fisher" in cluster_stats:
Fisher_values[i] = -np.sum(np.log(pseudo_p_values[I]))
return size_values, Fisher_values
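# Note on the "Fisher" cluster statistic above: for voxel pseudo p-values p_i within a
# cluster it computes -sum_i log(p_i), a Fisher-style combination of p-values (the
# classical Fisher statistic is -2 * sum_i ln(p_i)); larger values mean stronger
# combined evidence within the cluster.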
def compute_region_stat(Tvalues, labels, label_values, random_Tvalues):
"""
Fisher_values = compute_region_stat(Tvalues, labels, label_values, random_Tvalues)
Compute summary statistics in each cluster
In: see permutation_test_onesample class docstring
Out: Fisher_values Array of size nregions
"""
Fisher_values = np.zeros(len(label_values),float)
pseudo_p_values = 1 - np.searchsorted(random_Tvalues,Tvalues)/float(len(random_Tvalues))
for i in xrange(len(label_values)):
I = np.where(labels==label_values[i])[0]
Fisher_values[i] = -np.sum(np.log(pseudo_p_values[I]))
return Fisher_values
def peak_XYZ(XYZ, Tvalues, labels, label_values):
"""
Returns (3, n_labels) array of maximum T values coordinates for each label value
"""
C = np.zeros((3, len(label_values)), int)
for i in xrange(len(label_values)):
I = np.where(labels == label_values[i])[0]
C[:, i] = XYZ[:, I[np.argmax(Tvalues[I])]]
return C
#======================================
#======================================
# Generic permutation test class
#======================================
#======================================
class permutation_test:
"""
This generic permutation test class contains the calibration method
which is common to the derived classes permutation_test_onesample and
permutation_test_twosample (as well as other common methods)
"""
#=======================================================
# Permutation test calibration of summary statistics
#=======================================================
def calibrate(self, nperms=DEF_NPERMS, clusters=None,
cluster_stats=["size","Fisher"], regions=None,
region_stats=["Fisher"], verbose=False):
"""
Calibrate cluster and region summary statistics using permutation test
Parameters
----------
nperms : int, optional
Number of random permutations generated.
Exhaustive permutations are used only if nperms=None,
or exceeds total number of possible permutations
clusters : list [(thresh1,diam1),(thresh2,diam2),...], optional
List of cluster extraction pairs: (thresh,diam). *thresh* provides
T values threshold, *diam* is the maximum cluster diameter, in
voxels. Using *diam*==None yields classical suprathreshold
clusters.
cluster_stats : list [stat1,...], optional
List of cluster summary statistics id (either 'size' or 'Fisher')
regions : list [Labels1,Labels2,...]
List of region labels arrays, of size (p,) where p is the number
of voxels
region_stats : list [stat1,...], optional
List of cluster summary statistics id (only 'Fisher' supported
for now)
verbose : boolean, optional
"Chatterbox" mode switch
Returns
-------
voxel_results : dict
A dictionary containing the following keys: ``p_values`` (p,)
Uncorrected p-values.``Corr_p_values`` (p,) Corrected p-values,
computed by the Tmax procedure. ``perm_maxT_values`` (nperms)
values of the maximum statistic under permutation.
cluster_results : list [results1,results2,...]
List of permutation test results for each cluster extraction pair.
These are dictionaries with the following keys "thresh", "diam",
"labels", "expected_voxels_per_cluster",
"expected_number_of_clusters", and "peak_XYZ" if XYZ field is
                nonempty and for each summary statistic id "S": "S_values",
                "S_p_values", "S_Corr_p_values", "perm_S_values",
                "perm_maxS_values"
            region_results : list [results1,results2,...]
List of permutation test results for each region labels arrays.
These are dictionaries with the following keys: "label_values",
"peak_XYZ" (if XYZ field nonempty) and for each summary statistic
id "S": "size_values", "size_p_values", "perm_size_values",
"perm_maxsize_values"
"""
# Permutation indices
if self.nsamples ==1:
n, p = self.data.shape[self.axis], self.data.shape[1-self.axis]
max_nperms = 2**n
elif self.nsamples == 2:
n1,p = self.data1.shape[self.axis], self.data1.shape[1-self.axis]
n2 = self.data2.shape[self.axis]
max_nperms = sm.comb(n1+n2,n1,exact=1)
data = np.concatenate((self.data1,self.data2), self.axis)
if self.vardata1 != None:
vardata = np.concatenate((self.vardata1,self.vardata2), self.axis)
if nperms == None or nperms >= max_nperms:
magic_numbers = np.arange(max_nperms)
else:
#magic_numbers = np.random.randint(max_nperms,size=nperms)
# np.random.randint does not handle longint!
# So we use the following hack instead:
            magic_numbers = np.random.uniform(0, max_nperms, size=nperms)
# Initialize cluster_results
cluster_results = []
if clusters != None:
for (thresh,diam) in clusters:
if diam == None:
if self.XYZ == None:
labels = extract_clusters_from_graph(self.Tvalues,self.G,thresh)
else:
labels = extract_clusters_from_thresh(self.Tvalues,self.XYZ,thresh)
else:
labels = extract_clusters_from_diam(self.Tvalues,self.XYZ,thresh,diam)
results = {"thresh" : thresh, "diam" : diam, "labels" : labels}
size_values, Fisher_values = compute_cluster_stats(self.Tvalues, labels, self.random_Tvalues, cluster_stats)
nclust = labels.max() + 1
results["expected_voxels_per_thresh"] = 0.0
results["expected_number_of_clusters"] = 0.0
if self.XYZ != None:
results["peak_XYZ"] = peak_XYZ(self.XYZ, self.Tvalues, labels, np.arange(nclust))
if "size" in cluster_stats:
results["size_values"] = size_values
results["perm_size_values"] = []
results["perm_maxsize_values"] = np.zeros(len(magic_numbers),int)
if "Fisher" in cluster_stats:
results["Fisher_values"] = Fisher_values
results["perm_Fisher_values"] = []
results["perm_maxFisher_values"] = np.zeros(len(magic_numbers),float)
cluster_results.append( results )
# Initialize region_results
region_results = []
if regions != None:
for labels in regions:
label_values = sorted_values(labels)
nregions = len(label_values)
results = { "label_values" : label_values }
if self.XYZ != None:
results["peak_XYZ"] = peak_XYZ(self.XYZ, self.Tvalues, labels, label_values)
if "Fisher" in region_stats:
results["Fisher_values"] = compute_region_stat(self.Tvalues, labels, label_values, self.random_Tvalues)
results["perm_Fisher_values"] = np.zeros((nregions,len(magic_numbers)),float)
results["Fisher_p_values"] = np.zeros(nregions,float)
results["Fisher_Corr_p_values"] = np.zeros(nregions,float)
region_results.append( results )
# Permutation test
p_values = np.zeros(p,float)
Corr_p_values = np.zeros(p,float)
nmagic = len(magic_numbers)
perm_maxT_values = np.zeros(nmagic, float)
for j in xrange(nmagic):
m = magic_numbers[j]
if verbose:
print "Permutation", j+1, "out of", nmagic
# T values under permutation
if self.nsamples == 1:
#perm_Tvalues = onesample_stat(self.data, self.vardata, self.stat_id, self.base, self.axis, np.array([m]), self.niter).squeeze()
rand_sign = (np.random.randint(2,size=n)*2-1).reshape(n,1)
rand_data = rand_sign*self.data
if self.vardata == None:
rand_vardata = None
else:
rand_vardata = rand_sign*self.vardata
perm_Tvalues = onesample_stat(rand_data, rand_vardata, self.stat_id, self.base, self.axis, None, self.niter).squeeze()
elif self.nsamples == 2:
perm_Tvalues = twosample_stat(self.data1, self.vardata1, self.data2, self.vardata2, self.stat_id, self.axis, np.array([m]), self.niter).squeeze()
rand_perm = np.random.permutation(np.arange(n1+n2))
rand_data1 = data[:n1]
rand_data2 = data[n1:]
if self.vardata1 == None:
rand_vardata1 = None
rand_vardata2 = None
else:
rand_vardata1 = vardata[:n1]
rand_vardata2 = vardata[n1:]
# update p values
p_values += perm_Tvalues >= self.Tvalues
Corr_p_values += max(perm_Tvalues) >= self.Tvalues
perm_maxT_values[j] = max(perm_Tvalues)
# Update cluster_results
if clusters != None:
for i in xrange(len(clusters)):
thresh, diam = clusters[i]
if diam == None:
if self.XYZ == None:
perm_labels = extract_clusters_from_graph(perm_Tvalues,self.G,thresh)
else:
perm_labels = extract_clusters_from_thresh(perm_Tvalues,self.XYZ,thresh)
else:
perm_labels = extract_clusters_from_diam(perm_Tvalues,self.XYZ,thresh,diam)
perm_size_values, perm_Fisher_values = compute_cluster_stats(perm_Tvalues, perm_labels, self.random_Tvalues, cluster_stats)
                    # Use the permuted labels here; referencing the unpermuted
                    # `labels`/`nclust` from the setup loop was a bug.
                    perm_nclust = perm_labels.max() + 1
                    cluster_results[i]["expected_voxels_per_thresh"] += perm_size_values.sum()/float(max(perm_nclust, 1))
                    cluster_results[i]["expected_number_of_clusters"] += perm_nclust
if "size" in cluster_stats:
cluster_results[i]["perm_size_values"][:0] = perm_size_values
cluster_results[i]["perm_maxsize_values"][j] = max(perm_size_values)
if "Fisher" in cluster_stats:
cluster_results[i]["perm_Fisher_values"][:0] = perm_Fisher_values
cluster_results[i]["perm_maxFisher_values"][j] = max(perm_Fisher_values)
# Update region_results
if regions != None:
for i in xrange(len(regions)):
labels = regions[i]
label_values = region_results[i]["label_values"]
nregions = len(label_values)
if "Fisher" in region_stats:
perm_Fisher_values = compute_region_stat(perm_Tvalues, labels, label_values, self.random_Tvalues)
region_results[i]["perm_Fisher_values"][:,j] = perm_Fisher_values
# Compute p-values for clusters summary statistics
if clusters != None:
for i in xrange(len(clusters)):
if "size" in cluster_stats:
cluster_results[i]["perm_size_values"] = np.array(cluster_results[i]["perm_size_values"])
cluster_results[i]["perm_size_values"].sort()
cluster_results[i]["perm_maxsize_values"].sort()
cluster_results[i]["size_p_values"] = 1 - np.searchsorted(cluster_results[i]["perm_size_values"], cluster_results[i]["size_values"])/float(cluster_results[i]["expected_number_of_clusters"])
cluster_results[i]["size_Corr_p_values"] = 1 - np.searchsorted(cluster_results[i]["perm_maxsize_values"], cluster_results[i]["size_values"])/float(nmagic)
if "Fisher" in cluster_stats:
cluster_results[i]["perm_Fisher_values"] = np.array(cluster_results[i]["perm_Fisher_values"])
cluster_results[i]["perm_Fisher_values"].sort()
cluster_results[i]["perm_maxFisher_values"].sort()
cluster_results[i]["Fisher_p_values"] = 1 - np.searchsorted(cluster_results[i]["perm_Fisher_values"], cluster_results[i]["Fisher_values"])/float(cluster_results[i]["expected_number_of_clusters"])
cluster_results[i]["Fisher_Corr_p_values"] = 1 - np.searchsorted(cluster_results[i]["perm_maxFisher_values"], cluster_results[i]["Fisher_values"])/float(nmagic)
cluster_results[i]["expected_voxels_per_thresh"] /= float(nmagic)
cluster_results[i]["expected_number_of_clusters"] /= float(nmagic)
# Compute p-values for regions summary statistics
if regions != None:
for i in xrange(len(regions)):
if "Fisher" in region_stats:
sorted_perm_Fisher_values = np.sort(region_results[i]["perm_Fisher_values"],axis=1)
label_values = region_results[i]["label_values"]
nregions = len(label_values)
# Compute uncorrected p-values
for j in xrange(nregions):
region_results[i]["Fisher_p_values"][j] = 1 - np.searchsorted(sorted_perm_Fisher_values[j],region_results[i]["Fisher_values"][j])/float(nmagic)
#Compute corrected p-values
perm_Fisher_p_values = np.zeros((nregions,nmagic),float)
for j in xrange(nregions):
I = np.argsort(region_results[i]["perm_Fisher_values"][j])
perm_Fisher_p_values[j][I] = 1 - np.arange(1,nmagic+1)/float(nmagic)
perm_min_Fisher_p_values = np.sort(perm_Fisher_p_values.min(axis=0))
region_results[i]["Fisher_Corr_p_values"] = 1 - np.searchsorted(-perm_min_Fisher_p_values,-region_results[i]["Fisher_p_values"])/float(nmagic)
voxel_results = {'p_values':p_values/float(nmagic),
'Corr_p_values':Corr_p_values/float(nmagic),
'perm_maxT_values':perm_maxT_values}
return voxel_results, cluster_results, region_results
def height_threshold(self, pval):
"""
Return the uniform height threshold matching a given
permutation-based P-value.
"""
tvals = self.random_Tvalues
ndraws = tvals.size
        idx = int(np.ceil(ndraws * (1 - pval)))
if idx >= ndraws:
return np.inf
candidate = tvals[idx]
if tvals[max(0, idx-1)]<candidate:
return candidate
idx = np.searchsorted(tvals, candidate, 'right')
if idx >= ndraws:
return np.inf
return tvals[idx]
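    # Illustrative note (assumed intent): with pval = 0.05 the returned threshold is
    # roughly the 95th percentile of the sorted null draws in self.random_Tvalues,
    # i.e. the smallest height exceeded by only ~5% of random draws.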
def pvalue(self, Tvalues=None):
"""
Return uncorrected voxel-level pseudo p-values.
"""
if Tvalues == None:
Tvalues = self.Tvalues
return 1 - np.searchsorted(self.random_Tvalues, Tvalues)/float(self.ndraws)
def zscore(self, Tvalues=None):
"""
Return z score corresponding to the uncorrected
voxel-level pseudo p-value.
"""
from nipy.neurospin.utils import zscore
if Tvalues == None:
Tvalues = self.Tvalues
return zscore(self.pvalue(Tvalues))
#======================================
#======================================
# One sample permutation test class
#======================================
#======================================
class permutation_test_onesample(permutation_test):
"""
Class derived from the generic permutation_test class.
Inherits the calibrate method
"""
def __init__(self, data, XYZ, axis=0, vardata=None,
stat_id=DEF_STAT_ONESAMPLE, base=0.0, niter=DEF_NITER,
ndraws=DEF_NDRAWS):
"""
Initialize permutation_test_onesample instance,
compute statistic values in each voxel and under permutation
In: data data array
XYZ voxels coordinates
axis <int> Subject axis in data
vardata variance (same shape as data)
optional (if None, mfx statistics cannot be used)
stat_id <char> choice of test statistic
(see onesample.stats for a list of possible stats)
base <float> mean signal under H0
niter <int> number of iterations of EM algorithm
ndraws <int> Number of generated random t values
Out:
self.Tvalues voxelwise test statistic values
self.random_Tvalues sorted statistic values in random voxels and under random
sign permutation
"""
# Create data fields
n,p = data.shape[axis], data.shape[1-axis]
self.data = data
self.stat_id = stat_id
self.XYZ = XYZ
self.axis = axis
self.vardata = vardata
self.niter = niter
self.base = base
self.ndraws = ndraws
self.Tvalues = onesample_stat(data, vardata, stat_id, base, axis, Magics=None, niter=niter).squeeze()
self.nsamples = 1
# Compute statistic values in random voxels and under random permutations
# Use a self.verbose flag for this output?
#print "Computing average null distribution of test statistic..."
self.random_Tvalues = np.zeros(ndraws,float)
# Random voxel selection
I = np.random.randint(0,p,size=ndraws)
if axis == 0:
rand_data = data[:,I]
if vardata == None:
rand_vardata = None
else:
rand_vardata = vardata[:,I]
else:
rand_data = data[I]
if vardata == None:
rand_vardata = None
else:
rand_vardata = vardata[I]
# Random sign permutation
rand_sign = (np.random.binomial(1,0.5,size = n*ndraws)*2-1).reshape(n,ndraws)
if axis == 1:
rand_sign = rand_sign.transpose()
rand_data *= rand_sign
self.random_Tvalues = onesample_stat(rand_data, rand_vardata, stat_id, base, axis).squeeze()
self.random_Tvalues.sort()
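# A hedged usage sketch for permutation_test_onesample (shapes, thresholds and keyword
# values below are illustrative assumptions, not taken from the original module):
#
#     import numpy as np
#     data = np.random.randn(15, 5000)                  # 15 subjects x 5000 voxels
#     XYZ = np.indices((10, 20, 25)).reshape(3, 5000)   # voxel grid coordinates
#     ptest = permutation_test_onesample(data, XYZ, ndraws=10000)
#     voxel_res, cluster_res, region_res = ptest.calibrate(
#         nperms=1000, clusters=[(3.0, None)])          # suprathreshold clusters at T >= 3.0
#     corrected_p = voxel_res['Corr_p_values']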
#==================================================================
#==================================================================
# One sample permutation test class with arbitrary graph structure
#==================================================================
#==================================================================
class permutation_test_onesample_graph(permutation_test):
"""
Class derived from the generic permutation_test class.
Inherits the calibrate method
"""
def __init__(self,data,G,axis=0,vardata=None,stat_id=DEF_STAT_ONESAMPLE,base=0.0,niter=DEF_NITER,ndraws=DEF_NDRAWS):
"""
Initialize permutation_test_onesample instance,
compute statistic values in each voxel and under permutation
In: data data array
G weighted graph (each vertex corresponds to a voxel)
axis <int> Subject axis in data
vardata variance (same shape as data)
optional (if None, mfx statistics cannot be used)
stat_id <char> choice of test statistic
(see onesample.stats for a list of possible stats)
base <float> mean signal under H0
niter <int> number of iterations of EM algorithm
ndraws <int> Number of generated random t values
Out:
self.Tvalues voxelwise test statistic values
self.random_Tvalues sorted statistic values in random voxels and under random
sign permutation
"""
# Create data fields
n,p = data.shape[axis], data.shape[1-axis]
self.data = data
self.stat_id = stat_id
self.XYZ = None
self.G = G
self.axis = axis
self.vardata = vardata
self.niter = niter
self.base = base
self.ndraws = ndraws
self.Tvalues = onesample_stat(data, vardata, stat_id, base, axis, Magics=None, niter=niter).squeeze()
self.nsamples = 1
# Compute statistic values in random voxels and under random permutations
# Use a self.verbose flag for this output?
#print "Computing average null distribution of test statistic..."
self.random_Tvalues = np.zeros(ndraws,float)
# Random voxel selection
I = np.random.randint(0,p,size=ndraws)
if axis == 0:
rand_data = data[:,I]
if vardata == None:
rand_vardata = None
else:
rand_vardata = vardata[:,I]
else:
rand_data = data[I]
if vardata == None:
rand_vardata = None
else:
rand_vardata = vardata[I]
# Random sign permutation
rand_sign = (np.random.binomial(1,0.5,size = n*ndraws)*2-1).reshape(n,ndraws)
if axis == 1:
rand_sign = rand_sign.transpose()
rand_data *= rand_sign
self.random_Tvalues = onesample_stat(rand_data, rand_vardata, stat_id, base, axis).squeeze()
self.random_Tvalues.sort()
#======================================
#======================================
# Two sample permutation test class
#======================================
#======================================
class permutation_test_twosample(permutation_test):
"""
Class derived from the generic permutation_test class.
Inherits the calibrate method
"""
def __init__(self,data1,data2,XYZ,axis=0,vardata1=None,vardata2=None,stat_id=DEF_STAT_TWOSAMPLE,niter=DEF_NITER,ndraws=DEF_NDRAWS):
"""
Initialize permutation_test_twosample instance,
compute statistic values in each voxel and under permutation
In: data1, data2 data arrays
XYZ voxels coordinates
axis <int> Subject axis in data
vardata1, vardata2 variance (same shape as data)
optional (if None, mfx statistics cannot be used)
stat_id <char> choice of test statistic
(see onesample.stats for a list of possible stats)
niter <int> number of iterations of EM algorithm
ndraws <int> Number of generated random t values
Out:
self.Tvalues voxelwise test statistic values
self.random_Tvalues sorted statistic values in random voxels and under random
sign permutation
"""
# Create data fields
n1,p = data1.shape[axis], data1.shape[1-axis]
n2 = data2.shape[axis]
self.data1 = data1
self.data2 = data2
self.stat_id = stat_id
self.XYZ = XYZ
self.axis = axis
self.vardata1 = vardata1
self.vardata2 = vardata2
self.niter = niter
self.ndraws = ndraws
self.Tvalues = twosample_stat(data1, vardata1, data2, vardata2, stat_id, axis, Magics=None, niter=niter).squeeze()
self.nsamples = 2
# Compute statistic values in random voxels and under random permutations
# Use a self.verbose flag for this output?
#print "Computing average null distribution of test statistic..."
self.random_Tvalues = np.zeros(ndraws,float)
# Random voxel selection
I = np.random.randint(0,p,size=ndraws)
if axis == 0:
perm_data = np.zeros((n1+n2,ndraws),float)
perm_data[:n1] = data1[:,I]
perm_data[n1:] = data2[:,I]
if vardata1 != None:
perm_vardata = np.zeros((n1+n2,ndraws),float)
perm_vardata[:n1] = vardata1[:,I]
perm_vardata[n1:] = vardata2[:,I]
else:
perm_data = np.zeros((ndraws,n1+n2),float)
perm_data[:,:n1] = data1[I]
perm_data[:,n1:] = data2[I]
if vardata1 != None:
perm_vardata = np.zeros((ndraws, n1+n2),float)
perm_vardata[:,:n1] = vardata1[I]
perm_vardata[:,n1:] = vardata2[I]
rand_perm = np.array([np.random.permutation(np.arange(n1+n2)) for i in xrange(ndraws)]).transpose()
ravel_rand_perm = rand_perm*ndraws + np.arange(ndraws).reshape(1,ndraws)
if axis == 0:
perm_data = perm_data.ravel()[ravel_rand_perm.ravel()].reshape(n1+n2,ndraws)
if vardata1 != None:
perm_vardata = perm_vardata.ravel()[ravel_rand_perm.ravel()].reshape(n1+n2,ndraws)
else:
perm_data = (perm_data.transpose().ravel()[ravel_rand_perm.ravel()].reshape(n1+n2,ndraws)).transpose()
if vardata1 != None:
perm_vardata = (perm_vardata.transpose().ravel()[ravel_rand_perm.ravel()].reshape(n1+n2,ndraws)).transpose()
perm_data1 = perm_data[:n1]
perm_data2 = perm_data[n1:]
if vardata1 == None:
perm_vardata1 = None
perm_vardata2 = None
else:
perm_vardata1 = perm_vardata[:n1]
perm_vardata2 = perm_vardata[n1:]
self.random_Tvalues = twosample_stat(perm_data1, perm_vardata1, perm_data2, perm_vardata2, stat_id, axis).squeeze()
self.random_Tvalues.sort()
|
yarikoptic/NiPy-OLD
|
nipy/neurospin/group/permutation_test.py
|
Python
|
bsd-3-clause
| 34,611
|