text stringlengths 4 1.02M | meta dict |
|---|---|
from ColorSpace import *
class SCColorScheme:
    """Bundle of colors and an alpha used to skin a SpeedChat menu tree.

    Colors are (r, g, b) tuples.  Any color passed as None is derived from
    arrowColor/rolloverColor so the whole scheme stays visually consistent.
    Conversion helpers (rgb2hsv, hsv2rgb, rgb2yuv, yuv2rgb) come from the
    ColorSpace module.
    """

    def __init__(self, arrowColor=(0.5, 0.5, 1), rolloverColor=(0.53, 0.9, 0.53),
                 frameColor=None, pressedColor=None, menuHolderActiveColor=None,
                 emoteIconColor=None, textColor=(0, 0, 0),
                 emoteIconDisabledColor=(0.5, 0.5, 0.5),
                 textDisabledColor=(0.4, 0.4, 0.4), alpha=0.95):
        def scaleColor(color, s):
            # Scale perceived brightness only: convert to YUV and scale luma.
            y, u, v = rgb2yuv(*color)
            return yuv2rgb(y * s, u, v)

        def scaleIfNone(color, srcColor, s):
            # Use the explicit color when given; otherwise derive a dimmed
            # variant of srcColor.
            if color is not None:
                return color
            return scaleColor(srcColor, s)

        self.__arrowColor = arrowColor
        self.__rolloverColor = rolloverColor
        self.__frameColor = frameColor
        if self.__frameColor is None:
            # Derive the frame color from the arrow color by desaturating
            # twice (to 0.2*s, then to half of the result) for a muted tint.
            h, s, v = rgb2hsv(*arrowColor)
            self.__frameColor = hsv2rgb(h, 0.2 * s, v)
            h, s, v = rgb2hsv(*self.__frameColor)
            self.__frameColor = hsv2rgb(h, 0.5 * s, v)
        self.__pressedColor = scaleIfNone(pressedColor, self.__rolloverColor, 0.92)
        self.__menuHolderActiveColor = scaleIfNone(menuHolderActiveColor, self.__rolloverColor, 0.84)
        self.__emoteIconColor = emoteIconColor
        if self.__emoteIconColor is None:
            # Fully saturate and slightly darken the rollover color.
            h, s, v = rgb2hsv(*self.__rolloverColor)
            self.__emoteIconColor = hsv2rgb(h, 1.0, 0.8 * v)
        self.__emoteIconDisabledColor = emoteIconDisabledColor
        self.__textColor = textColor
        self.__textDisabledColor = textDisabledColor
        self.__alpha = alpha

    def getArrowColor(self):
        """Return the configured arrowColor."""
        return self.__arrowColor

    def getRolloverColor(self):
        """Return the configured rolloverColor."""
        return self.__rolloverColor

    def getFrameColor(self):
        """Return the configured (or derived) frameColor."""
        return self.__frameColor

    def getPressedColor(self):
        """Return the configured (or derived) pressedColor."""
        return self.__pressedColor

    def getMenuHolderActiveColor(self):
        """Return the configured (or derived) menuHolderActiveColor."""
        return self.__menuHolderActiveColor

    def getEmoteIconColor(self):
        """Return the configured (or derived) emoteIconColor."""
        return self.__emoteIconColor

    def getTextColor(self):
        """Return the configured textColor."""
        return self.__textColor

    def getEmoteIconDisabledColor(self):
        """Return the configured emoteIconDisabledColor."""
        return self.__emoteIconDisabledColor

    def getTextDisabledColor(self):
        """Return the configured textDisabledColor."""
        return self.__textDisabledColor

    def getAlpha(self):
        """Return the configured alpha."""
        return self.__alpha

    def __str__(self):
        """Return one 'name = value' line per member, newline separated."""
        members = ('arrowColor', 'rolloverColor', 'frameColor', 'pressedColor',
                   'menuHolderActiveColor', 'emoteIconColor', 'textColor',
                   'emoteIconDisabledColor', 'textDisabledColor', 'alpha')
        # Members are stored under their name-mangled private attribute names.
        prefix = '_%s__' % self.__class__.__name__
        return '\n'.join('%s = %s' % (member, self.__dict__[prefix + member])
                         for member in members)

    def __repr__(self):
        return str(self)
| {
"content_hash": "fbfd4ba8d3f6c7ed0441efbd22d0b413",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 295,
"avg_line_length": 35.53164556962025,
"alnum_prop": 0.6031350195938725,
"repo_name": "silly-wacky-3-town-toon/SOURCE-COD",
"id": "7b2050bdf41846c0c389947dcc89a77d865666d7",
"size": "2807",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "otp/speedchat/SCColorScheme.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10249"
},
{
"name": "C",
"bytes": "1752256"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "5485400"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "NSIS",
"bytes": "1009050"
},
{
"name": "Objective-C",
"bytes": "21821"
},
{
"name": "PLSQL",
"bytes": "10200"
},
{
"name": "Pascal",
"bytes": "4986"
},
{
"name": "Perl6",
"bytes": "30612"
},
{
"name": "Puppet",
"bytes": "259"
},
{
"name": "Python",
"bytes": "33566014"
},
{
"name": "Shell",
"bytes": "14642"
},
{
"name": "Tcl",
"bytes": "2084458"
}
],
"symlink_target": ""
} |
def func():
    """Placeholder function for the outputter tests; emits a fixed message."""
    message = "Not really here for a reason."
    print(message)
"content_hash": "0b6b05545f5833d9b4be66c4cf9dab59",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 42,
"avg_line_length": 27.5,
"alnum_prop": 0.6363636363636364,
"repo_name": "DonaldWhyte/module-dependency",
"id": "b52d2b7c7a2ca509544f3a0e34d4005287ff81f2",
"size": "55",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/outputters/no-class.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "141953"
}
],
"symlink_target": ""
} |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Algorithm")
from System import *
from QuantConnect import *
from QuantConnect.Orders import *
from QuantConnect.Algorithm import QCAlgorithm
### <summary>
### This algorithm demonstrates the runtime addition and removal of securities from your algorithm.
### With LEAN it is possible to add and remove securities after the initialization.
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="assets" />
### <meta name="tag" content="regression test" />
class AddRemoveSecurityRegressionAlgorithm(QCAlgorithm):
    """Regression algorithm exercising runtime addition and removal of securities."""

    def Initialize(self):
        """Set dates, cash, and the initial SPY subscription for the backtest."""
        self.SetStartDate(2013, 10, 7)    # Set Start Date
        self.SetEndDate(2013, 10, 11)     # Set End Date
        self.SetCash(100000)              # Set Strategy Cash
        # Find more symbols here: http://quantconnect.com/data
        self.AddEquity("SPY")
        self._lastAction = None

    def OnData(self, data):
        """Primary data entry point; acts at most once per day, stepping
        through an add / trade / remove cycle over the week."""
        last = self._lastAction
        if last is not None and last.date() == self.Time.date():
            return

        weekday = self.Time.weekday()
        if not self.Portfolio.Invested:
            self.SetHoldings("SPY", 0.5)
            self._lastAction = self.Time
        if weekday == 1:
            # weekday 1: subscribe to two more equities mid-backtest.
            self.AddEquity("AIG")
            self.AddEquity("BAC")
            self._lastAction = self.Time
        if weekday == 2:
            # weekday 2: allocate to the newly added equities.
            self.SetHoldings("AIG", 0.25)
            self.SetHoldings("BAC", 0.25)
            self._lastAction = self.Time
        if weekday == 3:
            # weekday 3: remove them again.
            self.RemoveSecurity("AIG")
            self.RemoveSecurity("BAC")
            self._lastAction = self.Time

    def OnOrderEvent(self, orderEvent):
        """Log submitted and filled orders for regression comparison."""
        status = orderEvent.Status
        if status == OrderStatus.Submitted:
            self.Debug("{0}: Submitted: {1}".format(self.Time, self.Transactions.GetOrderById(orderEvent.OrderId)))
        if status == OrderStatus.Filled:
            self.Debug("{0}: Filled: {1}".format(self.Time, self.Transactions.GetOrderById(orderEvent.OrderId)))
"content_hash": "666546159052e0c013360f49043f3d15",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 151,
"avg_line_length": 41.6986301369863,
"alnum_prop": 0.6823258869908015,
"repo_name": "AnshulYADAV007/Lean",
"id": "7bc5b1acdc276123e905405f5b08d33a214bbb88",
"size": "3046",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Algorithm.Python/AddRemoveSecurityRegressionAlgorithm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2540"
},
{
"name": "C#",
"bytes": "15378703"
},
{
"name": "Dockerfile",
"bytes": "1226"
},
{
"name": "F#",
"bytes": "1723"
},
{
"name": "HTML",
"bytes": "2607907"
},
{
"name": "Java",
"bytes": "852"
},
{
"name": "Jupyter Notebook",
"bytes": "16348"
},
{
"name": "Python",
"bytes": "654580"
},
{
"name": "Shell",
"bytes": "2307"
},
{
"name": "Visual Basic",
"bytes": "2448"
}
],
"symlink_target": ""
} |
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
    """Return the scenario consumed by the woodpecker test driver: an initial
    formation template plus an ordered list of actions to replay.
    """
    # NOTE(review): each entry is [action, target, *args]; ordering is
    # significant, so entries must not be reordered.
    return dict(initial_formation="template5", path_list=[
        [TestAction.create_mini_vm, 'vm1', 'data_volume=true'],
        [TestAction.create_volume, 'volume1', 'flag=scsi'],
        [TestAction.attach_volume, 'vm1', 'volume1'],
        [TestAction.create_volume_backup, 'volume1', 'volume1-backup1'],
        [TestAction.detach_volume, 'volume1'],
        [TestAction.create_mini_vm, 'vm2', 'flag=thick'],
        [TestAction.resize_data_volume, 'volume1', 5*1024*1024],
        [TestAction.attach_volume, 'vm2', 'volume1'],
        [TestAction.detach_volume, 'volume1'],
        [TestAction.create_mini_vm, 'vm3', 'network=random'],
        [TestAction.delete_volume, 'volume1'],
        [TestAction.add_image, 'image1', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'],
        [TestAction.delete_volume_backup, 'volume1-backup1'],
        # image1: delete -> recover -> delete -> expunge lifecycle.
        [TestAction.delete_image, 'image1'],
        [TestAction.recover_image, 'image1'],
        [TestAction.delete_image, 'image1'],
        [TestAction.expunge_image, 'image1'],
        [TestAction.create_vm_backup, 'vm1', 'vm1-backup2'],
        [TestAction.change_vm_ha, 'vm1'],
        [TestAction.resize_volume, 'vm3', 5*1024*1024],
        [TestAction.create_volume, 'volume2', 'flag=scsi'],
        [TestAction.attach_volume, 'vm1', 'volume2'],
        [TestAction.create_volume, 'volume3', 'flag=thin,scsi'],
        [TestAction.create_mini_vm, 'vm4', 'cpu=random'],
        [TestAction.add_image, 'image2', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'],
        [TestAction.delete_volume, 'volume3'],
        [TestAction.expunge_volume, 'volume3'],
        [TestAction.create_mini_vm, 'vm5', 'data_volume=true'],
        [TestAction.create_volume_backup, 'volume2', 'volume2-backup3'],
        [TestAction.resize_data_volume, 'volume2', 5*1024*1024],
        [TestAction.delete_image, 'image2'],
        [TestAction.delete_vm_backup, 'vm1-backup2'],
        [TestAction.add_image, 'image3', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'],
        [TestAction.detach_volume, 'volume2'],
    ])

'''
The final status:
Running:['vm1', 'vm2', 'vm3', 'vm4', 'vm5']
Stopped:[]
Enadbled:['volume2-backup3', 'image3']
attached:[]
Detached:['volume2']
Deleted:['volume1', 'volume1-backup1', 'vm1-backup2', 'image2']
Expunged:['volume3', 'image1']
Ha:['vm1']
Group:
'''
| {
"content_hash": "76f7a50e116d3820a7d6aeffb0a91df2",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 102,
"avg_line_length": 39.482142857142854,
"alnum_prop": 0.7132519222071461,
"repo_name": "zstackio/zstack-woodpecker",
"id": "ef836f3a9c0b42dce36a3d9f7de2055b81e694ec",
"size": "2211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "integrationtest/vm/mini/paths/path41.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2356"
},
{
"name": "Go",
"bytes": "49822"
},
{
"name": "Makefile",
"bytes": "687"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "13070596"
},
{
"name": "Shell",
"bytes": "177861"
}
],
"symlink_target": ""
} |
import pytest
from dcicutils.deployment_utils import CreateMappingOnDeployManager
from dcicutils.qa_utils import notice_pytest_fixtures
from snovault import COLLECTIONS, TYPES
from snovault.elasticsearch.create_mapping import type_mapping, run as run_create_mapping
from snovault.util import add_default_embeds
from unittest.mock import patch, MagicMock
from .datafixtures import ORDER
from .workbook_fixtures import workbook, app_settings, app
from ..commands import create_mapping_on_deploy
from ..commands.create_mapping_on_deploy import (
ITEM_INDEX_ORDER,
_run_create_mapping # noqa - yeah, it's internal but we want to test it
)
# TODO: We should not be importing *. Even stranger, PyCharm says we don't use anything from there. -kmp 14-Feb-2020
# Experimentally commenting this out. -kmp 28-Jun-2020
# from ..types.experiment import *
pytestmark = [pytest.mark.setone, pytest.mark.working]
# Using workbook inserts - required for test_run_create_mapping_with_upgrader
notice_pytest_fixtures(app_settings, app, workbook)
@pytest.mark.parametrize('item_type', ORDER)
def test_create_mapping(registry, item_type):
    """Check the mapping generated from each schema contains every default embed.

    No elasticsearch instance is used; only the generated mappings are tested.
    """
    mapping = type_mapping(registry[TYPES], item_type)
    assert mapping
    type_info = registry[TYPES].by_item_type[item_type]
    embeds = add_default_embeds(item_type, registry[TYPES],
                                type_info.embedded_list, type_info.schema)
    # Every embed path must resolve within the generated mapping.
    for embed in embeds:
        node = mapping
        parts = embed.split('.')
        last = len(parts) - 1
        for depth, part in enumerate(parts):
            if depth < last:
                # Intermediate levels must be objects with sub-properties.
                assert part in node['properties']
                node = node['properties'][part]
                continue
            # Final level may name a plain field or an object.
            if 'properties' in node and part in node['properties']:
                target = node['properties']
            else:
                target = node
            if part == '*':
                assert 'properties' in target or target.get('type') == 'object'
            else:
                assert part in target
def test_create_mapping_item_order(registry):
    """Every non-testing item type name must appear in the index ordering."""
    # "testing_" collections are deliberately excluded from the ordering.
    real_types = (t for t in registry[COLLECTIONS].by_item_type
                  if not t.startswith('testing_'))
    for i_type in real_types:
        assert registry[COLLECTIONS][i_type].type_info.name in ITEM_INDEX_ORDER
class MockedCommandArgs:
    """Stand-in for the argparse namespace handed to the create-mapping command."""

    def __init__(self, wipe_es=None, skip=None, strict=None, clear_queue=None):
        # Mirror each keyword argument onto an attribute of the same name.
        self.wipe_es, self.skip = wipe_es, skip
        self.strict, self.clear_queue = strict, clear_queue
class MockedLog:
    """Collects (level, message) pairs instead of writing to a real logger."""

    def __init__(self):
        self.log = []

    def _record(self, level, msg):
        # Shared sink for all log levels.
        self.log.append((level, msg))

    def info(self, msg):
        self._record('info', msg)

    def error(self, msg):
        self._record('error', msg)
# These next are more extensively tested in dcicutils.
# This is just plausibility checking that we've received things OK.
@patch('dcicutils.deployment_utils.compute_ff_prd_env', MagicMock(return_value='fourfront-production-green'))
@patch('encoded.commands.create_mapping_on_deploy.get_my_env', MagicMock(return_value='fourfront-production-blue'))
def test_get_deployment_config_staging():
    """get_deployment_config for the new staging case: proceed, wipe ES, strict."""
    env = create_mapping_on_deploy.get_my_env('ignored-for-mock')
    assert env == 'fourfront-production-blue'  # sanity: the mock is in effect
    settings = CreateMappingOnDeployManager.get_deploy_config(
        env=env, args=MockedCommandArgs(), log=MockedLog())
    assert settings['ENV_NAME'] == env
    assert settings['SKIP'] is False
    assert settings['WIPE_ES'] is True
    assert settings['STRICT'] is True
@patch('dcicutils.deployment_utils.compute_ff_prd_env', MagicMock(return_value='fourfront-production-green'))
@patch('encoded.commands.create_mapping_on_deploy.get_my_env', MagicMock(return_value='fourfront-production-green'))
def test_get_deployment_config_prod():
    """get_deployment_config for the new production case: always proceed, gently."""
    env = create_mapping_on_deploy.get_my_env('ignored-for-mock')
    assert env == 'fourfront-production-green'  # sanity: the mock is in effect
    settings = CreateMappingOnDeployManager.get_deploy_config(
        env=env, args=MockedCommandArgs(), log=MockedLog())
    assert settings['ENV_NAME'] == env
    assert settings['SKIP'] is False
    assert settings['WIPE_ES'] is False
    assert settings['STRICT'] is False
@patch('dcicutils.deployment_utils.compute_ff_prd_env', MagicMock(return_value='fourfront-green'))
@patch('encoded.commands.create_mapping_on_deploy.get_my_env', MagicMock(return_value='fourfront-hotseat'))
def test_get_deployment_config_hotseat():
    """get_deployment_config for hotseat in a new-style ecosystem: skip entirely."""
    env = create_mapping_on_deploy.get_my_env('ignored-for-mock')
    assert env == 'fourfront-hotseat'  # sanity: the mock is in effect
    settings = CreateMappingOnDeployManager.get_deploy_config(
        env=env, args=MockedCommandArgs(), log=MockedLog())
    assert settings['ENV_NAME'] == env
    # With SKIP set, the WIPE_ES and STRICT values are irrelevant.
    assert settings['SKIP'] is True
@patch('dcicutils.deployment_utils.compute_ff_prd_env', MagicMock(return_value='fourfront-green'))
@patch('encoded.commands.create_mapping_on_deploy.get_my_env', MagicMock(return_value='fourfront-mastertest'))
def test_get_deployment_config_mastertest():
    """get_deployment_config for mastertest: proceed, wipe ES, non-strict."""
    env = create_mapping_on_deploy.get_my_env('ignored-for-mock')
    assert env == 'fourfront-mastertest'  # sanity: the mock is in effect
    settings = CreateMappingOnDeployManager.get_deploy_config(
        env=env, args=MockedCommandArgs(), log=MockedLog())
    assert settings['ENV_NAME'] == env
    assert settings['SKIP'] is False
    assert settings['WIPE_ES'] is True
    assert settings['STRICT'] is False
class Simulation:
    """Records whether run_create_mapping was called and with what options."""

    def __init__(self, mocked_app, expect_check_first=False, expect_purge_queue=False, expect_strict=False):
        self.run_has_been_called = False
        self.mocked_app = mocked_app
        self.expect_check_first = expect_check_first
        self.expect_purge_queue = expect_purge_queue
        self.expect_strict = expect_strict

    def __str__(self):
        status = "CALLED" if self.run_has_been_called else "UNCALLED"
        return (f"<{self.__class__.__name__} run {status} expecting "
                f"cf={self.expect_check_first} pq={self.expect_purge_queue} "
                f"es={self.expect_strict} {id(self)}>")

    def __repr__(self):
        return str(self)

    def mocked_run_create_mapping(self, app, check_first=False, strict=False, purge_queue=False, item_order=None,
                                  **kwargs):
        """Drop-in replacement for run_create_mapping that asserts expectations."""
        self.run_has_been_called = True
        assert kwargs == {}, "mocked_run_create_mapping needs adjusting. It doesn't expect these keywords: %s" % kwargs
        assert app == self.mocked_app, "Mocked app was not as expected: %s" % app
        # check_first is (not WIPE_ES)
        assert check_first is self.expect_check_first, "check_first is not False: %s" % check_first
        # purge_queue is whether --clear-queue was in command args
        assert bool(purge_queue) is self.expect_purge_queue, (
            "bool(purge_queue) is not False. purge_queue=%s" % purge_queue)
        # This should be a constant for our purposes
        assert item_order == ITEM_INDEX_ORDER, "item_order was not as expected: %s" % item_order
        # strict is the STRICT argument
        assert strict is self.expect_strict, "strict is not False: %s" % strict
# @patch('encoded.commands.create_mapping_on_deploy.log', MockedLog())
# @patch('dcicutils.deployment_utils.compute_ff_prd_env', MagicMock(return_value='fourfront-production-green'))
# @patch('encoded.commands.create_mapping_on_deploy.get_my_env', MagicMock(return_value='fourfront-production-green'))
# @patch('encoded.commands.create_mapping_on_deploy.run_create_mapping')
# def test_run_create_mapping_production(mock_run_create_mapping, app):
#
# simulation = Simulation(mocked_app=app) # Expectations don't matter because we're not expecting to get called.
# mocked_log = create_mapping_on_deploy.log
# try:
# mock_run_create_mapping.side_effect = simulation.mocked_run_create_mapping
# _run_create_mapping(app, MockedCommandArgs())
# except SystemExit as e:
# print(e)
# assert e.code == 1
# assert simulation.run_has_been_called is True # in the new setup if this is run we mean it
# assert mocked_log.log == [
# ('info', 'Environment fourfront-production-green is currently the production environment.'
# ' Something is definitely wrong. We never deploy there, we always CNAME swap.'
# ' This deploy cannot proceed. DeploymentFailure will be raised.'),
# ('error', 'Exception encountered while gathering deployment information or running create_mapping'),
# ('error', 'DeploymentFailure: Tried to run create_mapping_on_deploy on production.'),
# ]
# @patch('encoded.commands.create_mapping_on_deploy.log', MockedLog())
# @patch('dcicutils.deployment_utils.compute_ff_prd_env', MagicMock(return_value='fourfront-production-green'))
# @patch('encoded.commands.create_mapping_on_deploy.get_my_env', MagicMock(return_value='fourfront-production-blue'))
# @patch('encoded.commands.create_mapping_on_deploy.run_create_mapping')
# def test_run_create_mapping_staging(mock_run_create_mapping, app):
#
# simulation = Simulation(mocked_app=app, expect_check_first=False, expect_purge_queue=False, expect_strict=True)
# mocked_log = create_mapping_on_deploy.log
# exit_condition = None
# try:
# mock_run_create_mapping.side_effect = simulation.mocked_run_create_mapping
# _run_create_mapping(app, MockedCommandArgs())
# except SystemExit as e:
# exit_condition = e
# print(exit_condition)
# except Exception as e:
# print("log =", mocked_log.log)
# raise AssertionError("Unexpected error exit (%s): %s" % (e.__class__.__name__, e))
# assert simulation.run_has_been_called is True
# assert mocked_log.log == [
# ('info', 'Calling run_create_mapping for env fourfront-production-blue.')
# ]
# assert exit_condition, "Unexpected non-error exit."
# assert exit_condition.code == 0
#
#
# @patch('encoded.commands.create_mapping_on_deploy.log', MockedLog())
# @patch('dcicutils.deployment_utils.compute_ff_prd_env', MagicMock(return_value='fourfront-production-green'))
# @patch('encoded.commands.create_mapping_on_deploy.get_my_env', MagicMock(return_value='fourfront-hotseat'))
# @patch('encoded.commands.create_mapping_on_deploy.run_create_mapping')
# def test_run_create_mapping_hotseat(mock_run_create_mapping, app):
#
# simulation = Simulation(mocked_app=app) # Expectations don't matter because we're not expecting to get called.
# mocked_log = create_mapping_on_deploy.log
# try:
# mock_run_create_mapping.side_effect = simulation.mocked_run_create_mapping
# _run_create_mapping(app, MockedCommandArgs())
# except SystemExit as e:
# print(e)
# assert e.code == 0
# assert simulation.run_has_been_called is False
# assert mocked_log.log == [
# ('info', 'Environment fourfront-hotseat is a hotseat test environment. Processing mode: SKIP'),
# ('info', 'NOT calling run_create_mapping for env fourfront-hotseat.')
# ]
#
#
# @patch('encoded.commands.create_mapping_on_deploy.log', MockedLog())
# @patch('dcicutils.deployment_utils.compute_ff_prd_env', MagicMock(return_value='fourfront-production-green'))
# @patch('encoded.commands.create_mapping_on_deploy.get_my_env', MagicMock(return_value='fourfront-mastertest'))
# @patch('encoded.commands.create_mapping_on_deploy.run_create_mapping')
# def test_run_create_mapping_mastertest(mock_run_create_mapping, app):
#
# simulation = Simulation(mocked_app=app, expect_check_first=False, expect_purge_queue=False, expect_strict=False)
# mocked_log = create_mapping_on_deploy.log
# try:
# mock_run_create_mapping.side_effect = simulation.mocked_run_create_mapping
# _run_create_mapping(app, MockedCommandArgs())
# except SystemExit as e:
# print(e)
# assert e.code == 0
# assert simulation.run_has_been_called is True
# assert mocked_log.log == [
# ('info', 'Environment fourfront-mastertest is a non-hotseat test environment. Processing mode: WIPE_ES'),
# ('info', 'Calling run_create_mapping for env fourfront-mastertest.')
# ]
#
#
# @patch('encoded.commands.create_mapping_on_deploy.log', MockedLog())
# @patch('dcicutils.deployment_utils.compute_ff_prd_env', MagicMock(return_value='fourfront-production-green'))
# @patch('encoded.commands.create_mapping_on_deploy.get_my_env', MagicMock(return_value='fourfront-mastertest'))
# @patch('encoded.commands.create_mapping_on_deploy.run_create_mapping')
# def test_run_create_mapping_mastertest_with_clear_queue(mock_run_create_mapping, app):
#
# simulation = Simulation(mocked_app=app, expect_check_first=False, expect_purge_queue=True, expect_strict=False)
# mocked_log = create_mapping_on_deploy.log
# try:
# mock_run_create_mapping.side_effect = simulation.mocked_run_create_mapping
# _run_create_mapping(app, MockedCommandArgs(clear_queue=True))
# except SystemExit as e:
# print(e)
# assert e.code == 0
# assert simulation.run_has_been_called is True
# assert mocked_log.log == [
# ('info', 'Environment fourfront-mastertest is a non-hotseat test environment. Processing mode: WIPE_ES'),
# ('info', 'Calling run_create_mapping for env fourfront-mastertest.')
# ]
@patch("snovault.elasticsearch.indexer_queue.QueueManager.add_uuids")
def test_run_create_mapping_with_upgrader(mock_add_uuids, testapp, workbook):
    """
    Test for catching items in need of upgrading when running create_mapping.

    The indexer queue method is mocked so we can inspect the exact uuids
    queued without actually indexing or upgrading anything.
    """
    app = testapp.app
    item_type = "Biosample"
    query = "/search/?type=" + item_type + "&frame=object"
    hits = testapp.get(query, status=200).json["@graph"]
    expected_uuids = sorted(item["uuid"] for item in hits)

    # No schema version change, so nothing needs indexing.
    run_create_mapping(app, check_first=True)
    (_, queued_uuids), _ = mock_add_uuids.call_args
    assert not queued_uuids

    # Bump the schema version in the registry so every posted item of this
    # type "needs" an upgrade.
    registry_schema = app.registry[TYPES][item_type].schema
    version_property = registry_schema["properties"]["schema_version"]
    original_version = version_property["default"]
    version_property["default"] = str(int(original_version) + 1)
    run_create_mapping(app, check_first=True)
    (_, queued_uuids), _ = mock_add_uuids.call_args
    assert sorted(queued_uuids) == expected_uuids

    # Restore the schema version so other tests see the original registry.
    version_property["default"] = original_version
| {
"content_hash": "980e445164c49b725fb0b35bf4aefc2f",
"timestamp": "",
"source": "github",
"line_count": 320,
"max_line_length": 119,
"avg_line_length": 48.971875,
"alnum_prop": 0.6886605832429328,
"repo_name": "4dn-dcic/fourfront",
"id": "6653c1df0fa1b06601d193c62eac11649e1757aa",
"size": "15671",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/encoded/tests/test_create_mapping.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Common Workflow Language",
"bytes": "15818"
},
{
"name": "Dockerfile",
"bytes": "6312"
},
{
"name": "HTML",
"bytes": "11048"
},
{
"name": "JavaScript",
"bytes": "2106661"
},
{
"name": "Makefile",
"bytes": "9079"
},
{
"name": "PLpgSQL",
"bytes": "12067"
},
{
"name": "Python",
"bytes": "1758496"
},
{
"name": "SCSS",
"bytes": "224522"
},
{
"name": "Shell",
"bytes": "19014"
}
],
"symlink_target": ""
} |
import logging
logging.basicConfig(level=logging.DEBUG)
import nengo
import nengo_spinnaker
import numpy as np
def test_probe_passnodes():
    """Test that pass nodes are left on SpiNNaker and that they may be probed.
    """
    class ValueReceiver(object):
        """Callable sink that records every (t, x) sample it receives."""
        def __init__(self):
            self.ts = list()
            self.values = list()

        def __call__(self, t, x):
            self.ts.append(t)
            self.values.append(x[:])  # copy, since x may be reused by the caller

    with nengo.Network("Test Network") as net:
        # Create an input Node which is a function of time only
        input_node = nengo.Node(lambda t: -0.33 if t < 1.0 else 0.10,
                                label="my input")

        # 3D ensemble array to represent this value
        ens = nengo.networks.EnsembleArray(500, 3, label="reps")

        # Pipe the input to the array and probe the output of the array
        nengo.Connection(input_node, ens.input,
                         transform=[[1.0], [0.0], [-1.0]])
        p_ens = nengo.Probe(ens.output, synapse=0.05)

        # Also add a node connected to the end of the ensemble array to ensure
        # that multiple things correctly receive values from the filter.
        receiver = ValueReceiver()
        n_receiver = nengo.Node(receiver, size_in=3)
        nengo.Connection(ens.output, n_receiver, synapse=0.05)

    # Mark the input Node as being a function of time
    nengo_spinnaker.add_spinnaker_params(net.config)
    net.config[input_node].function_of_time = True

    # Create the simulator and simulate
    sim = nengo_spinnaker.Simulator(net)

    # Run the simulation for long enough to ensure that the decoded value is
    # within +/-20% of the input value.
    with sim:
        sim.run(2.0)

    # Check that the values are decoded as expected.
    # BUG FIX: these values are used as array slice indices, so they must be
    # ints -- 1.0 / sim.dt is a float, and float indices are rejected by
    # modern NumPy.
    index10 = int(p_ens.synapse.tau * 3 / sim.dt)
    index11 = int(1.0 / sim.dt)
    index20 = index11 + index10

    data = sim.data[p_ens]
    assert (np.all(-0.25 >= data[index10:index11, 0]) and
            np.all(-0.40 <= data[index10:index11, 0]) and
            np.all(+0.05 <= data[index20:, 0]) and
            np.all(+0.15 >= data[index20:, 0]))
    assert np.all(-0.05 <= data[:, 1]) and np.all(+0.05 >= data[:, 1])
    assert (np.all(+0.25 <= data[index10:index11, 2]) and
            np.all(+0.40 >= data[index10:index11, 2]) and
            np.all(-0.05 >= data[index20:, 2]) and
            np.all(-0.15 <= data[index20:, 2]))

    # Check that values came into the node correctly
    assert +0.05 <= receiver.values[-1][0] <= +0.15
    assert -0.05 >= receiver.values[-1][2] >= -0.15


if __name__ == "__main__":
    test_probe_passnodes()
| {
"content_hash": "698ec63949cd4e3361fe5310041f87bf",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 78,
"avg_line_length": 36.12328767123287,
"alnum_prop": 0.59613196814562,
"repo_name": "project-rig/nengo_spinnaker",
"id": "27b96e596156651f3309cc3267eb81220c03094c",
"size": "2637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "regression-tests/test_passnodes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "156127"
},
{
"name": "C++",
"bytes": "4428"
},
{
"name": "Makefile",
"bytes": "3057"
},
{
"name": "Python",
"bytes": "609080"
}
],
"symlink_target": ""
} |
class Number:
    """A float wrapper offering a precomputed factorial and basic arithmetic."""

    # Maps a calculator() operation name to (display symbol, implementation).
    # A real function table is used instead of eval() because eval() on a
    # built string can execute arbitrary code.
    _OPERATIONS = {
        'times': ('*', lambda a, b: a * b),
        'plus': ('+', lambda a, b: a + b),
        'divide': ('/', lambda a, b: a / b),
        'minus': ('-', lambda a, b: a - b),
    }

    def __init__(self, number):
        """The constructor of a Number.

        Stores *number* as a float and precomputes its factorial.
        Raises ValueError if *number* cannot be converted to float.
        """
        try:
            self.number = float(number)
        except ValueError as e:
            print(e)
            print("That's not a number!")
            raise ValueError  # Programatically raising an exception
        self.factorial = self.__factorial()

    def __str__(self):
        """__unicode__ in python 2.7"""
        return str(self.number)

    def __factorial(self):
        """Private method: Returns the factorial of the stored number.

        Computes number * (number - 1) * ... while the running value is >= 1;
        for values below 1 (including negatives) the result is 1.
        """
        result, value = 1, self.number
        while value >= 1:
            result *= value
            value -= 1
        return result

    @property
    def as_dict(self):
        """Returns the instance's attributes as a dictionary."""
        return self.__dict__

    def get_factorial(self):
        """Prints the factorial of the stored number."""
        print("The factorial of {} is {}".format(
            self.number, self.__factorial()))

    def calculator(self, operation, another_number):
        """Performs a basic arithmetic operation and returns the result.

        *operation* is one of 'times', 'plus', 'divide' or 'minus'.  The
        expression is printed before being evaluated.  Raises ValueError if
        *another_number* cannot be converted to float (previously the error
        was swallowed and garbage was evaluated); prints a message and
        returns None for an unknown operation.
        """
        try:
            another_number = float(another_number)
        except ValueError:
            print("That's not a number!")
            raise ValueError  # consistent with __init__: fail loudly
        if operation in self._OPERATIONS:
            symbol, func = self._OPERATIONS[operation]
            expression = '%s%s%s' % (self.number, symbol, another_number)
            print(expression)
            return func(self.number, another_number)
        else:
            print("It's not possible to perform that operation")
| {
"content_hash": "199e08d4e0fbf922a0263ab6e74c8479",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 76,
"avg_line_length": 31.693548387096776,
"alnum_prop": 0.5022900763358779,
"repo_name": "razeone/data-storage",
"id": "5493b432595091a44118253f9287e3ebf747203e",
"size": "1965",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python_scripts/Python_01/number.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11932"
}
],
"symlink_target": ""
} |
import pecan
import wsme
from wsme import types as wtypes
from tuskar.api.controllers.v1.types.base import Base
from tuskar.api.controllers.v1.types.link import Link
from tuskar.api.controllers.v1.types.relation import Relation
from tuskar.api.controllers.v1.types.chassis import Chassis
from tuskar.api.controllers.v1.types.node import Node
from tuskar.api.controllers.v1.types.capacity import Capacity
class Rack(Base):
    """A representation of Rack in HTTP body."""

    # Primary identifier of the rack.
    id = int
    # Human-readable rack name.
    name = wtypes.text
    # Number of slots -- presumably node slots in the rack; confirm with model.
    slots = int
    # Network subnet assigned to the rack (text form).
    subnet = wtypes.text
    # Free-form physical location description.
    location = wtypes.text
    # Current rack state (text; valid values defined elsewhere).
    state = wtypes.text
    # Associated Ironic chassis (empty Chassis when none).
    chassis = Chassis
    # Capacity entries (name/value/unit triples).
    capacities = [Capacity]
    # Nodes currently belonging to the rack.
    nodes = [Node]
    # Hypermedia links for this resource.
    links = [Link]
    # Relation to the owning resource class, when any.
    resource_class = Relation
@classmethod
def convert_with_links(self, rack, links):
kwargs = rack.as_dict() # returns a new dict, overwriting keys is safe
if rack.chassis_id:
kwargs['chassis'] = Chassis(id=rack.chassis_id,
links=[Link.build_ironic_link('chassis',
rack.chassis_id)])
else:
kwargs['chassis'] = Chassis()
if rack.resource_class_id:
l = [Link.build('self', pecan.request.host_url, 'resource_classes',
rack.resource_class_id)]
kwargs['resource_class'] = Relation(id=rack.resource_class_id,
links=l)
kwargs['capacities'] = [Capacity(name=c.name, value=c.value,
unit=c.unit)
for c in rack.capacities]
kwargs['nodes'] = [Node(id=n.node_id,
links=[Link.build_ironic_link('node', n.node_id)])
for n in rack.nodes]
return Rack(links=links, **kwargs)
@classmethod
def convert(self, rack, base_url, minimal=False):
links = [Link.build('self', pecan.request.host_url, 'rack',
rack.id)]
if minimal:
return Rack(links=links, id=str(rack.id)) | {
"content_hash": "75fad683b5069b7d7fdfc5ad659256f5",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 88,
"avg_line_length": 36.57627118644068,
"alnum_prop": 0.5532900834105653,
"repo_name": "tuskar/tuskar",
"id": "afa8090c92d64572cbe038caeb550d870d6e55fa",
"size": "2776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tuskar/api/controllers/v1/types/rack.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "578219"
}
],
"symlink_target": ""
} |
"""C Vulkan generator
# How it works? C vulkan module is generated following these steps:
- Download vk.xml or get it locally
- Pass it to xmltodict to get a python dict from it
- Generate a custom model for each part of the binding
- Pass the model to the jinja2 template engine
- Generate the final c module from within template
We import vulkan.h and vulkan_plateform.h instead of generate it.
# Data model
The model must allow template to generate structs, functions,
dynamic function, dynamic struct, callback functions, constants...
Model design will be described in followings sections.
## Constants
Constants are very basic and can be of type int or string.
model['constants'] = [{
'name': 'name'
'value': 'value',
'type': 'int'
]
## Structs and Unions
There are several ways to do it:
- We could copy the Vulkan struct and for each one, create a python
wrapper struct containing a pointer to the vulkan struct.
- We could create only Python struct, which is good for memory allocation
and simplify the conversion C <> Python but we have a problem with this:
Vulkan functions wait for Vulkan C structs whereas Python structs contain
a Python Header. How can we pass Python structs to Vulkan functions...
The only way I can think of is to update the pointer reference.
We have to create a function which transform a Python struct into a
Vulkan struct. Basically, we create a pointer void onto the first Vulkan
property (sType for a create). We also have to update all pointers into
this Vulkan struct.
After thinking, this solution is a pain in the ass so I will stick with the
obvious first solution.
Here the struct model:
model['structs'] = [{
'name': 'name',
'define': 'DEFINE_MUST_EXIST',
'union': False,
'return_only': False,
'members': [{
'name': 'name',
'type': 'type []',
'raw_type': 'type' -> Type without #text
'null': False,
'enum': MAX_ARRAY -> Size of array with fixed length,
'len': 'member', -> Name of the member for the array length
'force_array': False -> Used when a pointer is an array of VK types
}]
}]
## Functions
There are two types of function in cvulkan, dynamically linked functions
into the SDK and extensions functions.
Functions vkGetInstanceProcAddr and vkGetDeviceProcAddr are custom so not here!
Here the model:
model['functions'] = [{
'name': 'name',
'define': 'DEFINE_MUST_EXIST',
'arg': 'METH_VARARGS | METH_KEYWORDS', -> python parameter
'custom': False, -> written directly in template
'allocate': True, -> create one object
'count': True, -> create several objects
'return_type': 'type', -> type returned by the function
'return_boolean': True, -> True if the function return a vkbool
'return_member': {
'name': 'name',
'type': 'type without #text',
'handle': False,
'enum': False,
'struct': True,
'static_count': {
'key': 'name', -> Name of the member if count is static
'value': 'name' -> Name of the property if count is static
}
},
'members': [{
'name': 'name',
'type': 'type', -> raw_type + #text
'null': False,
'force_array': False, -> This member is a list
'to_create': False -> this value must be created(count or allocate)
}]
}]
## Extension functions
Extension functions are loaded dynamically with vkGetInstanceProcAddr or
vkGetDeviceProcAddr.
To allow this, we create a new Type for each extension.
This type takes a function pointer as an argument (PyCapsule).
We make this type callable to be treated as a function although it's a type.
model['extension_functions'] = like model['function']
## Custom functions
Custom functions are written directly in C in the template.
We create an array in the model to declare them in python.
model['custom_functions'] = ['f1', 'f2']
## Custom structs
Custom structs are written directly in C in the template.
We create an array in the model to declare them in python.
model['custom_structs'] = ['f1', 'f2']
## Macro functions
Macros are just custom functions.
model['macro_functions'] = ['f1', 'f2']
## Exceptions
Exceptions are created based on the name in the VkResult enum:
model['exceptions'] = {
'exceptionName': value
}
## Signatures
Signatures are used in converters.c to convert Python to Vulkan type
model['signatures']
"""
import jinja2
import os
import requests
import xmltodict
from cvulkan import jfilter
VULKAN_PLATEFORM_URL = ('http://raw.githubusercontent.com/KhronosGroup/'
'Vulkan-Docs/1.0/src/vulkan/vk_platform.h')
VULKAN_H_URL = ('http://raw.githubusercontent.com/KhronosGroup/'
'Vulkan-Docs/1.0/src/vulkan/vulkan.h')
VK_XML_URL = ('http://raw.githubusercontent.com/KhronosGroup/'
'Vulkan-Docs/1.0/src/spec/vk.xml')
PATH = os.path.dirname(os.path.abspath(__file__))
PATH_TEMPLATE = os.path.join(PATH, 'template')
DEFAULT_OUT_FILE = os.path.join(PATH, 'vulkanmodule.c')
OUT_VULKAN_H = os.path.join(PATH, 'cache_vulkan.h')
OUT_VULKAN_PLATEFORM = os.path.join(PATH, 'cache_vk_plateform.h')
CACHE_MAPPING = {
VULKAN_PLATEFORM_URL: 'cache_vk_plateform.h',
VULKAN_H_URL: 'cache_vulkan.h',
VK_XML_URL: 'cache_vk.xml'
}
MAPPING_EXTENSION_DEFINE = {
'VkAndroidSurfaceCreateInfoKHR': 'VK_USE_PLATFORM_ANDROID_KHR',
'VkMirSurfaceCreateInfoKHR': 'VK_USE_PLATFORM_MIR_KHR',
'VkMirSurfaceCreateFlagsKHR': 'VK_USE_PLATFORM_MIR_KHR',
'MirConnection': 'VK_USE_PLATFORM_MIR_KHR',
'MirSurface': 'VK_USE_PLATFORM_MIR_KHR',
'VkWaylandSurfaceCreateInfoKHR': 'VK_USE_PLATFORM_WAYLAND_KHR',
'HANDLE': 'VK_USE_PLATFORM_WIN32_KHR',
'HWND': 'VK_USE_PLATFORM_WIN32_KHR',
'HINSTANCE': 'VK_USE_PLATFORM_WIN32_KHR',
'SECURITY_ATTRIBUTES': 'VK_USE_PLATFORM_WIN32_KHR',
'DWORD': 'VK_USE_PLATFORM_WIN32_KHR',
'VkWin32SurfaceCreateInfoKHR': 'VK_USE_PLATFORM_WIN32_KHR',
'VkWin32SurfaceCreateFlagsKHR': 'VK_USE_PLATFORM_WIN32_KHR',
'VkImportMemoryWin32HandleInfoNV': 'VK_USE_PLATFORM_WIN32_KHR',
'VkExportMemoryWin32HandleInfoNV': 'VK_USE_PLATFORM_WIN32_KHR',
'VkWin32KeyedMutexAcquireReleaseInfoNV': 'VK_USE_PLATFORM_WIN32_KHR',
'VkXcbSurfaceCreateInfoKHR': 'VK_USE_PLATFORM_XCB_KHR',
'VkXlibSurfaceCreateInfoKHR': 'VK_USE_PLATFORM_XLIB_KHR',
'VkRect3D': 'hackdefine', # VkRect3D is not used
'vkCreateAndroidSurfaceKHR': 'VK_USE_PLATFORM_ANDROID_KHR',
'VkAndroidSurfaceCreateFlagsKHR': 'VK_USE_PLATFORM_ANDROID_KHR',
'ANativeWindow': 'VK_USE_PLATFORM_ANDROID_KHR',
'vkCreateMirSurfaceKHR': 'VK_USE_PLATFORM_MIR_KHR',
'vkGetPhysicalDeviceMirPresentationSupportKHR': 'VK_USE_PLATFORM_MIR_KHR',
'vkCreateWaylandSurfaceKHR': 'VK_USE_PLATFORM_WAYLAND_KHR',
'vkGetPhysicalDeviceWaylandPresentationSupportKHR':
'VK_USE_PLATFORM_WAYLAND_KHR',
'vkCreateWin32SurfaceKHR': 'VK_USE_PLATFORM_WIN32_KHR',
'vkCreateXcbSurfaceKHR': 'VK_USE_PLATFORM_XCB_KHR',
'xcb_connection_t': 'VK_USE_PLATFORM_XCB_KHR',
'xcb_visualid_t': 'VK_USE_PLATFORM_XCB_KHR',
'xcb_window_t': 'VK_USE_PLATFORM_XCB_KHR',
'VisualID': 'VK_USE_PLATFORM_XCB_KHR',
'vkGetPhysicalDeviceXcbPresentationSupportKHR': 'VK_USE_PLATFORM_XCB_KHR',
'vkGetMemoryWin32HandleNV': 'VK_USE_PLATFORM_WIN32_KHR',
'vkGetPhysicalDeviceWin32PresentationSupportKHR':
'VK_USE_PLATFORM_WIN32_KHR',
'vkGetPhysicalDeviceXlibPresentationSupportKHR':
'VK_USE_PLATFORM_XLIB_KHR',
'Window': 'VK_USE_PLATFORM_XLIB_KHR',
'Display': 'VK_USE_PLATFORM_XLIB_KHR',
'vkCreateXlibSurfaceKHR': 'VK_USE_PLATFORM_XLIB_KHR',
'VkWaylandSurfaceCreateFlagsKHR': 'VK_USE_PLATFORM_WAYLAND_KHR'
}
CUSTOM_FUNCTIONS = ('vkGetInstanceProcAddr', 'vkGetDeviceProcAddr',
'vkMapMemory', 'vkGetPipelineCacheData')
CUSTOM_STRUCTS = ('VkDebugReportCallbackCreateInfoEXT',)
CUSTOM_CONSTANTS = {'VK_NULL_HANDLE': 0}
MACRO_FUNCTIONS = ('VK_MAKE_VERSION', 'VK_VERSION_MAJOR',
'VK_VERSION_MINOR', 'VK_VERSION_PATCH')
MACRO_PROPERTIES = ('VK_NULL_HANDLE', 'UINT64_MAX')
NULL_MEMBERS = ('pNext', 'pAllocator', 'pUserData')
def get_source(url):
    """Return the content of ``url``, using a local cache file when present.

    On a cache miss the resource is downloaded with requests and written
    to the cache file mapped in CACHE_MAPPING.
    """
    filename = os.path.join(PATH, CACHE_MAPPING[url])
    try:
        # Explicit utf-8: the Khronos specs are utf-8 and relying on the
        # platform default encoding can corrupt the cache on e.g. Windows.
        with open(filename, encoding='utf-8') as f:
            result = f.read()
    except FileNotFoundError:
        result = requests.get(url).text
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(result)
    return result
def init():
    """Create the cached header templates and return the parsed vk.xml dict."""
    def strip_platform_include_and_const(content):
        """Drop the vk_platform.h include and const qualifiers line by line."""
        kept = []
        for line in content.splitlines(True):
            if '#include "vk_platform.h"' in line:
                continue
            line = line.replace(' const ', ' ')
            kept.append(line.replace('const* ', '*'))
        return ''.join(kept)

    def write_template(filename, content):
        """Write content under the template directory."""
        target = os.path.join(PATH_TEMPLATE, filename)
        with open(target, 'w') as handle:
            handle.write(content)

    write_template('vk_plateform.h', get_source(VULKAN_PLATEFORM_URL))
    write_template('vulkan.h',
                   strip_platform_include_and_const(get_source(VULKAN_H_URL)))
    return xmltodict.parse(get_source(VK_XML_URL))
def get_enum_names(vk):
    """Return the set of names of every enum group in the registry."""
    names = set()
    for enum_group in vk['registry']['enums']:
        names.add(enum_group['@name'])
    return names
def get_handle_names(vk):
    """Return the set of type names whose category is 'handle'."""
    names = set()
    for type_entry in vk['registry']['types']['type']:
        if type_entry.get('@category') == 'handle':
            names.add(type_entry['name'])
    return names
def get_struct_names(vk):
    """Return the set of type names whose category is 'struct'."""
    names = set()
    for type_entry in vk['registry']['types']['type']:
        if type_entry.get('@category') == 'struct':
            names.add(type_entry['@name'])
    return names
def get_union_names(vk):
    """Return the set of type names whose category is 'union'."""
    names = set()
    for type_entry in vk['registry']['types']['type']:
        if type_entry.get('@category') == 'union':
            names.add(type_entry['name'])
    return names
def model_constants(vk, model):
    """Fill model['constants'] with every API constant from vk.xml.

    A constant may be expressed three ways in the registry:
    - @bitpos: a bit index, converted here to a hexadecimal flag value
    - @value: a literal; quoted values become 'str', anything else 'int'
    - @offset: an extension enumerant, converted with the registry's
      extension-numbering formula
    """
    model['constants'] = []

    def add_constant(constant, ext_number=0):
        if '@bitpos' in constant:
            bitpos = int(constant['@bitpos'], 0)
            model['constants'].append({
                'name': constant['@name'],
                'value': '0x%08x' % (1 << bitpos),
                'type': 'int'})
        elif '@value' in constant:
            value = constant['@value']
            # Quoted values are string constants; everything else is
            # exposed as an int expression.
            value_type = 'str' if value.startswith('"') else 'int'
            model['constants'].append({
                'name': constant['@name'],
                'value': value,
                'type': value_type})
        elif '@offset' in constant:
            # Registry formula: base + block_size * (ext_number - 1) + offset
            ext_base = 1000000000
            ext_block_size = 1000
            value = ext_base + (ext_number - 1) * ext_block_size
            value += int(constant['@offset'])
            model['constants'].append({
                'name': constant['@name'],
                'value': value,
                'type': 'int'})

    for enum in vk['registry']['enums']:
        # uniform: xmltodict yields a bare dict when there is one child
        if type(enum['enum']) is not list:
            enum['enum'] = [enum['enum']]
        for constant in enum['enum']:
            add_constant(constant)
    for extension in vk['registry']['extensions']['extension']:
        if type(extension['require']['enum']) is not list:
            extension['require']['enum'] = [extension['require']['enum']]
        for constant in extension['require']['enum']:
            add_constant(constant, int(extension['@number']))
    # Expose the API version macro as a plain constant as well.
    add_constant({'@name': 'VK_API_VERSION_1_0',
                  '@value': 'VK_API_VERSION_1_0'})
def model_structs(vk, model):
    """Fill model['structs'] from the struct and union types in vk.xml.

    Each entry follows the struct model documented in the module
    docstring. Structs in CUSTOM_STRUCTS are hand-written in the template
    and skipped here.
    """
    model['structs'] = []
    types = vk['registry']['types']['type']
    structs = [s for s in types if s.get('@category') == 'struct']
    unions = [u for u in types if u.get('@category') == 'union']
    # Treated as return-only even though the registry does not flag it.
    FORCE_RETURN_ONLY = ('VkAllocationCallbacks',)
    for struct in structs + unions:
        sname = struct['@name']
        if sname in CUSTOM_STRUCTS:
            continue
        members = []
        for member in struct['member']:
            type_name = member['type']
            if '#text' in member:
                # '#text' carries pointer/array decorations; const is
                # stripped because the generated code never needs it.
                text = member['#text'].replace('const ', '').strip()
                type_name += ' ' + text
            members.append({
                'name': member['name'],
                'type': type_name,
                'raw_type': member['type'],
                'enum': member.get('enum'),
                'null': member['name'] in NULL_MEMBERS,
                'len': member.get('@len'),
                'force_array': '@len' in member
            })
        return_only = bool(struct.get('@returnedonly')) or sname in FORCE_RETURN_ONLY
        model['structs'].append({
            'name': sname,
            'define': MAPPING_EXTENSION_DEFINE.get(sname),
            'members': members,
            'return_only': return_only,
            'union': struct in unions
        })
    model['custom_structs'] = CUSTOM_STRUCTS
def model_functions(vk, model):
    """Fill model['functions'] and model['extension_functions'] from vk.xml.

    Extension commands are loaded dynamically at runtime, so they are
    modeled separately. Commands in CUSTOM_FUNCTIONS are hand-written in
    the template and skipped here.
    """
    def get_vk_extension_functions():
        # Names of commands provided by extensions.
        names = set()
        for extension in vk['registry']['extensions']['extension']:
            if 'command' not in extension['require']:
                continue
            if type(extension['require']['command']) is not list:
                extension['require']['command'] = [
                    extension['require']['command']]
            for command in extension['require']['command']:
                names.add(command['@name'])
        return names

    def get_count_param(command):
        # The 'uint32_t*' parameter receiving an object count, if any.
        for param in command['param']:
            if param['type'] + param.get('#text', '') == 'uint32_t*':
                return param
        return None

    def format_member(member):
        type_name = member['type']
        if '#text' in member:
            text = member['#text'].replace('const ', '').strip()
            type_name += ' ' + text
        return {'name': member['name'],
                'type': type_name,
                'null': member['name'] in NULL_MEMBERS,
                'force_array': '@len' in member,
                'to_create': False}

    # Hoisted out of format_return_member: these name sets are invariant,
    # and recomputing them for every command made the loop quadratic.
    handle_names = get_handle_names(vk)
    enum_names = get_enum_names(vk)
    struct_names = get_struct_names(vk)

    def format_return_member(member):
        t = member['type']
        static_count = None
        # '@len' like 'pAllocateInfo::descriptorSetCount' means the count
        # comes from a member of another parameter.
        if '@len' in member and '::' in member['@len']:
            lens = member['@len'].split('::')
            static_count = {'key': lens[0], 'value': lens[1]}
        return {'name': member['name'],
                'type': t,
                'handle': t in handle_names,
                'enum': t in enum_names,
                'struct': t in struct_names,
                'static_count': static_count}

    ALLOCATE_PREFIX = ('vkCreate', 'vkGet', 'vkEnumerate', 'vkAllocate',
                       'vkMap', 'vkAcquire')
    ALLOCATE_EXCEPTION = ('vkGetFenceStatus', 'vkGetEventStatus',
                          'vkGetQueryPoolResults',
                          'vkGetPhysicalDeviceXlibPresentationSupportKHR')
    COUNT_EXCEPTION = ('vkAcquireNextImageKHR',)
    model['functions'] = []
    model['extension_functions'] = []
    functions = list(vk['registry']['commands']['command'])
    extension_function_names = get_vk_extension_functions()
    for function in functions:
        fname = function['proto']['name']
        ftype = function['proto']['type']
        if fname in CUSTOM_FUNCTIONS:
            continue
        if type(function['param']) is not list:
            function['param'] = [function['param']]
        count_param = get_count_param(function)
        if fname in COUNT_EXCEPTION:
            count_param = None
        is_allocate = any(fname.startswith(a) for a in ALLOCATE_PREFIX)
        is_count = is_allocate and count_param is not None
        if fname in ALLOCATE_EXCEPTION or ftype == 'VkBool32':
            is_allocate = is_count = False
        members = [format_member(member) for member in function['param']]
        return_member = None
        if is_allocate:
            # The last parameter receives the created object(s).
            return_member = format_return_member(function['param'][-1])
            members[-1]['to_create'] = True
        if is_count:
            # With a count, the second-to-last parameter receives it.
            members[-2]['to_create'] = True
        f = {
            'name': fname,
            'define': MAPPING_EXTENSION_DEFINE.get(fname),
            'members': members,
            'arg': 'METH_VARARGS | METH_KEYWORDS',
            'custom': fname in CUSTOM_FUNCTIONS,
            'allocate': is_allocate,
            'count': is_count,
            'return_boolean': ftype == 'VkBool32',
            'return_result': ftype == 'VkResult',
            'return_member': return_member
        }
        if fname not in extension_function_names:
            model['functions'].append(f)
        else:
            model['extension_functions'].append(f)
    # Add custom functions
    model['custom_functions'] = CUSTOM_FUNCTIONS
def model_macros(model):
    """Expose the hand-written macro functions and properties to the template."""
    model.update(
        macro_functions=MACRO_FUNCTIONS,
        macro_properties=MACRO_PROPERTIES,
    )
def get_signatures(vk):
    '''Return formatted signatures used in filters

    signatures = [{'raw':X, 'vkname':X, 'is_struct':X,
                   'is_union':X, 'is_handle':X}]
    '''
    names = set()
    types = vk['registry']['types']['type']
    structs = [s for s in types if s.get('@category') == 'struct']
    unions = [u for u in types if u.get('@category') == 'union']
    handles = {s['name'] for s in types if s.get('@category') == 'handle'}
    # Hoisted: these name lists were previously rebuilt inside the result
    # loop for every membership test, making the loop quadratic.
    struct_names = {s['@name'] for s in structs}
    union_names = {u['@name'] for u in unions}
    # Collect every distinct member/parameter type signature.
    for s in structs + unions:
        for m in s['member']:
            name = m['type']
            if '#text' in m:
                text = m['#text'].replace('const ', '').strip()
                name += ' ' + text
            names.add(name)
    for f in vk['registry']['commands']['command']:
        if type(f['param']) is not list:
            f['param'] = [f['param']]
        for p in f['param']:
            name = p['type']
            if '#text' in p:
                text = p['#text'].replace('const ', '').strip()
                name += ' ' + text
            names.add(name)
    result = []
    for name in names:
        # Skip PFN_* function pointers and non-Vulkan C types.
        if name.startswith('PFN'):
            continue
        if not name.startswith('Vk'):
            continue
        vkname = name.split()[0]
        result.append({
            'raw': name,
            'vkname': vkname,
            'is_struct': vkname in struct_names,
            'is_union': vkname in union_names,
            'is_handle': vkname in handles
        })
    return result
def converters_signatures(signatures):
    '''Return one entry per distinct Vulkan type name, for converters.c.'''
    seen_vknames = set()
    result = []
    for sig in signatures:
        vkname = sig['vkname']
        if vkname in seen_vknames:
            continue
        seen_vknames.add(vkname)
        result.append({
            'vkname': vkname,
            'is_struct': sig['is_struct'],
            'is_union': sig['is_union'],
            'is_handle': sig['is_handle'],
            'define': MAPPING_EXTENSION_DEFINE.get(vkname)
        })
    return result
def model_exceptions(vk, model):
    """Build model['exceptions'] from VkResult: one CamelCase entry per
    result code, except VK_SUCCESS."""
    model['exceptions'] = {}
    vk_result = next(group for group in vk['registry']['enums']
                     if group['@name'] == 'VkResult')
    for entry in vk_result['enum']:
        name = entry['@name']
        if name == 'VK_SUCCESS':
            continue
        # VK_NOT_READY -> VkNotReady
        camel_name = name.title().replace('_', '')
        model['exceptions'][camel_name] = entry['@value']
def get_called_converters(model):
    '''Create a list with all called converters
    That allow to write only used converters
    in converters.c
    '''
    called_converters = set()
    def go(s):
        # Return-only structs are never built from Python input, so no
        # Python-to-C converter is emitted for them.
        if s.get('return_only'):
            return
        members = jfilter.members_formated(s['members'])
        for m in members:
            called_converters.add(jfilter.detect_py_to_c(m))
    # Only structs carry a 'union' key (set in model_structs); function
    # entries never have it, so this skips union structs only.
    for f in (model['functions'] + model['extension_functions'] +
              model['structs']):
        if f.get('union'):
            continue
        go(f)
    return called_converters
def main():
    """Build the full model from vk.xml and render vulkanmodule.c."""
    model = {}
    vk = init()
    model_constants(vk, model)
    model_structs(vk, model)
    model_functions(vk, model)
    model_exceptions(vk, model)
    model_macros(model)
    env = jinja2.Environment(
        autoescape=False,
        # trim_blocks=True,
        # lstrip_blocks=True,
        loader=jinja2.FileSystemLoader(os.path.join(PATH, 'template')))
    # jfilter needs signatures
    signatures = get_signatures(vk)
    jfilter.vulkan_signatures = signatures
    model['signatures'] = converters_signatures(signatures)
    model['MAPPING_EXTENSION_DEFINE'] = MAPPING_EXTENSION_DEFINE
    # Must run after model_functions/model_structs have filled the model.
    model['called_converters'] = get_called_converters(model)
    # Register every public jfilter function as a jinja2 filter.
    env.filters.update({f: getattr(jfilter, f) for f in jfilter.__all__})
    with open(DEFAULT_OUT_FILE, 'w') as out:
        out.write(env.get_template('main.c').render(model=model))
if __name__ == '__main__':
    main()
| {
"content_hash": "22226f3b1c598e4ebf68009c5cb63817",
"timestamp": "",
"source": "github",
"line_count": 615,
"max_line_length": 79,
"avg_line_length": 34.90731707317073,
"alnum_prop": 0.5906931246506428,
"repo_name": "realitix/cvulkan",
"id": "e10cbd55be0c68d6d53df6170dafe1339a6272c7",
"size": "21468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cvulkan/vulkan_generator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2117252"
},
{
"name": "GLSL",
"bytes": "446"
},
{
"name": "PostScript",
"bytes": "6123"
},
{
"name": "Python",
"bytes": "70678"
}
],
"symlink_target": ""
} |
import codecs
import toml
class SettingsLoadStrategyToml:
    """
    This is the strategy used to read settings from toml files
    """
    name = 'toml'

    @staticmethod
    def is_valid_file(file_name):
        """Return True if *file_name* looks like a TOML file."""
        return file_name.endswith('.toml')

    @staticmethod
    def load_settings_file(settings_file):
        """Parse *settings_file* and return its contents as a dict."""
        # TOML files are UTF-8 by specification; be explicit rather than
        # relying on the platform default encoding.
        with codecs.open(settings_file, 'r', encoding='utf-8') as f:
            return toml.loads(f.read())
| {
"content_hash": "0f6dd85317ec5bb52425fb51844deddd",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 62,
"avg_line_length": 21.42105263157895,
"alnum_prop": 0.6388206388206388,
"repo_name": "drgarcia1986/simple-settings",
"id": "c4d1b8dcdaea0ef62aa9ef40f5044d67f9da552e",
"size": "407",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "simple_settings/strategies/toml_file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "748"
},
{
"name": "Python",
"bytes": "81291"
}
],
"symlink_target": ""
} |
"""
Utility module for retrieving a node's children.
"""
import inspect
# pylint: disable=C0302, W0703, C1801
from .Annotation import Annotation
from .Project import Project
from .Study import Study
from .Subject import Subject
from .Visit import Visit
from .Sample import Sample
from .WgsAssembledSeqSet import WgsAssembledSeqSet
from .WgsDnaPrep import WgsDnaPrep
from .WgsRawSeqSet import WgsRawSeqSet
from .SixteenSDnaPrep import SixteenSDnaPrep
from .SixteenSRawSeqSet import SixteenSRawSeqSet
from .SixteenSTrimmedSeqSet import SixteenSTrimmedSeqSet
from .MicrobiomeAssayPrep import MicrobiomeAssayPrep
from .HostAssayPrep import HostAssayPrep
from .HostSeqPrep import HostSeqPrep
# currently used in Base.children()
# __name__ attribute used to ensure that if the class or method name
# changes, the maintainer is forced to update it here, too.
# pylint: disable=C0330
dependency_methods = {
Project.__name__ : Project.studies.__name__,
Annotation.__name__ : Annotation.clustered_seq_sets.__name__,
HostAssayPrep.__name__ : HostAssayPrep.derivations.__name__,
HostSeqPrep.__name__ : HostSeqPrep.derivations.__name__,
MicrobiomeAssayPrep.__name__ : MicrobiomeAssayPrep.derivations.__name__,
Sample.__name__ : Sample.allChildren.__name__,
SixteenSDnaPrep.__name__ : SixteenSDnaPrep.raw_seq_sets.__name__,
SixteenSRawSeqSet.__name__ : SixteenSRawSeqSet.trimmed_seq_sets.__name__,
SixteenSTrimmedSeqSet.__name__ : SixteenSTrimmedSeqSet.abundance_matrices.__name__,
Study.__name__ : Study.subjects.__name__,
Subject.__name__ : Subject.derivations.__name__,
Visit.__name__ : Visit.samples.__name__,
WgsAssembledSeqSet.__name__ : WgsAssembledSeqSet.derivations.__name__,
WgsDnaPrep.__name__ : WgsDnaPrep.child_seq_sets.__name__,
WgsRawSeqSet.__name__ : WgsRawSeqSet.viral_seq_sets.__name__
}
# pylint: enable=C0330
def generator_flatten(gen):
    """Recursively flatten generators, lists and tuples into a flat stream.

    Items of any other type (including strings) are yielded unchanged.
    """
    for item in gen:
        # isinstance instead of an exact type() check so that list/tuple
        # subclasses are flattened as well.
        if inspect.isgenerator(item) or isinstance(item, (list, tuple)):
            for value in generator_flatten(item):
                yield value
        else:
            yield item
| {
"content_hash": "cb4487197e230c8511c2220311844823",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 87,
"avg_line_length": 41.836363636363636,
"alnum_prop": 0.6784006953498479,
"repo_name": "ihmpdcc/cutlass",
"id": "1c512ce100edf1a0ad8a474e57f3b88aede40249",
"size": "2301",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cutlass/dependency.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1322642"
}
],
"symlink_target": ""
} |
import copy
from .elements import (RightAlignedClauseElement, LeftAlignedClauseElement,
NotRightAlignedClauseElement, NotLeftAlignedClauseElement)
from .attributes import (HierarchicalAnnotation)
from .results import QueryResults
from polyglotdb.exceptions import GraphQueryError
from ..base import BaseQuery
def base_stop_check():
    """Default stop check: never request cancellation."""
    return False
class GraphQuery(BaseQuery):
    """
    Base GraphQuery class.
    Extend this class to implement more advanced query functions.

    Parameters
    ----------
    corpus : :class:`~polyglotdb.corpus.CorpusContext`
        The corpus to query
    to_find : :class:`~polyglotdb.query.annotations.attributes.AnnotationNode`
        Name of the annotation type to search for
    """
    # Every attribute that defines a query's state; SplitQuery.base_query
    # copies each of these when cloning a query.
    _parameters = ['_criterion', '_columns', '_order_by', '_aggregate',
                   '_preload', '_set_labels', '_remove_labels',
                   '_set_properties', '_delete', '_limit',
                   '_cache', '_acoustic_columns', '_offset', '_preload_acoustics']
    # Cypher fragment that marks an annotation as a pause and rewires its
    # precedes relationships into precedes_pause ones on both sides.
    set_pause_template = '''SET {alias} :pause, {type_alias} :pause_type
        REMOVE {alias}:speech
        WITH {alias}
        OPTIONAL MATCH (prec)-[r1:precedes]->({alias})
        FOREACH (o IN CASE WHEN prec IS NOT NULL THEN [prec] ELSE [] END |
          CREATE (prec)-[:precedes_pause]->({alias})
        )
        DELETE r1
        WITH {alias}, prec
        OPTIONAL MATCH ({alias})-[r2:precedes]->(foll)
        FOREACH (o IN CASE WHEN foll IS NOT NULL THEN [foll] ELSE [] END |
          CREATE ({alias})-[:precedes_pause]->(foll)
        )
        DELETE r2'''

    def __init__(self, corpus, to_find, stop_check=None):
        super(GraphQuery, self).__init__(corpus, to_find)
        if stop_check is None:
            stop_check = base_stop_check
        self.stop_check = stop_check
        self._acoustic_columns = []
        self._preload_acoustics = []
        self._add_subannotations = []

    def required_nodes(self):
        """Return the set of hierarchical annotation nodes the generated
        query must match, beyond what the base query already requires."""
        from .attributes.hierarchical import HierarchicalAnnotation
        tf_type = type(self.to_find)
        ns = super(GraphQuery, self).required_nodes()
        for c in self._columns + self._aggregate + self._preload + self._cache:
            ns.update(x for x in c.nodes if isinstance(x, HierarchicalAnnotation))
        for c, _ in self._order_by:
            ns.update(x for x in c.nodes if isinstance(x, HierarchicalAnnotation))
        # Acoustic columns may reference any node other than the queried one.
        for c in self._acoustic_columns:
            ns.update(x for x in c.nodes if type(x) is not tf_type)
        return ns

    def set_pause(self):
        """ sets pauses in graph"""
        self._set_properties['pause'] = True
        self.corpus.execute_cypher(self.cypher(), **self.cypher_params())
        self._set_properties = {}

    def _generate_set_properties_return(self):
        # 'pause' is special-cased: it triggers the relationship-rewiring
        # template rather than a plain property SET.
        if 'pause' in self._set_properties:
            kwargs = {'alias': self.to_find.alias,
                      'type_alias': self.to_find.type_alias}
            return_statement = self.set_pause_template.format(**kwargs)
            return return_statement
        return super(GraphQuery, self)._generate_set_properties_return()

    def columns(self, *args):
        """
        Add one or more additional columns to the results.

        Columns should be :class:`~polyglotdb.graph.attributes.Attribute` objects.
        """
        # BUG FIX: this was an intersection (&) of the three column lists,
        # which is almost always empty, so the duplicate check below never
        # fired. A union of everything already requested is intended.
        column_set = set(self._columns) | set(self._acoustic_columns) | set(self._hidden_columns)
        for c in args:
            if c in column_set:
                continue
            if c.acoustic:
                self._acoustic_columns.append(c)
            else:
                self._columns.append(c)
            # column_set.add(c) #FIXME failing tests
        return self

    def filter_left_aligned(self, annotation_type):
        """
        Short cut function for aligning the queried annotations with
        another annotation type.

        Same as query.filter(g.word.begin == g.phone.begin).
        """
        if not isinstance(annotation_type, HierarchicalAnnotation):
            annotation_type = getattr(self.to_find, annotation_type.node_type)
        self._criterion.append(LeftAlignedClauseElement(self.to_find, annotation_type))
        return self

    def filter_right_aligned(self, annotation_type):
        """
        Short cut function for aligning the queried annotations with
        another annotation type.

        Same as query.filter(g.word.end == g.phone.end).
        """
        if not isinstance(annotation_type, HierarchicalAnnotation):
            annotation_type = getattr(self.to_find, annotation_type.node_type)
        self._criterion.append(RightAlignedClauseElement(self.to_find, annotation_type))
        return self

    def filter_not_left_aligned(self, annotation_type):
        """
        Short cut function for aligning the queried annotations with
        another annotation type.

        Same as query.filter(g.word.begin != g.phone.begin).
        """
        if not isinstance(annotation_type, HierarchicalAnnotation):
            annotation_type = getattr(self.to_find, annotation_type.node_type)
        self._criterion.append(NotLeftAlignedClauseElement(self.to_find, annotation_type))
        return self

    def filter_not_right_aligned(self, annotation_type):
        """
        Short cut function for aligning the queried annotations with
        another annotation type.

        Same as query.filter(g.word.end != g.phone.end).
        """
        if not isinstance(annotation_type, HierarchicalAnnotation):
            annotation_type = getattr(self.to_find, annotation_type.node_type)
        self._criterion.append(NotRightAlignedClauseElement(self.to_find, annotation_type))
        return self

    def preload(self, *args):
        """Request that the given annotations be loaded with the results."""
        from .attributes.path import SubPathAnnotation
        from .attributes.subannotation import SubAnnotation
        for a in args:
            # Sub-path annotations (but not sub-annotations themselves)
            # also pull in their subannotations.
            if isinstance(a, SubPathAnnotation) and not isinstance(a, SubAnnotation):
                a.with_subannotations = True
            self._preload.append(a)
        return self

    def preload_acoustics(self, *args):
        """Request that the given acoustic tracks be loaded with the results."""
        self._preload_acoustics.extend(args)
        return self

    def all(self):
        """
        Returns all results for the query

        Returns
        -------
        res_list : list
            a list of results from the query
        """
        # Preloading acoustics requires speaker and discourse information.
        if self._preload_acoustics:
            discourse_found = False
            speaker_found = False
            for p in self._preload:
                if p.node_type == 'Discourse':
                    discourse_found = True
                elif p.node_type == 'Speaker':
                    speaker_found = True
            if not discourse_found:
                self.preload(getattr(self.to_find, 'discourse'))
            if not speaker_found:
                self.preload(getattr(self.to_find, 'speaker'))
        # Each acoustic column needs discourse/speaker names, a time span
        # and the utterance id; reuse matching requested columns, and add
        # hidden columns for whatever is missing.
        if self._acoustic_columns:
            for a in self._acoustic_columns:
                discourse_found = False
                speaker_found = False
                begin_found = False
                end_found = False
                utterance_id_found = False
                for c in self._columns + self._hidden_columns:
                    if a.node.discourse == c.node and c.label == 'name':
                        a.discourse_alias = c.output_alias
                        discourse_found = True
                    elif a.node.speaker == c.node and c.label == 'name':
                        a.speaker_alias = c.output_alias
                        speaker_found = True
                    elif a.node == c.node and c.label == 'begin':
                        a.begin_alias = c.output_alias
                        begin_found = True
                    elif a.node == c.node and c.label == 'end':
                        a.end_alias = c.output_alias
                        end_found = True
                    elif c.node.node_type == 'utterance' and c.label == 'id':
                        a.utterance_alias = c.output_alias
                        utterance_id_found = True
                if not discourse_found:
                    self._hidden_columns.append(a.node.discourse.name.column_name(a.discourse_alias))
                if not speaker_found:
                    self._hidden_columns.append(a.node.speaker.name.column_name(a.speaker_alias))
                if not begin_found:
                    self._hidden_columns.append(a.node.begin.column_name(a.begin_alias))
                if not end_found:
                    self._hidden_columns.append(a.node.end.column_name(a.end_alias))
                if not utterance_id_found:
                    if self.to_find.node_type == 'utterance':
                        self._hidden_columns.append(a.node.id.column_name(a.utterance_alias))
                    else:
                        self._hidden_columns.append(a.node.utterance.id.column_name(a.utterance_alias))
        return QueryResults(self)

    def create_subset(self, label):
        """Apply a subset label to matching tokens, registering the label
        with the corpus hierarchy when it is new."""
        labels_to_add = []
        if self.to_find.node_type not in self.corpus.hierarchy.subset_tokens or \
                label not in self.corpus.hierarchy.subset_tokens[self.to_find.node_type]:
            labels_to_add.append(label)
        super(GraphQuery, self).create_subset(label)
        if labels_to_add:
            self.corpus.hierarchy.add_token_subsets(self.corpus, self.to_find.node_type, labels_to_add)

    def set_properties(self, **kwargs):
        """Set (or remove, with a value of None) token properties, keeping
        the corpus hierarchy in sync."""
        props_to_remove = []
        props_to_add = []
        for k, v in kwargs.items():
            if v is None:
                props_to_remove.append(k)
            else:
                if not self.corpus.hierarchy.has_token_property(self.to_find.node_type, k):
                    props_to_add.append((k, type(kwargs[k])))
        super(GraphQuery, self).set_properties(**kwargs)
        if props_to_add:
            self.corpus.hierarchy.add_token_properties(self.corpus, self.to_find.node_type, props_to_add)
        if props_to_remove:
            self.corpus.hierarchy.remove_token_properties(self.corpus, self.to_find.node_type, props_to_remove)

    def remove_subset(self, label):
        """Remove a subset label from matching tokens and from the hierarchy."""
        super(GraphQuery, self).remove_subset(label)
        self.corpus.hierarchy.remove_token_subsets(self.corpus, self.to_find.node_type, [label])

    def cache(self, *args):
        """Cache attribute values on the graph, registering any new
        properties (stored as floats) with the hierarchy."""
        self._cache.extend(args)
        self.corpus.execute_cypher(self.cypher(), **self.cypher_params())
        props_to_add = []
        for k in args:
            k = k.output_label
            if not self.corpus.hierarchy.has_token_property(self.to_find.node_type, k):
                props_to_add.append((k, float))
        if props_to_add:
            self.corpus.hierarchy.add_token_properties(self.corpus, self.to_find.node_type, props_to_add)
class SplitQuery(GraphQuery):
    def __init__(self, corpus, to_find, stop_check=None):
        super(SplitQuery, self).__init__(corpus, to_find, stop_check)
        # Split strategy comes from the corpus config when available;
        # fall back to splitting by speaker.
        try:
            self.splitter = self.corpus.config.query_behavior
        except (AttributeError, GraphQueryError):
            self.splitter = 'speaker'
    def base_query(self, filters=None):
        """ sets up base query

        Parameters
        ----------
        filters : list, optional
            criterion to use in place of this query's own

        Returns
        -------
        q : :class: `~polyglotdb.graph.GraphQuery`
            the base query
        """
        q = GraphQuery(self.corpus, self.to_find)
        # Copy every query parameter onto the fresh query: list-valued
        # parameters are extended element by element (so the new query has
        # its own lists), everything else is deep-copied.
        for p in q._parameters:
            if p == '_criterion' and filters is not None:
                setattr(q, p, filters)
            elif isinstance(getattr(self, p), list):
                for x in getattr(self, p):
                    getattr(q, p).append(x)
            else:
                setattr(q, p, copy.deepcopy(getattr(self, p)))
        return q
def split_queries(self):
""" splits a query into multiple queries """
from .elements import BaseNotEqualClauseElement, BaseNotInClauseElement
if self.splitter not in ['speaker', 'discourse']:
yield self.base_query()
return
labels = [x.attribute.label for x in self._criterion if hasattr(x, 'attribute')]
if self._offset is not None or self._limit is not None or 'id' in labels:
yield self.base_query()
return
speaker_annotation = getattr(self.to_find, 'speaker')
speaker_attribute = getattr(speaker_annotation, 'name')
discourse_annotation = getattr(self.to_find, 'discourse')
discourse_attribute = getattr(discourse_annotation, 'name')
splitter_names = sorted(getattr(self.corpus, self.splitter + 's'))
if self.call_back is not None:
self.call_back(0, len(splitter_names))
if self.splitter == 'speaker':
splitter_annotation = speaker_annotation
splitter_attribute = speaker_attribute
else:
splitter_annotation = discourse_annotation
splitter_attribute = discourse_attribute
selection = []
include = True
reg_filters = []
filter_on_speaker = False
filter_on_discourse = False
for c in self._criterion:
try:
if c.attribute.node == speaker_annotation and \
c.attribute.label == 'name':
filter_on_speaker = True
elif c.attribute.node == discourse_annotation and \
c.attribute.label == 'name':
filter_on_discourse = True
if c.attribute.node == splitter_annotation and \
c.attribute.label == 'name':
if isinstance(c.value, (list, tuple, set)):
selection.extend(c.value)
else:
selection.append(c.value)
if isinstance(c, (BaseNotEqualClauseElement, BaseNotInClauseElement)):
include = False
else:
reg_filters.append(c)
except AttributeError:
reg_filters.append(c)
if filter_on_speaker and filter_on_discourse:
yield self.base_query()
return
for i, x in enumerate(splitter_names):
if selection:
if include and x not in selection:
continue
if not include and x in selection:
continue
if self.call_back is not None:
self.call_back(i)
self.call_back('Querying {} {} of {} ({})...'.format(self.splitter, i, len(splitter_names), x))
base = self.base_query(reg_filters)
al = base.required_nodes()
al.update(base.optional_nodes())
base = base.filter(splitter_attribute == x)
yield base
def set_pause(self):
""" sets a pause in queries """
for q in self.split_queries():
if self.stop_check():
return
q.set_pause()
def all(self):
""" returns all results from a query """
results = None
for q in self.split_queries():
if self.stop_check():
return
if results is None:
r = q.all()
results = r
else:
results.add_results(q)
return results
def count(self):
count = 0
for q in self.split_queries():
count += q.count()
return count
def to_csv(self, path):
for i, q in enumerate(self.split_queries()):
if i == 0:
mode = 'w'
else:
mode = 'a'
r = q.all()
r.to_csv(path, mode=mode)
def delete(self):
""" deletes the query """
for q in self.split_queries():
if self.stop_check():
return
q.delete()
def cache(self, *args):
for q in self.split_queries():
if self.stop_check():
return
q.cache(*args)
def set_label(self, *args):
""" sets the query type"""
for q in self.split_queries():
if self.stop_check():
return
q.set_label(*args)
def set_properties(self, **kwargs):
""" sets the query token """
for q in self.split_queries():
if self.stop_check():
return
q.set_properties(**kwargs)
| {
"content_hash": "74a4fa7fcdac6963f4491b5b3f10e113",
"timestamp": "",
"source": "github",
"line_count": 429,
"max_line_length": 111,
"avg_line_length": 38.33799533799534,
"alnum_prop": 0.560588557183681,
"repo_name": "MontrealCorpusTools/PolyglotDB",
"id": "4e139f5a0d1b0eff25e3cb475d70e5a3793cf125",
"size": "16448",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "polyglotdb/query/annotations/query.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "924894"
},
{
"name": "Shell",
"bytes": "1982"
}
],
"symlink_target": ""
} |
<<<<<<< HEAD
<<<<<<< HEAD
"""Mailcap file handling. See RFC 1524."""
import os
__all__ = ["getcaps","findmatch"]
# Part 1: top-level interface.
def getcaps():
    """Return a dictionary containing the mailcap database.

    The dictionary maps a MIME type (in all lowercase, e.g. 'text/plain')
    to a list of dictionaries corresponding to mailcap entries.  The list
    collects all the entries for that MIME type from all available mailcap
    files.  Each dictionary contains key-value pairs for that MIME type,
    where the viewing command is stored with the key "view".
    """
    caps = {}
    for path in listmailcapfiles():
        try:
            fp = open(path, 'r')
        except OSError:
            # Missing or unreadable mailcap files are simply skipped.
            continue
        with fp:
            parsed = readmailcapfile(fp)
        # Entries for the same MIME type accumulate across files, in
        # search-path order.
        for mime_type, entries in parsed.items():
            caps[mime_type] = caps.get(mime_type, []) + entries
    return caps
def listmailcapfiles():
    """Return a list of all mailcap files found on the system."""
    # The MAILCAPS environment variable (note the trailing S) overrides the
    # default search path; like $PATH it uses the platform path separator.
    pathstr = os.environ.get('MAILCAPS')
    if pathstr is not None:
        return pathstr.split(os.pathsep)
    # Default search path: user file first, then system locations.
    # Fall back to the current directory when HOME is unset (don't bother
    # with getpwuid()).
    home = os.environ.get('HOME', '.')
    return [home + '/.mailcap', '/etc/mailcap',
            '/usr/etc/mailcap', '/usr/local/etc/mailcap']
# Part 2: the parser.
def readmailcapfile(fp):
    """Read a mailcap file and return a dictionary keyed by MIME type.

    Each MIME type is mapped to an entry consisting of a list of
    dictionaries; the list will contain more than one such dictionary
    if a given MIME type appears more than once in the mailcap file.
    Each dictionary contains key-value pairs for that MIME type, where
    the viewing command is stored with the key "view".
    """
    caps = {}
    while 1:
        line = fp.readline()
        if not line: break
        # Ignore comments and blank lines
        if line[0] == '#' or line.strip() == '':
            continue
        nextline = line
        # Join continuation lines (a trailing backslash-newline continues
        # the entry on the next physical line).
        while nextline[-2:] == '\\\n':
            nextline = fp.readline()
            if not nextline: nextline = '\n'
            # line[:-2] drops the backslash and newline before appending.
            line = line[:-2] + nextline
        # Parse the line
        key, fields = parseline(line)
        if not (key and fields):
            continue
        # Normalize the key: strip whitespace around each type component
        # and lowercase the whole thing.
        types = key.split('/')
        for j in range(len(types)):
            types[j] = types[j].strip()
        key = '/'.join(types).lower()
        # Update the database
        if key in caps:
            caps[key].append(fields)
        else:
            caps[key] = [fields]
    return caps
def parseline(line):
    """Parse one entry in a mailcap file and return a dictionary.

    The viewing command is stored as the value with the key "view",
    and the rest of the fields produce key-value pairs in the dict.
    Returns (None, None) when the line has fewer than two fields.
    """
    fields = []
    i, n = 0, len(line)
    while i < n:
        field, i = parsefield(line, i, n)
        fields.append(field)
        i += 1  # step past the terminating semicolon
    if len(fields) < 2:
        # A valid entry needs at least a MIME type and a view command.
        return None, None
    key, view = fields[0], fields[1]
    parsed = {'view': view}
    for field in fields[2:]:
        eq = field.find('=')
        if eq < 0:
            # Flag field with no value, e.g. "needsterminal".
            fkey, fvalue = field, ""
        else:
            fkey = field[:eq].strip()
            fvalue = field[eq + 1:].strip()
        # First occurrence of a key wins; later duplicates are ignored.
        parsed.setdefault(fkey, fvalue)
    return key, parsed
def parsefield(line, i, n):
    """Separate one key-value pair in a mailcap entry.

    Scan ``line`` from index ``i`` up to ``n`` until an unescaped
    semicolon; return the stripped field text together with the index of
    the terminating semicolon (or ``n`` when none was found).
    """
    start = i
    while i < n:
        ch = line[i]
        if ch == ';':
            break
        # A backslash escapes the following character, including ';'.
        i += 2 if ch == '\\' else 1
    return line[start:i].strip(), i
# Part 3: using the database.
def findmatch(caps, MIMEtype, key='view', filename="/dev/null", plist=[]):
    """Find a match for a mailcap entry.

    Return a tuple containing the command line, and the mailcap entry
    used; (None, None) if no match is found.  This may invoke the
    'test' command of several matching entries before deciding which
    entry to use.

    Args:
      caps: mailcap database, as returned by getcaps().
      MIMEtype: MIME type to look up, e.g. 'text/plain'.
      key: which command field of the entry to return (default 'view').
      filename: filename substituted for %s in the commands.
      plist: list of 'name=value' parameters for %{name} substitution.
    """
    entries = lookup(caps, MIMEtype, key)
    # XXX This code should somehow check for the needsterminal flag.
    for e in entries:
        if 'test' in e:
            # Bug fix: subst() takes (field, MIMEtype, filename, plist).
            # Previously `filename` was passed as the MIME type and `plist`
            # (a list) as the filename, so 'test' commands containing %s
            # raised TypeError and %t substituted the wrong value.
            test = subst(e['test'], MIMEtype, filename, plist)
            if test and os.system(test) != 0:
                continue
        command = subst(e[key], MIMEtype, filename, plist)
        return command, e
    return None, None
def lookup(caps, MIMEtype, key=None):
    """Return all mailcap entries matching ``MIMEtype``.

    Exact-type entries come first, followed by wildcard ('maintype/*')
    entries.  When ``key`` is given, only entries containing that field
    are kept.
    """
    entries = list(caps.get(MIMEtype, []))
    maintype = MIMEtype.split('/')[0]
    entries.extend(caps.get(maintype + '/*', []))
    if key is not None:
        entries = [e for e in entries if key in e]
    return entries
def subst(field, MIMEtype, filename, plist=[]):
    # XXX Actually, this is Unix-specific
    # Substitute %-escapes in a mailcap command string:
    #   %s -> filename, %t -> MIMEtype, %{name} -> parameter from plist,
    #   %% -> literal '%'; a backslash escapes the next character.
    # NOTE(review): `plist` is a mutable default but is only read, never
    # mutated, so the shared-default pitfall does not apply here.
    # NOTE(review): a lone '%' at the very end of `field` raises IndexError
    # at the second `c = field[i]` below — confirm inputs never end in '%'.
    res = ''
    i, n = 0, len(field)
    while i < n:
        c = field[i]; i = i+1
        if c != '%':
            if c == '\\':
                # Slicing (not indexing) so a trailing backslash yields ''.
                c = field[i:i+1]; i = i+1
            res = res + c
        else:
            c = field[i]; i = i+1
            if c == '%':
                res = res + c
            elif c == 's':
                res = res + filename
            elif c == 't':
                res = res + MIMEtype
            elif c == '{':
                # %{name}: look the parameter up in plist.
                start = i
                while i < n and field[i] != '}':
                    i = i+1
                name = field[start:i]
                i = i+1
                res = res + findparam(name, plist)
            # XXX To do:
            # %n == number of parts if type is multipart/*
            # %F == list of alternating type and filename for parts
            else:
                # Unknown escape: keep it verbatim.
                res = res + '%' + c
    return res
def findparam(name, plist):
    """Return the value of parameter ``name`` from ``plist``.

    ``plist`` holds 'name=value' strings; the name match is
    case-insensitive.  Returns '' when the parameter is absent.
    """
    prefix = name.lower() + '='
    plen = len(prefix)
    for param in plist:
        if param[:plen].lower() == prefix:
            return param[plen:]
    return ''
# Part 4: test program.
def test():
    """Command-line driver: dump the database or view the given files.

    With no arguments, print the whole mailcap database.  Otherwise the
    arguments are consumed in (MIMEtype, file) pairs and the matching
    'view' command is executed for each pair.
    """
    import sys
    caps = getcaps()
    if not sys.argv[1:]:
        show(caps)
        return
    for i in range(1, len(sys.argv), 2):
        args = sys.argv[i:i+2]
        if len(args) < 2:
            print("usage: mailcap [MIMEtype file] ...")
            return
        MIMEtype = args[0]
        file = args[1]
        command, e = findmatch(caps, MIMEtype, 'view', file)
        if not command:
            # Bug fix: this previously printed the builtin ``type`` (i.e.
            # "<class 'type'>") instead of the MIME type that failed to match.
            print("No viewer found for", MIMEtype)
        else:
            print("Executing:", command)
            sts = os.system(command)
            if sts:
                print("Exit status:", sts)
def show(caps):
    """Pretty-print the mailcap search path and database to stdout."""
    print("Mailcap files:")
    for fn in listmailcapfiles():
        print("\t" + fn)
    print()
    if not caps:
        caps = getcaps()
    print("Mailcap entries:")
    print()
    for mime_type in sorted(caps):
        print(mime_type)
        for entry in caps[mime_type]:
            for field in sorted(entry):
                print("  %-15s" % field, entry[field])
            # Blank line after each entry.
            print()
# Run the command-line driver when executed as a script.
if __name__ == '__main__':
    test()
=======
"""Mailcap file handling. See RFC 1524."""
import os
__all__ = ["getcaps","findmatch"]
# Part 1: top-level interface.
def getcaps():
"""Return a dictionary containing the mailcap database.
The dictionary maps a MIME type (in all lowercase, e.g. 'text/plain')
to a list of dictionaries corresponding to mailcap entries. The list
collects all the entries for that MIME type from all available mailcap
files. Each dictionary contains key-value pairs for that MIME type,
where the viewing command is stored with the key "view".
"""
caps = {}
for mailcap in listmailcapfiles():
try:
fp = open(mailcap, 'r')
except OSError:
continue
with fp:
morecaps = readmailcapfile(fp)
for key, value in morecaps.items():
if not key in caps:
caps[key] = value
else:
caps[key] = caps[key] + value
return caps
def listmailcapfiles():
"""Return a list of all mailcap files found on the system."""
# This is mostly a Unix thing, but we use the OS path separator anyway
if 'MAILCAPS' in os.environ:
pathstr = os.environ['MAILCAPS']
mailcaps = pathstr.split(os.pathsep)
else:
if 'HOME' in os.environ:
home = os.environ['HOME']
else:
# Don't bother with getpwuid()
home = '.' # Last resort
mailcaps = [home + '/.mailcap', '/etc/mailcap',
'/usr/etc/mailcap', '/usr/local/etc/mailcap']
return mailcaps
# Part 2: the parser.
def readmailcapfile(fp):
"""Read a mailcap file and return a dictionary keyed by MIME type.
Each MIME type is mapped to an entry consisting of a list of
dictionaries; the list will contain more than one such dictionary
if a given MIME type appears more than once in the mailcap file.
Each dictionary contains key-value pairs for that MIME type, where
the viewing command is stored with the key "view".
"""
caps = {}
while 1:
line = fp.readline()
if not line: break
# Ignore comments and blank lines
if line[0] == '#' or line.strip() == '':
continue
nextline = line
# Join continuation lines
while nextline[-2:] == '\\\n':
nextline = fp.readline()
if not nextline: nextline = '\n'
line = line[:-2] + nextline
# Parse the line
key, fields = parseline(line)
if not (key and fields):
continue
# Normalize the key
types = key.split('/')
for j in range(len(types)):
types[j] = types[j].strip()
key = '/'.join(types).lower()
# Update the database
if key in caps:
caps[key].append(fields)
else:
caps[key] = [fields]
return caps
def parseline(line):
"""Parse one entry in a mailcap file and return a dictionary.
The viewing command is stored as the value with the key "view",
and the rest of the fields produce key-value pairs in the dict.
"""
fields = []
i, n = 0, len(line)
while i < n:
field, i = parsefield(line, i, n)
fields.append(field)
i = i+1 # Skip semicolon
if len(fields) < 2:
return None, None
key, view, rest = fields[0], fields[1], fields[2:]
fields = {'view': view}
for field in rest:
i = field.find('=')
if i < 0:
fkey = field
fvalue = ""
else:
fkey = field[:i].strip()
fvalue = field[i+1:].strip()
if fkey in fields:
# Ignore it
pass
else:
fields[fkey] = fvalue
return key, fields
def parsefield(line, i, n):
"""Separate one key-value pair in a mailcap entry."""
start = i
while i < n:
c = line[i]
if c == ';':
break
elif c == '\\':
i = i+2
else:
i = i+1
return line[start:i].strip(), i
# Part 3: using the database.
def findmatch(caps, MIMEtype, key='view', filename="/dev/null", plist=[]):
    """Find a match for a mailcap entry.

    Return a tuple containing the command line, and the mailcap entry
    used; (None, None) if no match is found.  This may invoke the
    'test' command of several matching entries before deciding which
    entry to use.

    Args:
      caps: mailcap database, as returned by getcaps().
      MIMEtype: MIME type to look up, e.g. 'text/plain'.
      key: which command field of the entry to return (default 'view').
      filename: filename substituted for %s in the commands.
      plist: list of 'name=value' parameters for %{name} substitution.
    """
    entries = lookup(caps, MIMEtype, key)
    # XXX This code should somehow check for the needsterminal flag.
    for e in entries:
        if 'test' in e:
            # Bug fix: subst() takes (field, MIMEtype, filename, plist).
            # Previously `filename` was passed as the MIME type and `plist`
            # (a list) as the filename, so 'test' commands containing %s
            # raised TypeError and %t substituted the wrong value.
            test = subst(e['test'], MIMEtype, filename, plist)
            if test and os.system(test) != 0:
                continue
        command = subst(e[key], MIMEtype, filename, plist)
        return command, e
    return None, None
def lookup(caps, MIMEtype, key=None):
entries = []
if MIMEtype in caps:
entries = entries + caps[MIMEtype]
MIMEtypes = MIMEtype.split('/')
MIMEtype = MIMEtypes[0] + '/*'
if MIMEtype in caps:
entries = entries + caps[MIMEtype]
if key is not None:
entries = [e for e in entries if key in e]
return entries
def subst(field, MIMEtype, filename, plist=[]):
# XXX Actually, this is Unix-specific
res = ''
i, n = 0, len(field)
while i < n:
c = field[i]; i = i+1
if c != '%':
if c == '\\':
c = field[i:i+1]; i = i+1
res = res + c
else:
c = field[i]; i = i+1
if c == '%':
res = res + c
elif c == 's':
res = res + filename
elif c == 't':
res = res + MIMEtype
elif c == '{':
start = i
while i < n and field[i] != '}':
i = i+1
name = field[start:i]
i = i+1
res = res + findparam(name, plist)
# XXX To do:
# %n == number of parts if type is multipart/*
# %F == list of alternating type and filename for parts
else:
res = res + '%' + c
return res
def findparam(name, plist):
name = name.lower() + '='
n = len(name)
for p in plist:
if p[:n].lower() == name:
return p[n:]
return ''
# Part 4: test program.
def test():
    """Command-line driver: dump the database or view the given files.

    With no arguments, print the whole mailcap database.  Otherwise the
    arguments are consumed in (MIMEtype, file) pairs and the matching
    'view' command is executed for each pair.
    """
    import sys
    caps = getcaps()
    if not sys.argv[1:]:
        show(caps)
        return
    for i in range(1, len(sys.argv), 2):
        args = sys.argv[i:i+2]
        if len(args) < 2:
            print("usage: mailcap [MIMEtype file] ...")
            return
        MIMEtype = args[0]
        file = args[1]
        command, e = findmatch(caps, MIMEtype, 'view', file)
        if not command:
            # Bug fix: this previously printed the builtin ``type`` (i.e.
            # "<class 'type'>") instead of the MIME type that failed to match.
            print("No viewer found for", MIMEtype)
        else:
            print("Executing:", command)
            sts = os.system(command)
            if sts:
                print("Exit status:", sts)
def show(caps):
print("Mailcap files:")
for fn in listmailcapfiles(): print("\t" + fn)
print()
if not caps: caps = getcaps()
print("Mailcap entries:")
print()
ckeys = sorted(caps)
for type in ckeys:
print(type)
entries = caps[type]
for e in entries:
keys = sorted(e)
for k in keys:
print(" %-15s" % k, e[k])
print()
if __name__ == '__main__':
test()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
"""Mailcap file handling. See RFC 1524."""
import os
__all__ = ["getcaps","findmatch"]
# Part 1: top-level interface.
def getcaps():
"""Return a dictionary containing the mailcap database.
The dictionary maps a MIME type (in all lowercase, e.g. 'text/plain')
to a list of dictionaries corresponding to mailcap entries. The list
collects all the entries for that MIME type from all available mailcap
files. Each dictionary contains key-value pairs for that MIME type,
where the viewing command is stored with the key "view".
"""
caps = {}
for mailcap in listmailcapfiles():
try:
fp = open(mailcap, 'r')
except OSError:
continue
with fp:
morecaps = readmailcapfile(fp)
for key, value in morecaps.items():
if not key in caps:
caps[key] = value
else:
caps[key] = caps[key] + value
return caps
def listmailcapfiles():
"""Return a list of all mailcap files found on the system."""
# This is mostly a Unix thing, but we use the OS path separator anyway
if 'MAILCAPS' in os.environ:
pathstr = os.environ['MAILCAPS']
mailcaps = pathstr.split(os.pathsep)
else:
if 'HOME' in os.environ:
home = os.environ['HOME']
else:
# Don't bother with getpwuid()
home = '.' # Last resort
mailcaps = [home + '/.mailcap', '/etc/mailcap',
'/usr/etc/mailcap', '/usr/local/etc/mailcap']
return mailcaps
# Part 2: the parser.
def readmailcapfile(fp):
"""Read a mailcap file and return a dictionary keyed by MIME type.
Each MIME type is mapped to an entry consisting of a list of
dictionaries; the list will contain more than one such dictionary
if a given MIME type appears more than once in the mailcap file.
Each dictionary contains key-value pairs for that MIME type, where
the viewing command is stored with the key "view".
"""
caps = {}
while 1:
line = fp.readline()
if not line: break
# Ignore comments and blank lines
if line[0] == '#' or line.strip() == '':
continue
nextline = line
# Join continuation lines
while nextline[-2:] == '\\\n':
nextline = fp.readline()
if not nextline: nextline = '\n'
line = line[:-2] + nextline
# Parse the line
key, fields = parseline(line)
if not (key and fields):
continue
# Normalize the key
types = key.split('/')
for j in range(len(types)):
types[j] = types[j].strip()
key = '/'.join(types).lower()
# Update the database
if key in caps:
caps[key].append(fields)
else:
caps[key] = [fields]
return caps
def parseline(line):
"""Parse one entry in a mailcap file and return a dictionary.
The viewing command is stored as the value with the key "view",
and the rest of the fields produce key-value pairs in the dict.
"""
fields = []
i, n = 0, len(line)
while i < n:
field, i = parsefield(line, i, n)
fields.append(field)
i = i+1 # Skip semicolon
if len(fields) < 2:
return None, None
key, view, rest = fields[0], fields[1], fields[2:]
fields = {'view': view}
for field in rest:
i = field.find('=')
if i < 0:
fkey = field
fvalue = ""
else:
fkey = field[:i].strip()
fvalue = field[i+1:].strip()
if fkey in fields:
# Ignore it
pass
else:
fields[fkey] = fvalue
return key, fields
def parsefield(line, i, n):
"""Separate one key-value pair in a mailcap entry."""
start = i
while i < n:
c = line[i]
if c == ';':
break
elif c == '\\':
i = i+2
else:
i = i+1
return line[start:i].strip(), i
# Part 3: using the database.
def findmatch(caps, MIMEtype, key='view', filename="/dev/null", plist=[]):
    """Find a match for a mailcap entry.

    Return a tuple containing the command line, and the mailcap entry
    used; (None, None) if no match is found.  This may invoke the
    'test' command of several matching entries before deciding which
    entry to use.

    Args:
      caps: mailcap database, as returned by getcaps().
      MIMEtype: MIME type to look up, e.g. 'text/plain'.
      key: which command field of the entry to return (default 'view').
      filename: filename substituted for %s in the commands.
      plist: list of 'name=value' parameters for %{name} substitution.
    """
    entries = lookup(caps, MIMEtype, key)
    # XXX This code should somehow check for the needsterminal flag.
    for e in entries:
        if 'test' in e:
            # Bug fix: subst() takes (field, MIMEtype, filename, plist).
            # Previously `filename` was passed as the MIME type and `plist`
            # (a list) as the filename, so 'test' commands containing %s
            # raised TypeError and %t substituted the wrong value.
            test = subst(e['test'], MIMEtype, filename, plist)
            if test and os.system(test) != 0:
                continue
        command = subst(e[key], MIMEtype, filename, plist)
        return command, e
    return None, None
def lookup(caps, MIMEtype, key=None):
entries = []
if MIMEtype in caps:
entries = entries + caps[MIMEtype]
MIMEtypes = MIMEtype.split('/')
MIMEtype = MIMEtypes[0] + '/*'
if MIMEtype in caps:
entries = entries + caps[MIMEtype]
if key is not None:
entries = [e for e in entries if key in e]
return entries
def subst(field, MIMEtype, filename, plist=[]):
# XXX Actually, this is Unix-specific
res = ''
i, n = 0, len(field)
while i < n:
c = field[i]; i = i+1
if c != '%':
if c == '\\':
c = field[i:i+1]; i = i+1
res = res + c
else:
c = field[i]; i = i+1
if c == '%':
res = res + c
elif c == 's':
res = res + filename
elif c == 't':
res = res + MIMEtype
elif c == '{':
start = i
while i < n and field[i] != '}':
i = i+1
name = field[start:i]
i = i+1
res = res + findparam(name, plist)
# XXX To do:
# %n == number of parts if type is multipart/*
# %F == list of alternating type and filename for parts
else:
res = res + '%' + c
return res
def findparam(name, plist):
name = name.lower() + '='
n = len(name)
for p in plist:
if p[:n].lower() == name:
return p[n:]
return ''
# Part 4: test program.
def test():
    """Command-line driver: dump the database or view the given files.

    With no arguments, print the whole mailcap database.  Otherwise the
    arguments are consumed in (MIMEtype, file) pairs and the matching
    'view' command is executed for each pair.
    """
    import sys
    caps = getcaps()
    if not sys.argv[1:]:
        show(caps)
        return
    for i in range(1, len(sys.argv), 2):
        args = sys.argv[i:i+2]
        if len(args) < 2:
            print("usage: mailcap [MIMEtype file] ...")
            return
        MIMEtype = args[0]
        file = args[1]
        command, e = findmatch(caps, MIMEtype, 'view', file)
        if not command:
            # Bug fix: this previously printed the builtin ``type`` (i.e.
            # "<class 'type'>") instead of the MIME type that failed to match.
            print("No viewer found for", MIMEtype)
        else:
            print("Executing:", command)
            sts = os.system(command)
            if sts:
                print("Exit status:", sts)
def show(caps):
print("Mailcap files:")
for fn in listmailcapfiles(): print("\t" + fn)
print()
if not caps: caps = getcaps()
print("Mailcap entries:")
print()
ckeys = sorted(caps)
for type in ckeys:
print(type)
entries = caps[type]
for e in entries:
keys = sorted(e)
for k in keys:
print(" %-15s" % k, e[k])
print()
if __name__ == '__main__':
test()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
| {
"content_hash": "06ac076f810a113809359b673e775952",
"timestamp": "",
"source": "github",
"line_count": 765,
"max_line_length": 74,
"avg_line_length": 29.347712418300652,
"alnum_prop": 0.5273261770077057,
"repo_name": "ArcherSys/ArcherSys",
"id": "583a07578c84b273e904b8314302a8105dd3ad0c",
"size": "22451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/mailcap.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""Saves and restore variables inside traced @tf.functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.training.saving import checkpoint_options
from tensorflow.python.training.saving import saveable_hook
from tensorflow.python.training.saving import saveable_object
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util import nest
class _SingleDeviceSaver(object):
  """Saves and restores checkpoints from the current device."""
  __slots__ = ["_saveable_objects"]
  def __init__(self, saveable_objects):
    """Specify a list of `SaveableObject`s to save and restore.
    Args:
      saveable_objects: A list of `SaveableObject`s.
    Raises:
      ValueError: If any element is not a `SaveableObject`.
    """
    # Materialize once so validation and storage see the same sequence.
    saveable_objects = list(saveable_objects)
    for saveable in saveable_objects:
      if not isinstance(saveable, saveable_object.SaveableObject):
        raise ValueError(
            "Expected a list of SaveableObjects, got %s." % (saveable,))
    self._saveable_objects = saveable_objects
  def save(self, file_prefix, options=None):
    """Save the saveable objects to a checkpoint with `file_prefix`.
    Args:
      file_prefix: A string or scalar string Tensor containing the prefix to
        save under.
      options: Optional `CheckpointOptions` object.
    Returns:
      An `Operation`, or None when executing eagerly.
    """
    options = options or checkpoint_options.CheckpointOptions()
    tensor_names = []
    tensors = []
    tensor_slices = []
    for saveable in self._saveable_objects:
      for spec in saveable.specs:
        tensor = spec.tensor
        # A tensor value of `None` indicates that this SaveableObject gets
        # recorded in the object graph, but that no value is saved in the
        # checkpoint.
        if tensor is not None:
          tensor_names.append(spec.name)
          tensors.append(tensor)
          tensor_slices.append(spec.slice_spec)
    # Checkpoint file I/O runs on the CPU unless the caller overrides it
    # via options.experimental_io_device.
    save_device = options.experimental_io_device or "cpu:0"
    with ops.device(save_device):
      return io_ops.save_v2(file_prefix, tensor_names, tensor_slices, tensors)
  def restore(self, file_prefix, options=None):
    """Restore the saveable objects from a checkpoint with `file_prefix`.
    Args:
      file_prefix: A string or scalar string Tensor containing the prefix for
        files to read from.
      options: Optional `CheckpointOptions` object.
    Returns:
      A dictionary mapping from SaveableObject names to restore operations.
    """
    options = options or checkpoint_options.CheckpointOptions()
    restore_specs = []
    tensor_structure = []
    for saveable in self._saveable_objects:
      saveable_tensor_structure = []
      tensor_structure.append(saveable_tensor_structure)
      for spec in saveable.specs:
        saveable_tensor_structure.append(spec.name)
        restore_specs.append((spec.name, spec.slice_spec, spec.dtype))
    # NOTE(review): zip(*restore_specs) raises ValueError when there are no
    # specs at all — confirm callers never construct an empty saver.
    tensor_names, tensor_slices, tensor_dtypes = zip(*restore_specs)
    restore_device = options.experimental_io_device or "cpu:0"
    with ops.device(restore_device):
      restored_tensors = io_ops.restore_v2(
          file_prefix, tensor_names, tensor_slices, tensor_dtypes)
    # Regroup the flat list of restored tensors to mirror the per-saveable
    # structure built above.
    structured_restored_tensors = nest.pack_sequence_as(
        tensor_structure, restored_tensors)
    restore_ops = {}
    for saveable, restored_tensors in zip(self._saveable_objects,
                                          structured_restored_tensors):
      restore_ops[saveable.name] = saveable.restore(
          restored_tensors, restored_shapes=None)
    return restore_ops
def sharded_filename(filename_tensor, shard, num_shards):
  """Append sharding information to a filename.
  Args:
    filename_tensor: A string tensor.
    shard: Integer.  The shard for the filename.
    num_shards: An int Tensor for the number of shards.
  Returns:
    A string tensor.
  """
  # Thin wrapper over the generated sharded_filename op.
  return gen_io_ops.sharded_filename(filename_tensor, shard, num_shards)
class MultiDeviceSaver(object):
  """Saves checkpoints directly from multiple devices.
  Note that this is a low-level utility which stores Tensors in the keys
  specified by `SaveableObject`s. Higher-level utilities for object-based
  checkpointing are built on top of it.
  """
  def __init__(self, saveable_objects):
    """Specify a list of `SaveableObject`s to save and restore.
    Args:
      saveable_objects: A list of `SaveableObject`s.
        Objects extending `SaveableObject` will be saved and restored, and
        objects extending `SaveableHook` will be called into at save and
        restore time.
    Raises:
      ValueError: If an element is neither a `SaveableObject` nor a
        `SaveableHook`.
    """
    self._before_save_callbacks = []
    self._after_restore_callbacks = []
    saveable_objects = list(saveable_objects)
    # Group saveables by the host (CPU) device of their owning device; each
    # group is written by its own _SingleDeviceSaver shard.
    saveables_by_device = {}
    for saveable in saveable_objects:
      is_saveable = isinstance(saveable, saveable_object.SaveableObject)
      is_hook = isinstance(saveable, saveable_hook.SaveableHook)
      if not is_saveable and not is_hook:
        raise ValueError(
            "Expected a dictionary of SaveableObjects, got {}."
            .format(saveable))
      if is_hook:
        self._before_save_callbacks.append(saveable.before_save)
        self._after_restore_callbacks.append(saveable.after_restore)
      if is_saveable:
        host_device = saveable_object_util.set_cpu0(saveable.device)
        saveables_by_device.setdefault(host_device, []).append(saveable)
    self._single_device_savers = {
        device: _SingleDeviceSaver(saveables)
        for device, saveables in saveables_by_device.items()}
  def to_proto(self):
    """Serializes to a SaverDef referencing the current graph."""
    filename_tensor = array_ops.placeholder(
        shape=[], dtype=dtypes.string, name="saver_filename")
    save_tensor = self._traced_save(filename_tensor)
    restore_op = self._traced_restore(filename_tensor).op
    return saver_pb2.SaverDef(
        filename_tensor_name=filename_tensor.name,
        save_tensor_name=save_tensor.name,
        restore_op_name=restore_op.name,
        version=saver_pb2.SaverDef.V2)
  @def_function.function(
      input_signature=(tensor_spec.TensorSpec(shape=(), dtype=dtypes.string),),
      autograph=False)
  def _traced_save(self, file_prefix):
    # Traced wrapper: returns a tensor that depends on the save op, since
    # tf.functions must return tensors rather than bare ops.
    save_op = self.save(file_prefix)
    with ops.device("cpu:0"):
      with ops.control_dependencies([save_op]):
        return array_ops.identity(file_prefix)
  @def_function.function(
      input_signature=(tensor_spec.TensorSpec(shape=(), dtype=dtypes.string),),
      autograph=False)
  def _traced_restore(self, file_prefix):
    # Traced wrapper mirroring _traced_save for the restore path.
    restore_ops = self.restore(file_prefix)
    with ops.device("cpu:0"):
      with ops.control_dependencies(restore_ops.values()):
        return array_ops.identity(file_prefix)
  def save(self, file_prefix, options=None):
    """Save the saveable objects to a checkpoint with `file_prefix`.
    Args:
      file_prefix: A string or scalar string Tensor containing the prefix to
        save under.
      options: Optional `CheckpointOptions` object.
    Returns:
      An `Operation`, or None when executing eagerly.
    """
    options = options or checkpoint_options.CheckpointOptions()
    for callback in self._before_save_callbacks:
      callback()
    # IMPLEMENTATION DETAILS: most clients should skip.
    #
    # Suffix for any well-formed "checkpoint_prefix", when sharded.
    # Transformations:
    # * Users pass in "save_path" in save() and restore().  Say "myckpt".
    # * checkpoint_prefix gets fed <save_path><sharded_suffix>.
    #
    # Example:
    #   During runtime, a temporary directory is first created, which contains
    #   files
    #
    #     <train dir>/myckpt_temp/
    #        part-?????-of-?????{.index, .data-00000-of-00001}
    #
    #   Before .save() finishes, they will be (hopefully, atomically) renamed to
    #
    #     <train dir>/
    #        myckpt{.index, .data-?????-of-?????}
    #
    #   Filesystems with eventual consistency (such as S3), don't need a
    #   temporary location. Using a temporary directory in those cases might
    #   cause situations where files are not available during copy.
    #
    # Users only need to interact with the user-specified prefix, which is
    # "<train dir>/myckpt" in this case.  Save() and Restore() work with the
    # prefix directly, instead of any physical pathname.  (On failure and
    # subsequent restore, an outdated and orphaned temporary directory can be
    # safely removed.)
    with ops.device("CPU"):
      sharded_suffix = array_ops.where(
          string_ops.regex_full_match(file_prefix, "^s3://.*"),
          constant_op.constant(".part"),
          constant_op.constant("_temp/part"))
      tmp_checkpoint_prefix = string_ops.string_join(
          [file_prefix, sharded_suffix])
    def save_fn():
      # Write one shard per single-device saver, then merge the shard
      # metadata into the final checkpoint at `file_prefix`.
      num_shards = len(self._single_device_savers)
      sharded_saves = []
      sharded_prefixes = []
      num_shards_tensor = constant_op.constant(num_shards, name="num_shards")
      last_device = None
      for shard, (device, saver) in enumerate(
          sorted(self._single_device_savers.items())):
        last_device = device
        with ops.device(saveable_object_util.set_cpu0(device)):
          shard_prefix = sharded_filename(tmp_checkpoint_prefix, shard,
                                          num_shards_tensor)
        sharded_prefixes.append(shard_prefix)
        with ops.device(device):
          # _SingleDeviceSaver will use the CPU device when necessary, but
          # initial read operations should be placed on the SaveableObject's
          # device.
          sharded_saves.append(saver.save(shard_prefix, options))
      with ops.control_dependencies(sharded_saves):
        # Merge on the io_device if specified, otherwise co-locates the merge op
        # with the last device used.
        merge_device = (
            options.experimental_io_device or
            saveable_object_util.set_cpu0(last_device))
        with ops.device(merge_device):
          # V2 format write path consists of a metadata merge step.  Once
          # merged, attempts to delete the temporary directory,
          # "<user-fed prefix>_temp".
          return gen_io_ops.merge_v2_checkpoints(
              sharded_prefixes, file_prefix, delete_old_dirs=True)
    # Since this will causes a function re-trace on each save, limit this to the
    # cases where it is needed: eager and when there are multiple tasks/single
    # device savers. Note that the retrace is needed to ensure we pickup the
    # latest values of options like experimental_io_device.
    if context.executing_eagerly() and len(self._single_device_savers) > 1:
      # Explicitly place the identity op on the first device.
      @def_function.function(jit_compile=False)
      def tf_function_save():
        save_fn()
      # Eager multi-device path returns None, per the docstring ("None when
      # executing eagerly") — the save has already run inside the function.
      tf_function_save()
    else:
      return save_fn()
  def restore(self, file_prefix, options=None):
    """Restore the saveable objects from a checkpoint with `file_prefix`.
    Args:
      file_prefix: A string or scalar string Tensor containing the prefix for
        files to read from.
      options: Optional `CheckpointOptions` object.
    Returns:
      A dictionary mapping from SaveableObject names to restore operations.
    """
    options = options or checkpoint_options.CheckpointOptions()
    def restore_fn():
      restore_ops = {}
      # Sort by device name to avoid propagating non-deterministic dictionary
      # ordering in some Python versions.
      for device, saver in sorted(self._single_device_savers.items()):
        with ops.device(device):
          restore_ops.update(saver.restore(file_prefix, options))
      return restore_ops
    # Since this will causes a function re-trace on each save, limit this to the
    # cases where it is needed: eager and when there are multiple tasks/single
    # device savers. Note that the retrace is needed to ensure we pickup the
    # latest values of options like experimental_io_device.
    if context.executing_eagerly() and len(self._single_device_savers) > 1:
      first_device, _ = list(self._single_device_savers.items())[0]
      @def_function.function(jit_compile=False)
      def tf_function_restore():
        restore_ops = restore_fn()
        restore_tensors = {}
        # tf.functions must return tensors, thus we use control dependencies so
        # that we can return a tensor which depends on the given op.
        with ops.device(saveable_object_util.set_cpu0(first_device)):
          for name, op in restore_ops.items():
            with ops.control_dependencies([op]):
              restore_tensors[name] = array_ops.identity(file_prefix)
        return restore_tensors
      restore_ops = tf_function_restore()
    else:
      restore_ops = restore_fn()
    for callback in self._after_restore_callbacks:
      callback()
    return restore_ops
| {
"content_hash": "17789f92b290713a38a7ec0cbcc7d459",
"timestamp": "",
"source": "github",
"line_count": 336,
"max_line_length": 80,
"avg_line_length": 39.88095238095238,
"alnum_prop": 0.6780597014925374,
"repo_name": "freedomtan/tensorflow",
"id": "9511fdbaa05bc0b46cb48ad229d332f9baa79a07",
"size": "14089",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/saving/functional_saver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "32479"
},
{
"name": "Batchfile",
"bytes": "38366"
},
{
"name": "C",
"bytes": "1035837"
},
{
"name": "C#",
"bytes": "13395"
},
{
"name": "C++",
"bytes": "99324075"
},
{
"name": "CMake",
"bytes": "107781"
},
{
"name": "Dockerfile",
"bytes": "283435"
},
{
"name": "Go",
"bytes": "2013128"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "928595"
},
{
"name": "Jupyter Notebook",
"bytes": "981916"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "4489624"
},
{
"name": "Makefile",
"bytes": "97500"
},
{
"name": "NASL",
"bytes": "8048"
},
{
"name": "Objective-C",
"bytes": "141623"
},
{
"name": "Objective-C++",
"bytes": "360423"
},
{
"name": "PHP",
"bytes": "20570"
},
{
"name": "Pawn",
"bytes": "32277"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42762396"
},
{
"name": "RobotFramework",
"bytes": "2661"
},
{
"name": "Roff",
"bytes": "2515"
},
{
"name": "Ruby",
"bytes": "6723"
},
{
"name": "Shell",
"bytes": "647623"
},
{
"name": "Smarty",
"bytes": "52687"
},
{
"name": "Starlark",
"bytes": "4632847"
},
{
"name": "Swift",
"bytes": "56924"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
import pickle
from gym import spaces
from gym.envs.registration import EnvSpec
import gym
import unittest
import ray
from ray.rllib.agents.a3c import A2CTrainer
from ray.rllib.agents.pg import PGTrainer
from ray.rllib.agents.pg.pg_tf_policy import PGTFPolicy
from ray.rllib.env import MultiAgentEnv
from ray.rllib.env.base_env import BaseEnv
from ray.rllib.env.vector_env import VectorEnv
from ray.rllib.models import ModelCatalog
from ray.rllib.models.model import Model
from ray.rllib.models.torch.fcnet import FullyConnectedNetwork
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.rollout import rollout
from ray.rllib.tests.test_external_env import SimpleServing
from ray.tune.registry import register_env
from ray.rllib.utils import try_import_tf, try_import_torch
tf = try_import_tf()
_, nn = try_import_torch()
DICT_SPACE = spaces.Dict({
"sensors": spaces.Dict({
"position": spaces.Box(low=-100, high=100, shape=(3, )),
"velocity": spaces.Box(low=-1, high=1, shape=(3, )),
"front_cam": spaces.Tuple(
(spaces.Box(low=0, high=1, shape=(10, 10, 3)),
spaces.Box(low=0, high=1, shape=(10, 10, 3)))),
"rear_cam": spaces.Box(low=0, high=1, shape=(10, 10, 3)),
}),
"inner_state": spaces.Dict({
"charge": spaces.Discrete(100),
"job_status": spaces.Dict({
"task": spaces.Discrete(5),
"progress": spaces.Box(low=0, high=100, shape=()),
})
})
})
DICT_SAMPLES = [DICT_SPACE.sample() for _ in range(10)]
TUPLE_SPACE = spaces.Tuple([
spaces.Box(low=-100, high=100, shape=(3, )),
spaces.Tuple((spaces.Box(low=0, high=1, shape=(10, 10, 3)),
spaces.Box(low=0, high=1, shape=(10, 10, 3)))),
spaces.Discrete(5),
])
TUPLE_SAMPLES = [TUPLE_SPACE.sample() for _ in range(10)]
def one_hot(i, n):
    """Return a length-``n`` list of floats that is 1.0 at index ``i``."""
    return [1.0 if j == i else 0.0 for j in range(n)]
class NestedDictEnv(gym.Env):
    """Single-agent env emitting the pre-sampled nested Dict observations."""

    def __init__(self):
        self.steps = 0
        self._spec = EnvSpec("NestedDictEnv-v0")
        self.observation_space = DICT_SPACE
        self.action_space = spaces.Discrete(2)

    def reset(self):
        # Restart the fixed trajectory from the first canned sample.
        self.steps = 0
        return DICT_SAMPLES[0]

    def step(self, action):
        self.steps += 1
        done = self.steps >= 5
        return DICT_SAMPLES[self.steps], 1, done, {}
class NestedTupleEnv(gym.Env):
    """Single-agent env emitting the pre-sampled nested Tuple observations."""

    def __init__(self):
        self.steps = 0
        self._spec = EnvSpec("NestedTupleEnv-v0")
        self.observation_space = TUPLE_SPACE
        self.action_space = spaces.Discrete(2)

    def reset(self):
        # Restart the fixed trajectory from the first canned sample.
        self.steps = 0
        return TUPLE_SAMPLES[0]

    def step(self, action):
        self.steps += 1
        done = self.steps >= 5
        return TUPLE_SAMPLES[self.steps], 1, done, {}
class NestedMultiAgentEnv(MultiAgentEnv):
    """Two-agent env: one agent sees dict samples, the other tuple samples."""

    def __init__(self):
        self.steps = 0

    def reset(self):
        return {
            "dict_agent": DICT_SAMPLES[0],
            "tuple_agent": TUPLE_SAMPLES[0],
        }

    def step(self, actions):
        self.steps += 1
        done = self.steps >= 5
        obs = {
            "dict_agent": DICT_SAMPLES[self.steps],
            "tuple_agent": TUPLE_SAMPLES[self.steps],
        }
        rew = {"dict_agent": 0, "tuple_agent": 0}
        infos = {"dict_agent": {}, "tuple_agent": {}}
        return obs, rew, {"__all__": done}, infos
class InvalidModel(Model):
    # Deliberately broken model: _build_layers_v2 must return tensors, but
    # this returns plain strings so the catalog raises a ValueError.
    def _build_layers_v2(self, input_dict, num_outputs, options):
        return "not", "valid"
class InvalidModel2(Model):
    # Deliberately broken model: returns scalar constants rather than
    # properly shaped output tensors, to trigger an "Expected output" error.
    def _build_layers_v2(self, input_dict, num_outputs, options):
        return tf.constant(0), tf.constant(0)
class TorchSpyModel(TorchModelV2, nn.Module):
    """Torch model that records the nested observations it receives.

    Each forward() call stores (position, front_cam, task) into the Ray
    internal KV store under an incrementing "torch_spy_in_<i>" key so the
    test process can verify observation reconstruction.
    """

    # Class-level counter shared across instances/calls; keys the KV entries.
    capture_index = 0

    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        TorchModelV2.__init__(self, obs_space, action_space, num_outputs,
                              model_config, name)
        nn.Module.__init__(self)
        # Only the "sensors"/"position" sub-space is fed to the FC net below.
        self.fc = FullyConnectedNetwork(
            obs_space.original_space.spaces["sensors"].spaces["position"],
            action_space, num_outputs, model_config, name)

    def forward(self, input_dict, state, seq_lens):
        pos = input_dict["obs"]["sensors"]["position"].numpy()
        front_cam = input_dict["obs"]["sensors"]["front_cam"][0].numpy()
        task = input_dict["obs"]["inner_state"]["job_status"]["task"].numpy()
        # Publish the captured arrays so the test can read them back.
        ray.experimental.internal_kv._internal_kv_put(
            "torch_spy_in_{}".format(TorchSpyModel.capture_index),
            pickle.dumps((pos, front_cam, task)),
            overwrite=True)
        TorchSpyModel.capture_index += 1
        return self.fc({
            "obs": input_dict["obs"]["sensors"]["position"]
        }, state, seq_lens)

    def value_function(self):
        return self.fc.value_function()
class DictSpyModel(Model):
    """TF model that records the unpacked nested dict observations.

    Each build pushes (position, front_cam, task) into the Ray internal KV
    store under an incrementing "d_spy_in_<i>" key.
    """

    # Class-level counter shared across instances/calls; keys the KV entries.
    capture_index = 0

    def _build_layers_v2(self, input_dict, num_outputs, options):
        def spy(pos, front_cam, task):
            # TF runs this function in an isolated context, so we have to use
            # redis to communicate back to our suite
            ray.experimental.internal_kv._internal_kv_put(
                "d_spy_in_{}".format(DictSpyModel.capture_index),
                pickle.dumps((pos, front_cam, task)),
                overwrite=True)
            DictSpyModel.capture_index += 1
            return 0

        spy_fn = tf.py_func(
            spy, [
                input_dict["obs"]["sensors"]["position"],
                input_dict["obs"]["sensors"]["front_cam"][0],
                input_dict["obs"]["inner_state"]["job_status"]["task"]
            ],
            tf.int64,
            stateful=True)

        # Force the spy to run before the output layer is computed.
        with tf.control_dependencies([spy_fn]):
            output = tf.layers.dense(input_dict["obs"]["sensors"]["position"],
                                     num_outputs)
        return output, output
class TupleSpyModel(Model):
    """TF model that records the unpacked nested tuple observations.

    Each build pushes (pos, cam, task) into the Ray internal KV store under
    an incrementing "t_spy_in_<i>" key.
    """

    # Class-level counter shared across instances/calls; keys the KV entries.
    capture_index = 0

    def _build_layers_v2(self, input_dict, num_outputs, options):
        def spy(pos, cam, task):
            # TF runs this function in an isolated context, so we have to use
            # redis to communicate back to our suite
            ray.experimental.internal_kv._internal_kv_put(
                "t_spy_in_{}".format(TupleSpyModel.capture_index),
                pickle.dumps((pos, cam, task)),
                overwrite=True)
            TupleSpyModel.capture_index += 1
            return 0

        spy_fn = tf.py_func(
            spy, [
                input_dict["obs"][0],
                input_dict["obs"][1][0],
                input_dict["obs"][2],
            ],
            tf.int64,
            stateful=True)

        # Force the spy to run before the output layer is computed.
        with tf.control_dependencies([spy_fn]):
            output = tf.layers.dense(input_dict["obs"][0], num_outputs)
        return output, output
class NestedSpacesTest(unittest.TestCase):
    """End-to-end checks that RLlib reconstructs nested Dict/Tuple obs."""

    def testInvalidModel(self):
        # A model returning non-tensors must be rejected at build time.
        ModelCatalog.register_custom_model("invalid", InvalidModel)
        self.assertRaises(ValueError, lambda: PGTrainer(
            env="CartPole-v0", config={
                "model": {
                    "custom_model": "invalid",
                },
            }))

    def testInvalidModel2(self):
        # A model returning wrongly shaped tensors must also be rejected.
        ModelCatalog.register_custom_model("invalid2", InvalidModel2)
        self.assertRaisesRegexp(
            ValueError, "Expected output.*",
            lambda: PGTrainer(
                env="CartPole-v0", config={
                    "model": {
                        "custom_model": "invalid2",
                    },
                }))

    def doTestNestedDict(self, make_env, test_lstm=False):
        """Train one PG iteration on `make_env` and check the spy model saw
        the exact nested dict observations."""
        ModelCatalog.register_custom_model("composite", DictSpyModel)
        register_env("nested", make_env)
        pg = PGTrainer(
            env="nested",
            config={
                "num_workers": 0,
                "sample_batch_size": 5,
                "train_batch_size": 5,
                "model": {
                    "custom_model": "composite",
                    "use_lstm": test_lstm,
                },
            })
        pg.train()
        # Check that the model sees the correct reconstructed observations
        for i in range(4):
            seen = pickle.loads(
                ray.experimental.internal_kv._internal_kv_get(
                    "d_spy_in_{}".format(i)))
            pos_i = DICT_SAMPLES[i]["sensors"]["position"].tolist()
            cam_i = DICT_SAMPLES[i]["sensors"]["front_cam"][0].tolist()
            task_i = one_hot(
                DICT_SAMPLES[i]["inner_state"]["job_status"]["task"], 5)
            self.assertEqual(seen[0][0].tolist(), pos_i)
            self.assertEqual(seen[1][0].tolist(), cam_i)
            self.assertEqual(seen[2][0].tolist(), task_i)

    def doTestNestedTuple(self, make_env):
        """Same as doTestNestedDict but for nested Tuple observations."""
        ModelCatalog.register_custom_model("composite2", TupleSpyModel)
        register_env("nested2", make_env)
        pg = PGTrainer(
            env="nested2",
            config={
                "num_workers": 0,
                "sample_batch_size": 5,
                "train_batch_size": 5,
                "model": {
                    "custom_model": "composite2",
                },
            })
        pg.train()
        # Check that the model sees the correct reconstructed observations
        for i in range(4):
            seen = pickle.loads(
                ray.experimental.internal_kv._internal_kv_get(
                    "t_spy_in_{}".format(i)))
            pos_i = TUPLE_SAMPLES[i][0].tolist()
            cam_i = TUPLE_SAMPLES[i][1][0].tolist()
            task_i = one_hot(TUPLE_SAMPLES[i][2], 5)
            self.assertEqual(seen[0][0].tolist(), pos_i)
            self.assertEqual(seen[1][0].tolist(), cam_i)
            self.assertEqual(seen[2][0].tolist(), task_i)

    # The same dict/tuple checks across each env wrapper flavor.
    def testNestedDictGym(self):
        self.doTestNestedDict(lambda _: NestedDictEnv())

    def testNestedDictGymLSTM(self):
        self.doTestNestedDict(lambda _: NestedDictEnv(), test_lstm=True)

    def testNestedDictVector(self):
        self.doTestNestedDict(
            lambda _: VectorEnv.wrap(lambda i: NestedDictEnv()))

    def testNestedDictServing(self):
        self.doTestNestedDict(lambda _: SimpleServing(NestedDictEnv()))

    def testNestedDictAsync(self):
        self.doTestNestedDict(lambda _: BaseEnv.to_base_env(NestedDictEnv()))

    def testNestedTupleGym(self):
        self.doTestNestedTuple(lambda _: NestedTupleEnv())

    def testNestedTupleVector(self):
        self.doTestNestedTuple(
            lambda _: VectorEnv.wrap(lambda i: NestedTupleEnv()))

    def testNestedTupleServing(self):
        self.doTestNestedTuple(lambda _: SimpleServing(NestedTupleEnv()))

    def testNestedTupleAsync(self):
        self.doTestNestedTuple(lambda _: BaseEnv.to_base_env(NestedTupleEnv()))

    def testMultiAgentComplexSpaces(self):
        """Two policies with different nested spaces in one trainer."""
        ModelCatalog.register_custom_model("dict_spy", DictSpyModel)
        ModelCatalog.register_custom_model("tuple_spy", TupleSpyModel)
        register_env("nested_ma", lambda _: NestedMultiAgentEnv())
        act_space = spaces.Discrete(2)
        pg = PGTrainer(
            env="nested_ma",
            config={
                "num_workers": 0,
                "sample_batch_size": 5,
                "train_batch_size": 5,
                "multiagent": {
                    "policies": {
                        "tuple_policy": (
                            PGTFPolicy, TUPLE_SPACE, act_space,
                            {"model": {"custom_model": "tuple_spy"}}),
                        "dict_policy": (
                            PGTFPolicy, DICT_SPACE, act_space,
                            {"model": {"custom_model": "dict_spy"}}),
                    },
                    "policy_mapping_fn": lambda a: {
                        "tuple_agent": "tuple_policy",
                        "dict_agent": "dict_policy"}[a],
                },
            })
        pg.train()
        # Verify what the dict-policy model captured ...
        for i in range(4):
            seen = pickle.loads(
                ray.experimental.internal_kv._internal_kv_get(
                    "d_spy_in_{}".format(i)))
            pos_i = DICT_SAMPLES[i]["sensors"]["position"].tolist()
            cam_i = DICT_SAMPLES[i]["sensors"]["front_cam"][0].tolist()
            task_i = one_hot(
                DICT_SAMPLES[i]["inner_state"]["job_status"]["task"], 5)
            self.assertEqual(seen[0][0].tolist(), pos_i)
            self.assertEqual(seen[1][0].tolist(), cam_i)
            self.assertEqual(seen[2][0].tolist(), task_i)
        # ... and what the tuple-policy model captured.
        for i in range(4):
            seen = pickle.loads(
                ray.experimental.internal_kv._internal_kv_get(
                    "t_spy_in_{}".format(i)))
            pos_i = TUPLE_SAMPLES[i][0].tolist()
            cam_i = TUPLE_SAMPLES[i][1][0].tolist()
            task_i = one_hot(TUPLE_SAMPLES[i][2], 5)
            self.assertEqual(seen[0][0].tolist(), pos_i)
            self.assertEqual(seen[1][0].tolist(), cam_i)
            self.assertEqual(seen[2][0].tolist(), task_i)

    def testRolloutDictSpace(self):
        """Checkpoint save/restore and rollout work with nested dict obs."""
        register_env("nested", lambda _: NestedDictEnv())
        agent = PGTrainer(env="nested")
        agent.train()
        path = agent.save()
        agent.stop()
        # Test train works on restore
        agent2 = PGTrainer(env="nested")
        agent2.restore(path)
        agent2.train()
        # Test rollout works on restore
        rollout(agent2, "nested", 100)

    def testPyTorchModel(self):
        """The PyTorch path also receives correctly reconstructed obs."""
        ModelCatalog.register_custom_model("composite", TorchSpyModel)
        register_env("nested", lambda _: NestedDictEnv())
        a2c = A2CTrainer(
            env="nested",
            config={
                "num_workers": 0,
                "use_pytorch": True,
                "sample_batch_size": 5,
                "train_batch_size": 5,
                "model": {
                    "custom_model": "composite",
                },
            })
        a2c.train()
        # Check that the model sees the correct reconstructed observations
        for i in range(4):
            seen = pickle.loads(
                ray.experimental.internal_kv._internal_kv_get(
                    "torch_spy_in_{}".format(i)))
            pos_i = DICT_SAMPLES[i]["sensors"]["position"].tolist()
            cam_i = DICT_SAMPLES[i]["sensors"]["front_cam"][0].tolist()
            task_i = one_hot(
                DICT_SAMPLES[i]["inner_state"]["job_status"]["task"], 5)
            self.assertEqual(seen[0][0].tolist(), pos_i)
            self.assertEqual(seen[1][0].tolist(), cam_i)
            self.assertEqual(seen[2][0].tolist(), task_i)
if __name__ == "__main__":
    # Run the suite directly; trainers above use num_workers=0, so a small
    # CPU budget is sufficient.
    ray.init(num_cpus=5)
    unittest.main(verbosity=2)
| {
"content_hash": "f35e8342dbc3aa00305b2bcfeb290ad7",
"timestamp": "",
"source": "github",
"line_count": 424,
"max_line_length": 79,
"avg_line_length": 34.84905660377358,
"alnum_prop": 0.5444639956686519,
"repo_name": "stephanie-wang/ray",
"id": "24f17d91123311243440071343bf2dcc2fbe2e50",
"size": "14776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/tests/test_nested_spaces.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "29882"
},
{
"name": "C++",
"bytes": "2149909"
},
{
"name": "CSS",
"bytes": "8025"
},
{
"name": "Dockerfile",
"bytes": "5499"
},
{
"name": "Go",
"bytes": "28481"
},
{
"name": "HTML",
"bytes": "30435"
},
{
"name": "Java",
"bytes": "738348"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "1965"
},
{
"name": "Python",
"bytes": "4058862"
},
{
"name": "Shell",
"bytes": "88736"
},
{
"name": "Starlark",
"bytes": "121207"
},
{
"name": "TypeScript",
"bytes": "64161"
}
],
"symlink_target": ""
} |
import android
import json
import time
from urllib import urlencode
from urllib2 import urlopen
hello_msg = "Welcome to Coach Kelly's Timing App"
list_title = 'Here is your list of athletes:'
quit_msg = "Quitting Coach Kelly's App."
web_server = 'http://192.168.1.33:8080'
get_names_cgi = '/cgi-bin/generate_names.py'
get_data_cgi = '/cgi-bin/generate_data.py'
def send_to_server(url, post_data=None):
    """Fetch url (POST when post_data is truthy, GET otherwise) and return
    the UTF-8 decoded response body."""
    page = urlopen(url, urlencode(post_data)) if post_data else urlopen(url)
    return page.read().decode("utf8")
app = android.Android()
def status_update(msg, how_long=2):
    """Show msg as an Android toast and pause how_long seconds so it can be
    read before the next dialog appears."""
    app.makeToast(msg)
    time.sleep(how_long)
# Main script flow: greet, let the coach pick an athlete, show that
# athlete's top-3 times, and optionally upload a new timing value.
status_update(hello_msg)

athletes = sorted(json.loads(send_to_server(web_server + get_names_cgi)))
athlete_names = [ath[0] for ath in athletes]

# Single-choice list dialog with the athlete names.
app.dialogCreateAlert(list_title)
app.dialogSetSingleChoiceItems(athlete_names)
app.dialogSetPositiveButtonText('Select')
app.dialogSetNegativeButtonText('Quit')
app.dialogShow()
resp = app.dialogGetResponse().result
# NOTE(review): ('positive') is just the string 'positive', so this is a
# substring membership test; a one-element tuple ('positive',) was probably
# intended. It behaves correctly for the exact value 'positive'.
if resp['which'] in ('positive'):
    selected_athlete = app.dialogGetSelectedItems().result[0]
    which_athlete = athletes[selected_athlete][1]
    athlete = json.loads(send_to_server(web_server + get_data_cgi,
                                        {'which_athlete': which_athlete}))
    athlete_title = athlete['Name'] + ' (' + athlete['DOB'] + '), top 3 times:'
    app.dialogCreateAlert(athlete_title)
    app.dialogSetItems(athlete['top3'])
    app.dialogSetPositiveButtonText('OK')
    # Need to add another button to add a timing value.
    app.dialogSetNegativeButtonText('Add Time')
    app.dialogShow()
    resp = app.dialogGetResponse().result
    if resp['which'] in ('positive'):
        pass
    elif resp['which'] in ('negative'):
        # Prompt for a new time and POST it back to the server.
        timing_title = 'Enter a new time'
        timing_msg = 'Provide a new timing value ' + athlete['Name'] + ': '
        add_time_cgi = '/cgi-bin/add_timing_data.py'
        resp = app.dialogGetInput(timing_title, timing_msg).result
        if resp is not None:
            new_time = resp
            send_to_server(web_server + add_time_cgi,
                           {'Time': new_time,
                            'Athlete': which_athlete})

status_update(quit_msg)
| {
"content_hash": "31051f490ad196a5901ecf3208f27b89",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 79,
"avg_line_length": 30.276315789473685,
"alnum_prop": 0.6397218600608431,
"repo_name": "tdean1995/HFPythonSandbox",
"id": "023b5205ed9b90d0d48cc82248f8f53a986b742d",
"size": "2302",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "hfpython_code/hfpy_code/chapter9/page346.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5893"
},
{
"name": "Python",
"bytes": "103816"
}
],
"symlink_target": ""
} |
import pickle
import unittest
import numpy as np
from pathlib import Path
from .context import getspectrum
import pyimzml.ImzMLParser as imzmlp
import pyimzml.ImzMLWriter as imzmlw
# Example files from https://ms-imaging.org/wp/imzml/example-files-test/
CONTINUOUS_IMZML_PATH = str(Path(__file__).parent / 'data/Example_Continuous.imzML')
CONTINUOUS_IBD_PATH = str(Path(__file__).parent / 'data/Example_Continuous.ibd')
PROCESSED_IMZML_PATH = str(Path(__file__).parent / 'data/Example_Processed.imzML')
PROCESSED_IBD_PATH = str(Path(__file__).parent / 'data/Example_Processed.ibd')
PARSE_LIB_TEST_CASES = ['lxml', 'ElementTree']
DATA_TEST_CASES = [
('Continuous', CONTINUOUS_IMZML_PATH, CONTINUOUS_IBD_PATH),
('Processed', PROCESSED_IMZML_PATH, PROCESSED_IBD_PATH),
]
ALL_TEST_CASES = [(parse_lib, data_name, imzml_path, ibd_path)
for parse_lib in PARSE_LIB_TEST_CASES
for data_name, imzml_path, ibd_path in DATA_TEST_CASES]
class ImzMLParser(unittest.TestCase):
    """Exercises pyimzml.ImzMLParser against both example files and both
    supported XML backends (see ALL_TEST_CASES)."""

    def test_bisect(self):
        # _bisect_spectrum should return the inclusive index window of all
        # m/z values within test_mz +/- test_tol.
        mzs = [100., 201.89, 201.99, 202.0, 202.01, 202.10000001, 400.]
        test_mz = 202.0
        test_tol = 0.1
        ix_l, ix_u = imzmlp._bisect_spectrum(mzs, test_mz, test_tol)
        assert ix_l == 2
        assert ix_u == 4
        assert ix_l <= ix_u
        assert mzs[ix_l] >= test_mz - test_tol
        assert mzs[ix_u] <= test_mz + test_tol

    def test_getspectrum(self):
        # Known properties of spectrum 4 in the example files.
        for parse_lib, data_name, imzml_path, ibd_path in ALL_TEST_CASES:
            with self.subTest(parse_lib=parse_lib, data=data_name),\
                    imzmlp.ImzMLParser(imzml_path, parse_lib=parse_lib) as parser:
                mzs, ints = parser.getspectrum(4)
                assert parser.polarity == 'negative'
                assert len(parser.coordinates) == 9
                assert mzs.dtype == np.float32
                assert ints.dtype == np.float32
                assert len(mzs) == 8399
                assert len(ints) == 8399
                # Sanity ranges for the example data.
                assert np.all(mzs > 100.0)
                assert np.all(mzs < 800.0)
                assert np.all(ints >= 0.0)
                assert np.all(ints < 3.0)

    def test_files_instead_of_paths(self):
        # The parser also accepts open file objects instead of path strings.
        for parse_lib, data_name, imzml_path, ibd_path in ALL_TEST_CASES:
            with self.subTest(parse_lib=parse_lib, data=data_name),\
                    open(imzml_path, 'rb') as imzml_file,\
                    open(ibd_path, 'rb') as ibd_file,\
                    imzmlp.ImzMLParser(imzml_file, parse_lib=parse_lib, ibd_file=ibd_file) as parser:
                mzs, ints = parser.getspectrum(4)
                assert len(parser.coordinates) == 9
                assert len(mzs) > 0
                assert len(ints) > 0

    def test_parse_metadata(self):
        # Spot-checks every major section of the parsed imzML metadata tree.
        for parse_lib, data_name, imzml_path, ibd_path in ALL_TEST_CASES:
            with self.subTest(parse_lib=parse_lib, data=data_name),\
                    imzmlp.ImzMLParser(imzml_path, parse_lib=parse_lib) as parser:
                md = parser.metadata
                # fileDescription section
                assert md.file_description['MS:1000579'] == True
                assert 'ibd SHA-1' in md.file_description
                assert len(md.file_description.source_files) == 1
                assert md.file_description.source_files['sf1']['Thermo RAW format'] == True
                assert md.file_description.source_files['sf1'].attrs['name'] == 'Example.raw'
                assert len(md.file_description.contacts) == 1
                # referenceableParamGroupList section
                assert len(md.referenceable_param_groups) == 4
                assert md.referenceable_param_groups['scan1']['increasing m/z scan']
                # sampleList section
                assert len(md.samples) == 1
                assert md.samples['sample1']['sample number'] == '1'
                # softwareList section
                assert len(md.softwares) == 2
                assert md.softwares['Xcalibur']['Xcalibur']
                # scanSettingsList section
                assert len(md.scan_settings) == 1
                assert md.scan_settings['scansettings1']['pixel size (x)'] == 100.0
                # instrumentConfigurationList section
                assert len(md.instrument_configurations) == 1
                ic = md.instrument_configurations['LTQFTUltra0']
                assert ic.param_by_name['instrument serial number'] == 'none'
                assert len(ic.components) == 3
                assert ic.components[0].type == 'source'
                assert ic.components[1].type == 'analyzer'
                assert ic.components[2].type == 'detector'
                assert ic.software_ref == 'Xcalibur'
                # dataProcessingList section
                assert len(md.data_processings) == 2
                assert md.data_processings['XcaliburProcessing'].methods[0].attrs['softwareRef'] == 'Xcalibur'
                assert md.data_processings['XcaliburProcessing'].methods[0]['low intensity data point removal']

    def test_parse_full_spectrum_metadata(self):
        # include_spectra_metadata='full' keeps per-spectrum metadata objects.
        for parse_lib, data_name, imzml_path, ibd_path in ALL_TEST_CASES:
            with self.subTest(parse_lib=parse_lib, data=data_name),\
                    imzmlp.ImzMLParser(imzml_path, parse_lib=parse_lib, include_spectra_metadata='full') as parser:
                assert len(parser.spectrum_full_metadata) == len(parser.coordinates)
                spectrum = parser.spectrum_full_metadata[0]
                assert spectrum['ms level'] == 0  # comes from referenceable param group
                assert spectrum['total ion current'] > 100
                assert spectrum.scan_list_params['no combination']
                assert spectrum.scans[0].attrs['instrumentConfigurationRef'] == 'LTQFTUltra0'
                assert spectrum.scans[0]['position x'] == 1
                assert 'm/z array' in spectrum.binary_data_arrays[0]
                assert 'intensity array' in spectrum.binary_data_arrays[1]

    def test_parse_partial_spectrum_metadata(self):
        # Requesting a list of accessions collects one value per spectrum;
        # unknown accessions yield None rather than raising.
        TIC, POS_X, EXT_LEN, INVALID = 'MS:1000285', 'IMS:1000050', 'IMS:1000104', 'INVALID'
        ACCESSIONS = [TIC, POS_X, EXT_LEN, INVALID]
        for parse_lib, data_name, imzml_path, ibd_path in ALL_TEST_CASES:
            with self.subTest(parse_lib=parse_lib, data=data_name),\
                    imzmlp.ImzMLParser(imzml_path, parse_lib=parse_lib, include_spectra_metadata=ACCESSIONS) as parser:
                assert len(parser.spectrum_metadata_fields[TIC]) == len(parser.coordinates)
                assert len(parser.spectrum_metadata_fields[POS_X]) == len(parser.coordinates)
                assert len(parser.spectrum_metadata_fields[EXT_LEN]) == len(parser.coordinates)
                assert len(parser.spectrum_metadata_fields[INVALID]) == len(parser.coordinates)
                assert all(tic > 100 for tic in parser.spectrum_metadata_fields[TIC])
                assert all(isinstance(pos_x, int) for pos_x in parser.spectrum_metadata_fields[POS_X])
                assert all(isinstance(ext_len, int) for ext_len in parser.spectrum_metadata_fields[EXT_LEN])
                assert all(invalid is None for invalid in parser.spectrum_metadata_fields[INVALID])
class PortableSpectrumReader(unittest.TestCase):
    """A pickled/unpickled portable reader must read the same spectrum as a
    normally attached parser."""

    def test_read_file(self):
        spectrum_idx = 4
        for parse_lib, data_name, imzml_path, ibd_path in ALL_TEST_CASES:
            with self.subTest(parse_lib=parse_lib, data=data_name),\
                    imzmlp.ImzMLParser(imzml_path, parse_lib=parse_lib) as normal_parser,\
                    open(ibd_path, 'rb') as ibd_file:
                normal_mzs, normal_ints = normal_parser.getspectrum(spectrum_idx)
                # Detached parser: parses the XML but holds no ibd handle.
                detached_parser = imzmlp.ImzMLParser(imzml_path, parse_lib=parse_lib, ibd_file=None)
                portable_reader = detached_parser.portable_spectrum_reader()
                # Pickle and unpickle to ensure it survives for its intended use case
                portable_reader = pickle.loads(pickle.dumps(portable_reader))
                portable_mzs, portable_ints = portable_reader.read_spectrum_from_file(ibd_file, spectrum_idx)
                assert np.all(normal_mzs == portable_mzs)
                assert np.all(normal_ints == portable_ints)
class ImzMLWriter(unittest.TestCase):
    """Smoke test for pyimzml.ImzMLWriter."""

    def test_simple_write(self):
        """A single random spectrum can be written in 'processed' mode.

        Fixes two test-hygiene problems with the original: the output went to
        "test.mzML" in the current working directory (leaking artifacts
        between runs) and used the plain mzML extension for an imzML file.
        Writing into a TemporaryDirectory cleans up automatically.
        """
        import os
        import tempfile

        mzs = np.linspace(100, 1000, 20)
        ints = np.random.rand(mzs.shape[0])
        coords = [1, 1, 1]
        with tempfile.TemporaryDirectory() as tmp_dir:
            output_path = os.path.join(tmp_dir, "test.imzML")
            with imzmlw.ImzMLWriter(output_path, mode="processed") as imzml:
                imzml.addSpectrum(mzs, ints, coords=coords)
            # The writer must have produced the XML half of the imzML pair.
            self.assertTrue(os.path.exists(output_path))
if __name__ == '__main__':
    # Run the full suite when this module is executed directly.
    unittest.main()
"content_hash": "4dedf7065a03339963d543c60ce614fe",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 116,
"avg_line_length": 49.37714285714286,
"alnum_prop": 0.6056012035644023,
"repo_name": "alexandrovteam/pyimzML",
"id": "afc4816eaf369122c425ead0ca91586de7205915",
"size": "8641",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_basic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "267445"
}
],
"symlink_target": ""
} |
"""
Class Hierarchy
G{classtree: ControllerManager}
Package tree
G{packagetree: controller_manager}
Import Graph
G{importgraph: controller_manager}
"""
from controller import *
class ControllerManager:
    """Creates the service controllers once and hands out the shared
    instances via the get_* accessors."""

    def __init__(self):
        print "[ControllerManager] init ..."
        self.apache_controller = ApacheController()
        """ @type: L{ApacheController} """
        self.mysql_controller = MysqlController()
        """ @type: L{MysqlController} """
        self.robust_controller = RobustController()
        """ @type: L{RobustController} """
        print "[ControllerManager] OK"

    def get_apache_controller(self):
        """Return the shared ApacheController instance."""
        return self.apache_controller

    def get_mysql_controller(self):
        """Return the shared MysqlController instance."""
        return self.mysql_controller

    def get_robust_controller(self):
        """Return the shared RobustController instance."""
        return self.robust_controller
class ControllerManagerTesting(ControllerManager):
    # Currently identical to ControllerManager; presumably a hook for
    # substituting test behavior -- no overrides defined yet.
    pass
| {
"content_hash": "7f4796315177affe95d61061254c2f6b",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 50,
"avg_line_length": 20.275,
"alnum_prop": 0.7348951911220715,
"repo_name": "justasabc/kubernetes-ubuntu",
"id": "de24097133b0e872eebce2ee9975e126872de690",
"size": "811",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ke/images/python/controller_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3307"
},
{
"name": "C#",
"bytes": "19553875"
},
{
"name": "CSS",
"bytes": "4462"
},
{
"name": "Go",
"bytes": "10376"
},
{
"name": "HTML",
"bytes": "20204"
},
{
"name": "Java",
"bytes": "3258"
},
{
"name": "JavaScript",
"bytes": "7373"
},
{
"name": "Makefile",
"bytes": "691"
},
{
"name": "NSIS",
"bytes": "6208"
},
{
"name": "PHP",
"bytes": "1029"
},
{
"name": "PLpgSQL",
"bytes": "599"
},
{
"name": "Perl",
"bytes": "3578"
},
{
"name": "Python",
"bytes": "192462"
},
{
"name": "Ruby",
"bytes": "1111"
},
{
"name": "Shell",
"bytes": "97267"
}
],
"symlink_target": ""
} |
from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, text
from sqlalchemy.orm import relationship
from Houdini.Data import Base
metadata = Base.metadata
class Puffle(Base):
    """ORM mapping for the `puffle` table: one pet puffle per row, owned by
    a penguin."""
    __tablename__ = 'puffle'

    ID = Column(Integer, primary_key=True)
    # Owning penguin; rows follow the owner on delete/update (CASCADE).
    PenguinID = Column(ForeignKey(u'penguin.ID', ondelete=u'CASCADE', onupdate=u'CASCADE'), nullable=False, index=True)
    Name = Column(String(16), nullable=False)
    # Set by the database at insert time.
    AdoptionDate = Column(DateTime, nullable=False, server_default=text("current_timestamp()"))
    Type = Column(Integer, nullable=False)
    # Care stats; no value range is enforced at the schema level.
    Health = Column(Integer, nullable=False)
    Hunger = Column(Integer, nullable=False)
    Rest = Column(Integer, nullable=False)
    # Integer flag, DB default 0 -- presumably non-zero while being walked;
    # TODO confirm against the game server code.
    Walking = Column(Integer, server_default=text("0"))

    penguin = relationship(u'Penguin')
"content_hash": "772ab146ffbbe18a6dc09223571c9c3d",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 119,
"avg_line_length": 39.9,
"alnum_prop": 0.7280701754385965,
"repo_name": "TunnelBlanket/Houdini",
"id": "20523eb983171d0651b2e704591ed1699cf92bcc",
"size": "814",
"binary": false,
"copies": "1",
"ref": "refs/heads/as2",
"path": "Houdini/Data/Puffle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "196286"
}
],
"symlink_target": ""
} |
r'''
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from collections import namedtuple
import logging
import socket
import ssl
import time
import urllib
import ast
from nogotofail.mitm.connection import handlers
from nogotofail.mitm.connection.handlers import preconditions
from nogotofail.mitm.util import close_quietly, truncate, PiiStore
# Lightweight record for one installed app reported by the blame client:
# (package name, version string).
Application = namedtuple("Application", ["package", "version"])
class Client(object):
"""Class representing a blame client connection.
NOTE: You should avoid using this directly for client queries because if the client reconnects
a new Client will be made."""
CLIENT_TIMEOUT = 21600
class Callback(object):
    # Bookkeeping for one outstanding blame query: the completion function,
    # its timeout in seconds (0 appears to mean no timeout -- see
    # check_timeouts), and when the query was issued.
    def __init__(self, fn, timeout, now=None):
        self.fn = fn
        self.timeout = timeout
        self.start = now or time.time()
def __init__(self, socket, server, now=None):
    self.socket = socket
    self.server = server
    self.info = None
    self.last_used = now or time.time()
    # TLS clients need the handshake pumped by select before the plaintext
    # blame handshake can run.
    if isinstance(socket, ssl.SSLSocket):
        self._select_fn = self._ssl_handshake_select_fn
    else:
        self._select_fn = self._handshake_select_fn
    self.queries = {}  # txid -> Client.Callback for in-flight queries
    self._txid = 0
    self._buffer = ""  # partial line carried over between reads
    self.address = self.socket.getpeername()[0]
    self.logger = logging.getLogger("nogotofail.mitm")
    self._handshake_completed = False
    self.pii_store = None
@property
def available(self):
    """Returns if the client is currently available."""
    # Usable only once the blame handshake has finished.
    return self._handshake_completed
@property
def next_txid(self):
"""Returns the next unused transaction id for a blame request."""
id = self._txid
self._txid += 1
return id
def on_select(self):
    """Should be called when select has returned self.socket as ready for reading."""
    self.last_used = time.time()
    # Dispatch to the handler for the connection's current state (TLS or
    # plaintext handshake); handlers return False to drop the client.
    return self._select_fn()
def check_timeouts(self):
"""Returns if the connection or any of its callbacks have timed out."""
now = time.time
if now - self.last_used > self.CLIENT_TIMEOUT:
return False
for callback in self.queries.values():
if now >= callback.start + callback.timeout and callback.timeout != 0:
return False
return True
def close(self):
    """Close the connection to the client.

    Every pending query callback is invoked with False so callers learn
    their request failed.
    """
    close_quietly(self.socket)
    for pending in self.queries.values():
        pending.fn(False)
def get_applications_async(
        self, client_port, server_addr, server_port, callback, timeout=10):
    """See Server.get_applications_async"""
    self.last_used = time.time()
    txid = self.next_txid
    # Wire format: "<txid> tcp_client_id <port> <hex server addr> <port>\n".
    family = socket.AF_INET6 if ":" in server_addr else socket.AF_INET
    message = (
        unicode(
            "%d tcp_client_id %s %s %s\n" %
            (txid, client_port,
             socket.inet_pton(family, server_addr).encode("hex"),
             server_port)))
    try:
        self.socket.sendall(message)
    except socket.error as e:
        # NOTE(review): this log message mentions vuln_notify but the query
        # being sent is tcp_client_id -- looks like a copy/paste slip.
        self.logger.info(
            "Blame: Error sending vuln_notify to %s: %s." % (self.address, e))
        return False
    # The reply arrives asynchronously; park the callback under txid.
    self.queries[txid] = Client.Callback(
        self._generate_on_get_applications_fn(callback), timeout)
    return True
def vuln_notify_async(self, server_addr, server_port, id,
                      type, applications, callback, timeout=10):
    """See Server.vuln_notify_async."""
    # NOTE(review): `id` and `type` shadow builtins, but they are part of
    # the public signature so they are kept as-is.
    self.last_used = time.time()
    txid = self.next_txid
    # Wire format: "<txid> vuln_notify <id> <type> <addr> <port> <apps>\n"
    # where <apps> is comma-separated, URL-quoted "package version" pairs.
    message = unicode("%d vuln_notify %s %s %s %d %s\n" %
                      (txid, id, type, server_addr, server_port,
                       ", ".join(
                           ["%s %s" % (urllib.quote(app.package), app.version) for app in applications])))
    try:
        self.socket.sendall(message)
    except socket.error as e:
        self.logger.info("AppBlame notify error for %s, %s." % (self.address, e))
        return False
    # Register the callback so _handle_client_line can route the response.
    self.queries[txid] = Client.Callback(
        self._generate_on_vuln_notify_fn(callback), timeout)
    return True
def _generate_on_vuln_notify_fn(self, callback):
    """Wrap callback as a response handler for a vuln_notify query."""
    def on_vuln_notify(success, data=None):
        if success:
            # The client answers "OK" when it displayed the notification.
            callback(True, data == "OK")
        else:
            callback(False)
            # A failed query means the client is gone; drop it.
            self.server.remove_client(self.address)
    return on_vuln_notify
def _generate_on_get_applications_fn(self, callback):
    """Wrap callback as a handler that parses a tcp_client_id response."""
    def on_get_applications(success, data=None):
        if not success:
            callback(False)
            # A failed query means the client is gone; drop it.
            self.server.remove_client(self.address)
            return
        platform_info = self.info.get(
            "Platform-Info", "Unknown")
        # Payload: comma-separated, URL-quoted "package version" entries.
        apps = data.split(",")
        try:
            callback(True, platform_info,
                     [Application(*map(urllib.unquote, app.strip().split(" ", 1)))
                      for app in apps])
        except (ValueError, TypeError):
            # Malformed application list -- report failure instead of raising.
            callback(False)
    return on_get_applications
def _ssl_handshake_select_fn(self):
    """Drive the TLS handshake; used as _select_fn until it completes."""
    # do_handshake() would block on a plain socket, so run the attempt in
    # non-blocking mode and restore blocking mode once it succeeds.
    self.socket.setblocking(False)
    try:
        self.socket.do_handshake()
    except socket.error:
        # Handshake not finished yet (or broken); retry on the next select.
        return True
    self.socket.setblocking(True)
    # TLS is up; move on to the app-blame protocol handshake.
    self._select_fn = self._handshake_select_fn
    return True
def _handshake_select_fn(self):
    """Handle client data during the handshake.

    Returns False when the client should be disconnected, True otherwise.
    """
    try:
        data = self.socket.recv(8192)
    except socket.error:
        # Typo fix: "Erorr" -> "Error" (matches the other log messages).
        self.logger.info("Blame: Error reading from client %s.", self.address)
        return False
    if not data:
        self.logger.info("Blame: Client %s closed connection.", self.address)
        return False
    data = self._buffer + data
    lines = data.split("\n")
    # Check if there is still more data to be read.
    # Some clients send \r\n line endings and some \n, so strip extra
    # whitespace.
    if lines[-1].strip() != "":
        self._buffer = data
        # BUG FIX: this was a bare `return` (None). on_select's caller treats
        # any falsy return as fatal and removes the client, so a handshake
        # that arrived split across reads disconnected the client.
        return True
    try:
        # BUG FIX: the "\n\n" search used to sit outside this try block, so a
        # buffer ending in a single newline raised an uncaught ValueError
        # instead of producing the 400 response below.
        data = data.replace("\r", "")
        lines = data[:data.index("\n\n")].split("\n")
        self._parse_headers(lines)
        self._send_headers()
    except (ValueError, KeyError, IndexError, socket.error) as e:
        try:
            self.socket.sendall("400 Error parsing message\n\n")
        except socket.error:
            pass
        self.logger.info("Blame: Bad handshake from %s: %s" % (self.address, e))
        return False
    # TODO: Handle any extra data after the handshake, there shouldn't be
    # any in the current version of the protocol.
    # Done!
    self.logger.info("Blame: New client from %s", self.address)
    self._select_fn = self._response_select_fn
    self._handshake_completed = True
    return True
def _send_headers(self):
    """Send the handshake response: status line, config headers, blank line."""
    # Send the OK
    self.socket.sendall("0 OK\n")
    # Send the configs
    # Per-client header values override the server-wide defaults.
    prob = self.info.get("Attack-Probability", self.server.default_prob)
    self.socket.sendall("Attack-Probability: %f\n" % prob)
    attacks = self.info.get("Attacks", self.server.default_attacks)
    attacks_str = ",".join([attack.name for attack in attacks])
    self.socket.sendall("Attacks: %s\n" % attacks_str)
    # Advertise every connection handler this build knows about.
    supported_str = ",".join([
        attack
        for attack in
        handlers.connection.handlers.map])
    self.socket.sendall("Supported-Attacks: %s\n" % supported_str)
    data = self.info.get("Data-Attacks", self.server.default_data)
    data_str = ",".join([attack.name for attack in data])
    self.socket.sendall("Data-Attacks: %s\n" % data_str)
    # Advertise every data handler as well.
    supported_data = ",".join([
        attack
        for attack in handlers.data.handlers.map])
    self.socket.sendall("Supported-Data-Attacks: %s\n" % supported_data)
    # A blank line terminates the header block.
    self.socket.sendall("\n")
def _parse_headers(self, lines):
    """Parse the handshake header lines into self.info and self.pii_store.

    Raises ValueError/KeyError/IndexError on malformed input; the caller
    (_handshake_select_fn) turns those into a 400 response.
    """
    raw_headers = [line.split(":", 1) for line in lines[1:]]
    headers = {entry.strip(): header.strip()
               for entry, header in raw_headers}
    client_info = {}
    # Platform-Info is required, fail if not present
    client_info["Platform-Info"] = headers["Platform-Info"]
    # Everything else is optional
    if "Installation-ID" in headers:
        client_info["Installation-ID"] = headers["Installation-ID"]
    if "Attack-Probability" in headers:
        value = float(headers["Attack-Probability"])
        if value < 0 or value > 1.0:
            raise ValueError("Attack-Probability outside range")
        client_info["Attack-Probability"] = value
    if "Attacks" in headers:
        attacks = headers["Attacks"].split(",")
        attacks = map(str.strip, attacks)
        # Unknown attack names are silently ignored.
        client_info["Attacks"] = preconditions.filter_preconditions([
            handlers.connection.handlers.map[attack]
            for attack in attacks
            if attack in handlers.connection.handlers.map])
    if "Data-Attacks" in headers:
        attacks = headers["Data-Attacks"].split(",")
        attacks = map(str.strip, attacks)
        client_info["Data-Attacks"] = preconditions.filter_preconditions(
            [handlers.data.handlers.map[attack]
             for attack in attacks
             if attack in handlers.data.handlers.map])
    # BUG FIX: these were only assigned inside the conditionals below, so a
    # client that omitted PII-Items or PII-Location caused a NameError when
    # they were used at the bottom of this method.
    client_pii_items = {}
    client_pii_location = {}
    if "PII-Items" in headers:
        # literal_eval only accepts Python literals, so it is safe on
        # untrusted input (unlike eval).
        client_pii_items = ast.literal_eval(headers["PII-Items"])
        # TODO: Think if HTML encoding is needed for PII information.
        # e.g. ',",&,<,> characters.
    if "PII-Location" in headers:
        # Convert personal location string to a dictionary
        personal_location = ast.literal_eval(headers["PII-Location"])
        if personal_location:
            longitude = personal_location.get("longitude", "0.00000")
            latitude = personal_location.get("latitude", "0.00000")
        else:
            longitude = "0.00000"
            latitude = "0.00000"
        # Truncate to 2 decimal places so only coarse location is kept.
        client_pii_location["longitude"] = truncate(float(longitude), 2)
        client_pii_location["latitude"] = truncate(float(latitude), 2)
    # Store the raw headers as well in case a handler needs something the
    # client sent in an additional header
    client_info["headers"] = headers
    self.info = client_info
    # Merge client and server pii items.
    server_pii_items = self.server.pii["items"]
    merge_pii_ids = client_pii_items.copy()
    merge_pii_ids.update(server_pii_items)
    # Create pii_store attribute which holds pii items.
    self.pii_store = PiiStore(merge_pii_ids, client_pii_location)
def _response_select_fn(self):
    """Read query responses once the handshake is done."""
    try:
        data = self.socket.recv(8192)
    except socket.error:
        self.logger.info("Blame: Error reading from client %s.", self.address)
        return False
    if not data:
        self.logger.info("Blame: Client %s closed connection", self.address)
        return False
    # Dispatch every complete line; keep any trailing partial line buffered.
    pending = self._buffer + data
    while True:
        line, sep, rest = pending.partition("\n")
        if not sep:
            break
        self._handle_client_line(line)
        pending = rest
    self._buffer = pending
    return True
def _handle_client_line(self, line):
    """Route one response line to its pending query's callback.

    A response is either "id <response>\n" or "id\n" if the command failed.
    """
    txid_str, _, data = line.strip().partition(" ")
    try:
        txid = int(txid_str)
    except ValueError:
        # ROBUSTNESS FIX: a malformed line from the (untrusted) client used
        # to raise out of the select loop; log and ignore it instead.
        self.logger.debug(
            "Blame: Malformed response line from %s", self.address)
        return
    # pop() looks up and removes the pending query in one step.
    callback = self.queries.pop(txid, None)
    if callback:
        callback.fn(True, data)
    else:
        self.logger.debug("Blame: Response for unknown txid %d from %s", txid, self.address)
class Server:
    """Server for managing connections to the connection blaming app on devices.

    Maintains one Client per client ip address; sockets are multiplexed by
    the owner through select_fds/on_select.
    """
    port = None
    clients = None

    def __init__(self, port, cert, default_prob, default_attacks, default_data,
                 config_pii):
        """Arguments:
        port -- TCP port to listen on
        cert -- path to an SSL certificate file, or None for plaintext
        default_prob -- attack probability used when a client sends none
        default_attacks -- connection handlers used when a client sends none
        default_data -- data handlers used when a client sends none
        config_pii -- server-side PII configuration dict
        """
        self.txid = 0
        self.kill = False
        self.port = port
        self.cert = cert
        self.default_prob = default_prob
        self.default_attacks = default_attacks
        self.default_data = default_data
        # clients maps client ip -> Client; fd_map maps socket -> client ip.
        self.clients = {}
        self.fd_map = {}
        self.logger = logging.getLogger("nogotofail.mitm")
        self.server_socket = None
        # Server config pii parameters
        self.pii = config_pii

    def start_listening(self):
        """Bind and listen on self.port, wrapping in SSL if a cert was given."""
        self.server_socket = socket.socket()
        self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.server_socket.bind(("", self.port))
        self.server_socket.listen(5)
        self.server_socket.settimeout(2)
        if self.cert:
            # Handshake is driven later by the Client's select handler.
            self.server_socket = (
                ssl.wrap_socket(
                    self.server_socket, certfile=self.cert,
                    server_side=True,
                    do_handshake_on_connect=False))

    def _on_server_socket_select(self):
        """Accept a new client connection, replacing any stale one."""
        try:
            (client_socket, client_address) = self.server_socket.accept()
        except socket.error:
            # In a wrapped SSL socket accept() can raise exceptions, if we get
            # one the client connection is broken so do nothing.
            return
        client_addr, client_port = client_address
        self.logger.debug("Blame: Connection from %s:%d", client_addr, client_port)
        old_client = self.clients.get(client_addr, None)
        if old_client:
            # BUG FIX: this used to pass the (addr, port) tuple, which is
            # never a key in self.clients, so the stale client was never
            # actually removed or closed.
            self.remove_client(client_addr)
        self.fd_map[client_socket] = client_addr
        self.clients[client_addr] = Client(client_socket, self)

    def _on_socket_select(self, sock):
        """Dispatch a ready socket to either accept() or its Client."""
        if sock is self.server_socket:
            self._on_server_socket_select()
            return
        client_addr = self.fd_map[sock]
        client = self.clients[client_addr]
        if not client.on_select():
            self.remove_client(client_addr)

    def client_available(self, client_addr):
        """Returns if the app blame client is running on client_addr.
        This is best effort only, it may return True for lost clients.
        """
        return client_addr in self.clients and self.clients[client_addr].available

    def get_applications_async(
            self, client_addr, client_port, server_addr, server_port, callback, timeout=10):
        """Fetch the application information for a given connection tuple calling a callback when
        the response is received.
        Returns if the request was sent to the client.
        NOTE: If False is returned the callback will never be called.
        Arguments:
        client_addr -- the client ip address to query
        client_port -- the source port on the client
        server_addr -- the destination ip address as seen by the client
        server_port -- the destination port as seen by the client
        callback -- function to call when data is ready, should be of the form
          def fn(success, platform_info=None, applications=None)
        timeout -- timeout for the request"""
        if not self.client_available(client_addr):
            return False
        if not self.clients[client_addr].get_applications_async(client_port,
                server_addr, server_port, callback, timeout):
            # Send failed -- the client connection is dead.
            self.remove_client(client_addr)
            return False
        return True

    def vuln_notify_async(self, client_addr, server_addr, server_port, id,
                          type, applications, callback, timeout=10):
        """Send a notification to client_addr of a vulnerability in applications.
        Returns if the notification was sent successfully
        Arguments:
        client_addr -- Client to notify
        server_addr -- remote destination of the vulnerable connection
        server_port -- remote port of the vulnerable connection
        id -- An opaque blob to identify the connection later on
        type -- Type of vuln. See nogotofail.mitm.util.vuln.*
        applications -- List of Applications to blame
        callback -- Function to call when a response is received. Should be of the form:
          def callback(success, result=False)
          success -- If the client responded to the notification
          result -- If the client showed the vulnerability
        """
        if not self.client_available(client_addr):
            return False
        result = self.clients[client_addr].vuln_notify_async(server_addr, server_port,
                id, type, applications, callback, timeout)
        if not result:
            # Send failed -- the client connection is dead.
            self.remove_client(client_addr)
        return result

    def remove_client(self, client_addr):
        """Remove and close a blame client. A no-op for unknown addresses."""
        if client_addr not in self.clients:
            return
        client = self.clients[client_addr]
        del self.clients[client_addr]
        del self.fd_map[client.socket]
        client.close()

    def check_timeouts(self):
        """Check the timeouts on all clients and remove those that have timed out."""
        # Snapshot the keys: remove_client mutates self.clients while we
        # iterate (required on Python 3, harmless on Python 2).
        for client_addr in list(self.clients.keys()):
            if not self.clients[client_addr].check_timeouts():
                self.logger.info("Blame: Client %s timed out", client_addr)
                self.remove_client(client_addr)

    @property
    def select_fds(self):
        """Returns the tuple of r,w,x fds to be sent to select."""
        return (set([client.socket for client in self.clients.values()] + [self.server_socket])
                , set(), set())

    def on_select(self, r, w, x):
        """Called with the results of select.select. Note that all r,w,x is a subset of the values
        provided by select_fds."""
        for fd in set(r + w + x):
            self._on_socket_select(fd)

    def shutdown(self):
        """Shutdown the Blame server. The server should not be used after this point."""
        self.server_socket.close()
        for client in self.clients.values():
            # Best effort: keep closing the remaining clients even if one
            # of them fails.
            try:
                client.close()
            except Exception:
                # Narrowed from a bare except so SystemExit/KeyboardInterrupt
                # still propagate.
                pass

    ### Noseyparker methods
    def get_pii(self):
        """ Function return server config pii values.
        """
        if self.pii:
            return self.pii
        else:
            return {}
| {
"content_hash": "babf676a7bba9f3d9601ed66d8f0538c",
"timestamp": "",
"source": "github",
"line_count": 498,
"max_line_length": 99,
"avg_line_length": 38.67269076305221,
"alnum_prop": 0.5908925697076691,
"repo_name": "mkenne11/nogotofail-pii",
"id": "d85ebdf9b3708cdd4be104398ff288a6dbdd1191",
"size": "19259",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "nogotofail/mitm/blame/app_blame.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "197243"
},
{
"name": "Python",
"bytes": "384478"
}
],
"symlink_target": ""
} |
from OpenGLCffi.EGL import params
@params(api='egl', prms=['dpy', 'max_formats', 'formats', 'num_formats'])
def eglQueryDmaBufFormatsEXT(dpy, max_formats, formats, num_formats):
    # Stub: the @params decorator binds this signature to the native EGL
    # entry point (EGL_EXT_image_dma_buf_import_modifiers); the Python body
    # itself is never executed.
    pass
@params(api='egl', prms=['dpy', 'format', 'max_modifiers', 'modifiers', 'external_only', 'num_modifiers'])
def eglQueryDmaBufModifiersEXT(dpy, format, max_modifiers, modifiers, external_only, num_modifiers):
    # Stub: bound to the native EGL entry point by @params; `format` shadows
    # the builtin but must match the prms list above, so it is kept.
    pass
| {
"content_hash": "d7826bb481e978af1d0edd437e2db23a",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 106,
"avg_line_length": 36.54545454545455,
"alnum_prop": 0.7288557213930348,
"repo_name": "cydenix/OpenGLCffi",
"id": "5f6ab038808af19a41f6c5dd875908f306913e5c",
"size": "402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OpenGLCffi/EGL/EXT/EXT/image_dma_buf_import_modifiers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1646"
},
{
"name": "C++",
"bytes": "188"
},
{
"name": "Python",
"bytes": "1853617"
}
],
"symlink_target": ""
} |
from nltk.tokenize import sent_tokenize, WordPunctTokenizer
class Tokenizer:
    """Split text into sentence/words."""

    def __init__(self):
        # WordPunctTokenizer splits into alphabetic and punctuation tokens.
        self.tokenizer = WordPunctTokenizer()

    def sentences(self, text):
        """Split a text into sentences.

        :param text: string including sentence(s)
        :return: list of sentences
        """
        return sent_tokenize(text)

    def words(self, sentence):
        """Split a sentence into words.

        :param sentence: a sentence
        :return: list of words
        """
        return self.tokenizer.tokenize(sentence)
| {
"content_hash": "a059afbd6ae390a92a35ab8a0b311ba3",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 59,
"avg_line_length": 24.958333333333332,
"alnum_prop": 0.6193656093489148,
"repo_name": "stephanos/subvoc",
"id": "22f214951cafe9c3561c8846660ca8da4cd760a7",
"size": "599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "domain/tokenizer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6646"
},
{
"name": "HTML",
"bytes": "2443"
},
{
"name": "JavaScript",
"bytes": "1294927"
},
{
"name": "Python",
"bytes": "64112"
},
{
"name": "Shell",
"bytes": "3154"
}
],
"symlink_target": ""
} |
import matplotlib
from numpy.random import randn
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
def to_percent(y, position):
    """Matplotlib tick formatter that renders a fraction y as a percentage.

    Arguments:
    y -- tick value (a fraction of 1.0)
    position -- tick position; unused, but required by FuncFormatter's
        callback signature.
    """
    s = str(100 * y)
    # The percent symbol needs escaping in latex
    # IDIOM FIX: was `... is True`, which relies on identity with the True
    # singleton and silently fails for any other truthy value; plain
    # truthiness is the robust test.
    if matplotlib.rcParams['text.usetex']:
        return s + r'$\%$'
    else:
        return s + '%'
x = randn(5000)
# Make a normed histogram. It'll be multiplied by 100 later.
# NOTE(review): the `normed` kwarg was removed in matplotlib 3.x in favour
# of `density=True` -- confirm the matplotlib version before upgrading.
plt.hist(x, bins=50, normed=True)
# Create the formatter using the function to_percent. This multiplies all the
# default labels by 100, making them all percentages
formatter = FuncFormatter(to_percent)
# Set the formatter
plt.gca().yaxis.set_major_formatter(formatter)
plt.show()
| {
"content_hash": "bfa97ef772d6862d08f18679e50e1189",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 79,
"avg_line_length": 26.766666666666666,
"alnum_prop": 0.7173100871731009,
"repo_name": "bundgus/python-playground",
"id": "9d7b5d34423f669ed13f3181cde2f53fd735e641",
"size": "803",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "matplotlib-playground/examples/pylab_examples/histogram_percent_demo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "68"
},
{
"name": "Gherkin",
"bytes": "897"
},
{
"name": "HTML",
"bytes": "22309040"
},
{
"name": "Jupyter Notebook",
"bytes": "666681"
},
{
"name": "Python",
"bytes": "1046557"
},
{
"name": "Thrift",
"bytes": "58"
}
],
"symlink_target": ""
} |
from pox.core import core
from pox.lib.util import initHelper
from pox.lib.util import hexdump
from pox.lib.addresses import parse_cidr, IPAddr, EthAddr, IPAddr6
import pox.lib.packet as pkt
import pox.openflow.libopenflow_01 as of
from pox.openflow.libopenflow_01 import ofp_header, ofp_vendor_base
from pox.openflow.libopenflow_01 import _PAD, _PAD2, _PAD4, _PAD6
from pox.openflow.libopenflow_01 import _unpack, _read, _skip
import struct
# -----------------------------------------------------------------------
# OpenFlow Stuff
# -----------------------------------------------------------------------
# Technically, this stuff is part of OpenFlow 1.1+ and shouldn't be in
# this file. Since we don't have 1.1+ support yet, it's here at least
# temporarily.
OFPR_INVALID_TTL = 2 # Packet has invalid TTL
OFPC_INVALID_TTL_TO_CONTROLLER = 4
# -----------------------------------------------------------------------
# Nicira extensions
# -----------------------------------------------------------------------
# OUI used by Nicira/OVS in the vendor field of its extension messages.
NX_VENDOR_ID = 0x00002320
def _init_constants ():
  """Publish the NXAST_* action-subtype constants into module globals.

  Each name gets its index in the list below as its value.
  """
  action_names = [
    "NXAST_SNAT__OBSOLETE",
    "NXAST_RESUBMIT",
    "NXAST_SET_TUNNEL",
    "NXAST_DROP_SPOOFED_ARP__OBSOLETE",
    "NXAST_SET_QUEUE",
    "NXAST_POP_QUEUE",
    "NXAST_REG_MOVE",
    "NXAST_REG_LOAD",
    "NXAST_NOTE",
    "NXAST_SET_TUNNEL64",
    "NXAST_MULTIPATH",
    "NXAST_AUTOPATH__DEPRECATED",
    "NXAST_BUNDLE",
    "NXAST_BUNDLE_LOAD",
    "NXAST_RESUBMIT_TABLE",
    "NXAST_OUTPUT_REG",
    "NXAST_LEARN",
    "NXAST_EXIT",
    "NXAST_DEC_TTL",
    "NXAST_FIN_TIMEOUT",
    "NXAST_CONTROLLER",
    "NXAST_DEC_TTL_CNT_IDS",
    "NXAST_WRITE_METADATA",
    "NXAST_PUSH_MPLS",
    "NXAST_POP_MPLS",
    "NXAST_SET_MPLS_TTL",
    "NXAST_DEC_MPLS_TTL",
    "NXAST_STACK_PUSH",
    "NXAST_STACK_POP",
    "NXAST_SAMPLE",
  ]
  globals().update((name, value) for value, name in enumerate(action_names))
_init_constants()
# Nicira vendor-message subtypes (NXT_*), carried in the subtype field of
# nicira_base messages.
NXT_ROLE_REQUEST = 10
NXT_ROLE_REPLY = 11
NXT_SET_FLOW_FORMAT = 12
NXT_FLOW_MOD = 13
NXT_FLOW_MOD_TABLE_ID = 15
NXT_SET_PACKET_IN_FORMAT = 16
NXT_PACKET_IN = 17
NXT_FLOW_AGE = 18
NXT_SET_ASYNC_CONFIG = 19
NXT_SET_CONTROLLER_ID = 20
NXT_FLOW_MONITOR_CANCEL = 21
NXT_FLOW_MONITOR_PAUSED = 22
NXT_FLOW_MONITOR_RESUMED = 23
# Nicira statistics subtypes (NXST_*). Request and reply sharing the value 2
# appears intentional (stats requests and replies are numbered in separate
# spaces) -- confirm against Open vSwitch's nicira-ext.h.
NXST_FLOW_MONITOR_REQUEST = 2
NXST_FLOW_MONITOR_REPLY = 2
#TODO: Replace with version in pox.lib?
def _issubclass (a, b):
  """issubclass() that treats non-class arguments as False instead of raising."""
  try:
    result = issubclass(a, b)
  except TypeError:
    result = False
  return result
class nicira_base (ofp_vendor_base):
  """
  Base class for Nicira extensions

  Subclasses set `subtype` and override the underscore-prefixed template
  methods (_init/_eq/_pack_body/_unpack_body/_body_length/_show).
  """
  _MIN_LENGTH = 16
  vendor = NX_VENDOR_ID
  #subtype = None # Set

  def _eq (self, other):
    """
    Return True if equal
    Overide this.
    """
    return True

  def _init (self, kw):
    """
    Initialize fields
    Overide this.
    """
    pass

  def _pack_body (self):
    """
    Pack body.
    """
    return b""

  def _unpack_body (self, raw, offset, avail):
    """
    Unpack body in raw starting at offset.
    Return new offset
    """
    return offset

  def _body_length (self):
    """
    Return length of body.
    Optionally override this.
    """
    return len(self._pack_body())

  def _show (self, prefix):
    """
    Format additional fields as text
    """
    return ""

  def __init__ (self, **kw):
    ofp_vendor_base.__init__(self)
    self._init(kw)
    # Every concrete subclass must define its message subtype.
    assert hasattr(self, 'vendor')
    assert hasattr(self, 'subtype')
    initHelper(self, kw)

  def pack (self):
    assert self._assert()
    # Header, then vendor/subtype, then the subclass body.
    packed = b""
    packed += ofp_vendor_base.pack(self)
    packed += struct.pack("!LL", self.vendor, self.subtype)
    packed += self._pack_body()
    return packed

  def unpack (self, raw, offset=0):
    offset,length = self._unpack_header(raw, offset)
    offset,(self.vendor,self.subtype) = _unpack("!LL", raw, offset)
    # 16 = ofp header (8) + vendor (4) + subtype (4).
    offset = self._unpack_body(raw, offset, length-16)
    return offset,length

  def __len__ (self):
    return 16 + self._body_length()

  def __eq__ (self, other):
    if type(self) != type(other): return False
    if not ofp_vendor_base.__eq__(self, other): return False
    if self.vendor != other.vendor: return False
    if self.subtype != other.subtype: return False
    return self._eq(other)

  def __ne__ (self, other): return not self.__eq__(other)

  def show (self, prefix=''):
    outstr = ''
    outstr += prefix + 'header: \n'
    outstr += ofp_vendor_base.show(self, prefix + ' ')
    outstr += prefix + 'vendor: ' + str(self.vendor) + '\n'
    # BUG FIX: this was `len(self.subtype)` -- len() of an int raises
    # TypeError, so show() crashed on every instance.
    outstr += prefix + 'subtype: ' + str(self.subtype) + '\n'
    outstr += self._show(prefix)
    return outstr
class nx_flow_mod_table_id (nicira_base):
  """
  Used to enable the flow mod table ID extension

  When this is enabled, a slightly altered ofp_flow_mod can be used
  to set the table for a flow insertion. A convenient version of this
  slightly altered flow_mod is available as ofp_flow_mod_table_id.
  """
  subtype = NXT_FLOW_MOD_TABLE_ID
  _MIN_LENGTH = 16 + 8

  def _init (self, kw):
    # Called "set" by OVS.
    self.enable = True

  def _eq (self, other):
    """Equality on the body: just the enable flag."""
    return self.enable == other.enable

  def _pack_body (self):
    """Body is one flag byte followed by 7 bytes of padding."""
    flag = 1 if self.enable else 0
    return struct.pack("!B", flag) + (of._PAD * 7)

  def _unpack_body (self, raw, offset, avail):
    """Read the flag byte and skip the padding; return the new offset."""
    offset, (flag,) = of._unpack("!B", raw, offset)
    offset = of._skip(raw, offset, 7)
    self.enable = bool(flag)
    return offset

  def _body_length (self):
    """Body length is fixed: flag byte plus padding."""
    return len(self._pack_body())

  def _show (self, prefix):
    """Render the enable flag under OVS's name for it ("set")."""
    return prefix + "set: " + str(self.enable) + "\n"
class ofp_flow_mod_table_id (of.ofp_flow_mod):
  """
  A subclass of ofp_flow_mod which has a table_id
  This is for use with the NXT_FLOW_MOD_TABLE_ID extension.
  """
  def __init__ (self, **kw):
    # 0xff selects "all tables" until the caller picks one.
    self.table_id = 0xff
    of.ofp_flow_mod.__init__(self, **kw)

  def splice_table_id (func):
    """
    Execute wrapped function with table_id temporarily stored as
    MSB of command field.
    """
    def splice(self, *args):
      assert self.command <= 0xff
      self.command |= self.table_id << 8
      try:
        retval = func(self, *args)
      finally:
        # Always split table_id back out, even if func raised.
        self.table_id = self.command >> 8
        self.command &= 0xff
      return retval
    return splice

  @splice_table_id
  def pack (self):
    return super(ofp_flow_mod_table_id, self).pack()

  @splice_table_id
  def unpack (self, raw, offset=0):
    # BUG FIX: the superclass call dropped its arguments
    # (`...unpack()`), so this method always raised a TypeError.
    return super(ofp_flow_mod_table_id, self).unpack(raw, offset)

  @splice_table_id
  def __eq__ (self, other):
    return super(ofp_flow_mod_table_id, self).__eq__(other)

  def show (self, prefix=''):
    outstr = ''
    outstr += prefix + 'header: \n'
    outstr += ofp_header.show(self, prefix + ' ')
    outstr += prefix + 'match: \n'
    outstr += self.match.show(prefix + ' ')
    outstr += prefix + 'cookie: ' + str(self.cookie) + '\n'
    outstr += prefix + 'command: ' + str(self.command) + '\n'
    outstr += prefix + 'table_id: ' + str(self.table_id) + '\n'
    outstr += prefix + 'idle_timeout: ' + str(self.idle_timeout) + '\n'
    outstr += prefix + 'hard_timeout: ' + str(self.hard_timeout) + '\n'
    outstr += prefix + 'priority: ' + str(self.priority) + '\n'
    outstr += prefix + 'buffer_id: ' + str(self.buffer_id) + '\n'
    outstr += prefix + 'out_port: ' + str(self.out_port) + '\n'
    outstr += prefix + 'flags: ' + str(self.flags) + '\n'
    outstr += prefix + 'actions: \n'
    for obj in self.actions:
      outstr += obj.show(prefix + ' ')
    return outstr
class nx_flow_mod (of.ofp_flow_mod, of.ofp_vendor_base):
  """
  A flow mod command that uses Nicira extended matches

  This has a table_id attribute, which only works if you have enabled
  the nx_flow_mod_table_id option.
  """
  _MIN_LENGTH = 32
  header_type = of.OFPT_VENDOR
  vendor = NX_VENDOR_ID
  subtype = NXT_FLOW_MOD

  def __init__ (self, **kw):
    self.table_id = 0
    of.ofp_flow_mod.__init__(self, **kw)
    if 'match' not in kw:
      # Superclass created an ofp_match -- replace it
      self.match = nx_match()

  def _validate (self):
    if not isinstance(self.match, nx_match):
      return "match is not class ofp_match"
    return None

  def pack (self):
    """
    Packs this object into its wire format.
    May normalize fields.
    NOTE: If "data" has been specified, this method may actually return
    *more than just a single ofp_flow_mod* in packed form.
    Specifically, it may also have a barrier and an ofp_packet_out.
    """
    po = None
    if self.data:
      #TODO: It'd be nice to log and then ignore if not data_is_complete.
      #      Unfortunately, we currently have no logging in here, so we
      #      assert instead which is a either too drastic or too quiet.
      assert self.data.is_complete
      assert self.buffer_id is None
      self.buffer_id = self.data.buffer_id
      if self.buffer_id is None:
        # BUG FIX: ofp_packet_out / ofp_action_output / OFPP_TABLE are not
        # in this module's namespace (only specific names are imported from
        # libopenflow_01), so these references raised NameError; qualify
        # them via the `of` module alias.
        po = of.ofp_packet_out(data=self.data)
        po.in_port = self.data.in_port
        po.actions.append(of.ofp_action_output(port = of.OFPP_TABLE))
        # Should maybe check that packet hits the new entry...
        # Or just duplicate the actions? (I think that's the best idea)
    assert self._assert()
    match = self.match.pack()
    match_len = len(match)
    # table_id rides in the high byte of the command field.
    command = self.command
    command |= (self.table_id << 8)
    packed = b""
    packed += ofp_header.pack(self)
    packed += struct.pack("!LL", self.vendor, self.subtype)
    packed += struct.pack("!QHHHHLHHH", self.cookie, command,
                          self.idle_timeout, self.hard_timeout,
                          self.priority, self._buffer_id, self.out_port,
                          self.flags, match_len)
    packed += _PAD6
    packed += match
    # CONSISTENCY FIX: use floor division (//) to match __len__ below;
    # plain / would produce a float under Python 3.
    packed += _PAD * ((match_len + 7)//8*8 - match_len)
    for i in self.actions:
      packed += i.pack()
    if po:
      # BUG FIX: qualified via `of.` (see above).
      packed += of.ofp_barrier_request().pack()
      packed += po.pack()
    assert len(packed) == len(self)
    return packed

  def unpack (self, raw, offset=0):
    _o = offset
    offset,length = self._unpack_header(raw, offset)
    offset,(vendor,subtype) = _unpack("!LL", raw, offset)
    offset,(self.cookie, self.command, self.idle_timeout,
            self.hard_timeout, self.priority, self._buffer_id,
            self.out_port, self.flags, match_len) = \
            _unpack("!QHHHHLHHH", raw, offset)
    offset = self._skip(raw, offset, 6)
    offset = self.match.unpack(raw, offset, match_len)
    offset,self.actions = of._unpack_actions(raw,
        length-(offset - _o), offset)
    assert length == len(self)
    return offset,length

  def __len__ (self):
    match_len = len(self.match)
    l = 8 + 4 + 4
    l += 8 + 2 + 2 + 2 + 2 + 4 + 2 + 2
    l += 2 # match_len
    l += 6 # pad
    l += match_len
    # Matches are padded out to a multiple of 8 bytes.
    l += (match_len + 7)//8*8 - match_len
    for i in self.actions:
      l += len(i)
    return l
# Packet_in formats (values for nx_packet_in_format.format)
NXPIF_OPENFLOW10 = 0 # Standard OpenFlow 1.0 packet_in format
NXPIF_NXM = 1 # Nicira Extended packet_in format
class nx_packet_in_format (nicira_base):
  """Selects which packet_in format the switch should send (NXPIF_*)."""
  subtype = NXT_SET_PACKET_IN_FORMAT
  _MIN_LENGTH = 16 + 4

  def _init (self, kw):
    # Default to the extended (NXM) packet_in format.
    self.format = NXPIF_NXM

  def _eq (self, other):
    """Equality on the body: just the format field."""
    return self.format == other.format

  def _pack_body (self):
    """Body is a single 32-bit format id."""
    return struct.pack("!I", self.format)

  def _unpack_body (self, raw, offset, avail):
    """Read the 32-bit format id; return the new offset."""
    offset, (self.format,) = of._unpack("!I", raw, offset)
    return offset

  def _show (self, prefix):
    """Render the format as a readable name."""
    labels = {NXPIF_NXM: "NXM", NXPIF_OPENFLOW10: "OF1.0"}
    value = labels.get(self.format, str(self.format))
    return prefix + "format: " + value + "\n"
# Controller roles (values for nx_role_request/nx_role_reply.role)
NX_ROLE_OTHER = 0
NX_ROLE_MASTER = 1
NX_ROLE_SLAVE = 2
class nx_role_request (nicira_base):
  """
  Requests master/slave/other role type
  Can initialize with role=NX_ROLE_x or with, e.g., master=True.
  """
  subtype = NXT_ROLE_REQUEST
  _MIN_LENGTH = 16 + 4

  def _init (self, kw):
    # Keyword flags override the default; a later flag wins if several are
    # set.  pop() consumes the flags before initHelper sees them.
    self.role = NX_ROLE_OTHER
    for flag, role in (("other", NX_ROLE_OTHER),
                       ("master", NX_ROLE_MASTER),
                       ("slave", NX_ROLE_SLAVE)):
      if kw.pop(flag, False):
        self.role = role

  @property
  def master (self):
    """True when the requested role is master."""
    return self.role == NX_ROLE_MASTER

  @property
  def slave (self):
    """True when the requested role is slave."""
    return self.role == NX_ROLE_SLAVE

  @property
  def other (self):
    """True when the requested role is other."""
    return self.role == NX_ROLE_OTHER

  def _eq (self, other):
    """Equality on the body: just the role field."""
    return self.role == other.role

  def _pack_body (self):
    """Body is a single 32-bit role id."""
    return struct.pack("!I", self.role)

  def _unpack_body (self, raw, offset, avail):
    """Read the 32-bit role id; return the new offset."""
    offset, (self.role,) = of._unpack("!I", raw, offset)
    return offset

  def _show (self, prefix):
    """Render the role as a readable name."""
    names = {NX_ROLE_OTHER: "other", NX_ROLE_MASTER: "master",
             NX_ROLE_SLAVE: "slave"}
    return prefix + "role: " + names.get(self.role, str(self.role)) + "\n"
class nx_role_reply (nx_role_request):
  # Identical body to the request; only the message subtype differs.
  subtype = NXT_ROLE_REPLY
  pass
# -----------------------------------------------------------------------
# Actions
# -----------------------------------------------------------------------
class nx_output_reg (of.ofp_action_vendor_base):
  """Action: output the packet to a port number read from a register.

  (NXAST_OUTPUT_REG) `reg` is an nxm_entry class naming the register;
  `offset` and `nbits` select the bit field inside it that holds the port.
  """
  def _init (self, kw):
    self.vendor = NX_VENDOR_ID
    self.subtype = NXAST_OUTPUT_REG
    self.offset = 0
    self.nbits = None
    self.reg = None # an nxm_entry class
    # NOTE(review): presumably the packet_in max length when the selected
    # port is the controller -- confirm against the OVS action definition.
    self.max_len = 0
  def _eq (self, other):
    if self.subtype != other.subtype: return False
    if self.offset != other.offset: return False
    if self.nbits != other.nbits: return False
    if self.reg != other.reg: return False
    if self.max_len != other.max_len: return False
    return True
  def _pack_body (self):
    # ofs_nbits packs offset and (width - 1) into one 16-bit field.
    nbits = self.nbits - 1
    assert nbits >= 0 and nbits <= 63
    assert self.offset >= 0 and self.offset < (1 << 10)
    ofs_nbits = self.offset << 6 | nbits
    o = self.reg()
    o._force_mask = False
    # Only the 4-byte nxm header of the register is packed, not its value.
    reg = o.pack(omittable=False, header_only=True)
    p = struct.pack('!HH4sH', self.subtype, ofs_nbits, reg, self.max_len)
    p += _PAD6
    return p
  def _unpack_body (self, raw, offset, avail):
    offset,(self.subtype, ofs_nbits, reg, self.max_len, _, _) = \
      of._unpack('!HH4sHHI', raw, offset)
    self.offset = ofs_nbits >> 6
    self.nbits = (ofs_nbits & 0x3f) + 1
    # Map the packed nxm header back to its entry class.
    self.reg = _class_for_nxm_header(reg)
    return offset
  def _body_length (self):
    # Fixed-size body: 2+2+4+2 packed bytes + 6 pad = 16.
    return 16
  def _show (self, prefix):
    s = ''
    s += prefix + ('subtype: %s\n' % (self.subtype,))
    s += prefix + ('offset: %s\n' % (self.offset,))
    s += prefix + ('nbits: %s\n' % (self.nbits,))
    s += prefix + ('reg: %s\n' % (self.reg,))
    s += prefix + ('max_len: %s\n' % (self.max_len,))
    return s
class nx_reg_move (of.ofp_action_vendor_base):
  """Action: copy a bit field from one register/field into another.

  (NXAST_REG_MOVE) `src`/`dst` are nxm_entry classes; `src_ofs`/`dst_ofs`
  are bit offsets and `nbits` the width (defaults to the widest that fits).
  """
  def _init (self, kw):
    self.vendor = NX_VENDOR_ID
    self.subtype = NXAST_REG_MOVE
    self.nbits = None
    self.dst = None # an nxm_entry class
    self.dst_ofs = 0
    self.src = None # an nxm_entry_class
    self.src_ofs = 0
  def _eq (self, other):
    if self.subtype != other.subtype: return False
    if self.nbits != other.nbits: return False
    if self.dst != other.dst: return False
    if self.dst_ofs != other.dst_ofs: return False
    if self.src != other.src: return False
    if self.src_ofs != other.src_ofs: return False
    return True
  def _pack_body (self):
    if self.nbits is None:
      # Default width: as many bits as both fields can still hold.
      a = self.dst._get_size_hint() - self.dst_ofs
      b = self.src._get_size_hint() - self.src_ofs
      self.nbits = min(a,b)
    o = self.dst()
    o._force_mask = False
    dst = o.pack(omittable=False, header_only=True)
    o = self.src()
    o._force_mask = False
    src = o.pack(omittable=False, header_only=True)
    p = struct.pack('!HHHH4s4s', self.subtype, self.nbits, self.src_ofs,
                    self.dst_ofs, src, dst)
    return p
  def _unpack_body (self, raw, offset, avail):
    offset,(self.subtype,self.nbits, self.src_ofs, self.dst_ofs, src, dst) = \
      of._unpack('!HHHH4s4s', raw, offset)
    self.dst = _class_for_nxm_header(dst)
    self.src = _class_for_nxm_header(src)
    return offset
  def _body_length (self):
    return 16
  def _show (self, prefix):
    s = ''
    s += prefix + ('subtype: %s\n' % (self.subtype,))
    # BUG FIX: removed an "offset" line here -- nx_reg_move never defines
    # self.offset (see _init), so show() raised AttributeError.
    s += prefix + ('nbits: %s\n' % (self.nbits,))
    s += prefix + ('src_ofs: %s\n' % (self.src_ofs,))
    s += prefix + ('dst_ofs: %s\n' % (self.dst_ofs,))
    s += prefix + ('src: %s\n' % (self.src,))
    s += prefix + ('dst: %s\n' % (self.dst,))
    return s
class nx_reg_load (of.ofp_action_vendor_base):
  """Action: load a literal value into a bit field of a register/field.

  (NXAST_REG_LOAD) `dst` is an nxm_entry class; `offset`/`nbits` select the
  bit field to overwrite and `value` the (unshifted) value to store.
  """
  def _init (self, kw):
    self.vendor = NX_VENDOR_ID
    self.subtype = NXAST_REG_LOAD
    self.offset = 0
    self.nbits = None
    self.dst = None # an nxm_entry class
    self.value = 0
  def _eq (self, other):
    if self.subtype != other.subtype: return False
    if self.offset != other.offset: return False
    if self.nbits != other.nbits: return False
    if self.dst != other.dst: return False
    if self.value != other.value: return False
    return True
  def _pack_body (self):
    if self.nbits is None:
      # Default width: everything from offset to the end of the field.
      self.nbits = self.dst._get_size_hint() - self.offset
    # ofs_nbits packs offset and (width - 1) into one 16-bit field.
    nbits = self.nbits - 1
    assert nbits >= 0 and nbits <= 63
    assert self.offset >= 0 and self.offset < (1 << 10)
    ofs_nbits = self.offset << 6 | nbits
    o = self.dst()
    o._force_mask = False
    # Only the 4-byte nxm header of the destination is packed.
    dst = o.pack(omittable=False, header_only=True)
    p = struct.pack('!HH4sQ', self.subtype, ofs_nbits, dst, self.value)
    return p
  def _unpack_body (self, raw, offset, avail):
    offset,(self.subtype,ofs_nbits, dst, self.value) = \
      of._unpack('!HH4sQ', raw, offset)
    self.offset = ofs_nbits >> 6
    self.nbits = (ofs_nbits & 0x3f) + 1
    # Map the packed nxm header back to its entry class.
    self.dst = _class_for_nxm_header(dst)
    return offset
  def _body_length (self):
    # Fixed-size body: 2+2+4+8 = 16 bytes.
    return 16
  def _show (self, prefix):
    s = ''
    s += prefix + ('subtype: %s\n' % (self.subtype,))
    s += prefix + ('offset: %s\n' % (self.offset,))
    s += prefix + ('nbits: %s\n' % (self.nbits,))
    s += prefix + ('dst: %s\n' % (self.dst,))
    s += prefix + ('value: %s\n' % (self.value,))
    return s
class nx_action_controller (of.ofp_action_vendor_base):
  """
  Sends packet to controller

  This is similar to an output to OFPP_CONTROLLER, but allows setting
  the reason field and controller id to send to.
  """
  def _init (self, kw):
    self.vendor = NX_VENDOR_ID
    self.subtype = NXAST_CONTROLLER
    self.max_len = 0xffFF
    self.controller_id = 0
    self.reason = of.OFPR_ACTION
  def _eq (self, other):
    # All four fields must agree
    return (self.subtype == other.subtype
        and self.max_len == other.max_len
        and self.controller_id == other.controller_id
        and self.reason == other.reason)
  def _pack_body (self):
    # Three shorts + reason byte, then one byte of pad
    body = struct.pack('!HHHB', self.subtype, self.max_len,
                       self.controller_id, self.reason)
    return body + of._PAD
  def _unpack_body (self, raw, offset, avail):
    offset,(self.subtype, self.max_len, self.controller_id, self.reason) = \
        of._unpack('!HHHB', raw, offset)
    return of._skip(raw, offset, 1)
  def _body_length (self):
    return 8
  def _show (self, prefix):
    # One "name: value" line per field
    out = ''
    for attr in ('subtype', 'max_len', 'controller_id', 'reason'):
      out += prefix + ('%s: %s\n' % (attr, getattr(self, attr)))
    return out
class nx_action_push_mpls (of.ofp_action_vendor_base):
  """
  Push an MPLS label
  """
  def _init (self, kw):
    self.vendor = NX_VENDOR_ID
    self.subtype = NXAST_PUSH_MPLS
    # The only alternative for ethertype is MPLS_MC_TYPE (multicast)
    self.ethertype = pkt.ethernet.MPLS_TYPE
  def _eq (self, other):
    return (self.subtype == other.subtype
        and self.ethertype == other.ethertype)
  def _pack_body (self):
    # Trailing I is 4 bytes of zero padding
    return struct.pack('!HHI', self.subtype, self.ethertype, 0)
  def _unpack_body (self, raw, offset, avail):
    offset,(self.subtype,self.ethertype) = of._unpack('!HH', raw, offset)
    return of._skip(raw, offset, 4)
  def _body_length (self):
    return 8
  def _show (self, prefix):
    out = ''
    for attr in ('subtype', 'ethertype'):
      out += prefix + ('%s: %s\n' % (attr, getattr(self, attr)))
    return out
class nx_action_pop_mpls (of.ofp_action_vendor_base):
  """
  Pop an MPLS label

  ethertype defaults to None (deliberately invalid) and must be set
  before packing -- struct.pack() raises on None.
  """
  def _init (self, kw):
    self.vendor = NX_VENDOR_ID
    self.subtype = NXAST_POP_MPLS
    self.ethertype = None # Purposely bad
  def _eq (self, other):
    if self.subtype != other.subtype: return False
    if self.ethertype != other.ethertype: return False
    return True
  def _pack_body (self):
    p = struct.pack('!HHI', self.subtype, self.ethertype, 0) # 4 bytes pad
    return p
  def _unpack_body (self, raw, offset, avail):
    offset,(self.subtype,self.ethertype) = of._unpack('!HH', raw, offset)
    offset = of._skip(raw, offset, 4)
    return offset
  def _body_length (self):
    return 8
  def _show (self, prefix):
    s = ''
    s += prefix + ('subtype: %s\n' % (self.subtype,))
    s += prefix + ('ethertype: %s\n' % (self.ethertype,))
    return s
class nx_action_resubmit (of.ofp_action_vendor_base):
  """
  Used with both resubmit and resubmit_table.

  Generally, you want to use one of the factory methods.
  """
  @classmethod
  def resubmit (cls, in_port = of.OFPP_IN_PORT):
    # Plain NXAST_RESUBMIT with a substitute in_port
    return cls(subtype = NXAST_RESUBMIT, in_port = in_port, table = 0)

  @classmethod
  def resubmit_table (cls, table = 255, in_port = of.OFPP_IN_PORT):
    # NXAST_RESUBMIT_TABLE variant targeting a specific table
    return cls(subtype = NXAST_RESUBMIT_TABLE, in_port = in_port,
               table = table)

  def _init (self, kw):
    self.vendor = NX_VENDOR_ID
    self.subtype = NXAST_RESUBMIT
    self.in_port = None # New in_port for checking flow table
    self.table = None   # NXAST_RESUBMIT_TABLE: table to use
  def _eq (self, other):
    return (self.subtype == other.subtype
        and self.in_port == other.in_port
        and self.table == other.table)
  def _pack_body (self):
    # subtype, in_port, table byte, then three bytes of pad
    body = struct.pack('!HHB', self.subtype, self.in_port, self.table)
    return body + of._PAD3
  def _unpack_body (self, raw, offset, avail):
    offset,(self.subtype,self.in_port,self.table) = \
        of._unpack('!HHB', raw, offset)
    return of._skip(raw, offset, 3)
  def _body_length (self):
    return 8
  def _show (self, prefix):
    out = ''
    for attr in ('subtype', 'in_port', 'table'):
      out += prefix + ('%s: %s\n' % (attr, getattr(self, attr)))
    return out
class nx_action_set_tunnel (of.ofp_action_vendor_base):
  """
  Set a 32-bit tunnel ID

  tun_id must be set before packing (defaults to None, which
  struct.pack() rejects).

  See also: nx_action_set_tunnel64
  """
  def _init (self, kw):
    self.vendor = NX_VENDOR_ID
    self.subtype = NXAST_SET_TUNNEL
    self.tun_id = None # Must set
  def _eq (self, other):
    if self.subtype != other.subtype: return False
    if self.tun_id != other.tun_id: return False
    return True
  def _pack_body (self):
    # subtype, 2 bytes of pad, then the 32-bit tunnel ID
    p = struct.pack('!HHI', self.subtype, 0, self.tun_id)
    return p
  def _unpack_body (self, raw, offset, avail):
    offset,(self.subtype,) = of._unpack('!H', raw, offset)
    offset = of._skip(raw, offset, 2)
    offset,(self.tun_id,) = of._unpack('!I', raw, offset)
    return offset
  def _body_length (self):
    return 8
  def _show (self, prefix):
    s = ''
    s += prefix + ('subtype: %s\n' % (self.subtype,))
    # Fixed label typo: was "tub_id"
    s += prefix + ('tun_id: %s\n' % (self.tun_id,))
    return s
class nx_action_set_tunnel64 (of.ofp_action_vendor_base):
  """
  Set a 64-bit tunnel ID

  tun_id must be set before packing (defaults to None, which
  struct.pack() rejects).

  See also: nx_action_set_tunnel
  """
  def _init (self, kw):
    self.vendor = NX_VENDOR_ID
    self.subtype = NXAST_SET_TUNNEL64
    self.tun_id = None # Must set
  def _eq (self, other):
    if self.subtype != other.subtype: return False
    if self.tun_id != other.tun_id: return False
    return True
  def _pack_body (self):
    # subtype, 6 bytes of pad (H + I), then the 64-bit tunnel ID
    p = struct.pack('!HHIQ', self.subtype, 0, 0, self.tun_id)
    return p
  def _unpack_body (self, raw, offset, avail):
    offset,(self.subtype,) = of._unpack('!H', raw, offset)
    offset = of._skip(raw, offset, 6)
    offset,(self.tun_id,) = of._unpack('!Q', raw, offset)
    return offset
  def _body_length (self):
    return 16
  def _show (self, prefix):
    s = ''
    s += prefix + ('subtype: %s\n' % (self.subtype,))
    # Fixed label typo: was "tub_id"
    s += prefix + ('tun_id: %s\n' % (self.tun_id,))
    return s
class nx_action_fin_timeout (of.ofp_action_vendor_base):
  """
  NXAST_FIN_TIMEOUT action

  Carries replacement idle/hard timeouts; each is applied only if
  nonzero.
  """
  def _init (self, kw):
    self.vendor = NX_VENDOR_ID
    self.subtype = NXAST_FIN_TIMEOUT
    self.fin_idle_timeout = 1 # New idle timeout, if nonzero.
    self.fin_hard_timeout = 1 # New hard timeout, if nonzero.
  def _eq (self, other):
    return (self.subtype == other.subtype
        and self.fin_idle_timeout == other.fin_idle_timeout
        and self.fin_hard_timeout == other.fin_hard_timeout)
  def _pack_body (self):
    # Three shorts followed by two bytes of pad
    body = struct.pack('!HHH', self.subtype, self.fin_idle_timeout,
                       self.fin_hard_timeout)
    return body + of._PAD2
  def _unpack_body (self, raw, offset, avail):
    offset,(self.subtype,self.fin_idle_timeout,self.fin_hard_timeout) = \
        of._unpack('!HHH', raw, offset)
    return of._skip(raw, offset, 2)
  def _body_length (self):
    return 8
  def _show (self, prefix):
    out = ''
    for attr in ('subtype', 'fin_idle_timeout', 'fin_hard_timeout'):
      out += prefix + ('%s: %s\n' % (attr, getattr(self, attr)))
    return out
class nx_action_exit (of.ofp_action_vendor_base):
  """
  NXAST_EXIT action (subtype only; rest of the body is padding)
  """
  def _init (self, kw):
    self.vendor = NX_VENDOR_ID
    self.subtype = NXAST_EXIT
  def _eq (self, other):
    return self.subtype == other.subtype
  def _pack_body (self):
    return struct.pack('!H', self.subtype) + of._PAD6
  def _unpack_body (self, raw, offset, avail):
    offset,(self.subtype,) = of._unpack('!H', raw, offset)
    return of._skip(raw, offset, 6)
  def _body_length (self):
    return 8
  def _show (self, prefix):
    return prefix + ('subtype: %s\n' % (self.subtype,))
class nx_action_dec_ttl (of.ofp_action_vendor_base):
  """
  NXAST_DEC_TTL action (subtype only; rest of the body is padding)
  """
  def _init (self, kw):
    self.vendor = NX_VENDOR_ID
    self.subtype = NXAST_DEC_TTL
  def _eq (self, other):
    return self.subtype == other.subtype
  def _pack_body (self):
    return struct.pack('!H', self.subtype) + of._PAD6
  def _unpack_body (self, raw, offset, avail):
    offset,(self.subtype,) = of._unpack('!H', raw, offset)
    return of._skip(raw, offset, 6)
  def _body_length (self):
    return 8
  def _show (self, prefix):
    return prefix + ('subtype: %s\n' % (self.subtype,))
# -----------------------------------------------------------------------
# Learn action
# -----------------------------------------------------------------------
class nx_action_learn (of.ofp_action_vendor_base):
  """
  Allows table entries to add table entries

  There are different ways of adding flow_mod_specs.  For example, the
  following are all equivalent:

  learn = nx.nx_action_learn(table_id=1,hard_timeout=10)
  fms = nx.flow_mod_spec.new # Just abbreviating this
  learn.spec.append(fms( field=nx.NXM_OF_VLAN_TCI, n_bits=12 ))
  learn.spec.append(fms( field=nx.NXM_OF_ETH_SRC, match=nx.NXM_OF_ETH_DST ))
  learn.spec.append(fms( field=nx.NXM_OF_IN_PORT, output=True ))

  learn = nx.nx_action_learn(table_id=1,hard_timeout=10)
  learn.spec.chain(
    field=nx.NXM_OF_VLAN_TCI, n_bits=12).chain(
    field=nx.NXM_OF_ETH_SRC, match=nx.NXM_OF_ETH_DST).chain(
    field=nx.NXM_OF_IN_PORT, output=True)

  learn = nx.nx_action_learn(table_id=1,hard_timeout=10)
  learn.spec = [
    nx.flow_mod_spec(src=nx.nx_learn_src_field(nx.NXM_OF_VLAN_TCI),
                     n_bits=12),
    nx.flow_mod_spec(src=nx.nx_learn_src_field(nx.NXM_OF_ETH_SRC),
                     dst=nx.nx_learn_dst_match(nx.NXM_OF_ETH_DST)),
    nx.flow_mod_spec(src=nx.nx_learn_src_field(nx.NXM_OF_IN_PORT),
                     dst=nx.nx_learn_dst_output())
  ]
  """
  def _init (self, kw):
    self.vendor = NX_VENDOR_ID
    self.subtype = NXAST_LEARN
    self.idle_timeout = 0
    self.hard_timeout = 0
    self.priority = of.OFP_DEFAULT_PRIORITY
    self.cookie = 0
    self.flags = 0
    self.table_id = 0
    self.fin_idle_timeout = 0
    self.fin_hard_timeout = 0
    self.spec = flow_mod_spec_chain()  # List of flow_mod_specs

  @property
  def table (self):
    """
    Synonym for table_id
    """
    return self.table_id

  @table.setter
  def table (self, value):
    self.table_id = value

  def _eq (self, other):
    # Note: self.spec is intentionally not compared here
    if self.subtype != other.subtype: return False
    if self.idle_timeout != other.idle_timeout: return False
    if self.hard_timeout != other.hard_timeout: return False
    if self.priority != other.priority: return False
    if self.cookie != other.cookie: return False
    if self.flags != other.flags: return False
    if self.table_id != other.table_id: return False
    if self.fin_idle_timeout != other.fin_idle_timeout: return False
    if self.fin_hard_timeout != other.fin_hard_timeout: return False
    return True

  def _pack_body (self):
    # Fixed header (the literal 0 is a pad byte), then packed specs,
    # zero-padded out to an 8-byte boundary
    p = struct.pack('!HHHHQHBBHH',
                    self.subtype,
                    self.idle_timeout,
                    self.hard_timeout,
                    self.priority,
                    self.cookie,
                    self.flags,
                    self.table_id,
                    0,
                    self.fin_idle_timeout,
                    self.fin_hard_timeout)
    for fs in self.spec:
      p += fs.pack()
    if len(p) % 8:
      p += '\x00' * (8-(len(p)%8))
    return p

  def _unpack_body (self, raw, offset, avail):
    orig_offset = offset
    offset,(self.subtype, self.idle_timeout, self.hard_timeout,
            self.priority, self.cookie, self.flags, self.table_id, _,
            self.fin_idle_timeout,
            self.fin_hard_timeout) = of._unpack('!HHHHQHBBHH', raw, offset)
    avail -= (2+2+2+2+8+2+1+1+2+2)  # Bytes consumed by the fixed header
    assert (avail & 1) == 0
    # Specs are 16-bit aligned; an all-zero spec header means padding
    while avail > 0:
      newoff, fms = flow_mod_spec.unpack_new(raw, offset)
      if fms is None: break
      self.spec.append(fms)
      avail -= (newoff - offset)
      offset = newoff
    # Skip trailing pad up to the 8-byte boundary
    length = offset - orig_offset
    if length % 8:
      offset = of._skip(raw, offset, 8 - (length%8))
    return offset

  def _show (self, prefix):
    s = ''
    ff = ('idle_timeout hard_timeout priority cookie flags table_id '
          'fin_idle_timeout fin_hard_timeout').split()
    for f in ff:
      s += prefix
      s += f + ": "
      s += str(getattr(self, f))
      s += "\n"
    return s
# flow_mod_spec source type codes (see flow_mod_spec.pack)
NX_LEARN_SRC_FIELD = 0      # Source bits come from a field/register
NX_LEARN_SRC_IMMEDIATE = 1  # Source bits are an immediate value
# flow_mod_spec destination type codes
NX_LEARN_DST_MATCH = 0
NX_LEARN_DST_LOAD = 1
NX_LEARN_DST_OUTPUT = 2
class nx_learn_spec (object):
  """
  Superclass for the src/dst halves of a flow_mod_spec
  """
  _is_src = False  # True on nx_learn_spec_src subclasses
  _is_dst = False  # True on nx_learn_spec_dst subclasses
  data = None      # Packed payload bytes (may be empty)
  n_bits = None    # Number of significant bits
  value = None     # Type code (NX_LEARN_SRC_* / NX_LEARN_DST_*)

  def pack (self):
    return self.data if self.data else b''

  @classmethod
  def unpack_subclass (cls, spec, n_bits, raw, offset):
    """
    Returns (new_offset, object)
    """
    assert cls is not nx_learn_spec, "Must call on subclass"
    # Dispatch on the type code to the concrete spec class
    c = _flow_mod_spec_to_class(cls._is_src, spec)
    offset,o = c.unpack_new(n_bits, raw, offset)
    return offset, o

  @classmethod
  def unpack_new (cls, n_bits, raw, offset):
    """
    Returns (new_offset, object)
    """
    # __new__ rather than the constructor: fields are filled in directly
    o = cls.__new__(cls)
    o.n_bits = n_bits
    datalen = len(o)  # Subclass __len__ gives payload size (may use n_bits)
    if datalen != 0:
      offset,o.data = of._read(raw, offset, datalen)
    return offset,o

  def __len__ (self):
    # Implement. Can't use .data field.
    assert False, "__len__ unimplemented in " + type(self).__name__

  def __repr__ (self):
    return "<%s n_bits:%s>" % (type(self).__name__, self.n_bits)
class nx_learn_spec_src (nx_learn_spec):
  # Marker base class for the source half of a flow_mod_spec
  _is_src = True
class nx_learn_spec_dst (nx_learn_spec):
  # Marker base class for the destination half of a flow_mod_spec
  _is_dst = True
class _field_and_match (object):
  """
  Common functionality for src_field and dst_match

  Wraps a reference to an nxm_entry class plus a bit offset, packed as
  the field's 4-byte NXM header followed by a 16-bit ofs.
  """
  def __init__ (self, field, ofs = 0, n_bits = None):
    """
    field: an nxm_entry class
    ofs: bit offset into the field
    n_bits: bit count; None means "everything past ofs"; a negative
            value means "everything past ofs, minus this many bits"
    """
    data = field().pack(omittable = False, header_only = True)
    data += struct.pack("!H", ofs)
    if n_bits is None:
      n_bits = field._get_size_hint() - ofs
    elif n_bits < 0:
      n_bits = field._get_size_hint() - ofs - n_bits
    self.n_bits = n_bits
    self.data = data

  @property
  def ofs (self):
    # The bit offset lives right after the 4-byte NXM header
    return struct.unpack_from("!H", self.data, 4)[0]

  @property
  def field (self):
    """
    Resolve the packed NXM header back to an nxm_entry class

    Falls back to generating a generic class for unknown type codes.
    """
    # Fixed: has_mask and length were previously discarded
    # (t,_,_ = ...), which made the fallback below raise NameError.
    t,has_mask,length = nxm_entry.unpack_header(self.data, 0)
    c = _nxm_type_to_class.get(t)
    if c is None:
      attrs = {'_nxm_type':t}
      attrs['_nxm_length'] = length/2 if has_mask else length
      c = type('nxm_type_'+str(t), (NXM_GENERIC,), attrs)
    return c

  def __len__ (self):
    # 4-byte NXM header plus 2-byte ofs
    return 6
class nx_learn_src_field (_field_and_match, nx_learn_spec_src):
  # Source half: bits are taken from a header field/register
  value = NX_LEARN_SRC_FIELD
  @property
  def matching (self):
    """
    Returns a corresponding nx_learn_dst_match
    """
    return nx_learn_dst_match(self.field, self.ofs, self.n_bits)
class nx_learn_src_immediate (nx_learn_spec_src):
  """
  An immediate value for a flow spec

  Probably generally a good idea to use one of the factory methods,
  e.g., u8().
  """
  value = NX_LEARN_SRC_IMMEDIATE

  def __init__ (self, data, n_bits = None):
    # data must already be padded to a 16-bit boundary when n_bits is
    # to be inferred
    if n_bits is None:
      assert (len(data)&1) == 0, "data needs pad; n_bits cannot be inferred"
      n_bits = len(data)*8
    else:
      assert len(data)*8 >= n_bits, "n_bits larger than data"
    self.n_bits = n_bits
    self.data = data

  # NOTE(review): in all three factories the `dst` parameter is unused,
  # and u8 packs two bytes ("!H"), so n_bits is inferred as 16 rather
  # than 8 -- confirm intent against callers before relying on these.
  @classmethod
  def u8 (cls, dst, value):
    return cls(struct.pack("!H", value))

  @classmethod
  def u16 (cls, dst, value):
    return cls(struct.pack("!H", value))

  @classmethod
  def u32 (cls, dst, value):
    return cls(struct.pack("!L", value))

  def __len__ (self):
    # Payload length: n_bits rounded up to a 16-bit boundary, in bytes
    return ((self.n_bits+15) // 16) * 2
class nx_learn_dst_match (_field_and_match, nx_learn_spec_dst):
  # Destination half: the source bits become a match on this field
  value = NX_LEARN_DST_MATCH
class nx_learn_dst_load (nx_learn_spec_dst):
  # Destination half: the source bits are loaded into this field/register
  value = NX_LEARN_DST_LOAD
  def __init__ (self, field, ofs = 0, n_bits = None):
    # Same payload layout as _field_and_match: NXM header + 16-bit ofs
    data = field().pack(omittable = False, header_only = True)
    data += struct.pack("!H", ofs)
    if n_bits is None:
      n_bits = field._get_size_hint() - ofs
    elif n_bits < 0:
      n_bits = field._get_size_hint() - ofs - n_bits
    self.n_bits = n_bits
    self.data = data
  def __len__ (self):
    # NOTE(review): unlike _field_and_match (fixed 6), this computes the
    # length from n_bits even though the packed payload is 6 bytes --
    # confirm which length the unpack path should use.
    return ((self.n_bits+15) // 16) * 2
class nx_learn_dst_output (nx_learn_spec_dst):
  # Destination half with no payload of its own
  value = NX_LEARN_DST_OUTPUT
  def __init__ (self, dummy = True):
    # Accepts one throwaway argument so it can be constructed through
    # the same keyword mechanism as the other spec types
    assert dummy is True
    super(nx_learn_dst_output,self).__init__()
  def __len__ (self):
    return 0
def _flow_mod_spec_to_class (is_src, val):
  """
  Map a flow_mod_spec src/dst type code to its spec class (or None)
  """
  #TODO: Use a class registry and decorator for these instead of this hack
  table = {
    (True,  NX_LEARN_SRC_FIELD):     nx_learn_src_field,
    (True,  NX_LEARN_SRC_IMMEDIATE): nx_learn_src_immediate,
    (False, NX_LEARN_DST_MATCH):     nx_learn_dst_match,
    (False, NX_LEARN_DST_LOAD):      nx_learn_dst_load,
    (False, NX_LEARN_DST_OUTPUT):    nx_learn_dst_output,
  }
  return table.get((bool(is_src), val))
class flow_mod_spec_chain (list):
  # A list of flow_mod_specs with a fluent .chain() helper
  def chain (self, *args, **kw):
    self.append(flow_mod_spec.new(*args,**kw))
    return self  # Allow .chain(...).chain(...)
#class _meta_fms (type):
# @property
# def chain (self):
# return _flow_mod_spec_chain()
class flow_mod_spec (object):
  """
  A single src->dst element of a learn action

  Pairs a source spec (field or immediate) with a destination spec
  (match, load, or output) and a bit count.  Usually built via .new()
  (see nx_action_learn's docstring for examples).
  """
  @classmethod
  def create (cls, src, dst = None, n_bits = None):
    #TODO: Remove me
    return cls(src, dst, n_bits)

  def __init__ (self, src, dst = None, n_bits = None):
    """
    src: an nx_learn_spec_src instance
    dst: an nx_learn_spec_dst instance, or None to derive a match spec
         from a src field
    n_bits: bit count; inferred from src (then dst) when None
    """
    assert src._is_src
    if dst is None:
      # Assume same as src
      assert type(src) == nx_learn_src_field
      dst = src.matching
    assert dst._is_dst
    #TODO: Check whether there's enough space in dst
    # (This will require figuring out what the right length for output is...
    # 16 bits?)
    if n_bits is None:
      n_bits = src.n_bits
      if n_bits is None:
        n_bits = dst.n_bits
      else:
        if dst.n_bits is not None and dst.n_bits > n_bits:
          raise RuntimeError("dst n_bits greater than source n_bits "
                             "(%s and %s); cannot infer" % (n_bits,dst.n_bits))
    if n_bits is None:
      raise RuntimeError("cannot infer n_bits")
    self.src = src
    self.dst = dst
    self.n_bits = n_bits

  def __repr__ (self):
    return "%s(src=%s, dst=%s, n_bits=%s)" % (
        type(self).__name__, self.src, self.dst, self.n_bits)

  @classmethod
  def new (cls, src=None, dst=None, **kw):
    """
    Flexible factory

    Keywords may name a spec type directly (e.g. field=..., match=...,
    immediate=..., output=True), supply constructor parameters for them
    (src_*/dst_* prefixes), pass ready-made instances via src=/dst=, or
    give n_bits.
    """
    if src is not None: kw['src'] = src
    if dst is not None: kw['dst'] = dst
    src = None
    dst = None
    srcarg = ()
    dstarg = ()
    srckw = {}
    dstkw = {}
    src_inst = None
    dst_inst = None
    n_bits = None
    for k,v in kw.iteritems():
      # This is handy, though there's potentially future ambiguity
      s = globals().get('nx_learn_' + k)
      if not s:
        s = globals().get('nx_learn_src_' + k)
      if not s:
        s = globals().get('nx_learn_dst_' + k)
      if not s:
        # Not a spec type name; interpret as a parameter
        if k.startswith("src_"):
          srckw[k[4:]] = v
        elif k.startswith("dst_"):
          dstkw[k[4:]] = v
        elif k == "src":
          assert isinstance(v, nx_learn_spec_src)
          src_inst = v
        elif k == "dst":
          assert isinstance(v, nx_learn_spec_dst)
          dst_inst = v
        elif k == "n_bits":
          n_bits = v
        else:
          # Fixed: the format string was previously never applied (the
          # tuple was passed as a second exception argument)
          raise RuntimeError("Don't know what to do with '%s'" % (k,))
        continue
      if s._is_src:
        assert src is None, "src already set"
        src = s
        srcarg = (v,)
      if s._is_dst:
        assert dst is None, "dst already set"
        dst = s
        dstarg = (v,)
    if src_inst:
      assert src is None, "can't set src and a spec type"
      assert len(srckw) == 0, "can't set src params with src instance"
    else:
      assert src is not None, "no src set"
      src_inst = src(*srcarg,**srckw)
    if dst_inst:
      assert dst is None, "can't set dst and a spec type"
      assert len(dstkw) == 0, "can't set dst params with dst instance"
    else:
      if dst is not None: dst_inst = dst(*dstarg,**dstkw)
    return cls.create(src_inst, dst_inst, n_bits)
  chain = new

  def pack (self):
    """
    Pack as a 16-bit header (src type @13, dst type @11, n_bits in the
    low 10 bits) followed by the src and dst payloads
    """
    assert isinstance(self.src, nx_learn_spec_src),str(self.src)
    assert isinstance(self.dst, nx_learn_spec_dst),str(self.dst)
    assert self.n_bits < 1024
    v = self.src.value << 13 | self.dst.value << 11 | self.n_bits
    p = struct.pack("!H", v)
    p += self.src.pack() + self.dst.pack()
    return p

  @classmethod
  def unpack_new (cls, raw, offset = 0):
    """
    May return a None object if it's padding
    """
    offset,(v,) = of._unpack("!H", raw, offset)
    if v == 0:
      # Special case for padding
      return offset, None
    n_bits = v & 1023
    offset,src = nx_learn_spec_src.unpack_subclass((v >> 13) & 1,
                                                   n_bits, raw, offset)
    offset,dst = nx_learn_spec_dst.unpack_subclass((v >> 11) & 3,
                                                   n_bits, raw, offset)
    return offset, cls(src, dst, n_bits)
# -----------------------------------------------------------------------
# NXM support
# -----------------------------------------------------------------------
#def conv (n, s):
# if s == 0: return b''
# nn = struct.pack("B", n & 0xff)
# n >>= 8
# return conv(n, s - 1) + nn
class _nxm_raw (object):
  # Value-type mixin: values/masks are raw bytes, no conversion
  def _pack_value (self, v):
    return v
  def _unpack_value (self, v):
    return v
class _nxm_numeric (object):
_size_table = [None, "!B", "!H", None, "!L", None, None, None, "!Q"]
def _pack_value (self, v):
size = self._size_table[self._nxm_length]
return struct.pack(size, v)
def _unpack_value (self, v):
try:
size = self._size_table[self._nxm_length]
return struct.unpack(size, v)[0]
except:
raise RuntimeError("Can't unpack %i bytes for %s"
% (self._nxm_length, self.__class__.__name__))
class _nxm_ip (object):
  """
  Allows setting of IP address in many formats

  The value can be any format known by IPAddr.  If it's a string, it can
  also have a trailing /netmask or /cidr-bits.  If it's a tuple, the
  first is assumed to be any kind of IP address and the second is either
  a netmask or the number of network bits.
  """
  @property
  def value (self):
    return self._unpack_value(self._value)
  @value.setter
  def value (self, value):
    if isinstance(value, tuple) or isinstance(value, list):
      # (address, netmask-or-bits) pair
      assert len(value) == 2
      ip = value[0]
      self.mask = value[1]
      #if isinstance(mask, (int,long)):
      #  self.mask = mask
    elif isinstance(value, basestring) and len(value)>4 and '/' in value:
      # "a.b.c.d/mask" or "a.b.c.d/bits" string form
      temp = parse_cidr(value, infer=False)
      ip = temp[0]
      self.mask = 32 if temp[1] is None else temp[1]
    else:
      ip = value
    self._value = self._pack_value(ip)
  def _pack_value (self, v):
    # NOTE(review): packs with networkOrder=False but unpacks with
    # networkOrder=True -- looks deliberate for the wire format, but
    # confirm against IPAddr's semantics.
    return IPAddr(v, networkOrder=False).toRaw()
  def _unpack_value (self, v):
    return IPAddr(v, networkOrder=True)
  def _pack_mask (self, v):
    if isinstance(v, (int, long)):
      # Assume CIDR; clamp to [0,32] and expand to a full netmask
      if v > 32: v = 32
      elif v < 0: v = 0
      n = (0xffFFffFF << (32-v)) & 0xffFFffFF
      return IPAddr(n, networkOrder=False).toRaw()
    else:
      return IPAddr(v).toRaw()
  #def _unpack_mask (self, v):
  #  # Special unpacking for CIDR-style?
class _nxm_ipv6 (object):
  """
  Placeholder until we have real IPv6 support

  Allows setting of IP address in many formats
  The value can be any format known by IPAddr.  If it's a string, it can
  also have a trailing /netmask or /cidr-bits.  If it's a tuple, the
  first is assumed to be any kind of IP address and the second is either
  a netmask or the number of network bits.
  """
  @property
  def value (self):
    return self._unpack_value(self._value)
  @value.setter
  def value (self, value):
    if isinstance(value, tuple) or isinstance(value, list):
      # (address, netmask-or-bits) pair
      assert len(value) == 2
      ip = value[0]
      self.mask = value[1]
    elif isinstance(value, (unicode,str)):
      ip,mask = IPAddr6.parse_cidr(value, allow_host = True)
      #self.mask = 128 if mask is None else mask
      self.mask = mask
    else:
      ip = value
    self._value = self._pack_value(ip)
  def _pack_value (self, v):
    return IPAddr6(v).raw
  def _unpack_value (self, v):
    return IPAddr6(v, raw=True)
  def _pack_mask (self, v):
    if isinstance(v, (int,long)):
      # Assume CIDR; clamp to [0,128] and expand to a full netmask
      if v > 128: v = 128
      elif v < 0: v = 0
      n = (((1<<128)-1) << (128-v)) & ((1<<128)-1)
      return IPAddr6.from_num(n).raw
    else:
      return IPAddr6(v).raw
  # def _unpack_mask (self, v):
  #   # Special unpacking for CIDR-style?
class _nxm_ether (object):
  # Value-type mixin for Ethernet addresses (6 raw bytes on the wire)
  def _pack_value (self, v):
    return EthAddr(v).toRaw()
  def _unpack_value (self, v):
    return EthAddr(v)
# Registries populated by _make_nxm(): type code -> class, name -> type code
_nxm_type_to_class = {}
_nxm_name_to_type = {}
class nxm_entry (object):
  """
  Base class for NXM/OXM match entries

  Subclasses define _nxm_type (vendor<<7 | field) and _nxm_length (bytes
  of value data, not including any mask; the wire length doubles when a
  mask is present).
  """
  #_nxm_type = _make_type(0x, )
  #_nxm_length = # bytes of data not including mask (double for mask)
  _size_hint = None    # Significant bits, if fewer than _nxm_length*8
  _force_mask = False  # Pack an all-ones mask even when mask is None
  #TODO: make mask-omittable a class-level attribute?

  @classmethod
  def _get_size_hint (self):
    """
    Number of significant bits
    """
    if self._size_hint is None:
      return self._nxm_length * 8
    return self._size_hint

  @property
  def nxm_vendor (self):
    # High bits of the type field
    return self._nxm_type >> 7

  @property
  def nxm_field (self):
    # Low 7 bits of the type field
    return self._nxm_type & 0x7f

  @staticmethod
  def unpack_header (raw, offset):
    """
    Parses the NXM_HEADER
    Returns (type,has_mask,length)
    """
    h, = struct.unpack_from("!L", raw, offset)
    offset += 4
    t = h >> 9
    has_mask = (h & (1<<8)) != 0
    length = h & 0x7f
    return t,has_mask,length

  @staticmethod
  def unpack_new (raw, offset):
    """
    Unpack one entry; returns (new_offset, entry)

    Falls back to an NXM_GENERIC instance for unknown type codes.
    """
    t,has_mask,length = nxm_entry.unpack_header(raw, offset)
    offset += 4
    offset,data = of._read(raw, offset, length)
    mask = None
    if has_mask:
      assert not (length & 1), "Odd length with mask"
      # Wire layout is value then mask, each length/2 bytes
      mask = data[length/2:]
      data = data[:length/2]
    #NOTE: Should use _class_for_nxm_header?
    c = _nxm_type_to_class.get(t)
    if c is None:
      #TODO: Refactor with learn spec field property?
      e = NXM_GENERIC()
      e._nxm_length = length
      if has_mask:
        e._nxm_length /= 2
      e._nxm_type = t
      # Alternate approach: Generate new subclass. To do: cache gen'd types?
      #attrs = {'_nxm_type':t}
      #attrs['_nxm_length'] = length/2 if has_mask else length
      #c = type('nxm_type_'+str(t), (NXM_GENERIC,), attrs)
      #e = c()
    else:
      e = c()
    assert data is not None
    assert len(data) == e._nxm_length, "%s != %s" % (len(data), e._nxm_length)
    assert mask is None or len(mask) == e._nxm_length
    e._value = data
    e._mask = mask
    if mask is not None:
      e._force_mask = True
    return offset, e

  def clone (self):
    """
    Return a copy of this entry
    """
    n = self.__class__()
    n._nxm_type = self._nxm_type
    n._nxm_length = self._nxm_length
    n._force_mask = self._force_mask
    n.mask = self.mask
    n.value = self.value
    return n

  def __init__ (self, value = None, mask = None):
    super(nxm_entry, self).__init__()
    self._value = None
    self._mask = None
    if value is None and mask is None: return # Sloppy
    self.mask = mask
    self.value = value # In case value overrides mask (IP), do value last

  def get_length (self, omittable = False):
    # Calculating length is slightly tricky with mask omission, etc.,
    # so just pack it and find out, rather than duplicate the logic
    # here.
    return len(self.pack(omittable))

  def __len__ (self):
    return self.get_length()

  def _unpack_mask (self, m):
    # By default masks use the same representation as values
    return self._unpack_value(m)

  def _pack_mask (self, m):
    return self._pack_value(m)

  @property
  def is_reg (self):
    # Overridden by register entries (_nxm_reg)
    return False

  @property
  def allow_mask (self):
    # Overridden by maskable entries (_nxm_maskable)
    return False

  @property
  def value (self):
    return self._unpack_value(self._value)

  @value.setter
  def value (self, value):
    self._value = self._pack_value(value)

  @property
  def mask (self):
    if self._mask is None: return None
    return self._unpack_mask(self._mask)

  @mask.setter
  def mask (self, value):
    if self.allow_mask is False:
      if value is not None:
        raise RuntimeError("entry has no mask")
    if value is None:
      # This would normally be up to the pack function, but we add it
      # here as a special case
      self._mask = None
    else:
      self._mask = self._pack_mask(value)

  def __eq__ (self, other):
    if type(self) != type(other): return False
    if self._nxm_type != other._nxm_type: return False
    if self.value != other.value: return False
    if self.mask != other.mask: return False
    if self.is_reg != other.is_reg: return False
    return True

  def pack (self, omittable = False, header_only = False):
    """
    Pack this entry

    omittable: return b'' instead of packing an all-zero mask
    header_only: pack just the 4-byte NXM header
    """
    h = self._nxm_type << 9
    mask = self._mask
    if mask is not None:
      assert len(mask) == self._nxm_length, "mask is wrong length"
      if (mask.count("\x00") == self._nxm_length) and omittable:
        return b''
      if (mask.count("\xff") == self._nxm_length):
        # All-ones mask is equivalent to no mask; drop it
        mask = None
    if mask is None and self._force_mask:
      mask = "\xff" * self._nxm_length
    if mask is not None:
      # Masked entries set the has_mask bit and double the length
      h |= (1 << 8)
      h |= (self._nxm_length * 2)
    else:
      h |= self._nxm_length
    r = struct.pack("!L", h)
    if header_only: return r
    value = self._value
    assert value is not None
    assert len(value) == self._nxm_length, "value is wrong length"
    r += value
    if mask is not None:
      # Every value bit outside the mask must be zero
      assert 0 == sum(ord(v)&(0xff&~ord(m)) for v,m in zip(value,mask)), \
          "nonzero masked bits"
      r += mask
    return r

  def __str__ (self):
    r = self.__class__.__name__ + "(" + str(self.value)
    if self.mask is not None:
      if self.mask.raw != ("\xff" * self._nxm_length):
        r += "/" + str(self.mask)
    #if self.is_reg: r += "[r]"
    return r + ")"

  def __repr__ (self):
    return str(self)
class _nxm_numeric_entry (_nxm_numeric, nxm_entry):
  # Default concrete entry type: fixed-width big-endian integer value
  pass
class _nxm_maskable (object):
  # Mixin that permits setting a mask on an entry
  @property
  def allow_mask (self):
    return True
class _nxm_maskable_numeric_entry (_nxm_maskable, _nxm_numeric_entry):
  # Maskable numeric entry (the default base for _make_nxm_w)
  pass
class _nxm_reg (_nxm_maskable_numeric_entry):
  # Nicira register entry (NXM_NX_REGx)
  @property
  def is_reg (self):
    return True
class NXM_GENERIC (_nxm_raw, nxm_entry):
  """
  Catch-all entry for unrecognized NXM type codes

  Value/mask are kept as raw bytes; the value is displayed as hex.
  """
  @property
  def allow_mask (self):
    return True
  def __str__ (self):
    r = "NXM_%08x_%i" % (self.nxm_vendor, self.nxm_field)
    r += "("
    r += "".join("%02x" % (ord(x),) for x in self.value)
    #+ repr(self.value)
    if self.mask is not None:
      if self.mask != ("\xff" * self._nxm_length):
        r += "/" + repr(self.mask)
    return r + ")"
def _make_type (vendor, field):
"""
Takes an NXM vendor and field and returns the whole type field
"""
return (vendor << 7) | field
def _fix_types (t):
  """
  Helper for _make_nxm(_w)

  Normalizes lists of superclasses: accepts a single class or a
  sequence of classes, returns a list guaranteed to include nxm_entry.
  """
  try:
    _ = t[0]      # Indexable? Then it's already a sequence
    t = list(t)
  except:
    t = [t]       # Single class; wrap it
  if not any(_issubclass(tt, nxm_entry) for tt in t):
    t.append(nxm_entry)
  #t = tuple(t)
  return t
def _make_nxm (__name, __vendor, __field, __len = None, type = None,
    **kw):
  """
  Make a simple NXM entry class

  Builds a class named __name for the given vendor/field/length,
  registers it in _nxm_type_to_class and _nxm_name_to_type, and installs
  it into this module's globals.  Extra keywords become class attributes
  (e.g. _size_hint).  `type` is the superclass (or list of superclasses);
  it defaults to _nxm_numeric_entry.
  """
  if type is None:
    type = (_nxm_numeric_entry,)
  else:
    type = _fix_types(type)

  t = _make_type(__vendor, __field)
  kw['_nxm_type'] = t
  if __len is not None: kw['_nxm_length'] = __len
  # The `type` parameter shadows the builtin; fetch the real one
  import __builtin__
  typ = __builtin__.type
  c = typ(__name, tuple(type), kw)
  _nxm_type_to_class[t] = c
  _nxm_name_to_type[__name] = t
  assert __name not in globals()
  globals()[__name] = c
  return c
def _make_nxm_w (*args, **kw):
  """
  Make a simple wildcarded NXM entry class

  Like _make_nxm, but guarantees the result is maskable by prepending
  _nxm_maskable to the base classes when necessary.
  """
  types = _fix_types(kw.pop('type', _nxm_maskable_numeric_entry))
  if not any(_issubclass(tt, _nxm_maskable) for tt in types):
    types.insert(0, _nxm_maskable)
  return _make_nxm(*args, type=types, **kw)
def _class_for_nxm_header (raw):
  """
  Given a raw nxm_entry header, return corresponding class
  If we don't have a class for this header type, we generate one.
  """
  t,has_mask,length = nxm_entry.unpack_header(raw, 0)
  c = _nxm_type_to_class.get(t)
  if c: return c

  # Need to generate a new nxm_entry type.
  # This code is totally untested.
  vendor = (t >> 7) & 0xffff
  field = t & 0x7f
  typename = "NXM_UNKNOWN_"
  typename += "%04x_%02x" % (vendor,field)
  if has_mask: typename += "_MASKABLE"
  types = [_nxm_raw]
  if has_mask:
    types.append(_nxm_maskable)
  # NOTE(review): `length` here is the header's payload length, which
  # includes the mask when has_mask is set -- _nxm_length should
  # arguably be length/2 in that case (cf. nxm_entry.unpack_new);
  # confirm before relying on the generated class.
  return _make_nxm(typename, vendor, field, length, types)
# -----------------------------------------------------------------------
# OpenFlow 1.0-compatible nxm_entries
# -----------------------------------------------------------------------
_make_nxm("NXM_OF_IN_PORT", 0, 0, 2)

_make_nxm_w("NXM_OF_ETH_DST", 0, 1, 6, type=_nxm_ether)
_make_nxm_w("NXM_OF_ETH_SRC", 0, 2, 6, type=_nxm_ether)

# Packet ethertype
_make_nxm("NXM_OF_ETH_TYPE", 0, 3, 2)

# VLAN tag control information
_make_nxm_w("NXM_OF_VLAN_TCI", 0, 4, 2)

_make_nxm_w("NXM_OF_IP_TOS", 0, 5, 1)
_make_nxm_w("NXM_OF_IP_PROTO", 0, 6, 1)

_make_nxm_w("NXM_OF_IP_SRC", 0, 7, 4, type=_nxm_ip)
_make_nxm_w("NXM_OF_IP_DST", 0, 8, 4, type=_nxm_ip)

# Maskable in OVS 1.6+
_make_nxm_w("NXM_OF_TCP_SRC", 0, 9, 2)
_make_nxm_w("NXM_OF_TCP_DST", 0, 10, 2)

# Maskable in OVS 1.6+
_make_nxm_w("NXM_OF_UDP_SRC", 0, 11, 2)
_make_nxm_w("NXM_OF_UDP_DST", 0, 12, 2)

_make_nxm("NXM_OF_ICMP_TYPE", 0, 13, 1)
_make_nxm("NXM_OF_ICMP_CODE", 0, 14, 1)

_make_nxm("NXM_OF_ARP_OP", 0, 15, 2)

# The IP address in an ethernet+IP ARP packet
# Fully maskable in OVS 1.8+, only CIDR-compatible masks before that
_make_nxm_w("NXM_OF_ARP_SPA", 0, 16, 4, type=_nxm_ip)
_make_nxm_w("NXM_OF_ARP_TPA", 0, 17, 4, type=_nxm_ip)
# -----------------------------------------------------------------------
# Nicira register nxm_entries
# -----------------------------------------------------------------------
NXM_NX_MAX_REGS = 16

# Array with all the register entries indexed by their number
# (they are also available as NXM_NX_REG0, etc.)
NXM_NX_REG = []

def _init_regs ():
  # Generate NXM_NX_REG0..NXM_NX_REG15 (vendor 1, fields 0..15, 4 bytes
  # each, maskable) and expose each one at module level
  for i in range(0, NXM_NX_MAX_REGS):
    assert len(NXM_NX_REG) == i
    n = "NXM_NX_REG" + str(i)
    r = _make_nxm_w(n, 1, i, 4, type=_nxm_reg)
    NXM_NX_REG.append(r)
    globals()[n] = r
_init_regs()
def NXM_IS_NX_REG (o):
  """
  Simulates macro from OVS

  True exactly when the entry reports itself as a register entry.
  """
  return getattr(o, "is_reg")
# -----------------------------------------------------------------------
# Nicira nxm_entries
# -----------------------------------------------------------------------
# Tunnel properties
_make_nxm_w("NXM_NX_TUN_ID", 1, 16, 8)
_make_nxm_w("NXM_NX_TUN_IPV4_SRC", 1, 31, 4, type=_nxm_ip)
_make_nxm_w("NXM_NX_TUN_IPV4_DST", 1, 32, 4, type=_nxm_ip)

# The ethernet address in an ethernet+IP ARP packet
_make_nxm("NXM_NX_ARP_SHA", 1, 17, 6, type=_nxm_ether)
_make_nxm("NXM_NX_ARP_THA", 1, 18, 6, type=_nxm_ether)

# Fully maskable in OVS 1.8+, only CIDR-compatible masks before that
_make_nxm_w("NXM_NX_IPV6_SRC", 1, 19, 16, type=_nxm_ipv6)
_make_nxm_w("NXM_NX_IPV6_DST", 1, 20, 16, type=_nxm_ipv6)

_make_nxm("NXM_NX_ICMPV6_TYPE", 1, 21, 1)
_make_nxm("NXM_NX_ICMPV6_CODE", 1, 22, 1)

# IPv6 Neighbor Discovery target address
_make_nxm_w("NXM_NX_ND_TARGET", 1, 23, 16, type=_nxm_ipv6)

# IPv6 Neighbor Discovery source link-layer address
_make_nxm("NXM_NX_ND_SLL", 1, 24, 6, type=_nxm_ether)

# IPv6 Neighbor Discovery target link-layer address
_make_nxm("NXM_NX_ND_TLL", 1, 25, 6, type=_nxm_ether)

# Bits for NXM_NX_IP_FRAG
NX_IP_FRAG_ANY = 1   # It's the first/only fragment
NX_IP_FRAG_LATER = 3 # It's not the first fragment

# IP fragment information
#TODO: A custom type or types would make this nicer to use.
#      For now, use with above flags.
_make_nxm_w("NXM_NX_IP_FRAG", 1, 26, 1)

# IPv6 flow label
_make_nxm("NXM_NX_IPV6_LABEL", 1, 27, 4)

# IP ECN bits
_make_nxm("NXM_NX_IP_ECN", 1, 28, 1)

_make_nxm("NXM_NX_IP_TTL", 1, 29, 1)

# Flow cookie
_make_nxm_w("NXM_NX_COOKIE", 1, 30, 8)

# MPLS label, traffic class, and bottom-of-stack flag
# Note that these are from OpenFlow 1.2 and I think BOS is from 1.3,
# so technically these don't belong here.  They do work with OVS through
# NXM match and flow mod, though.  (_size_hint limits the significant
# bits: 20-bit label, 3-bit TC, 1-bit BOS.)
_make_nxm("OXM_OF_MPLS_LABEL", 0x8000, 34, 4, _size_hint=20)
_make_nxm("OXM_OF_MPLS_TC", 0x8000, 35, 1, _size_hint=3)
_make_nxm("OXM_OF_MPLS_BOS", 0x8000, 36, 1, _size_hint=1)
#@vendor_s_message('NXT_SET_ASYNC_CONFIG', 19)
class nx_async_config (nicira_base):
  """
  NXT_SET_ASYNC_CONFIG message

  Configures which asynchronous messages (packet-in, port status, and
  flow removed) the switch sends, with separate bitmasks for the
  master/other role and the slave role.
  """
  subtype = NXT_SET_ASYNC_CONFIG
  _MIN_LENGTH = 40
  def _init (self, kw):
    # For master or other role
    self.packet_in_mask = 0
    self.port_status_mask = 0
    self.flow_removed_mask = 0
    # For slave role
    self.packet_in_mask_slave = 0
    self.port_status_mask_slave = 0
    self.flow_removed_mask_slave = 0
  def set_packet_in (self, bit, master=True, slave=True):
    """Enable the given packet-in reason bit for the selected role(s)."""
    if master: self.packet_in_mask |= bit
    if slave: self.packet_in_mask_slave |= bit
  def set_port_status (self, bit, master=True, slave=True):
    """Enable the given port-status reason bit for the selected role(s)."""
    if master: self.port_status_mask |= bit
    if slave: self.port_status_mask_slave |= bit
  def set_flow_removed (self, bit, master=True, slave=True):
    """Enable the given flow-removed reason bit for the selected role(s)."""
    # Fixed: was "selfflow_removed_mask" (missing dot) -- a NameError
    if master: self.flow_removed_mask |= bit
    if slave: self.flow_removed_mask_slave |= bit
  def _eq (self, other):
    """
    Return True if equal

    Override this.
    """
    for a in "packet_in port_status flow_removed".split():
      a += "_mask"
      if getattr(self, a) != getattr(other, a): return False
      a += "_slave"
      if getattr(self, a) != getattr(other, a): return False
    return True
  def _pack_body (self):
    # Six 32-bit masks in network byte order (three pairs of
    # master/other mask followed by the slave mask)
    return struct.pack("!IIIIII",
                       self.packet_in_mask, self.packet_in_mask_slave,
                       self.port_status_mask, self.port_status_mask_slave,
                       self.flow_removed_mask, self.flow_removed_mask_slave)
  def _unpack_body (self, raw, offset, avail):
    """
    Unpack body in raw starting at offset.

    Return new offset
    """
    offset,tmp = of._unpack("!IIIIII", raw, offset)
    self.packet_in_mask = tmp[0]
    self.packet_in_mask_slave = tmp[1]
    self.port_status_mask = tmp[2]
    self.port_status_mask_slave = tmp[3]
    self.flow_removed_mask = tmp[4]
    self.flow_removed_mask_slave = tmp[5]
    return offset
#@vendor_s_message('NXT_PACKET_IN', 17)
class nxt_packet_in (nicira_base, of.ofp_packet_in):
  """
  NXT_PACKET_IN message

  Like an OpenFlow packet-in, except the packet metadata (in_port, etc.)
  is carried in a flexible nx_match instead of fixed header fields.
  """
  subtype = NXT_PACKET_IN
  _MIN_LENGTH = 34
  def _init (self, kw):
    ofp_header.__init__(self)
    self._buffer_id = None
    self.reason = 0
    self.data = None
    self._total_len = None
    self._match = None
    if 'total_len' in kw:
      self._total_len = kw.pop('total_len')
  def _validate (self):
    # Returns an error string, or None when the message is valid
    if self.data and (self.total_len < len(self.packed_data)):
      return "total len less than data len"
  @property
  def in_port (self):
    # The input port lives in the match rather than in a fixed field
    return self.match.of_in_port
  @property
  def match (self):
    # Lazily create an empty match (see also unpack(), which relies on
    # this by setting the match to None before unpacking into it)
    if self._match is None:
      self._match = nx_match()
    return self._match
  @match.setter
  def match (self, v):
    self._match = v
  def pack (self):
    assert self._assert()
    match_len = len(self.match)
    packed = b""
    packed += ofp_header.pack(self)
    packed += struct.pack("!LL", NX_VENDOR_ID, self.subtype)
    packed += struct.pack("!LHBBQH", self._buffer_id, self.total_len,
                          self.reason, self.table_id, self.cookie,
                          match_len)
    packed += _PAD6
    # Fixed: was bare "match.pack()" (an undefined name) -- pack this
    # message's own match
    packed += self.match.pack()
    # Pad the match out to a multiple of 8 bytes (integer division,
    # matching unpack() and __len__())
    packed += _PAD * ((match_len + 7)//8*8 - match_len)
    packed += _PAD2
    packed += self.packed_data
    return packed
  @property
  def packed_data (self):
    if self.data is None:
      return b''
    if hasattr(self.data, 'pack'):
      # I don't think this is ever encountered...
      return self.data.pack()
    else:
      return self.data
  def unpack (self, raw, offset=0):
    _offset = offset
    offset,length = self._unpack_header(raw, offset)
    offset,(vendor,subtype) = _unpack("!LL", raw, offset)
    assert subtype == self.subtype
    #print "vendor %08x subtype %i" % (vendor,subtype)
    offset,(self._buffer_id, self._total_len, self.reason, self.table_id,
            self.cookie, match_len) = _unpack("!LHBBQH", raw, offset)
    offset = _skip(raw, offset, 6)
    # Resetting the match makes the property above create a fresh nx_match
    self.match = None
    offset = self.match.unpack(raw, offset, match_len)
    offset = _skip(raw, offset, (match_len + 7)//8*8 - match_len)
    offset = _skip(raw, offset, 2)
    offset,self.data = _read(raw, offset, length-(offset-_offset))
    assert length == len(self)
    return offset,length
  def __len__ (self):
    match_len = len(self.match)
    l = 8 + 4 + 4        # header + vendor + subtype
    l += 4 + 2 + 1 + 1 + 8 + 2
    l += 6               # pad
    l += match_len
    l += (match_len + 7)//8*8 - match_len
    l += 2               # pad
    l += len(self.packed_data)
    return l
  def __eq__ (self, other):
    if not of.ofp_packet_in.__eq__(self, other): return False
    if self.table_id != other.table_id: return False
    if self.cookie != other.cookie: return False
    if self.match != other.match: return False
    return True
  def __ne__ (self, other): return not self.__eq__(other)
  def show (self, prefix=''):
    outstr = ''
    outstr += prefix + 'header: \n'
    outstr += ofp_header.show(self, prefix + ' ')
    outstr += prefix + 'buffer_id: ' + str(self.buffer_id) + '\n'
    outstr += prefix + 'total_len: ' + str(self._total_len) + '\n'
    outstr += prefix + 'reason: ' + str(self.reason) + '\n'
    outstr += prefix + 'table_id: ' + str(self.table_id) + '\n'
    outstr += prefix + 'match: ' + str(self.match) + '\n'
    outstr += prefix + 'cookie: ' + str(self.cookie) + '\n'
    #from pox.lib.util import hexdump
    #outstr += prefix + 'data: ' + hexdump(self.data) + '\n'
    outstr += prefix + 'datalen: ' + str(len(self.data)) + '\n'
    return outstr
  def field (self, t):
    """
    Returns the first match entry of exact type t (or None)
    """
    for i in self.match:
      if type(i) == t:
        return i
    return None
class nx_match (object):
  """
  A flexible match container

  This has some magic.  It acts as if it has properties for each
  registered nxm_entry type.  For example, there's a NXM_OF_IP_SRC
  nxm_entry type for the source IP address, so you can do:
    m = nx_match()
    m.of_ip_src = IPAddr("192.168.1.1")
  Since nxm_entries can have masks, you actually get a number of pseudo-
  properties, by appending "_mask", "_with_mask", or "_entry":
    m.of_ip_src_with_mask = ("192.168.1.0", "255.255.255.0")
    # or...
    m.of_ip_src = "192.168.1.0"
    m.of_ip_src_mask = "255.255.255.0"
    # or...
    m.of_ip_src_entry = NXM_OF_IP_SRC("192.168.1.1", "255.255.255.0")
  nxm_entries themselves may have magic.  For example, IP address
  nxm_entries understand CIDR bits as part of the value, so you can do:
    m.of_ip_src = "192.168.1.0/24"
    print m.of_ip_src
    > NXM_OF_IP_SRC(192.168.1.0/255.255.255.0)
  *The order you add entries is significant*.  If you have an entry
  with a prerequisite, you must add the prerequisite first.  It would be
  really nice if nx_match could automatically adjust orderings to try to
  satisfy nxm_entry prerequisites, and throw an exception if it's not
  possible.  This is a TODO item.
  """
  #TODO: Test!
  #TODO: Handle prerequisites (as described above)
  _locked = False # When True, can't add new attributes
  def __init__ (self, *parts, **kw):
    """
    Initialize this match

    You can initialize either from a list of parts or from a bunch of
    key/value pairs which are just like a shortcut for setting individual
    properties.
    """
    self._parts = list(parts)
    self._dirty()
    # Fixed: iterating a dict yields only keys -- items() gives (k,v) pairs
    for k,v in kw.items():
      setattr(self, k, v)
    self._locked = True
  def unpack (self, raw, offset, avail):
    """
    Unpack a sequence of nxm_entries from raw; returns the new offset
    """
    del self._parts[:]
    self._dirty()
    stop = avail+offset
    while offset < stop:
      _o = offset
      offset,entry = nxm_entry.unpack_new(raw, offset)
      if offset == _o:
        raise RuntimeError("No progress unpacking nxm_entries")
      self._parts.append(entry)
    #assert offset == stop
    return offset
  def pack (self, omittable = False):
    return ''.join(x.pack(omittable) for x in self._parts)
  def __eq__ (self, other):
    if not isinstance(other, self.__class__): return False
    # Fixed: "other.__parts" was name-mangled to a nonexistent attribute
    return self._parts == other._parts
  def clone (self):
    n = nx_match()
    for p in self._parts:
      n.append(p.clone())
    return n
  def __str__ (self):
    return ','.join(str(m) for m in self._parts)
  def show (self, prefix = ''):
    return prefix + str(self)
  @property
  def _map (self):
    # Lazily (re)build the nxm_type -> entry lookup table
    if self._cache is None:
      self._cache = {}
      for i in self._parts:
        assert i._nxm_type not in self._cache
        self._cache[i._nxm_type] = i
    return self._cache
  def __len__ (self):
    return sum(len(x) for x in self._parts)
  def __getitem__ (self, index):
    return self._parts[index]
  def remove (self, t):
    """
    Remove an entry
    """
    if isinstance(t, nxm_entry):
      t = t._nxm_type
    if t not in self._map:
      return
    t = self._map[t]
    self._parts.remove(t)
    self._dirty()
  def find (self, t):
    """
    Returns nxm_entry of given type
    """
    if isinstance(t, nxm_entry) or _issubclass(t, nxm_entry):
      t = t._nxm_type
    return self._map.get(t)
  def index (self, t):
    """
    Returns index of nxm_entry of given type (-1 if not present)
    """
    if isinstance(t, nxm_entry):
      t = t._nxm_type
    if t not in self._map:
      return -1 # Exception? None?
    # Fixed: lists have no .find(); look up the entry and return its index
    return self._parts.index(self._map[t])
  def _dirty (self):
    # Invalidate the cached type->entry map
    self._cache = None
  def insert (self, position, item):
    """
    Insert item before position (an index, entry, or entry type)
    """
    # Fixed: referenced undefined name "t", and used find() (which
    # returns an entry, not an index) as a list index
    if isinstance(position, nxm_entry) or _issubclass(position, nxm_entry):
      position = self.index(position)
      if position == -1:
        self.append(item)
        return
    self._parts.insert(position, item)
    self._dirty()
  def insert_after (self, position, item):
    """
    Insert item after position (an index, entry, or entry type)
    """
    # See insert() -- the same fixes apply here
    if isinstance(position, nxm_entry) or _issubclass(position, nxm_entry):
      position = self.index(position)
      if position == -1:
        self.append(item)
        return
    self._parts.insert(position+1, item)
    self._dirty()
  def append (self, item):
    """
    Add another nxm_entry to this match
    """
    #TODO: check prereqs
    if not isinstance(item, nxm_entry):
      raise ValueError("Not an nxm_entry")
    if self.find(item) is not None:
      raise ValueError("Type already exists in this match")
    self._parts.append(item)
    self._dirty()
  def __iadd__ (self, other):
    self.append(other)
    # Fixed: __iadd__ must return self, or "m += entry" rebinds m to None
    return self
  @staticmethod
  def _fixname (name):
    # Resolve a pseudo-property name into (base name, nxm type,
    # is_mask, with_mask, is_entry)
    name = name.upper()
    is_mask = with_mask = is_entry = False
    if name.endswith("_MASK"):
      if name.endswith("_WITH_MASK"):
        with_mask = True
        name = name.rsplit("_WITH_MASK", 1)[0]
      else:
        is_mask = True
        name = name.rsplit("_MASK", 1)[0]
    elif name.endswith("_ENTRY"):
      name = name.rsplit("_ENTRY", 1)[0]
      is_entry = True
    n = name
    for prefix in ('', 'NXM_', 'NXM_OF_', 'OXM_', 'OXM_OF_', 'NXM_NX_'):
      nxt = _nxm_name_to_type.get(prefix + n)
      if nxt is not None: break
    #print n, nxt, is_mask, with_mask, is_entry
    return n, nxt, is_mask, with_mask, is_entry
  def __getattr__ (self, name):
    name,nxt,is_mask,with_mask,is_entry = self._fixname(name)
    if nxt is None:
      raise AttributeError("No attribute " + name)
    if nxt not in self._map:
      if with_mask: return None,None
      if is_mask: return None # Exception?
      if is_entry: return None # Synthesize?
      return None
    v = self._map[nxt]
    if with_mask: return (v.value,v.mask)
    if is_mask: return v.mask
    if is_entry: return v
    return v.value
  def __setattr__ (self, name, value):
    if name.startswith('_'):
      return object.__setattr__(self, name, value)
    n,nxt,is_mask,with_mask,is_entry = self._fixname(name)
    if nxt is None:
      if self._locked:
        raise AttributeError("No attribute " + name)
      return object.__setattr__(self, name, value)
    entry = self.find(nxt)
    if is_entry: assert isinstance(value, nxm_entry)
    if is_entry and (value is None) and (entry is not None):
      # Shortcut entry removal
      # Allow for non is_entry? Doing so is ambiguous if there are
      # ever nxm_entries with None as a legal value.
      self.remove(nxt)
      return
    if isinstance(value, nxm_entry):
      # Fixed: compared against the nxm_entry base class's type instead
      # of the type of the value actually being assigned
      if nxt != value._nxm_type:
        raise ValueError("Unmatched types")
      if entry is None:
        self.append(value)
      else:
        # hacky
        entry.value = value.value
        entry.mask = value.mask
    else:
      if entry is None:
        entry = _nxm_type_to_class[nxt]()
        self.append(entry)
      # hacky
      if with_mask:
        entry.mask = value[1]
        entry.value = value[0]
      elif is_mask:
        entry.mask = value
      else:
        entry.value = value
#from pox.lib.revent import Event
#class NXPacketIn (Event):
# def __init__ (self, connection, ofp):
# Event.__init__(self)
# self.connection = connection
# self.ofp = ofp
# self.port = ofp.in_port
# self.data = ofp.data
# self._parsed = None
# self.dpid = connection.dpid
#
# def parse (self):
# if self._parsed is None:
# self._parsed = ethernet(self.data)
# return self._parsed
#
# @property
# def parsed (self):
# """
# The packet as parsed by pox.lib.packet
# """
# return self.parse()
#
#core.openflow._eventMixin_events.add(NXPacketIn)
_old_unpacker = None
def _unpack_nx_vendor (raw, offset):
from pox.lib.util import hexdump
v = _unpack("!L", raw, offset + 8)[1][0]
if v != NX_VENDOR_ID:
return _old_unpacker(raw, offset)
subtype = _unpack("!L", raw, offset+8+4)[1][0]
if subtype == NXT_PACKET_IN:
npi = nxt_packet_in()
return npi.unpack(raw, offset)[0], npi
elif subtype == NXT_ROLE_REPLY:
nrr = nxt_role_reply()
return nrr.unpack(raw, offset)[0], nrr
else:
print "NO UNPACKER FOR",subtype
return _old_unpacker(raw, offset)
def _init_unpacker ():
  """
  Hook our vendor-message unpacker in front of the stock one.

  The old unpacker is saved so _unpack_nx_vendor can fall back to it.
  """
  global _old_unpacker
  from pox.openflow.of_01 import unpackers
  _old_unpacker = unpackers[of.OFPT_VENDOR]
  unpackers[of.OFPT_VENDOR] = _unpack_nx_vendor
_old_handler = None # Saved stock OFPT_VENDOR handler we wrap
from pox.openflow import PacketIn
def _handle_VENDOR (con, msg):
  # If enabled, re-raise Nicira packet-ins as ordinary PacketIn events so
  # existing components keep working; everything else goes to the old
  # handler.  The nexus gets the event first; the connection only sees it
  # if no nexus listener halted it.
  if isinstance(msg, nxt_packet_in) and core.NX.convert_packet_in:
    e = con.ofnexus.raiseEventNoErrors(PacketIn, con, msg)
    if e is None or e.halt != True:
      con.raiseEventNoErrors(PacketIn, con, msg)
# elif isinstance(msg, nxt_role_reply):
# pass
# #TODO
  else:
    _old_handler(con, msg)
def _init_handler ():
  """
  Replace the stock OFPT_VENDOR handler with _handle_VENDOR.

  The old handler is saved so non-Nicira messages can be delegated to it.
  """
  global _old_handler
  from pox.openflow.of_01 import handlerMap, _set_handlers
  _old_handler = handlerMap.get(of.OFPT_VENDOR)
  handlerMap[of.OFPT_VENDOR] = _handle_VENDOR
  _set_handlers()
class NX (object):
  """
  Nicira extension component
  """
  # When True, NXT_PACKET_IN messages are also raised as regular
  # PacketIn events (see _handle_VENDOR)
  convert_packet_in = False
def launch (convert_packet_in = False):
  """
  Initialize the Nicira extension component.

  convert_packet_in: when truthy, NXT_PACKET_IN messages are re-raised
  as ordinary PacketIn events (for compatibility with components that
  don't know about the Nicira message).
  """
  _init_handler()
  _init_unpacker()
  nx = NX()
  if convert_packet_in:
    nx.convert_packet_in = True
  core.register("NX", nx)
| {
"content_hash": "4ea332c9e31f5113a81d5e80aa8fcca1",
"timestamp": "",
"source": "github",
"line_count": 2594,
"max_line_length": 79,
"avg_line_length": 27.116422513492676,
"alnum_prop": 0.5907307364230878,
"repo_name": "routeflow/AutomaticConfigurationRouteFlow",
"id": "af1f63272e73736892e9f2bf091274eacd5b4381",
"size": "71003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "POX_CONTROLLER/pox/openflow/nicira.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "17883"
},
{
"name": "C++",
"bytes": "150794"
},
{
"name": "CSS",
"bytes": "4351"
},
{
"name": "Java",
"bytes": "472555"
},
{
"name": "JavaScript",
"bytes": "517476"
},
{
"name": "Perl",
"bytes": "3871"
},
{
"name": "Python",
"bytes": "2642731"
},
{
"name": "Shell",
"bytes": "43123"
}
],
"symlink_target": ""
} |
import os
import pickle
import random
import item, player, monster, cave
class Game(object):
def __init__(self):
self.caves = self.create_caves()
cave1 = self.caves[0]
sword = item.Item("sword", "A pointy sword.", cave1)
coin = item.Item("coin", "A shiny gold coin. "
"Your first piece of treasure!", cave1)
#orc = monster.Monster(self, self.caves[1],
# 'orc', 'A generic dungeon monster')
#self.player = player.Player(cave1)
self.players = []
self.player_store = {}
self.start_loc = cave1
self.load_players()
def do_input(self):
get_input_from = [thing for cave in self.caves
for thing in cave.here
if 'get_input' in dir(thing)]
for thing in get_input_from:
thing.events = []
thing.input = thing.get_input()
#if thing.name == "Player":
# print str(thing) + " input is: " + thing.input
def do_update(self):
things_to_update = [thing for cave in self.caves
for thing in cave.here
if 'update' in dir(thing)]
for thing in things_to_update:
thing.update()
#if thing.name == "Player":
# print str(thing) + " result is: " + '\n'.join(thing.result)
def send_results(self):
"""Send results of actions to players"""
things_to_update = [thing for cave in self.caves
for thing in cave.here
if 'send_results' in dir(thing)]
for thing in things_to_update:
#if thing.name == "Player":
# print str(thing) + " result is: " + '\n'.join(thing.result)
# print "-----"
thing.send_results()
def close_dead_connections(self):
dead_players = [thing for cave in self.caves
for thing in cave.here
if 'connection' in dir(thing) and not thing.playing]
for player in dead_players:
player.connection.transport.loseConnection()
cave_names = [
"Arched cavern",
"Twisty passages",
"Dripping cave",
"Dusty crawlspace",
"Underground lake",
"Black pit",
"Fallen cave",
"Shallow pool",
"Icy underground river",
"Sandy hollow",
"Old firepit",
"Tree root cave",
"Narrow ledge",
"Winding steps",
"Echoing chamber",
"Musty cave",
"Gloomy cave",
"Low ceilinged cave",
"Wumpus lair",
"Spooky Chasm",
]
def create_caves(self):
random.shuffle(self.cave_names)
caves = [cave.Cave(self.cave_names[0])]
for name in self.cave_names[1:]:
new_cave = cave.Cave(name)
eligible_caves = [each_cave for each_cave in caves
if each_cave.can_tunnel_to()]
old_cave = random.choice(eligible_caves)
directions = [direction for direction, each_cave
in old_cave.tunnels.items()
if each_cave is None]
direction = random.choice(directions)
old_cave.tunnel_to(direction, new_cave)
caves.append(new_cave)
return caves
def run(self):
print "\n".join(self.player.location.look(player, ''))
while self.player.playing:
self.do_input()
self.do_update()
print "\n".join(self.player.events)
print "\n".join(self.player.result)
ignored = raw_input("Hit enter to continue.")
def run_one_tick(self):
"""Intended to be run once per second.
Pull the next action from each monster/player and go!"""
self.do_input()
self.do_update()
self.send_results()
self.close_dead_connections()
def save(self):
"""find all players in game, update their
status, and save everyone to the file"""
for player in self.players:
self.player_store[player.name] = player.save()
print "Saving:", self.player_store
save_file = open('players.pickle', 'wb')
pickle.dump(self.player_store, save_file)
def load_players(self):
if not os.access('players.pickle', os.F_OK) == 1:
return
load_file = open('players.pickle', 'rb')
self.player_store = pickle.load(load_file)
# Stand-alone console mode (the networked server calls run_one_tick()
# directly instead).
if __name__ == '__main__':
    game = Game()
    game.run()
"""
look self.look
inv self.inv
go north self.location.go
north self.location.go
look sword sword.look
get sword sword.get
kill orc orc.kill
"""
| {
"content_hash": "efd263eae0064598f6d4aeb6b2e826bd",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 76,
"avg_line_length": 32.57823129251701,
"alnum_prop": 0.5318438087283358,
"repo_name": "geekbert/HelloPythonSourceCode",
"id": "9e7f01da89af70f813a7cbb7a092ede15d58738a",
"size": "4790",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chapter 10/mud-4/game.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "238138"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function, absolute_import
import warnings
from . import _minpack
import numpy as np
from numpy import (atleast_1d, dot, take, triu, shape, eye,
transpose, zeros, product, greater, array,
all, where, isscalar, asarray, inf, abs,
finfo, inexact, issubdtype, dtype)
from scipy.linalg import svd, cholesky, solve_triangular, LinAlgError
from scipy._lib._util import _asarray_validated, _lazywhere
from .optimize import OptimizeResult, _check_unknown_options, OptimizeWarning
from ._lsq import least_squares
from ._lsq.common import make_strictly_feasible
from ._lsq.least_squares import prepare_bounds
error = _minpack.error
__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit']
def _check_func(checker, argname, thefunc, x0, args, numinputs,
output_shape=None):
res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
if (output_shape is not None) and (shape(res) != output_shape):
if (output_shape[0] != 1):
if len(output_shape) > 1:
if output_shape[1] == 1:
return shape(res)
msg = "%s: there is a mismatch between the input and output " \
"shape of the '%s' argument" % (checker, argname)
func_name = getattr(thefunc, '__name__', None)
if func_name:
msg += " '%s'." % func_name
else:
msg += "."
msg += 'Shape should be %s but it is %s.' % (output_shape, shape(res))
raise TypeError(msg)
if issubdtype(res.dtype, inexact):
dt = res.dtype
else:
dt = dtype(float)
return shape(res), dt
def fsolve(func, x0, args=(), fprime=None, full_output=0,
           col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None,
           epsfcn=None, factor=100, diag=None):
    """
    Find the roots of a function.
    Return the roots of the (non-linear) equations defined by
    ``func(x) = 0`` given a starting estimate.
    Parameters
    ----------
    func : callable ``f(x, *args)``
        A function that takes at least one (possibly vector) argument.
    x0 : ndarray
        The starting estimate for the roots of ``func(x) = 0``.
    args : tuple, optional
        Any extra arguments to `func`.
    fprime : callable(x), optional
        A function to compute the Jacobian of `func` with derivatives
        across the rows. By default, the Jacobian will be estimated.
    full_output : bool, optional
        If True, return optional outputs.
    col_deriv : bool, optional
        Specify whether the Jacobian function computes derivatives down
        the columns (faster, because there is no transpose operation).
    xtol : float, optional
        The calculation will terminate if the relative error between two
        consecutive iterates is at most `xtol`.
    maxfev : int, optional
        The maximum number of calls to the function. If zero, then
        ``100*(N+1)`` is the maximum where N is the number of elements
        in `x0`.
    band : tuple, optional
        If set to a two-sequence containing the number of sub- and
        super-diagonals within the band of the Jacobi matrix, the
        Jacobi matrix is considered banded (only for ``fprime=None``).
    epsfcn : float, optional
        A suitable step length for the forward-difference
        approximation of the Jacobian (for ``fprime=None``). If
        `epsfcn` is less than the machine precision, it is assumed
        that the relative errors in the functions are of the order of
        the machine precision.
    factor : float, optional
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in the interval
        ``(0.1, 100)``.
    diag : sequence, optional
        N positive entries that serve as a scale factors for the
        variables.
    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for
        an unsuccessful call).
    infodict : dict
        A dictionary of optional outputs with the keys:
        ``nfev``
            number of function calls
        ``njev``
            number of Jacobian calls
        ``fvec``
            function evaluated at the output
        ``fjac``
            the orthogonal matrix, q, produced by the QR
            factorization of the final approximate Jacobian
            matrix, stored column wise
        ``r``
            upper triangular matrix produced by QR factorization
            of the same matrix
        ``qtf``
            the vector ``(transpose(q) * fvec)``
    ier : int
        An integer flag. Set to 1 if a solution was found, otherwise refer
        to `mesg` for more information.
    mesg : str
        If no solution is found, `mesg` details the cause of failure.
    See Also
    --------
    root : Interface to root finding algorithms for multivariate
    functions. See the 'hybr' `method` in particular.
    Notes
    -----
    ``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms.
    """
    # Translate the legacy keyword arguments into _root_hybr options and
    # delegate; everything below just repackages the OptimizeResult into
    # fsolve's historical return forms.
    options = {'col_deriv': col_deriv,
               'xtol': xtol,
               'maxfev': maxfev,
               'band': band,
               'eps': epsfcn,
               'factor': factor,
               'diag': diag}
    res = _root_hybr(func, x0, args, jac=fprime, **options)
    if full_output:
        # Legacy tuple form: (x, infodict, ier, mesg)
        x = res['x']
        info = dict((k, res.get(k))
                    for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res)
        info['fvec'] = res['fun']
        return x, info, res['status'], res['message']
    else:
        status = res['status']
        msg = res['message']
        if status == 0:
            # Improper input parameters
            raise TypeError(msg)
        elif status == 1:
            pass
        elif status in [2, 3, 4, 5]:
            # Convergence problems only warn in this mode
            warnings.warn(msg, RuntimeWarning)
        else:
            raise TypeError(msg)
        return res['x']
def _root_hybr(func, x0, args=(), jac=None,
               col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None,
               factor=100, diag=None, **unknown_options):
    """
    Find the roots of a multivariate function using MINPACK's hybrd and
    hybrj routines (modified Powell method).

    Returns an `OptimizeResult` with the solution in ``x``, the final
    residuals in ``fun``, and a human-readable ``message``.

    Options
    -------
    col_deriv : bool
        Specify whether the Jacobian function computes derivatives down
        the columns (faster, because there is no transpose operation).
    xtol : float
        The calculation will terminate if the relative error between two
        consecutive iterates is at most `xtol`.
    maxfev : int
        The maximum number of calls to the function. If zero, then
        ``100*(N+1)`` is the maximum where N is the number of elements
        in `x0`.
    band : tuple
        If set to a two-sequence containing the number of sub- and
        super-diagonals within the band of the Jacobi matrix, the
        Jacobi matrix is considered banded (only for ``fprime=None``).
    eps : float
        A suitable step length for the forward-difference
        approximation of the Jacobian (for ``fprime=None``). If
        `eps` is less than the machine precision, it is assumed
        that the relative errors in the functions are of the order of
        the machine precision.
    factor : float
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in the interval
        ``(0.1, 100)``.
    diag : sequence
        N positive entries that serve as a scale factors for the
        variables.
    """
    _check_unknown_options(unknown_options)
    epsfcn = eps
    x0 = asarray(x0).flatten()
    n = len(x0)
    if not isinstance(args, tuple):
        args = (args,)
    # NOTE: these locals shadow numpy's shape/dtype inside this function
    shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,))
    if epsfcn is None:
        epsfcn = finfo(dtype).eps
    Dfun = jac
    if Dfun is None:
        # hybrd: Jacobian estimated by forward differences; -10 tells
        # MINPACK the matrix is not banded
        if band is None:
            ml, mu = -10, -10
        else:
            ml, mu = band[:2]
        if maxfev == 0:
            maxfev = 200 * (n + 1)
        retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev,
                                 ml, mu, epsfcn, factor, diag)
    else:
        # hybrj: user-supplied Jacobian
        _check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n))
        if (maxfev == 0):
            maxfev = 100 * (n + 1)
        retval = _minpack._hybrj(func, Dfun, x0, args, 1,
                                 col_deriv, xtol, maxfev, factor, diag)
    x, status = retval[0], retval[-1]
    errors = {0: "Improper input parameters were entered.",
              1: "The solution converged.",
              2: "The number of calls to function has "
                 "reached maxfev = %d." % maxfev,
              3: "xtol=%f is too small, no further improvement "
                 "in the approximate\n solution "
                 "is possible." % xtol,
              4: "The iteration is not making good progress, as measured "
                 "by the \n improvement from the last five "
                 "Jacobian evaluations.",
              5: "The iteration is not making good progress, "
                 "as measured by the \n improvement from the last "
                 "ten iterations.",
              'unknown': "An error occurred."}
    info = retval[1]
    info['fun'] = info.pop('fvec')
    sol = OptimizeResult(x=x, success=(status == 1), status=status)
    sol.update(info)
    try:
        sol['message'] = errors[status]
    except KeyError:
        # Fixed: previously assigned to info (already merged into sol
        # above), which left sol without a 'message' for unknown statuses
        sol['message'] = errors['unknown']
    return sol
def leastsq(func, x0, args=(), Dfun=None, full_output=0,
            col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
            gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
    """
    Minimize the sum of squares of a set of equations.
    ::
        x = arg min(sum(func(y)**2,axis=0))
                 y
    Parameters
    ----------
    func : callable
        should take at least one (possibly length N vector) argument and
        returns M floating point numbers. It must not return NaNs or
        fitting might fail.
    x0 : ndarray
        The starting estimate for the minimization.
    args : tuple, optional
        Any extra arguments to func are placed in this tuple.
    Dfun : callable, optional
        A function or method to compute the Jacobian of func with derivatives
        across the rows. If this is None, the Jacobian will be estimated.
    full_output : bool, optional
        non-zero to return all optional outputs.
    col_deriv : bool, optional
        non-zero to specify that the Jacobian function computes derivatives
        down the columns (faster, because there is no transpose operation).
    ftol : float, optional
        Relative error desired in the sum of squares.
    xtol : float, optional
        Relative error desired in the approximate solution.
    gtol : float, optional
        Orthogonality desired between the function vector and the columns of
        the Jacobian.
    maxfev : int, optional
        The maximum number of calls to the function. If `Dfun` is provided
        then the default `maxfev` is 100*(N+1) where N is the number of elements
        in x0, otherwise the default `maxfev` is 200*(N+1).
    epsfcn : float, optional
        A variable used in determining a suitable step length for the forward-
        difference approximation of the Jacobian (for Dfun=None).
        Normally the actual step length will be sqrt(epsfcn)*x
        If epsfcn is less than the machine precision, it is assumed that the
        relative errors are of the order of the machine precision.
    factor : float, optional
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
    diag : sequence, optional
        N positive entries that serve as a scale factors for the variables.
    Returns
    -------
    x : ndarray
        The solution (or the result of the last iteration for an unsuccessful
        call).
    cov_x : ndarray
        Uses the fjac and ipvt optional outputs to construct an
        estimate of the jacobian around the solution. None if a
        singular matrix encountered (indicates very flat curvature in
        some direction). This matrix must be multiplied by the
        residual variance to get the covariance of the
        parameter estimates -- see curve_fit.
    infodict : dict
        a dictionary of optional outputs with the keys:
        ``nfev``
            The number of function calls
        ``fvec``
            The function evaluated at the output
        ``fjac``
            A permutation of the R matrix of a QR
            factorization of the final approximate
            Jacobian matrix, stored column wise.
            Together with ipvt, the covariance of the
            estimate can be approximated.
        ``ipvt``
            An integer array of length N which defines
            a permutation matrix, p, such that
            fjac*p = q*r, where r is upper triangular
            with diagonal elements of nonincreasing
            magnitude. Column j of p is column ipvt(j)
            of the identity matrix.
        ``qtf``
            The vector (transpose(q) * fvec).
    mesg : str
        A string message giving information about the cause of failure.
    ier : int
        An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
        found. Otherwise, the solution was not found. In either case, the
        optional output variable 'mesg' gives more information.
    Notes
    -----
    "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.
    cov_x is a Jacobian approximation to the Hessian of the least squares
    objective function.
    This approximation assumes that the objective function is based on the
    difference between some observed target data (ydata) and a (non-linear)
    function of the parameters `f(xdata, params)` ::
        func(params) = ydata - f(xdata, params)
    so that the objective function is ::
        min sum((ydata - f(xdata, params))**2, axis=0)
        params
    """
    x0 = asarray(x0).flatten()
    n = len(x0)
    if not isinstance(args, tuple):
        args = (args,)
    # NOTE: these locals shadow numpy's shape/dtype inside this function
    shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
    m = shape[0]
    if n > m:
        raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
    if epsfcn is None:
        epsfcn = finfo(dtype).eps
    if Dfun is None:
        # lmdif: Jacobian estimated by forward differences
        if maxfev == 0:
            maxfev = 200*(n + 1)
        retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
                                 gtol, maxfev, epsfcn, factor, diag)
    else:
        # lmder: user-supplied Jacobian
        if col_deriv:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
        else:
            _check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
        if maxfev == 0:
            maxfev = 100 * (n + 1)
        retval = _minpack._lmder(func, Dfun, x0, args, full_output, col_deriv,
                                 ftol, xtol, gtol, maxfev, factor, diag)
    # Map MINPACK info codes to a message and the exception (if any) to raise
    errors = {0: ["Improper input parameters.", TypeError],
              1: ["Both actual and predicted relative reductions "
                  "in the sum of squares\n are at most %f" % ftol, None],
              2: ["The relative error between two consecutive "
                  "iterates is at most %f" % xtol, None],
              3: ["Both actual and predicted relative reductions in "
                  "the sum of squares\n are at most %f and the "
                  "relative error between two consecutive "
                  "iterates is at \n most %f" % (ftol, xtol), None],
              4: ["The cosine of the angle between func(x) and any "
                  "column of the\n Jacobian is at most %f in "
                  "absolute value" % gtol, None],
              5: ["Number of calls to function has reached "
                  "maxfev = %d." % maxfev, ValueError],
              6: ["ftol=%f is too small, no further reduction "
                  "in the sum of squares\n is possible.""" % ftol,
                  ValueError],
              7: ["xtol=%f is too small, no further improvement in "
                  "the approximate\n solution is possible." % xtol,
                  ValueError],
              8: ["gtol=%f is too small, func(x) is orthogonal to the "
                  "columns of\n the Jacobian to machine "
                  "precision." % gtol, ValueError],
              'unknown': ["Unknown error.", TypeError]}
    info = retval[-1] # The FORTRAN return value
    if info not in [1, 2, 3, 4] and not full_output:
        if info in [5, 6, 7, 8]:
            warnings.warn(errors[info][0], RuntimeWarning)
        else:
            try:
                raise errors[info][1](errors[info][0])
            except KeyError:
                raise errors['unknown'][1](errors['unknown'][0])
    mesg = errors[info][0]
    if full_output:
        cov_x = None
        if info in [1, 2, 3, 4]:
            # Covariance estimate (R^T R)^{-1} built from the permuted QR
            # factors returned by MINPACK (see Notes above)
            from numpy.dual import inv
            perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
            r = triu(transpose(retval[1]['fjac'])[:n, :])
            R = dot(r, perm)
            try:
                cov_x = inv(dot(transpose(R), R))
            except (LinAlgError, ValueError):
                # Singular Jacobian -> leave cov_x as None
                pass
        return (retval[0], cov_x) + retval[1:-1] + (mesg, info)
    else:
        return (retval[0], info)
def _wrap_func(func, xdata, ydata, transform):
if transform is None:
def func_wrapped(params):
return func(xdata, *params) - ydata
elif transform.ndim == 1:
def func_wrapped(params):
return transform * (func(xdata, *params) - ydata)
else:
# Chisq = (y - yd)^T C^{-1} (y-yd)
# transform = L such that C = L L^T
# C^{-1} = L^{-T} L^{-1}
# Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd)
# Define (y-yd)' = L^{-1} (y-yd)
# by solving
# L (y-yd)' = (y-yd)
# and minimize (y-yd)'^T (y-yd)'
def func_wrapped(params):
return solve_triangular(transform, func(xdata, *params) - ydata, lower=True)
return func_wrapped
def _wrap_jac(jac, xdata, transform):
if transform is None:
def jac_wrapped(params):
return jac(xdata, *params)
elif transform.ndim == 1:
def jac_wrapped(params):
return transform[:, np.newaxis] * np.asarray(jac(xdata, *params))
else:
def jac_wrapped(params):
return solve_triangular(transform, np.asarray(jac(xdata, *params)), lower=True)
return jac_wrapped
def _initialize_feasible(lb, ub):
p0 = np.ones_like(lb)
lb_finite = np.isfinite(lb)
ub_finite = np.isfinite(ub)
mask = lb_finite & ub_finite
p0[mask] = 0.5 * (lb[mask] + ub[mask])
mask = lb_finite & ~ub_finite
p0[mask] = lb[mask] + 1
mask = ~lb_finite & ub_finite
p0[mask] = ub[mask] - 1
return p0
def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
              check_finite=True, bounds=(-np.inf, np.inf), method=None,
              jac=None, **kwargs):
    """
    Use non-linear least squares to fit a function, f, to data.

    Assumes ``ydata = f(xdata, *params) + eps``

    Parameters
    ----------
    f : callable
        The model function, f(x, ...).  It must take the independent
        variable as the first argument and the parameters to fit as
        separate remaining arguments.
    xdata : An M-length sequence or an (k,M)-shaped array for functions with k predictors
        The independent variable where the data is measured.
    ydata : M-length sequence
        The dependent data --- nominally f(xdata, ...)
    p0 : None, scalar, or N-length sequence, optional
        Initial guess for the parameters.  If None, then the initial
        values will all be 1 (if the number of parameters for the function
        can be determined using introspection, otherwise a ValueError
        is raised).
    sigma : None or M-length sequence or MxM array, optional
        Determines the uncertainty in `ydata`.  If we define residuals as
        ``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma`
        depends on its number of dimensions:

        - A 1-d `sigma` should contain values of standard deviations of
          errors in `ydata`.  In this case, the optimized function is
          ``chisq = sum((r / sigma) ** 2)``.
        - A 2-d `sigma` should contain the covariance matrix of
          errors in `ydata`.  In this case, the optimized function is
          ``chisq = r.T @ inv(sigma) @ r``.

          .. versionadded:: 0.19

        None (default) is equivalent of 1-d `sigma` filled with ones.
    absolute_sigma : bool, optional
        If True, `sigma` is used in an absolute sense and the estimated parameter
        covariance `pcov` reflects these absolute values.

        If False, only the relative magnitudes of the `sigma` values matter.
        The returned parameter covariance matrix `pcov` is based on scaling
        `sigma` by a constant factor.  This constant is set by demanding that the
        reduced `chisq` for the optimal parameters `popt` when using the
        *scaled* `sigma` equals unity.  In other words, `sigma` is scaled to
        match the sample variance of the residuals after the fit.
        Mathematically,
        ``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)``
    check_finite : bool, optional
        If True, check that the input arrays do not contain nans of infs,
        and raise a ValueError if they do.  Setting this parameter to
        False may silently produce nonsensical results if the input arrays
        do contain nans.  Default is True.
    bounds : 2-tuple of array_like, optional
        Lower and upper bounds on independent variables.  Defaults to no bounds.
        Each element of the tuple must be either an array with the length equal
        to the number of parameters, or a scalar (in which case the bound is
        taken to be the same for all parameters.)  Use ``np.inf`` with an
        appropriate sign to disable bounds on all or some parameters.

        .. versionadded:: 0.17
    method : {'lm', 'trf', 'dogbox'}, optional
        Method to use for optimization.  See `least_squares` for more details.
        Default is 'lm' for unconstrained problems and 'trf' if `bounds` are
        provided.  The method 'lm' won't work when the number of observations
        is less than the number of variables, use 'trf' or 'dogbox' in this
        case.

        .. versionadded:: 0.17
    jac : callable, string or None, optional
        Function with signature ``jac(x, ...)`` which computes the Jacobian
        matrix of the model function with respect to parameters as a dense
        array_like structure.  It will be scaled according to provided `sigma`.
        If None (default), the Jacobian will be estimated numerically.
        String keywords for 'trf' and 'dogbox' methods can be used to select
        a finite difference scheme, see `least_squares`.

        .. versionadded:: 0.18
    kwargs
        Keyword arguments passed to `leastsq` for ``method='lm'`` or
        `least_squares` otherwise.

    Returns
    -------
    popt : array
        Optimal values for the parameters so that the sum of the squared
        residuals of ``f(xdata, *popt) - ydata`` is minimized
    pcov : 2d array
        The estimated covariance of popt.  The diagonals provide the variance
        of the parameter estimate.  To compute one standard deviation errors
        on the parameters use ``perr = np.sqrt(np.diag(pcov))``.

        How the `sigma` parameter affects the estimated covariance
        depends on `absolute_sigma` argument, as described above.

        If the Jacobian matrix at the solution doesn't have a full rank, then
        'lm' method returns a matrix filled with ``np.inf``, on the other hand
        'trf'  and 'dogbox' methods use Moore-Penrose pseudoinverse to compute
        the covariance matrix.

    Raises
    ------
    ValueError
        if either `ydata` or `xdata` contain NaNs, or if incompatible options
        are used.

    RuntimeError
        if the least-squares minimization fails.

    OptimizeWarning
        if covariance of the parameters can not be estimated.

    See Also
    --------
    least_squares : Minimize the sum of squares of nonlinear functions.
    scipy.stats.linregress : Calculate a linear least squares regression for
                             two sets of measurements.

    Notes
    -----
    With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm
    through `leastsq`.  Note that this algorithm can only deal with
    unconstrained problems.

    Box constraints can be handled by methods 'trf' and 'dogbox'.  Refer to
    the docstring of `least_squares` for more information.

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.optimize import curve_fit

    >>> def func(x, a, b, c):
    ...     return a * np.exp(-b * x) + c

    define the data to be fit with some noise

    >>> xdata = np.linspace(0, 4, 50)
    >>> y = func(xdata, 2.5, 1.3, 0.5)
    >>> y_noise = 0.2 * np.random.normal(size=xdata.size)
    >>> ydata = y + y_noise
    >>> plt.plot(xdata, ydata, 'b-', label='data')

    Fit for the parameters a, b, c of the function `func`

    >>> popt, pcov = curve_fit(func, xdata, ydata)
    >>> plt.plot(xdata, func(xdata, *popt), 'r-', label='fit')

    Constrain the optimization to the region of ``0 < a < 3``, ``0 < b < 2``
    and ``0 < c < 1``:

    >>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 2., 1.]))
    >>> plt.plot(xdata, func(xdata, *popt), 'g--', label='fit-with-bounds')

    >>> plt.xlabel('x')
    >>> plt.ylabel('y')
    >>> plt.legend()
    >>> plt.show()

    """
    if p0 is None:
        # determine number of parameters by inspecting the function
        from scipy._lib._util import getargspec_no_self as _getargspec
        args, varargs, varkw, defaults = _getargspec(f)
        if len(args) < 2:
            raise ValueError("Unable to determine number of fit parameters.")
        # First positional argument of `f` is the independent variable.
        n = len(args) - 1
    else:
        p0 = np.atleast_1d(p0)
        n = p0.size

    lb, ub = prepare_bounds(bounds, n)
    if p0 is None:
        # No user guess: start from a point strictly inside the bounds.
        p0 = _initialize_feasible(lb, ub)

    bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))
    if method is None:
        # 'lm' cannot handle bounds; fall back to 'trf' when any are set.
        if bounded_problem:
            method = 'trf'
        else:
            method = 'lm'

    if method == 'lm' and bounded_problem:
        raise ValueError("Method 'lm' only works for unconstrained problems. "
                         "Use 'trf' or 'dogbox' instead.")

    # NaNs can not be handled
    if check_finite:
        ydata = np.asarray_chkfinite(ydata)
    else:
        ydata = np.asarray(ydata)

    if isinstance(xdata, (list, tuple, np.ndarray)):
        # `xdata` is passed straight to the user-defined `f`, so allow
        # non-array_like `xdata`.
        if check_finite:
            xdata = np.asarray_chkfinite(xdata)
        else:
            xdata = np.asarray(xdata)

    # Determine type of sigma
    if sigma is not None:
        sigma = np.asarray(sigma)

        # if 1-d, sigma are errors, define transform = 1/sigma
        if sigma.shape == (ydata.size, ):
            transform = 1.0 / sigma
        # if 2-d, sigma is the covariance matrix,
        # define transform = L such that L L^T = C
        elif sigma.shape == (ydata.size, ydata.size):
            try:
                # scipy.linalg.cholesky requires lower=True to return L L^T = A
                transform = cholesky(sigma, lower=True)
            except LinAlgError:
                raise ValueError("`sigma` must be positive definite.")
        else:
            raise ValueError("`sigma` has incorrect shape.")
    else:
        transform = None

    # Residual (and optionally Jacobian) callables with weighting baked in.
    func = _wrap_func(f, xdata, ydata, transform)
    if callable(jac):
        jac = _wrap_jac(jac, xdata, transform)
    elif jac is None and method != 'lm':
        jac = '2-point'

    if method == 'lm':
        # Remove full_output from kwargs, otherwise we're passing it in twice.
        return_full = kwargs.pop('full_output', False)
        res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
        popt, pcov, infodict, errmsg, ier = res
        cost = np.sum(infodict['fvec'] ** 2)
        if ier not in [1, 2, 3, 4]:
            raise RuntimeError("Optimal parameters not found: " + errmsg)
    else:
        # Rename maxfev (leastsq) to max_nfev (least_squares), if specified.
        if 'max_nfev' not in kwargs:
            kwargs['max_nfev'] = kwargs.pop('maxfev', None)

        res = least_squares(func, p0, jac=jac, bounds=bounds, method=method,
                            **kwargs)

        if not res.success:
            raise RuntimeError("Optimal parameters not found: " + res.message)

        cost = 2 * res.cost  # res.cost is half sum of squares!
        popt = res.x

        # Do Moore-Penrose inverse discarding zero singular values.
        _, s, VT = svd(res.jac, full_matrices=False)
        threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
        s = s[s > threshold]
        VT = VT[:s.size]
        pcov = np.dot(VT.T / s**2, VT)
        return_full = False

    warn_cov = False
    if pcov is None:
        # indeterminate covariance
        pcov = zeros((len(popt), len(popt)), dtype=float)
        pcov.fill(inf)
        warn_cov = True
    elif not absolute_sigma:
        # Scale pcov so the reduced chisq of the fit equals one.
        if ydata.size > p0.size:
            s_sq = cost / (ydata.size - p0.size)
            pcov = pcov * s_sq
        else:
            pcov.fill(inf)
            warn_cov = True

    if warn_cov:
        warnings.warn('Covariance of the parameters could not be estimated',
                      category=OptimizeWarning)

    if return_full:
        return popt, pcov, infodict, errmsg, ier
    else:
        return popt, pcov
def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0):
    """Perform a simple check on the gradient for correctness.

    Compares the user-supplied Jacobian ``Dfcn`` against a finite-difference
    estimate using MINPACK's two-pass ``chkder`` routine.

    Parameters
    ----------
    fcn : callable
        Function whose gradient is checked; called as ``fcn(x, *args)``.
    Dfcn : callable
        Jacobian of `fcn`; called as ``Dfcn(x, *args)``.
    x0 : array_like
        Point at which the check is performed.
    args : tuple, optional
        Extra arguments passed to both callables.
    col_deriv : int, optional
        Non-zero if `Dfcn` returns derivatives down columns.

    Returns
    -------
    good : ndarray
        Nonzero when every component of `err` exceeds 0.5, i.e. the
        gradient appears correct.
    err : ndarray
        Per-component correctness measure reported by ``chkder``.
    """
    x = atleast_1d(x0)
    n = len(x)
    x = x.reshape((n,))
    fvec = atleast_1d(fcn(x, *args))
    m = len(fvec)
    fvec = fvec.reshape((m,))
    ldfjac = m
    fjac = atleast_1d(Dfcn(x, *args))
    fjac = fjac.reshape((m, n))
    if col_deriv == 0:
        # chkder expects the Jacobian transposed in this case.
        fjac = transpose(fjac)
    xp = zeros((n,), float)
    err = zeros((m,), float)
    # First chkder pass (mode 1) only computes the perturbed point xp;
    # fvecp is not read yet, hence None is acceptable here.
    fvecp = None
    _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err)
    fvecp = atleast_1d(fcn(xp, *args))
    fvecp = fvecp.reshape((m,))
    # Second pass (mode 2) fills `err` using the function value at xp.
    _minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err)
    good = (product(greater(err, 0.5), axis=0))
    return (good, err)
def _del2(p0, p1, d):
return p0 - np.square(p1 - p0) / d
def _relerr(actual, desired):
    """Element-wise relative error of `actual` with respect to `desired`.

    Callers guard against ``desired == 0`` (see ``_fixed_point_helper``).
    """
    return (actual - desired) / desired
def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel):
    """Iterate ``func`` from ``x0`` until the relative change drops below xtol.

    With ``use_accel`` True, applies Steffensen's method with Aitken's
    del-squared acceleration; otherwise performs plain fixed-point iteration.
    Raises RuntimeError when ``maxiter`` iterations pass without convergence.
    """
    current = x0
    for _ in range(maxiter):
        step1 = func(current, *args)
        if not use_accel:
            nxt = step1
        else:
            step2 = func(step1, *args)
            denom = step2 - 2.0 * step1 + current
            # Guard against zero denominators; fall back to step2 there.
            nxt = _lazywhere(denom != 0, (current, step1, denom),
                             f=_del2, fillvalue=step2)
        relerr = _lazywhere(current != 0, (nxt, current), f=_relerr,
                            fillvalue=nxt)
        if np.all(np.abs(relerr) < xtol):
            return nxt
        current = nxt
    raise RuntimeError("Failed to converge after %d iterations, value is %s"
                       % (maxiter, nxt))
def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'):
    """
    Find a fixed point of the function.

    Given a function of one or more variables and a starting point, find a
    fixed point of the function: i.e. where ``func(x0) == x0``.

    Parameters
    ----------
    func : function
        Function to evaluate.
    x0 : array_like
        Initial guess for the fixed point of the function.
    args : tuple, optional
        Extra arguments to `func`.
    xtol : float, optional
        Convergence tolerance, defaults to 1e-08.
    maxiter : int, optional
        Maximum number of iterations, defaults to 500.
    method : {"del2", "iteration"}, optional
        Method of finding the fixed-point, defaults to "del2",
        which uses Steffensen's Method with Aitken's ``Del^2``
        convergence acceleration [1]_. The "iteration" method simply
        iterates the function until convergence is detected, without
        attempting to accelerate the convergence.

    References
    ----------
    .. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80

    Examples
    --------
    >>> from scipy import optimize
    >>> def func(x, c1, c2):
    ...    return np.sqrt(c1/(x+c2))
    >>> c1 = np.array([10,12.])
    >>> c2 = np.array([3, 5.])
    >>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2))
    array([ 1.4920333 ,  1.37228132])

    """
    # Map the method name to the acceleration flag; an unknown method name
    # raises KeyError, exactly as before.
    accel_for = {'del2': True, 'iteration': False}
    use_accel = accel_for[method]
    x0 = _asarray_validated(x0, as_inexact=True)
    return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel)
| {
"content_hash": "5f3da0b6d112bfecfbd92fd899708483",
"timestamp": "",
"source": "github",
"line_count": 887,
"max_line_length": 91,
"avg_line_length": 37.346110484780155,
"alnum_prop": 0.586940771599348,
"repo_name": "boomsbloom/dtm-fmri",
"id": "411ac2f27a27e7b9370b7c37a08b8b5f87804930",
"size": "33126",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "DTM/for_gensim/lib/python2.7/site-packages/scipy/optimize/minpack.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "413670"
},
{
"name": "C++",
"bytes": "262666"
},
{
"name": "CSS",
"bytes": "5382"
},
{
"name": "Fortran",
"bytes": "14725"
},
{
"name": "HTML",
"bytes": "555708"
},
{
"name": "JavaScript",
"bytes": "23921"
},
{
"name": "Jupyter Notebook",
"bytes": "16254"
},
{
"name": "Makefile",
"bytes": "1302"
},
{
"name": "Matlab",
"bytes": "36260"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "46698963"
},
{
"name": "R",
"bytes": "199"
},
{
"name": "Shell",
"bytes": "11728"
},
{
"name": "TeX",
"bytes": "18567"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
import os
PACKAGE = 'nexus'

# Read the package version from the VERSION file shipped alongside the
# package source (oldowan/<package>/VERSION).  Fix: the original left both
# file handles open (open(...).read() with no close); use context managers
# so the files are always closed.
_version_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             'oldowan', PACKAGE, 'VERSION')
with open(_version_path) as version_file:
    VERSION = version_file.read().strip()

# First README line is the short description; everything after the
# separator line becomes the long description.
with open('README', 'r') as readme_file:
    desc_lines = readme_file.readlines()

setup(name='oldowan.%s' % PACKAGE,
      version=VERSION,
      description=desc_lines[0],
      long_description=''.join(desc_lines[2:]),
      classifiers=[
          "Development Status :: 5 - Production/Stable",
          "Intended Audience :: Science/Research",
          "License :: OSI Approved :: MIT License",
          "Operating System :: OS Independent",
          "Programming Language :: Python",
          "Topic :: Scientific/Engineering :: Bio-Informatics"],
      keywords='',
      platforms=['Any'],
      author='Ryan Raaum',
      author_email='code@raaum.org',
      url='http://www.raaum.org/software/oldowan',
      license='MIT',
      packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
      include_package_data=False,
      namespace_packages=['oldowan'],
      data_files=[("oldowan/%s" % PACKAGE, ["oldowan/%s/VERSION" % PACKAGE])],
      zip_safe=False,
      test_suite='nose.collector',
      )
| {
"content_hash": "65926a2cc90734d5f9e601d8635b4ae9",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 78,
"avg_line_length": 35.3235294117647,
"alnum_prop": 0.6061615320566195,
"repo_name": "ryanraaum/oldowan.nexus",
"id": "751107c7f880be54036f2da70699ae7385a40c5d",
"size": "1201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15730"
}
],
"symlink_target": ""
} |
from __future__ import division
import unittest
import numpy as np
from wyrm.processing import lfilter_zi
class TestLFilterZi(unittest.TestCase):

    # Number of filter coefficients used by every test.
    COEFFS = 10

    def setUp(self):
        # All-ones numerator and denominator coefficient vectors.
        self.b = np.ones(self.COEFFS)
        self.a = np.ones(self.COEFFS)

    def test_lfilter_1d(self):
        """Output has the correct shape for n=1."""
        initial = lfilter_zi(self.b, self.a)
        self.assertEqual(len(initial), self.COEFFS - 1)

    def test_lfilter_nd(self):
        """Output has the correct shape for n>1."""
        initial = lfilter_zi(self.b, self.a, 7)
        self.assertEqual(initial.shape, (self.COEFFS - 1, 7))
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "d8da91b671ffba70946d019f12acfdca",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 67,
"avg_line_length": 22.96551724137931,
"alnum_prop": 0.6156156156156156,
"repo_name": "venthur/wyrm",
"id": "a651ff379412090f599a92a6f58e1dc1359f958b",
"size": "666",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_lfilter_zi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "7753"
},
{
"name": "Python",
"bytes": "264955"
},
{
"name": "Shell",
"bytes": "6697"
}
],
"symlink_target": ""
} |
try:
    from setuptools import setup
except ImportError:
    # Fall back to distutils when setuptools is unavailable.
    from distutils.core import setup

# Package metadata, expanded into the setup() call below.
config = dict(
    description='Le Chef',
    author='Okkar Than',
    url='',
    download_url='',
    author_email='',
    version='0.1',
    install_requires=['nose'],
    packages=['lechef'],
    scripts=[],
    name='lechef',
)

setup(**config)
| {
"content_hash": "486e62868c0cd1cfe6c61d5d2de1a34f",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 36,
"avg_line_length": 19.526315789473685,
"alnum_prop": 0.555256064690027,
"repo_name": "r2k0/fe4retail",
"id": "0183a8c6a8adb44f182c24f081d92db86f03ebf4",
"size": "371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lechef/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29727"
}
],
"symlink_target": ""
} |
from collections import defaultdict
import itertools
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import cinder
from openstack_dashboard.api import network
from openstack_dashboard.api import neutron
from openstack_dashboard.api import nova
from openstack_dashboard.exceptions import neutronclient # noqa
LOG = logging.getLogger(__name__)
# Quota field names reported by the nova (compute) service.
NOVA_QUOTA_FIELDS = ("metadata_items",
                     "cores",
                     "instances",
                     "injected_files",
                     "injected_file_content_bytes",
                     "ram",
                     "floating_ips",
                     "fixed_ips",
                     "security_groups",
                     "security_group_rules",)

# Nova quota fields that exist in the API but are not tracked in
# QUOTA_FIELDS below — presumably not surfaced in the dashboard; verify.
MISSING_QUOTA_FIELDS = ("key_pairs",
                        "injected_file_path_bytes",)

# Quota field names reported by the cinder (block storage) service.
CINDER_QUOTA_FIELDS = ("volumes",
                       "snapshots",
                       "gigabytes",)

# Quota field names reported by the neutron (networking) service.
NEUTRON_QUOTA_FIELDS = ("network",
                        "subnet",
                        "port",
                        "router",
                        "floatingip",
                        "security_group",
                        "security_group_rule",
                        )

# All quota fields handled by this module (MISSING_QUOTA_FIELDS excluded).
QUOTA_FIELDS = NOVA_QUOTA_FIELDS + CINDER_QUOTA_FIELDS + NEUTRON_QUOTA_FIELDS
class QuotaUsage(dict):
    """Tracks quota limit, used, and available for a given set of quotas.

    Usage data lives in ``self.usages``, keyed by quota name; each entry is
    a dict with 'quota' (the limit), 'used' and 'available' keys.  Entries
    must be created through :meth:`add_quota` and :meth:`tally` so that the
    'available' figure stays consistent.
    """

    def __init__(self):
        # Per-quota usage records, auto-created on first access.
        self.usages = defaultdict(dict)

    def __contains__(self, key):
        return key in self.usages

    def __getitem__(self, key):
        return self.usages[key]

    def __setitem__(self, key, value):
        # Bug fix: previously ``raise NotImplemented(...)`` — NotImplemented
        # is a singleton, not callable, so this raised a confusing
        # ``TypeError: 'NotImplementedType' object is not callable`` instead
        # of the intended exception with the message below.
        raise NotImplementedError("Directly setting QuotaUsage values is not "
                                  "supported. Please use the add_quota and "
                                  "tally methods.")

    def __repr__(self):
        return repr(dict(self.usages))

    def get(self, key, default=None):
        return self.usages.get(key, default)

    def add_quota(self, quota):
        """Adds an internal tracking reference for the given quota.

        `quota` must expose ``name`` and ``limit`` attributes; a limit of
        None or -1 is treated as unlimited.
        """
        if quota.limit is None or quota.limit == -1:
            # Handle "unlimited" quotas.
            self.usages[quota.name]['quota'] = float("inf")
            self.usages[quota.name]['available'] = float("inf")
        else:
            self.usages[quota.name]['quota'] = int(quota.limit)

    def tally(self, name, value):
        """Adds to the "used" metric for the given quota."""
        value = value or 0  # Protection against None.
        # Start at 0 if this is the first value.
        if 'used' not in self.usages[name]:
            self.usages[name]['used'] = 0
        # Increment our usage and update the "available" metric.
        self.usages[name]['used'] += int(value)  # Fail if can't coerce to int.
        self.update_available(name)

    def update_available(self, name):
        """Updates the "available" metric for the given quota."""
        # Clamp at zero so over-quota usage never shows negative headroom.
        available = self.usages[name]['quota'] - self.usages[name]['used']
        if available < 0:
            available = 0
        self.usages[name]['available'] = available
def _get_quota_data(request, method_name, disabled_quotas=None,
                    tenant_id=None):
    """Call ``method_name`` on nova (and cinder, when volumes are enabled)
    and merge the results into one QuotaSet, skipping disabled fields.
    """
    if not tenant_id:
        tenant_id = request.user.tenant_id

    sources = [getattr(nova, method_name)(request, tenant_id)]

    qs = base.QuotaSet()
    if disabled_quotas is None:
        disabled_quotas = get_disabled_quotas(request)
    if 'volumes' not in disabled_quotas:
        sources.append(getattr(cinder, method_name)(request, tenant_id))

    for quota in itertools.chain(*sources):
        if quota.name not in disabled_quotas:
            qs[quota.name] = quota.limit
    return qs
def get_default_quota_data(request, disabled_quotas=None, tenant_id=None):
    """Return the cloud's default quota limits as a QuotaSet."""
    return _get_quota_data(request, "default_quota_get",
                           disabled_quotas=disabled_quotas,
                           tenant_id=tenant_id)
def get_tenant_quota_data(request, disabled_quotas=None, tenant_id=None):
    """Return the per-tenant quota limits as a QuotaSet."""
    qs = _get_quota_data(request,
                         "tenant_quota_get",
                         disabled_quotas=disabled_quotas,
                         tenant_id=tenant_id)

    # TODO(jpichon): There is no API to get the default system quotas
    # in Neutron (cf. LP#1204956), so for now handle tenant quotas here.
    # This should be handled in _get_quota_data() eventually.
    if not disabled_quotas or 'floating_ips' not in disabled_quotas:
        return qs

    if 'floatingip' in disabled_quotas:
        # Neutron with quota extension disabled
        qs.add(base.QuotaSet({'floating_ips': -1}))
    else:
        # Neutron with quota extension enabled
        tenant = tenant_id or request.user.tenant_id
        neutron_quotas = neutron.tenant_quota_get(request, tenant)
        # Rename floatingip to floating_ips since that's how it's
        # expected in some places (e.g. Security & Access' Floating IPs)
        qs.add(base.QuotaSet(
            {'floating_ips': neutron_quotas.get('floatingip').limit}))
    return qs
def get_disabled_quotas(request):
    """List quota field names that do not apply to this cloud's services."""
    disabled = []

    # Cinder quotas only apply when the volume service is present.
    if not base.is_service_enabled(request, 'volume'):
        disabled.extend(CINDER_QUOTA_FIELDS)

    # Neutron vs. nova-network handling.
    if not base.is_service_enabled(request, 'network'):
        disabled.extend(NEUTRON_QUOTA_FIELDS)
    else:
        # Remove the nova network quotas
        disabled.extend(['floating_ips', 'fixed_ips'])

        if neutron.is_security_group_extension_supported(request):
            # Neutron security groups supersede the nova-network ones.
            disabled.extend(['security_groups', 'security_group_rules'])
        else:
            # Nova security groups in use; drop the neutron equivalents.
            disabled.extend(['security_group', 'security_group_rule'])

        try:
            if not neutron.is_quotas_extension_supported(request):
                disabled.extend(NEUTRON_QUOTA_FIELDS)
        except Exception:
            LOG.exception("There was an error checking if the Neutron "
                          "quotas extension is enabled.")

    return disabled
@memoized
def tenant_quota_usages(request):
    """Build a QuotaUsage mapping of limits and current usage for the tenant.

    Gathers limits via get_tenant_quota_data(), then tallies usage from the
    nova, network and (when enabled) cinder services.  Memoized per request.
    """
    # Get our quotas and construct our usage object.
    disabled_quotas = get_disabled_quotas(request)
    usages = QuotaUsage()
    for quota in get_tenant_quota_data(request,
                                       disabled_quotas=disabled_quotas):
        usages.add_quota(quota)

    # Get our usages.
    try:
        floating_ips = network.tenant_floating_ip_list(request)
    except neutronclient.NeutronClientException:
        # Treat an unreachable network service as "no floating IPs".
        floating_ips = []
    flavors = dict([(f.id, f) for f in nova.flavor_list(request)])
    instances, has_more = nova.server_list(request)
    # Fetch deleted flavors if necessary.
    missing_flavors = [instance.flavor['id'] for instance in instances
                       if instance.flavor['id'] not in flavors]
    for missing in missing_flavors:
        if missing not in flavors:
            try:
                flavors[missing] = nova.flavor_get(request, missing)
            except Exception:
                # Flavor gone entirely; fall back to an empty record so the
                # getattr() calls below yield None.
                flavors[missing] = {}
                exceptions.handle(request, ignore=True)

    usages.tally('instances', len(instances))
    usages.tally('floating_ips', len(floating_ips))

    if 'volumes' not in disabled_quotas:
        volumes = cinder.volume_list(request)
        snapshots = cinder.volume_snapshot_list(request)
        usages.tally('gigabytes', sum([int(v.size) for v in volumes]))
        usages.tally('volumes', len(volumes))
        usages.tally('snapshots', len(snapshots))

    # Sum our usage based on the flavors of the instances.
    for flavor in [flavors[instance.flavor['id']] for instance in instances]:
        usages.tally('cores', getattr(flavor, 'vcpus', None))
        usages.tally('ram', getattr(flavor, 'ram', None))

    # Initialise the tally if no instances have been launched yet
    if len(instances) == 0:
        usages.tally('cores', 0)
        usages.tally('ram', 0)

    return usages
def tenant_limit_usages(request):
    """Return nova (and, when enabled, cinder) absolute limits for the tenant,
    with volume usage totals added in.
    """
    # TODO(licostan): This method shall be removed from Quota module.
    # ProjectUsage/BaseUsage maybe used instead on volume/image dashboards.
    limits = {}

    try:
        limits.update(nova.tenant_absolute_limits(request))
    except Exception:
        exceptions.handle(
            request, _("Unable to retrieve compute limit information."))

    if base.is_service_enabled(request, 'volume'):
        try:
            limits.update(cinder.tenant_absolute_limits(request))
            volumes = cinder.volume_list(request)
            limits['gigabytesUsed'] = sum(
                getattr(volume, 'size', 0) for volume in volumes)
            limits['volumesUsed'] = len(volumes)
        except Exception:
            exceptions.handle(
                request, _("Unable to retrieve volume limit information."))

    return limits
| {
"content_hash": "4fa1b23025724200b386794c4268ca4d",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 79,
"avg_line_length": 36.694117647058825,
"alnum_prop": 0.603184781447045,
"repo_name": "yanheven/console",
"id": "0210ff8d78249c95fc78c42a809dbb2754981cb0",
"size": "9903",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/usage/quotas.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "73539"
},
{
"name": "JavaScript",
"bytes": "695555"
},
{
"name": "Makefile",
"bytes": "6165"
},
{
"name": "Python",
"bytes": "3288795"
},
{
"name": "Shell",
"bytes": "15578"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
from .views import EmailAlternativeView
# URL routing for the mail_save app: serves a stored email's HTML
# alternative at email_alternative/<pk>/ via EmailAlternativeView.
urlpatterns = patterns(
    '',
    url(r'^email_alternative/(?P<pk>\d+)/$',
        EmailAlternativeView.as_view(),
        name='email_alternative'),
) | {
"content_hash": "a480d57b4d85728283ec101cdb723fed",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 44,
"avg_line_length": 23.8,
"alnum_prop": 0.6680672268907563,
"repo_name": "bigmassa/django_mail_save",
"id": "d3032fbe9867aeaeef7f49981b386162299cbed1",
"size": "238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mail_save/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "239"
},
{
"name": "Python",
"bytes": "10565"
}
],
"symlink_target": ""
} |
""" Custom decorators defined for Open Anharmonic.
All of these decorators were built using the class form per the exposition
`here <http://www.artima.com/weblogs/viewpost.jsp?thread=240845>`__
|extlink|.
Decorators
----------
.. autoclass:: arraysqueeze
.. autoclass:: kwargfetch
"""
# Imports
from functools import wraps as _wraps
# Decorators
class arraysqueeze(object):
    """ Decorator converting selected arguments to squeezed np.arrays.

    Applies ``np.asarray(...).squeeze()`` to chosen arguments of the
    decorated function before it runs.  Each |int| passed to the decorator
    selects a positional argument by index; each |str| selects a keyword
    argument by name.  A value that squeezes down to a 0-d array is
    re-promoted to a one-element array.

    Keyword names absent from a particular call, and positional indices
    beyond the call's actual argument count, are silently ignored.

    .. warning:: Likely fragile with optional arguments; needs to be tested.

    Arguments
    ---------
    \*args : |int| or |str|
        Arguments to convert to squeezed |nparray|.

    """

    def __init__(self, *args):
        """ Validate and store the argument selectors (ints / strings). """
        for arg in args:
            if not isinstance(arg, (int, str)):
                raise ValueError("Invalid decorator argument: {0}".format(arg))
        self.arglist = args

    def __call__(self, f):
        """ Return `f` wrapped so selected arguments are arraysqueezed. """
        @_wraps(f)
        def wrapped_f(*args, **kwargs):
            # Deferred import keeps numpy out of module-load time.
            import numpy as np

            def _squeeze(value):
                # Squeeze; re-promote a 0-d result to a length-1 array.
                arr = np.asarray(value).squeeze()
                return arr[np.newaxis] if not arr.shape else arr

            mod_args = list(args)
            for target in self.arglist:
                if isinstance(target, int) and target < len(mod_args):
                    mod_args[target] = _squeeze(mod_args[target])
                elif isinstance(target, str) and target in kwargs:
                    kwargs[target] = _squeeze(kwargs[target])
                # no 'else:' needed; selector types were checked in __init__

            return f(*mod_args, **kwargs)

        return wrapped_f

# end class arraysqueeze
class kwargfetch(object):
"""Fetch a missing keyword argument with a custom callable & arguments
This decorator implements a form of non-persistent memoization for
use in networks of inter-related and/or nested functions, where:
* External users may have reason to call any of the functions directly
* Most or all of the functions call one or more of the same specific
"supporting" functions that potentially represent significant
computational overhead
* Calls with identical function arguments are not likely to recur
in typical use by external users, and thus fully persistent memoization
would in general be a waste of memory
The memoization is implemented via injection of a specific keyword
argument into a call to the wrapped function, where the inserted value
is obtained from a call in turn to a specified callable using
arguments drawn from the wrapped call. If the target keyword argument
is already present in the wrapped call, no action is taken.
.. note::
The API description below is wholly non-intuitive and likely
impossible to follow. The examples provided in the
:doc:`User's Guide </userguide/usage/utils/decorate>` will probably
be much more illuminating.
Arguments
---------
args[0]
|str| --
Name of the keyword argument to be injected into the call to the
wrapped function
args[1]
|callable| --
Object to call to generate value to be injected into the target
keyword argument (`args[0]`)
args[2..n]
|int| or |str| --
Indicate which positional (|int|) and keyword (|str|) parameters
of the wrapped function call are to be passed to the
|callable| of `args[1]` as positional parameters, in the
order provided within `args[2..n]`
kwargs
|int| or |str| --
Indicate which positional (|int|) and keyword (|str|) parameters
of the wrapped function call are to be passed to the
|callable| of `args[1]` as keyword parameters, where the keys
indicated here in `kwargs` are those used in the call to
`args[1]`
.. Decorator built using the class form per the exposition
`here <http://www.artima.com/weblogs/viewpost.jsp?thread=240845>`__
|extlink|.
"""
@staticmethod
def ok_kwarg(val):
"""Helper method for screening keyword arguments"""
import keyword
try:
return str.isidentifier(val) and not keyword.iskeyword(val)
except TypeError:
# Non-string values are never a valid keyword arg
return False
@classmethod
def ok_argarg(cls, val):
"""Helper method for screening valid arguments of any type"""
return cls.ok_kwarg(val) or isinstance(val, int)
def __init__(self, *args, **kwargs):
    """Initialize with the keyword, callable, and relevant arguments"""
    # Don't want named arguments anywhere in this initializer, since
    # that would constrain the keywords allowable for calls to the
    # fetching callable. 'kw' and 'c' probably aren't all that common,
    # but better to make it more robust/flexible, especially since
    # the fix is pretty simple.
    # Convert args to a list (pop() is needed below)
    args = list(args)
    # Retrieve the keyword id and the callable; absence of either is a
    # usage error, reported as TypeError to mimic a bad call signature.
    try:
        kw = args.pop(0)
    except IndexError as e:
        raise TypeError("'Target keyword' argument absent") from e
    try:
        c = args.pop(0)
    except IndexError as e:
        raise TypeError("'Callable' argument absent") from e
    # Proof and store all of the things.
    # The injected keyword must itself be a legal keyword-argument name.
    if not self.ok_kwarg(kw):
        raise ValueError("'kw' argument must be a valid non-keyword "
                         "identifier")
    self.kw = kw
    if not callable(c):
        raise TypeError("'c' argument must be callable")
    self.c = c
    # Remaining positional args / kwarg values are specifiers into the
    # wrapped call: int = positional index, str = keyword name.
    if not all(map(self.ok_argarg, args)):
        raise ValueError("All 'args' must be valid non-keyword "
                         "identifier strings or integers")
    if not all(map(self.ok_argarg, kwargs.values())):
        raise ValueError("All 'kwargs' values must be valid non-keyword "
                         "identifier strings or integers")
    self.arglist = args
    self.kwarglist = kwargs
def __call__(self, f):
    """Call the wrapped function after any needed fetch.

    Decorator application: wraps `f` so that, when `self.kw` is not
    already supplied by the caller, `self.c` is invoked (with arguments
    drawn from the wrapped call per arglist/kwarglist) and its result is
    injected into the call as keyword `self.kw`.
    """
    from inspect import signature as sig
    @_wraps(f)
    def wrapped_f(*args, **kwargs):
        # Check for if the target kwarg is missing
        if self.kw not in kwargs:
            # Missing. Must fetch.
            # Retrieve and materialize the enumerated arguments list.
            # Depends on the parameters being contained in an OrderedDict
            # so that signature order is retained.
            params = list(sig(f).parameters)
            # Initialize as empty the arguments list to pass to the
            # callable
            fetch_args = []
            # Populate fetch_args sequentially in the order specified by
            # the particular decorator constructor
            for a in self.arglist:
                # Simple type checks on the argument specifiers
                # should suffice since they were checked at
                # construction.
                if isinstance(a, str):
                    # Keyword argument; handle possible absence with get()
                    fetch_args.append(kwargs.get(a))
                else:
                    # Integer argument for (optional-)positional args.
                    # Could be present as positional or as keyword,
                    # or could be absent.
                    if len(args) > a:
                        # Sufficient positional arguments; assume
                        # present and passed as optional-positional
                        fetch_args.append(args[a])
                    else:
                        # The **kwargs is not valid for this, so exclude.
                        # NOTE(review): params[:-1] assumes the wrapped
                        # function's FINAL parameter is a **kwargs
                        # catch-all; for a function without one this
                        # silently drops the real last parameter --
                        # confirm against intended usage.
                        pname = params[:-1][a]
                        if pname in kwargs:
                            # Present in the passed-in kwargs
                            fetch_args.append(kwargs[pname])
                        else:
                            # Not found; pass the function default
                            fetch_args.append(sig(f).parameters[pname]
                                              .default)
            # Populate fetch_kwargs according to what was specified
            # at decorator construction
            fetch_kwargs = {}
            for item in self.kwarglist.items():
                # Same as above -- simple type checks should suffice
                if isinstance(item[1], str):
                    # Keyword argument; handle possible absence with get()
                    fetch_kwargs.update({item[0]: kwargs.get(item[1])})
                else:
                    # Optional-positional
                    if len(args) > item[1]:
                        # Sufficient positional args
                        fetch_kwargs.update({item[0]: args[item[1]]})
                    else:
                        # The **kwargs is not valid for this, so exclude
                        # (same params[:-1] assumption as above)
                        pname = params[:-1][item[1]]
                        if pname in kwargs:
                            # Insufficient positional; add from kwargs
                            # if present
                            fetch_kwargs.update({item[0]: kwargs[pname]})
                        else:
                            # Not found; store the function default
                            fetch_kwargs.update({item[0]:
                                                 sig(f)
                                                 .parameters[pname].default})
            # Call the callable and store the result into the target
            # keyword
            c_result = self.c(*fetch_args, **fetch_kwargs)
            kwargs.update({self.kw: c_result})
        # Whether the target kwarg was present or generated/injected,
        # call the wrapped function
        return f(*args, **kwargs)
    # end def wrapped_f
    # Return the wrapped function
    return wrapped_f
# end def __call__
# end class kwarg_fetch
if __name__ == '__main__':
    # Library-only module; direct execution just reports that fact.
    print("Module not executable.")
| {
"content_hash": "174c03edded0c5b9ea91065e8df76b61",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 80,
"avg_line_length": 37.16459627329193,
"alnum_prop": 0.563549761845074,
"repo_name": "bskinn/opan",
"id": "ba4f93921da3df1c710b458aad493292f4cbc1b0",
"size": "12754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opan/utils/decorate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "562309"
},
{
"name": "Shell",
"bytes": "2464"
}
],
"symlink_target": ""
} |
from seahub.profile.models import Profile
from seahub.test_utils import BaseTestCase
class ProfileManagerTest(BaseTestCase):
    """Exercises Profile.objects.get_contact_email_by_user()."""

    def setUp(self):
        pass

    def test_get_contact_email_by_user(self):
        """The lookup falls back to the username until an explicit
        contact email has been saved on the profile."""
        username = self.user.username
        lookup = Profile.objects.get_contact_email_by_user

        # Case 1: no profile row exists for the user yet.
        assert lookup(username) == username

        # Case 2: profile exists, but carries no contact email.
        profile = Profile.objects.add_or_update(username, 'nickname')
        assert lookup(username) == username

        # Case 3: profile now has an explicit contact email.
        profile.contact_email = 'contact@foo.com'
        profile.save()
        assert lookup(username) == 'contact@foo.com'
| {
"content_hash": "28c65a73d167d74bd35e6776343c64ec",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 87,
"avg_line_length": 38.904761904761905,
"alnum_prop": 0.6915544675642595,
"repo_name": "saukrIppl/seahub",
"id": "bac58301f2e7695bf3e54e1c0a51cbd3847f5e87",
"size": "817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/seahub/profile/models/test_profile_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "329387"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "HTML",
"bytes": "722728"
},
{
"name": "Java",
"bytes": "307193"
},
{
"name": "JavaScript",
"bytes": "7293422"
},
{
"name": "Makefile",
"bytes": "1097"
},
{
"name": "PLpgSQL",
"bytes": "19598"
},
{
"name": "Python",
"bytes": "9050702"
},
{
"name": "Shell",
"bytes": "9695"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import os
from PySide import QtGui, QtCore
from PySide.QtCore import Qt
from mcedit2.rendering.blockmodels import BlockModels
from mcedit2 import editortools
from mcedit2.command import SimpleRevisionCommand
from mcedit2.editorcommands.fill import fillCommand
from mcedit2.editorcommands.find_replace import FindReplaceDialog
from mcedit2.editorcommands.analyze import AnalyzeOutputDialog
from mcedit2.editortools.select import SelectCommand
from mcedit2.panels.player import PlayerPanel
from mcedit2.panels.map import MapPanel
from mcedit2.panels.worldinfo import WorldInfoPanel
from mcedit2.util import minecraftinstall
from mcedit2.util.dialogs import NotImplementedYet
from mcedit2.util.directories import getUserSchematicsDirectory
from mcedit2.util.mimeformats import MimeFormats
from mceditlib.util import exhaust
from mceditlib.util.lazyprop import weakrefprop
from mcedit2.util.raycast import rayCastInBounds
from mcedit2.util.showprogress import showProgress
from mcedit2.util.undostack import MCEUndoStack
from mcedit2.widgets.inspector import InspectorWidget
from mcedit2.worldview.viewaction import UseToolMouseAction, TrackingMouseAction
from mcedit2.rendering import chunkloader
from mcedit2.rendering.scenegraph import scenenode
from mcedit2.rendering.geometrycache import GeometryCache
from mcedit2.rendering.textureatlas import TextureAtlas
from mcedit2.widgets.layout import Column, Row
from mcedit2.util.settings import Settings
from mcedit2.worldview.camera import CameraWorldViewFrame
from mcedit2.worldview.cutaway import CutawayWorldViewFrame
from mcedit2.worldview.minimap import MinimapWorldView
from mcedit2.worldview.overhead import OverheadWorldViewFrame
from mceditlib import util, nbt, faces
from mceditlib.anvil.biome_types import BiomeTypes
from mceditlib.geometry import Vector
from mceditlib.operations import ComposeOperations
from mceditlib.operations.entity import RemoveEntitiesOperation
from mceditlib.selection import BoundingBox
from mceditlib.exceptions import PlayerNotFound, ChunkNotPresent
from mceditlib.revisionhistory import UndoFolderExists, RevisionChanges
from mceditlib.worldeditor import WorldEditor
from mceditlib.blocktypes import BlockType
# Module-level logger.
log = logging.getLogger(__name__)
# Persistent settings namespace for editor-session state.
sessionSettings = Settings().getNamespace("editorsession")
# Last-active viewport id (e.g. "cam"); stored as unicode (Python 2 code base).
currentViewSetting = sessionSettings.getOption("currentview", unicode, "cam")
# An EditorSession is a world currently opened for editing, the state of the editor including the
# current selection box, the editor tab containing its viewports, its command history, its shared OpenGL context,
# a separate instance of each editor tool (why?), and the ChunkLoader that coordinates loading
# chunks into its viewports.
class PendingImport(object):
    """A schematic waiting to be placed into the world.

    Holds the schematic itself, the anchor position, and the text shown
    to the user while the import is pending.
    """

    def __init__(self, schematic, pos, text):
        self.schematic = schematic
        self.pos = pos
        self.text = text

    def __repr__(self):
        return "%s(%r, %r)" % (type(self).__name__, self.schematic, self.pos)

    @property
    def bounds(self):
        # Box anchored at `pos`, sized to the schematic's dimension.
        size = self.schematic.getDimension().bounds.size
        return BoundingBox(self.pos, size)
class PasteImportCommand(QtGui.QUndoCommand):
    """Undoable command wrapping a PendingImport.

    Redo hands the pending import to the move tool and activates it;
    undo withdraws the import again.
    """

    def __init__(self, editorSession, pendingImport, text, *args, **kwargs):
        super(PasteImportCommand, self).__init__(*args, **kwargs)
        self.setText(text)
        self.editorSession = editorSession
        self.pendingImport = pendingImport

    def undo(self):
        # Withdraw the import from the move tool.
        moveTool = self.editorSession.moveTool
        moveTool.removePendingImport(self.pendingImport)

    def redo(self):
        # Register the import, then switch the user into the Move tool
        # so the pasted object can be positioned.
        moveTool = self.editorSession.moveTool
        moveTool.addPendingImport(self.pendingImport)
        self.editorSession.chooseTool("Move")
class EditorSession(QtCore.QObject):
def __init__(self, filename, configuredBlocks, readonly=False,
             progressCallback=None):
    """Open `filename` for editing and build the session's state.

    :param filename: path of the world/schematic to open
    :type filename: str
    :param configuredBlocks: user-configured block definitions, handed to
        setConfiguredBlocks() once the resource loader exists
    :param readonly: open the world without write access
    :type readonly: bool
    :param progressCallback: optional callable(count, max, status) invoked
        as each startup phase completes
    :type progressCallback: callable
    """
    from mcedit2 import __version__ as v
    progressMax = 8  # fixme
    # Wrap the optional callback so the rest of __init__ can report
    # progress unconditionally.
    if progressCallback is None:
        def progress(status):
            pass
    else:
        def progress(status):
            progressCallback(progress.progressCount, progressMax, status)
            progress.progressCount += 1
        progress.progressCount = 0
    QtCore.QObject.__init__(self)
    self.undoStack = MCEUndoStack()
    self.resourceLoader = minecraftinstall.getResourceLoaderForFilename(filename)
    self.loader = None          # ChunkLoader, created per-dimension in gotoDimension()
    self.blockModels = None     # built by reloadModels()
    self.textureAtlas = None    # built by reloadModels()
    self.editorTab = None       # created near the end of __init__
    self.filename = filename
    self.dockWidgets = []
    self.undoBlock = None
    self.currentTool = None
    self.dirty = False
    self.configuredBlocks = None
    self.copiedSchematic = None  # xxx should be app global!!
    """:type : WorldEditor"""
    # NOTE(review): the bare-string type annotation above appears to
    # describe self.worldEditor (assigned below), not copiedSchematic.
    # --- Open world editor ---
    try:
        progress("Creating WorldEditor...")
        self.worldEditor = WorldEditor(filename, readonly=readonly)
    except UndoFolderExists:
        # A leftover undo folder means MCEdit did not shut down cleanly.
        msgBox = QtGui.QMessageBox()
        msgBox.setIcon(QtGui.QMessageBox.Warning)
        msgBox.setWindowTitle(self.tr("MCEdit %(version)s") % {"version": v})
        msgBox.setText(self.tr("This world was not properly closed by MCEdit."))
        msgBox.setInformativeText(self.tr(
            "MCEdit may have crashed. An undo history was found for this world. You may try "
            "to resume editing with the saved undo history, or start over with the current "
            "state of the world."))
        resumeBtn = msgBox.addButton("Resume Editing", QtGui.QMessageBox.ApplyRole)
        msgBox.addButton("Discard History", QtGui.QMessageBox.DestructiveRole)
        # msgBox.exec_()
        # clicked = msgBox.clickedButton()
        # xxxxx resume editing not implemented in session - need to restore undo history!
        # The dialog is deliberately not shown; resume is always False.
        clicked = None
        resume = clicked is resumeBtn
        try:
            self.worldEditor = WorldEditor(filename, readonly=readonly, resume=resume)
        except NotImplementedError:
            NotImplementedYet()
            raise IOError("Uh-oh")
    self.worldEditor.requireRevisions()
    self.currentDimension = None
    progress("Creating menus...")
    # --- Menus ---
    self.menus = []
    # - Edit -
    self.menuEdit = QtGui.QMenu(self.tr("Edit"))
    self.menuEdit.setObjectName("menuEdit")
    # Selection-dependent actions start disabled; enableSelectionCommands()
    # toggles them when the selection changes.
    self.actionCut = QtGui.QAction(self.tr("Cut"), self, triggered=self.cut, enabled=False)
    self.actionCut.setShortcut(QtGui.QKeySequence.Cut)
    self.actionCut.setObjectName("actionCut")
    self.actionCopy = QtGui.QAction(self.tr("Copy"), self, triggered=self.copy, enabled=False)
    self.actionCopy.setShortcut(QtGui.QKeySequence.Copy)
    self.actionCopy.setObjectName("actionCopy")
    self.actionPaste = QtGui.QAction(self.tr("Paste"), self, triggered=self.paste,
                                     enabled=False)
    self.actionPaste.setShortcut(QtGui.QKeySequence.Paste)
    self.actionPaste.setObjectName("actionPaste")
    self.actionPaste_Blocks = QtGui.QAction(self.tr("Paste Blocks"), self,
                                            triggered=self.pasteBlocks, enabled=False)
    self.actionPaste_Blocks.setShortcut(QtGui.QKeySequence("Ctrl+Shift+V"))
    self.actionPaste_Blocks.setObjectName("actionPaste_Blocks")
    self.actionPaste_Entities = QtGui.QAction(self.tr("Paste Entities"), self,
                                              triggered=self.pasteEntities, enabled=False)
    self.actionPaste_Entities.setShortcut(QtGui.QKeySequence("Ctrl+Alt+V"))
    self.actionPaste_Entities.setObjectName("actionPaste_Entities")
    self.actionClear = QtGui.QAction(self.tr("Delete"), self, triggered=self.deleteSelection,
                                     enabled=False)
    self.actionClear.setShortcut(QtGui.QKeySequence.Delete)
    self.actionClear.setObjectName("actionClear")
    self.actionDeleteBlocks = QtGui.QAction(self.tr("Delete Blocks"), self,
                                            triggered=self.deleteBlocks, enabled=False)
    self.actionDeleteBlocks.setShortcut(QtGui.QKeySequence("Shift+Del"))
    self.actionDeleteBlocks.setObjectName("actionDeleteBlocks")
    self.actionDeleteEntities = QtGui.QAction(self.tr("Delete Entities"), self,
                                              triggered=self.deleteEntities, enabled=False)
    self.actionDeleteEntities.setShortcut(QtGui.QKeySequence("Shift+Alt+Del"))
    self.actionDeleteEntities.setObjectName("actionDeleteEntities")
    self.actionFill = QtGui.QAction(self.tr("Fill"), self, triggered=self.fill, enabled=False)
    self.actionFill.setShortcut(QtGui.QKeySequence("Shift+Ctrl+F"))
    self.actionFill.setObjectName("actionFill")
    self.actionFindReplace = QtGui.QAction(self.tr("Find/Replace"), self,
                                           triggered=self.findReplace, enabled=True)
    self.actionFindReplace.setShortcut(QtGui.QKeySequence.Find)
    self.actionFindReplace.setObjectName("actionFindReplace")
    self.actionAnalyze = QtGui.QAction(self.tr("Analyze"), self, triggered=self.analyze,
                                       enabled=True)
    # self.actionAnalyze.setShortcut(QtGui.QKeySequence.Analyze)
    self.actionAnalyze.setObjectName("actionAnalyze")
    undoAction = self.undoStack.createUndoAction(self.menuEdit)
    undoAction.setShortcut(QtGui.QKeySequence.Undo)
    redoAction = self.undoStack.createRedoAction(self.menuEdit)
    redoAction.setShortcut(QtGui.QKeySequence.Redo)
    self.menuEdit.addAction(undoAction)
    self.menuEdit.addAction(redoAction)
    self.menuEdit.addSeparator()
    self.menuEdit.addAction(self.actionCut)
    self.menuEdit.addAction(self.actionCopy)
    self.menuEdit.addAction(self.actionPaste)
    self.menuEdit.addAction(self.actionPaste_Blocks)
    self.menuEdit.addAction(self.actionPaste_Entities)
    self.menuEdit.addSeparator()
    self.menuEdit.addAction(self.actionClear)
    self.menuEdit.addAction(self.actionDeleteBlocks)
    self.menuEdit.addAction(self.actionDeleteEntities)
    self.menuEdit.addSeparator()
    self.menuEdit.addAction(self.actionFill)
    self.menuEdit.addSeparator()
    self.menuEdit.addAction(self.actionFindReplace)
    self.menuEdit.addAction(self.actionAnalyze)
    self.menus.append(self.menuEdit)
    # - Select -
    self.menuSelect = QtGui.QMenu(self.tr("Select"))
    self.actionSelectAll = QtGui.QAction(self.tr("Select All"), self, triggered=self.selectAll)
    self.actionSelectAll.setShortcut(QtGui.QKeySequence.SelectAll)
    self.menuSelect.addAction(self.actionSelectAll)
    self.actionDeselect = QtGui.QAction(self.tr("Deselect"), self, triggered=self.deselect)
    self.actionDeselect.setShortcut(QtGui.QKeySequence("Ctrl+D"))
    self.menuSelect.addAction(self.actionDeselect)
    self.menus.append(self.menuSelect)
    # - Import/Export -
    self.menuImportExport = QtGui.QMenu(self.tr("Import/Export"))
    self.actionExport = QtGui.QAction(self.tr("Export"), self, triggered=self.export)
    self.actionExport.setShortcut(QtGui.QKeySequence("Ctrl+Shift+E"))
    self.menuImportExport.addAction(self.actionExport)
    self.actionImport = QtGui.QAction(self.tr("Import"), self, triggered=self.import_)
    self.actionImport.setShortcut(QtGui.QKeySequence("Ctrl+Shift+D"))
    self.menuImportExport.addAction(self.actionImport)
    # NOTE(review): this rebinds self.actionImport, discarding the
    # "Import" action reference created just above -- looks accidental;
    # confirm nothing else reads self.actionImport expecting "Import".
    self.actionImport = QtGui.QAction(self.tr("Show Exports Library"), self,
                                      triggered=QtGui.qApp.libraryDockWidget.toggleViewAction().trigger)
    self.actionImport.setShortcut(QtGui.QKeySequence("Ctrl+Shift+L"))
    self.menuImportExport.addAction(self.actionImport)
    self.menus.append(self.menuImportExport)
    # - Chunk -
    self.menuChunk = QtGui.QMenu(self.tr("Chunk"))
    self.actionDeleteChunks = QtGui.QAction(self.tr("Delete Chunks"), self, triggered=self.deleteChunks)
    self.actionCreateChunks = QtGui.QAction(self.tr("Create Chunks"), self, triggered=self.createChunks)
    self.actionRepopChunks = QtGui.QAction(self.tr("Mark Chunks For Repopulation"),
                                           self, triggered=self.repopChunks)
    self.menuChunk.addAction(self.actionDeleteChunks)
    self.menuChunk.addAction(self.actionCreateChunks)
    self.menuChunk.addAction(self.actionRepopChunks)
    self.menus.append(self.menuChunk)
    # --- Resources ---
    self.geometryCache = GeometryCache()
    progress("Loading textures and models...")
    self.setConfiguredBlocks(configuredBlocks)  # Must be called after resourceLoader is in place
    self.editorOverlay = scenenode.Node()
    self.biomeTypes = BiomeTypes()
    # --- Panels ---
    progress("Loading panels...")
    self.playerPanel = PlayerPanel(self)
    self.mapPanel = MapPanel(self)
    self.worldInfoPanel = WorldInfoPanel(self)
    self.panels = [self.playerPanel, self.worldInfoPanel, self.mapPanel]
    self.panelActions = []
    # --- Tools ---
    progress("Loading tools...")
    self.toolClasses = list(editortools.ToolClasses())
    self.toolActionGroup = QtGui.QActionGroup(self)
    self.tools = [cls(self) for cls in self.toolClasses]
    self.toolActions = [tool.pickToolAction() for tool in self.tools]
    self.actionsByName = {action.toolName: action for action in self.toolActions}
    for tool in self.tools:
        tool.toolPicked.connect(self.chooseTool)
    for action in self.toolActions:
        self.toolActionGroup.addAction(action)
    self.selectionTool = self.getTool("Select")
    self.moveTool = self.getTool("Move")
    # --- Dimensions ---
    # Factory closure so each menu entry captures its own dimension name.
    def _dimChanged(f):
        def _changed():
            self.gotoDimension(f)
        return _changed
    dimButton = self.changeDimensionButton = QtGui.QToolButton()
    dimButton.setText(self.dimensionMenuLabel(""))
    dimAction = self.changeDimensionAction = QtGui.QWidgetAction(self)
    dimAction.setDefaultWidget(dimButton)
    dimMenu = self.dimensionsMenu = QtGui.QMenu()
    for dimName in self.worldEditor.listDimensions():
        displayName = self.dimensionDisplayName(dimName)
        action = dimMenu.addAction(displayName)
        # Keep a reference on the action so the closure isn't collected.
        action._changed = _dimChanged(dimName)
        action.triggered.connect(action._changed)
    dimButton.setMenu(dimMenu)
    dimButton.setPopupMode(QtGui.QToolButton.InstantPopup)
    self.panelActions.append(dimAction)
    mcVersionButton = self.changeMCVersionButton = QtGui.QToolButton()
    mcVersionButton.setText(self.minecraftVersionLabel())
    mcVersionAction = self.changeMCVersionAction = QtGui.QWidgetAction(self)
    mcVersionAction.setDefaultWidget(mcVersionButton)
    self.mcVersionMenu = QtGui.QMenu()
    mcVersionButton.setMenu(self.mcVersionMenu)
    mcVersionButton.setPopupMode(QtGui.QToolButton.InstantPopup)
    self.panelActions.append(mcVersionAction)
    resourcePackButton = self.changeResourcePackButton = QtGui.QToolButton()
    resourcePackButton.setText(self.resourcePackLabel())
    resourcePackAction = self.changeResourcePackAction = QtGui.QWidgetAction(self)
    resourcePackAction.setDefaultWidget(resourcePackButton)
    self.resourcePackMenu = QtGui.QMenu()
    resourcePackButton.setMenu(self.resourcePackMenu)
    resourcePackButton.setPopupMode(QtGui.QToolButton.InstantPopup)
    self.panelActions.append(resourcePackAction)
    self._updateVersionsAndResourcePacks()
    progress("Loading overworld dimension")
    self.gotoDimension("")
    # --- Editor stuff ---
    progress("Creating EditorTab...")
    self.editorTab = EditorTab(self)
    self.toolChanged.connect(self.toolDidChange)
    self.editorTab.urlsDropped.connect(self.urlsWereDropped)
    self.editorTab.mapItemDropped.connect(self.mapItemWasDropped)
    self.undoStack.indexChanged.connect(self.undoIndexChanged)
    self.findReplaceDialog = FindReplaceDialog(self)
    for resultsWidget in self.findReplaceDialog.resultsWidgets:
        self.dockWidgets.append((Qt.BottomDockWidgetArea, resultsWidget))
    self.inspectorWidget = InspectorWidget(self)
    self.inspectorDockWidget = QtGui.QDockWidget(self.tr("Inspector"), objectName="inspector")
    self.inspectorDockWidget.setWidget(self.inspectorWidget)
    self.inspectorDockWidget.hide()
    self.dockWidgets.append((Qt.RightDockWidgetArea, self.inspectorDockWidget))
    if len(self.toolActions):
        # Must be called after toolChanged is connected to editorTab
        self.toolActions[0].trigger()
    # Sanity check: warn when the hard-coded progressMax drifts from the
    # actual number of progress() calls above.
    if hasattr(progress, 'progressCount') and progress.progressCount != progressMax:
        log.info("Update progressMax to %d, please.", progress.progressCount)
def minecraftVersionLabel(self):
    """Button label reflecting the currently selected Minecraft version."""
    current = minecraftinstall.currentVersionOption.value()
    return "Minecraft Version: %s" % current
def resourcePackLabel(self):
    """Button label reflecting the currently selected resource pack."""
    current = minecraftinstall.currentResourcePackOption.value()
    return "Resource Pack: %s" % current
def _updateVersionsAndResourcePacks(self):
    """Rebuild the MC-version and resource-pack menus from the current
    Minecraft install.

    Each menu entry is routed through a QSignalMapper so a single slot
    (changeMCVersion / changeResourcePack) receives the chosen string.
    """
    self.mcVersionMapper = QtCore.QSignalMapper()
    self.mcVersionMapper.mapped[str].connect(self.changeMCVersion)
    self.resourcePackMapper = QtCore.QSignalMapper()
    self.resourcePackMapper.mapped[str].connect(self.changeResourcePack)
    self.mcVersionMenu.clear()
    self.resourcePackMenu.clear()
    defaultAction = self.resourcePackMenu.addAction(self.tr("(No resource pack)"))
    self.resourcePackMapper.setMapping(defaultAction, "")
    # BUG FIX: the default entry was mapped but its triggered signal was
    # never connected to the mapper, so choosing "(No resource pack)"
    # previously did nothing.
    defaultAction.triggered.connect(self.resourcePackMapper.map)
    install = minecraftinstall.GetInstalls().getCurrentInstall()
    # Newest versions first.
    for version in sorted(install.versions, reverse=True):
        versionAction = self.mcVersionMenu.addAction(version)
        self.mcVersionMapper.setMapping(versionAction, version)
        versionAction.triggered.connect(self.mcVersionMapper.map)
    for resourcePack in sorted(install.resourcePacks):
        resourcePackAction = self.resourcePackMenu.addAction(resourcePack)
        self.resourcePackMapper.setMapping(resourcePackAction, resourcePack)
        resourcePackAction.triggered.connect(self.resourcePackMapper.map)
def changeResourcePack(self, packName):
    # Persist the choice (empty string = no pack), rebuild the resource
    # loader for this world, refresh the button label, and reload models.
    minecraftinstall.currentResourcePackOption.setValue(packName or "")
    self.resourceLoader = minecraftinstall.getResourceLoaderForFilename(self.filename)
    self.changeResourcePackButton.setText(self.resourcePackLabel())
    self.reloadModels()
def changeMCVersion(self, version):
    # Persist the chosen Minecraft version, rebuild the resource loader,
    # refresh the button label, and reload models/textures.
    minecraftinstall.currentVersionOption.setValue(version)
    self.resourceLoader = minecraftinstall.getResourceLoaderForFilename(self.filename)
    self.changeMCVersionButton.setText(self.minecraftVersionLabel())
    self.reloadModels()
# Connecting these signals to the EditorTab creates a circular reference through
# the Qt objects, preventing the EditorSession from being destroyed
def focusWorldView(self):
    # Give keyboard focus to the active world viewport.
    self.editorTab.currentView().setFocus()
def updateView(self):
    # Request a repaint of the active world viewport.
    self.editorTab.currentView().update()
def toolDidChange(self, tool):
    # Forward tool-change notifications to the editor tab (kept as a
    # method to avoid a Qt circular reference; see comment above).
    self.editorTab.toolDidChange(tool)
# --- Block config ---
# Emitted when configuredBlocks is changed. TextureAtlas and BlockModels will also have changed.
configuredBlocksChanged = QtCore.Signal()
def setConfiguredBlocks(self, configuredBlocks):
    """Replace the set of user-configured blocks in the world's
    blocktype registry, then rebuild models and emit
    configuredBlocksChanged.

    Previously configured entries (tagged '__configured__') are removed
    before the new definitions are registered.
    """
    blocktypes = self.worldEditor.blocktypes
    if self.configuredBlocks is not None:
        # Remove all previously configured blocks
        deadJsons = []
        # NOTE(review): blockJsons is iterated and .remove()d like a list
        # here but indexed like a dict below -- presumably a custom
        # container; confirm before restructuring.
        for json in blocktypes.blockJsons:
            if '__configured__' in json:
                deadJsons.append(json)
        deadIDs = set((j['internalName'], j['meta']) for j in deadJsons)
        blocktypes.allBlocks[:] = [
            bt for bt in blocktypes.allBlocks
            if (bt.internalName, bt.meta) not in deadIDs
        ]
        for json in deadJsons:
            internalName = json['internalName']
            fakeState = json['blockState']
            blocktypes.blockJsons.remove(json)
            ID = blocktypes.IDsByName[internalName]
            del blocktypes.IDsByState[internalName + fakeState]
            del blocktypes.statesByID[ID, json['meta']]
    for blockDef in configuredBlocks:
        internalName = blockDef.internalName
        if internalName not in blocktypes.IDsByName:
            # no ID mapped to this name, skip
            continue
        if blockDef.meta == 0:
            # meta 0 entries already exist in the registry.
            blockType = blocktypes[internalName]
            blockJson = blockType.json
        else:
            # not automatically created by FML mapping loader --
            # synthesize a registry entry with a fake "[meta]" state.
            ID = blocktypes.IDsByName[internalName]
            fakeState = '[%d]' % blockDef.meta
            nameAndState = internalName + fakeState
            blocktypes.blockJsons[nameAndState] = {
                'displayName': internalName,
                'internalName': internalName,
                'blockState': fakeState,
                'unknown': False,
                'meta': blockDef.meta,
            }
            blockType = BlockType(ID, blockDef.meta, blocktypes)
            blocktypes.allBlocks.append(blockType)
            blocktypes.IDsByState[nameAndState] = ID, blockDef.meta
            blocktypes.statesByID[ID, blockDef.meta] = nameAndState
            blockJson = blockType.json
        # Attach the user's model/texture overrides; the tag marks the
        # entry for removal on the next call.
        blockJson['forcedModel'] = blockDef.modelPath
        blockJson['forcedModelTextures'] = blockDef.modelTextures
        blockJson['forcedModelRotation'] = blockDef.modelRotations
        blockJson['forcedRotationFlags'] = blockDef.rotationFlags
        blockJson['__configured__'] = True
    self.configuredBlocks = configuredBlocks
    self.reloadModels()
    self.configuredBlocksChanged.emit()
def reloadModels(self):
    # Rebuild block models and the texture atlas from the current
    # resource loader, then push the new atlas into every open viewport.
    self.blockModels = BlockModels(self.worldEditor.blocktypes, self.resourceLoader)
    self.textureAtlas = TextureAtlas(self.worldEditor, self.resourceLoader, self.blockModels)
    # May be called before editorTab is created
    if self.editorTab:
        for view in self.editorTab.views:
            view.setTextureAtlas(self.textureAtlas)
# --- Selection ---
selectionChanged = QtCore.Signal(BoundingBox)
_currentSelection = None
@property
def currentSelection(self):
    # The active selection box, or None when nothing is selected.
    return self._currentSelection

@currentSelection.setter
def currentSelection(self, box):
    self._currentSelection = box
    # Block-editing commands require a non-empty box; chunk commands
    # only require that some box exists.
    self.enableSelectionCommands(box is not None and box.volume != 0)
    self.enableChunkSelectionCommands(box is not None)
    self.selectionChanged.emit(box)
def enableSelectionCommands(self, enable):
    """Enable or disable every selection-dependent menu action."""
    for action in (self.actionCut, self.actionCopy, self.actionPaste,
                   self.actionPaste_Blocks, self.actionPaste_Entities,
                   self.actionClear, self.actionDeleteBlocks,
                   self.actionDeleteEntities, self.actionFill,
                   self.actionExport):
        action.setEnabled(enable)
def enableChunkSelectionCommands(self, enable):
    """Enable or disable the chunk-level menu actions."""
    for action in (self.actionDeleteChunks, self.actionCreateChunks,
                   self.actionRepopChunks):
        action.setEnabled(enable)
# --- Menu commands ---
# - World -
def save(self):
    # Release any pending undo block so all edits are committed, save
    # with a progress dialog, and mark the session clean.
    self.undoStack.clearUndoBlock()
    saveTask = self.worldEditor.saveChangesIter()
    showProgress("Saving...", saveTask)
    self.dirty = False
# - Edit -
def cut(self):
    """Cut the selection: capture it into the paste buffer, then fill
    it with air, as a single undoable revision."""
    command = SimpleRevisionCommand(self, "Cut")
    with command.begin():
        # Capture the selection into the session paste buffer first...
        task = self.currentDimension.exportSchematicIter(self.currentSelection)
        self.copiedSchematic = showProgress("Cutting...", task)
        # ...then erase it from the world.
        task = self.currentDimension.fillBlocksIter(self.currentSelection, "air")
        showProgress("Cutting...", task)
    # Consistency: route through pushCommand() like the other edit
    # commands (deleteSelection, fill, ...) so the push is logged.
    self.pushCommand(command)
def copy(self):
    # Export the selection into the session paste buffer; no world
    # changes, so no undo command is needed.
    task = self.currentDimension.exportSchematicIter(self.currentSelection)
    self.copiedSchematic = showProgress("Copying...", task)
def paste(self):
    # Nothing copied yet; silently ignore.
    if self.copiedSchematic is None:
        return
    view = self.editorTab.currentView()
    # Anchor the pending import at the block under the mouse cursor.
    imp = PendingImport(self.copiedSchematic, view.mouseBlockPos, self.tr("<Pasted Object>"))
    command = PasteImportCommand(self, imp, "Paste")
    self.undoStack.push(command)
def pasteBlocks(self):
    # Blocks-only paste is not implemented yet.
    NotImplementedYet()
def pasteEntities(self):
    # Entities-only paste is not implemented yet.
    NotImplementedYet()
def findReplace(self):
    # Show the (modal) find/replace dialog.
    self.findReplaceDialog.exec_()
def analyze(self):
    # Nothing selected; nothing to analyze.
    if self.currentSelection is None:
        return
    task = self.currentDimension.analyzeIter(self.currentSelection)
    showProgress("Analyzing...", task)
    outputDialog = AnalyzeOutputDialog(self, task.blocks,
                                       task.entityCounts,
                                       task.tileEntityCounts,
                                       task.dimension.worldEditor.displayName)
    # NOTE(review): outputDialog is neither shown nor stored here --
    # presumably AnalyzeOutputDialog displays itself in __init__;
    # confirm, otherwise it may be garbage-collected immediately.
def deleteSelection(self):
    # Remove both blocks and entities within the selection as one
    # undoable revision.
    command = SimpleRevisionCommand(self, "Delete")
    with command.begin():
        fillTask = self.currentDimension.fillBlocksIter(self.currentSelection, "air")
        entitiesTask = RemoveEntitiesOperation(self.currentDimension, self.currentSelection)
        task = ComposeOperations(fillTask, entitiesTask)
        showProgress("Deleting...", task)
    self.pushCommand(command)
def deleteBlocks(self):
    # Remove only blocks (fill with air), leaving entities intact.
    command = SimpleRevisionCommand(self, "Delete Blocks")
    with command.begin():
        fillTask = self.currentDimension.fillBlocksIter(self.currentSelection, "air")
        showProgress("Deleting...", fillTask)
    self.pushCommand(command)
def deleteEntities(self):
    # Remove only entities, leaving blocks intact.
    command = SimpleRevisionCommand(self, "Delete Entities")
    with command.begin():
        entitiesTask = RemoveEntitiesOperation(self.currentDimension, self.currentSelection)
        showProgress("Deleting...", entitiesTask)
    self.pushCommand(command)
def fill(self):
    # Delegate to the fill command dialog/operation.
    fillCommand(self)
# - Select -
def selectAll(self):
    # Select the entire current dimension (undoable).
    command = SelectCommand(self, self.currentDimension.bounds, self.tr("Select All"))
    self.pushCommand(command)
def deselect(self):
    """Clear the current selection (undoable)."""
    # Consistency: pass the label to the constructor, matching the
    # three-argument form used by selectAll().
    command = SelectCommand(self, None, self.tr("Deselect"))
    self.pushCommand(command)
# - Chunk -
def deleteChunks(self):
    # No selection means no chunks to delete.
    if self.currentSelection is None:
        return
    command = SimpleRevisionCommand(self, self.tr("Delete Chunks"))
    with command.begin():
        # Iterate the chunk-coordinate extent of the selection.
        # NOTE(review): assumes maxcx/maxcz are exclusive bounds -- confirm
        # against BoundingBox's chunk-coordinate semantics.
        for cx in range(self.currentSelection.mincx, self.currentSelection.maxcx):
            for cz in range(self.currentSelection.mincz, self.currentSelection.maxcz):
                self.currentDimension.deleteChunk(cx, cz)
    self.pushCommand(command)
def createChunks(self):
    # Placeholder: chunk creation is not implemented yet.
    QtGui.QMessageBox.warning(QtGui.qApp.mainWindow, "Not implemented.", "Create chunks is not implemented yet!")
def repopChunks(self):
    # Placeholder: marking chunks for repopulation is not implemented yet.
    QtGui.QMessageBox.warning(QtGui.qApp.mainWindow, "Not implemented.", "Repop chunks is not implemented yet!")
# - Dimensions -
dimensionChanged = QtCore.Signal(object)
_dimDisplayNames = {"": "Overworld",
"DIM-1": "Nether",
"DIM1": "The End",
}
def dimensionDisplayName(self, dimName):
    # Map internal dimension ids ("", "DIM-1", "DIM1") to friendly names,
    # falling back to the raw id for unknown dimensions.
    return self._dimDisplayNames.get(dimName, dimName)
def dimensionMenuLabel(self, dimName):
    """Text for the dimension-selector button.

    i18n fix: translate the stable template "Dimension: %s" and
    substitute afterward, instead of asking tr() to translate an
    already-formatted, per-dimension string (which translators would
    never see).
    """
    return self.tr("Dimension: %s") % self.dimensionDisplayName(dimName)
def gotoDimension(self, dimName):
    # Switch the session to another dimension: update the selector
    # button, replace the chunk loader, and notify listeners.
    dim = self.worldEditor.getDimension(dimName)
    if dim is self.currentDimension:
        # Already there; avoid tearing down the chunk loader needlessly.
        return
    log.info("Going to dimension %s", dimName)
    self.changeDimensionButton.setText(self.dimensionMenuLabel(dimName))
    self.currentDimension = dim
    self.loader = chunkloader.ChunkLoader(self.currentDimension)
    self.loader.chunkCompleted.connect(self.chunkDidComplete)
    self.loader.allChunksDone.connect(self.updateView)
    self.revisionChanged.connect(self.loader.revisionDidChange)
    self.dimensionChanged.emit(dim)
# - Import/export -
def import_(self):
    # prompt for a file to import
    startingDir = Settings().value("import_dialog/starting_dir", getUserSchematicsDirectory())
    # getOpenFileName returns a (filename, selectedFilter) tuple.
    result = QtGui.QFileDialog.getOpenFileName(QtGui.qApp.mainWindow, self.tr("Import"),
                                               startingDir,
                                               "All files (*.*)")
    if result:
        filename = result[0]
        if filename:
            self.importSchematic(filename)
def export(self):
    # prompt for filename and format. maybe use custom browser to save to export library??
    startingDir = Settings().value("import_dialog/starting_dir", getUserSchematicsDirectory())
    # getSaveFileName returns a (filename, selectedFilter) tuple.
    result = QtGui.QFileDialog.getSaveFileName(QtGui.qApp.mainWindow,
                                               self.tr("Export Schematic"),
                                               startingDir,
                                               "Schematic files (*.schematic)")
    if result:
        filename = result[0]
        if filename:
            # Export the current selection and write it to disk.
            task = self.currentDimension.exportSchematicIter(self.currentSelection)
            schematic = showProgress("Copying...", task)
            schematic.saveToFile(filename)
# --- Drag-and-drop ---
def urlsWereDropped(self, mimeData, position, face):
    # Drag-and-drop of URLs is only logged for now; no import is done.
    log.info("URLs dropped:\n%s", mimeData.urls())
def mapItemWasDropped(self, mimeData, position, face):
    """Create an ItemFrame holding the dropped map item, as an undoable command.

    :param mimeData: drag payload; must carry MimeFormats.MapItem data
    :param position: block position the item was dropped onto
    :param face: clicked face; the frame is placed in the adjacent block
    """
    log.info("Map item dropped.")
    assert mimeData.hasFormat(MimeFormats.MapItem)
    # Payload is a comma-separated list of integer map IDs.
    mapIDString = mimeData.data(MimeFormats.MapItem).data()
    mapIDs = mapIDString.split(", ")
    mapIDs = [int(m) for m in mapIDs]
    mapID = mapIDs[0]  # xxx only one at a time for now
    # Place the frame in the block adjacent to the clicked face.
    position = position + face.vector
    x, y, z = position
    cx = x >> 4
    cz = z >> 4
    try:
        chunk = self.currentDimension.getChunk(cx, cz)
    except ChunkNotPresent:
        log.info("Refusing to import map into non-existent chunk %s", (cx, cz))
        return
    ref = self.worldEditor.createEntity("ItemFrame")
    if ref is None:
        # World format doesn't support ItemFrame entities — nothing to do.
        return
    facing = ref.facingForMCEditFace(face)
    if facing is None:
        # xxx by camera vector?
        facing = ref.SouthFacing
    ref.Item.Damage = mapID
    ref.Item.id = "minecraft:filled_map"
    ref.Position = position + (0.5, 0.5, 0.5)
    ref.TilePos = position  # 1.7/1.8 issues should be handled by ref...
    ref.Facing = facing
    log.info("Created map ItemFrame with ID %s, importing...", mapID)
    # Wrap the entity addition in a revision command so it is undoable.
    command = SimpleRevisionCommand(self, self.tr("Import map %(mapID)s") % {"mapID": mapID})
    with command.begin():
        chunk.addEntity(ref)
        log.info(nbt.nested_string(ref.rootTag))
    self.pushCommand(command)
# --- Library support ---
def importSchematic(self, filename):
    """Open *filename* read-only and stage it as a pending paste/import.

    The import is anchored where the center-of-view ray hits the world
    (or at the ray's base point if nothing is hit in bounds) and pushed
    onto the undo stack as a PasteImportCommand.
    """
    schematic = WorldEditor(filename, readonly=True)
    ray = self.editorTab.currentView().rayAtCenter()
    pos, face = rayCastInBounds(ray, self.currentDimension)
    if pos is None:
        # Nothing hit in bounds; fall back to the ray's base point.
        pos = ray.point
    name = os.path.basename(filename)
    imp = PendingImport(schematic, pos, name)
    command = PasteImportCommand(self, imp, "Import %s" % name)
    self.undoStack.push(command)
# --- Undo support ---
# Emitted with a RevisionChanges object after commit/undo/redo/goto (see
# commitUndoIter, undoForward, undoBackward, gotoRevision).
revisionChanged = QtCore.Signal(RevisionChanges)
def undoIndexChanged(self, index):
    """Repaint the active view after the undo stack's index moves."""
    view = self.editorTab.currentView()
    view.update()
def pushCommand(self, command):
    """Push *command* onto the session undo stack, logging its display text."""
    # Use lazy %-style logging args instead of eager string interpolation.
    log.info("Pushing command %s", command.text())
    self.undoStack.push(command)
def setUndoBlock(self, callback):
    """Forward *callback* to the undo stack's setUndoBlock."""
    self.undoStack.setUndoBlock(callback)
def removeUndoBlock(self, callback):
    """Forward *callback* to the undo stack's removeUndoBlock."""
    self.undoStack.removeUndoBlock(callback)
def beginUndo(self):
    """Start a new undoable revision: clear any pending undo block, mark the
    session dirty, and open a revision in the world editor."""
    self.undoStack.clearUndoBlock()
    self.dirty = True
    self.worldEditor.beginUndo()
def commitUndo(self):
    """Run commitUndoIter to completion, discarding its progress values."""
    exhaust(self.commitUndoIter())
def commitUndoIter(self):
    """Commit the open revision, yielding the editor's progress values.

    After the commit finishes, emits revisionChanged with the diff between
    the previous revision and the now-current committed one.
    """
    for status in self.worldEditor.commitUndoIter():
        yield status
    changes = self.worldEditor.getRevisionChanges(self.currentRevision - 1, self.currentRevision)
    self.revisionChanged.emit(changes)
def undoForward(self):
    """Redo: advance one revision, then emit the changes redo applied."""
    self.worldEditor.redo()
    # currentRevision has already advanced, so the applied diff is
    # (currentRevision-1 -> currentRevision).
    changes = self.worldEditor.getRevisionChanges(self.currentRevision - 1, self.currentRevision)
    self.revisionChanged.emit(changes)
def undoBackward(self):
    """Undo: step back one revision, then emit the changes undo reverted.

    After undo, currentRevision is the older revision, so the reverted
    diff is (currentRevision -> currentRevision+1).
    """
    self.worldEditor.undo()
    changes = self.worldEditor.getRevisionChanges(self.currentRevision, self.currentRevision + 1)
    self.revisionChanged.emit(changes)
def gotoRevision(self, index):
    """Jump directly to revision *index*, emitting the resulting changes."""
    if index == self.currentRevision:
        return
    # Compute the diff before moving so both endpoints are still valid.
    changes = self.worldEditor.getRevisionChanges(self.currentRevision, index)
    self.worldEditor.gotoRevision(index)
    self.revisionChanged.emit(changes)
@property
def currentRevision(self):
    """The world editor's current revision index (read-only)."""
    return self.worldEditor.currentRevision
# --- Misplaced startup code? ---
def loadDone(self):
    # Called by MCEditApp after the view is on screen to make sure view.center() works correctly
    # xxx was needed because view.centerOnPoint used a depthbuffer read for that, now what?
    try:
        try:
            # Prefer the single-player player's position and view angle.
            player = self.worldEditor.getPlayer()
            # (0, 1.8, 0) offset — presumably to raise the center from feet
            # to eye level; confirm against the player model height.
            center = Vector(*player.Position) + (0, 1.8, 0)
            dimNo = player.Dimension
            dimName = self.worldEditor.dimNameFromNumber(dimNo)
            log.info("Setting view angle to single-player player's view in dimension %s.",
                     dimName)
            rotation = player.Rotation
            if dimName:
                self.gotoDimension(dimName)
            try:
                self.editorTab.currentView().yawPitch = rotation
            except AttributeError:
                # Current view type doesn't support a yaw/pitch setting.
                pass
        except PlayerNotFound:
            try:
                center = self.worldEditor.getWorldMetadata().Spawn
                log.info("Centering on spawn position.")
            except AttributeError:
                # No spawn metadata either — fall back to the geometric center.
                log.info("Centering on world center")
                center = self.currentDimension.bounds.origin + (self.currentDimension.bounds.size * 0.5)
        self.editorTab.miniMap.centerOnPoint(center)
        self.editorTab.currentView().centerOnPoint(center, distance=0)
    except Exception as e:
        # Centering is best-effort; never let it break session startup.
        log.exception("Error while centering on player for world editor: %s", e)
# --- Tools ---
def toolShortcut(self, name):
    """Return the keyboard shortcut for tool *name*, or '' if it has none."""
    return {
        "Select": "S",
        "Create": "D",
    }.get(name, "")
def getTool(self, name):
    """Return the session tool whose .name equals *name*, or None."""
    matches = (tool for tool in self.tools if tool.name == name)
    return next(matches, None)
def chooseTool(self, name):
    """Make the tool named *name* current, notifying old and new tools.

    Emits toolChanged and checks the tool's QAction so toolbar/menu state
    follows. NOTE(review): getTool may return None for an unknown name,
    which would crash toolActive() — callers must pass a valid tool name.
    """
    oldTool = self.currentTool
    self.currentTool = self.getTool(name)
    if oldTool is not self.currentTool:
        if oldTool:
            oldTool.toolInactive()
        self.currentTool.toolActive()
    self.toolChanged.emit(self.currentTool)
    self.actionsByName[name].setChecked(True)
# Emitted with the newly selected tool object when chooseTool switches tools.
toolChanged = QtCore.Signal(object)
def chunkDidComplete(self):
    """Loader callback: refresh the app status bar with loader cps and view fps."""
    # Imported here rather than at module level — presumably to avoid a
    # circular import with editorapp; confirm before moving it.
    from mcedit2 import editorapp
    editorapp.MCEditApp.app.updateStatusLabel(None, None, None, self.loader.cps,
                                              self.editorTab.currentView().fps)
def updateStatusFromEvent(self, event):
    """Update the app status bar from a mouse event.

    When the event carries a block position, shows the block type and the
    biome name/ID at that column; otherwise shows a placeholder.
    """
    from mcedit2 import editorapp
    if event.blockPosition:
        # Renamed from `id` to avoid shadowing the builtin.
        blockID = self.currentDimension.getBlockID(*event.blockPosition)
        data = self.currentDimension.getBlockData(*event.blockPosition)
        block = self.worldEditor.blocktypes[blockID, data]
        # Biomes are stored per (x, z) column, so y is not passed.
        biomeID = self.currentDimension.getBiomeID(event.blockPosition[0],
                                                   event.blockPosition[2])
        biome = self.biomeTypes.types.get(biomeID)
        if biome is not None:
            biomeName = biome.name
        else:
            biomeName = "Unknown biome"
        biomeText = "%s (%d)" % (biomeName, biomeID)
        editorapp.MCEditApp.app.updateStatusLabel(event.blockPosition, block, biomeText,
                                                  self.loader.cps, event.view.fps)
    else:
        editorapp.MCEditApp.app.updateStatusLabel('(N/A)', None, None, self.loader.cps,
                                                  event.view.fps)
def viewMousePress(self, event):
    """Route a mouse-press event to the current tool, then repaint the view."""
    self.updateStatusFromEvent(event)
    tool = self.currentTool
    if event.blockPosition is not None and hasattr(tool, 'mousePress'):
        tool.mousePress(event)
    self.editorTab.currentView().update()
def viewMouseMove(self, event):
    """Route a mouse-move event to the current tool, then repaint the view."""
    self.updateStatusFromEvent(event)
    tool = self.currentTool
    if hasattr(tool, 'mouseMove'):
        tool.mouseMove(event)
    self.editorTab.currentView().update()
def viewMouseDrag(self, event):
    """Route a mouse-drag event to the current tool, then repaint the view."""
    self.updateStatusFromEvent(event)
    tool = self.currentTool
    if hasattr(tool, 'mouseDrag'):
        tool.mouseDrag(event)
    self.editorTab.currentView().update()
def viewMouseRelease(self, event):
    """Route a mouse-release event to the current tool, then repaint the view."""
    self.updateStatusFromEvent(event)
    tool = self.currentTool
    if hasattr(tool, 'mouseRelease'):
        tool.mouseRelease(event)
    self.editorTab.currentView().update()
# --- EditorTab handling ---
def tabCaption(self):
    """Return the display name of this session's world file, for the tab title."""
    return util.displayName(self.filename)
def closeTab(self):
    """Prompt to save if dirty, then tear down panels and close the editor.

    :return: True when the tab may close, False if the user cancelled.
    """
    if self.worldEditor is None:
        # Already closed (or never opened) — nothing to do.
        return True
    if self.dirty:
        msgBox = QtGui.QMessageBox(self.editorTab.window())
        msgBox.setText("The world has been modified.")
        msgBox.setInformativeText("Do you want to save your changes?")
        msgBox.setStandardButtons(
            QtGui.QMessageBox.Save | QtGui.QMessageBox.Discard | QtGui.QMessageBox.Cancel)
        msgBox.setDefaultButton(QtGui.QMessageBox.Save)
        ret = msgBox.exec_()
        if ret == QtGui.QMessageBox.Save:
            self.save()
        if ret == QtGui.QMessageBox.Cancel:
            return False
    for panel in self.panels:
        panel.close()
    self.editorTab.saveState()
    self.worldEditor.close()
    self.worldEditor = None
    return True
# --- Inspector ---
def inspectBlock(self, pos):
    """Reveal the inspector panel and point it at the block at *pos*."""
    dock = self.inspectorDockWidget
    dock.show()
    self.inspectorWidget.inspectBlock(pos)
def inspectEntity(self, entity):
    """Reveal the inspector panel and point it at *entity*."""
    dock = self.inspectorDockWidget
    dock.show()
    self.inspectorWidget.inspectEntity(entity)
def inspectChunk(self, cx, cz):
    """Reveal the inspector panel and point it at chunk (*cx*, *cz*)."""
    dock = self.inspectorDockWidget
    dock.show()
    self.inspectorWidget.inspectChunk(cx, cz)
# --- Zooming ---
def zoomAndInspectBlock(self, pos):
    """Zoom the view to *pos*, then open the block inspector on it."""
    self.zoomToPoint(pos)
    self.inspectBlock(pos)
def zoomAndInspectEntity(self, entity):
    """Zoom the view to *entity*'s position, then open the inspector on it."""
    self.zoomToPoint(entity.Position)
    self.inspectEntity(entity)
def zoomToPoint(self, point):
    """Center the active view on *point* at a fixed distance of 15."""
    self.editorTab.currentView().centerOnPoint(point, 15)
# --- Blocktype handling ---
def unknownBlocks(self):
    """Return an iterator of internal names for block types flagged unknown."""
    return (bt.internalName
            for bt in self.worldEditor.blocktypes
            if bt.unknown)
class EditorTab(QtGui.QWidget):
    """Widget hosting the editor viewports, minimap, and tool-options panel."""

    def __init__(self, editorSession):
        """
        EditorTab is the widget containing the editor viewports, the minimap, and
        the settings panel for the currently selected tool and its dockwidget.
        :type editorSession: mcedit2.editorsession.EditorSession
        :rtype: EditorTab
        """
        QtGui.QWidget.__init__(self)
        self.setContentsMargins(0, 0, 0, 0)
        self.editorSession = editorSession
        self.editorSession.dimensionChanged.connect(self.dimensionDidChange)
        # History of view centers recorded on each view switch, for debugging.
        self.debugLastCenters = []
        # One checkable toolbar button per view type; clicking shows that view.
        self.viewButtonGroup = QtGui.QButtonGroup(self)
        self.viewButtonToolbar = QtGui.QToolBar()
        self.viewButtons = {}
        self.views = []
        for name, handler in (
                ("2D", self.showCutawayView),
                ("Over", self.showOverheadView),
                # ("Iso", self.showIsoView),
                ("Cam", self.showCameraView),
                # ("4-up", self.showFourUpView),
        ):
            button = QtGui.QToolButton(text=name, checkable=True)
            button.clicked.connect(handler)
            self.viewButtonGroup.addButton(button)
            self.viewButtonToolbar.addWidget(button)
            self.viewButtons[name] = button
        self.viewStack = QtGui.QStackedWidget()
        # The minimap lives in its own dock widget and is always a loader client.
        self.miniMap = MinimapWorldView(editorSession.currentDimension, editorSession.textureAtlas, editorSession.geometryCache)
        self.miniMapDockWidget = QtGui.QDockWidget("Minimap", objectName="MinimapWidget", floating=True)
        self.miniMapDockWidget.setWidget(self.miniMap)
        self.miniMapDockWidget.setFixedSize(256, 256)
        self.views.append(self.miniMap)
        self.toolOptionsArea = QtGui.QScrollArea()
        self.toolOptionsArea.setWidgetResizable(True)
        self.toolOptionsDockWidget = QtGui.QDockWidget("Tool Options", objectName="ToolOptionsWidget", floating=True)
        self.toolOptionsDockWidget.setWidget(self.toolOptionsArea)
        editorSession.dockWidgets.append((Qt.LeftDockWidgetArea, self.miniMapDockWidget))
        editorSession.dockWidgets.append((Qt.LeftDockWidgetArea, self.toolOptionsDockWidget))
        editorSession.loader.addClient(self.miniMap)
        # World-view frames; each registers itself in self.views and the
        # stacked widget via _addView.
        self.overheadViewFrame = OverheadWorldViewFrame(editorSession.currentDimension, editorSession.textureAtlas, editorSession.geometryCache, self.miniMap)
        self.overheadViewFrame.worldView.viewID = "Over"
        self._addView(self.overheadViewFrame)
        self.cutawayViewFrame = CutawayWorldViewFrame(editorSession.currentDimension, editorSession.textureAtlas, editorSession.geometryCache, self.miniMap)
        self.cutawayViewFrame.worldView.viewID = "2D"
        self._addView(self.cutawayViewFrame)
        #
        # self.fourUpViewFrame = FourUpWorldViewFrame(editorSession.currentDimension, editorSession.textureAtlas, editorSession.geometryCache, self.miniMap)
        # self.fourUpViewFrame.worldView.viewID = "4-up"
        # self._addView(self.fourUpViewFrame)
        self.cameraViewFrame = CameraWorldViewFrame(editorSession.currentDimension, editorSession.textureAtlas, editorSession.geometryCache, self.miniMap)
        self.cameraViewFrame.worldView.viewID = "Cam"
        self.cameraView = self.cameraViewFrame.worldView
        self._addView(self.cameraViewFrame)
        self.viewStack.currentChanged.connect(self._viewChanged)
        self.viewChanged.connect(self.viewDidChange)
        self.setLayout(Column(self.viewButtonToolbar,
                              Row(self.viewStack, margin=0), margin=0))
        # Restore the last-used view, falling back to the camera view.
        currentViewName = currentViewSetting.value()
        if currentViewName not in self.viewButtons:
            currentViewName = "Cam"
        self.viewButtons[currentViewName].click()

    def destroy(self):
        """Drop the session reference and destroy every child view."""
        self.editorSession = None
        for view in self.views:
            view.destroy()
        super(EditorTab, self).destroy()

    # Weak reference — presumably to break a session<->tab reference cycle;
    # confirm against weakrefprop's implementation.
    editorSession = weakrefprop()
    # Re-emitted from each world view's drop signals (wired in _addView).
    urlsDropped = QtCore.Signal(QtCore.QMimeData, Vector, faces.Face)
    mapItemDropped = QtCore.Signal(QtCore.QMimeData, Vector, faces.Face)

    def dimensionDidChange(self, dim):
        """Point every view at *dim* and re-register with the session's new loader."""
        for view in self.views:
            view.setDimension(dim)
        # EditorSession has a new loader now, so re-add minimap and current view
        self.editorSession.loader.addClient(self.miniMap)
        view = self.currentView()
        if view is not None:
            self.editorSession.loader.addClient(view)

    def toolDidChange(self, tool):
        """Show *tool*'s options widget and install its cursor on the active view."""
        if tool.toolWidget:
            self.toolOptionsArea.takeWidget()  # setWidget gives ownership to the scroll area
            self.toolOptionsArea.setWidget(tool.toolWidget)
        self.toolOptionsDockWidget.setWindowTitle(self.tr(tool.name) + self.tr(" Tool Options"))
        log.info("Setting cursor %r for tool %r on view %r", tool.cursorNode, tool,
                 self.currentView())
        self.currentView().setToolCursor(tool.cursorNode)

    def saveState(self):
        """Persist tab UI state. Currently a no-op placeholder."""
        pass

    # Emitted with the newly shown WorldView when the view stack switches.
    viewChanged = QtCore.Signal(object)

    def _viewChanged(self, index):
        # Translate the stack's index-based signal into a view-object signal.
        self.viewChanged.emit(self.currentView())

    def viewDidChange(self, view):
        """Sync minimap, tool cursor, overlays, and keyboard focus to *view*."""
        self.miniMap.centerOnPoint(view.viewCenter())
        if self.editorSession.currentTool:
            view.setToolCursor(self.editorSession.currentTool.cursorNode)
        overlayNodes = [tool.overlayNode
                        for tool in self.editorSession.tools
                        if tool.overlayNode is not None]
        # The editor-wide overlay always renders first.
        overlayNodes.insert(0, self.editorSession.editorOverlay)
        view.setToolOverlays(overlayNodes)
        view.setFocus()

    def viewOffsetChanged(self, view):
        # Keep the minimap's center and view indicator in sync with the view.
        self.miniMap.centerOnPoint(view.viewCenter())
        self.miniMap.currentViewMatrixChanged(view)

    def _addView(self, frame):
        """Register *frame*'s world view: stack it, wire actions and drop signals."""
        self.views.append(frame.worldView)
        frame.stackIndex = self.viewStack.addWidget(frame)
        frame.worldView.viewportMoved.connect(self.viewOffsetChanged)
        frame.worldView.viewActions.extend([
            UseToolMouseAction(self),
            TrackingMouseAction(self)
        ])
        frame.worldView.urlsDropped.connect(self.urlsDropped.emit)
        frame.worldView.mapItemDropped.connect(self.mapItemDropped.emit)

    def currentView(self):
        """Return the world view of the visible frame, or None.

        :rtype: mcedit2.worldview.worldview.WorldView
        """
        widget = self.viewStack.currentWidget()
        if widget is None:
            return None
        return widget.worldView

    def showViewFrame(self, frame):
        """Switch the stack to *frame*, handing over loader priority and center."""
        center = self.currentView().viewCenter()
        self.debugLastCenters.append(center)
        log.info("Going from %s to %s: Center was %s", self.currentView(), frame.worldView, center)
        self.editorSession.loader.removeClient(self.currentView())
        # Priority 0: the newly shown view loads chunks first.
        self.editorSession.loader.addClient(frame.worldView, 0)
        self.viewStack.setCurrentIndex(frame.stackIndex)
        frame.worldView.centerOnPoint(center)
        log.info("Center is now %s", self.currentView().viewCenter())

    def showOverheadView(self):
        self.showViewFrame(self.overheadViewFrame)

    #
    # def showIsoView(self):
    #     self.showViewFrame(self.isoViewFrame)
    #
    # def showFourUpView(self):
    #     self.showViewFrame(self.fourUpViewFrame)

    def showCutawayView(self):
        self.showViewFrame(self.cutawayViewFrame)

    def showCameraView(self):
        self.showViewFrame(self.cameraViewFrame)
| {
"content_hash": "acc99877da4049c3c0a77b77c7dc48ec",
"timestamp": "",
"source": "github",
"line_count": 1227,
"max_line_length": 158,
"avg_line_length": 39.727791361043195,
"alnum_prop": 0.6560128010503425,
"repo_name": "vorburger/mcedit2",
"id": "0138825873c137d0f88a785f43132a7e767a7619",
"size": "48746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/mcedit2/editorsession.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "8578"
},
{
"name": "Makefile",
"bytes": "156"
},
{
"name": "Python",
"bytes": "1639144"
}
],
"symlink_target": ""
} |
"""
Copyright 2017 ManerFan
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from graph import canvas
from search.queue import PriorityQueue
from search import tools
def cal(start, end, obstacles=None, show_details=False):
    """Search a path from *start* to *end* using the A* algorithm.

    :param start: start cell
    :param end: goal cell
    :param obstacles: cells that may not be entered (defaults to none)
    :param show_details: if True, paint explored cells on the canvas
    :return: list of cells from just after *start* through *end*, or
        None when no path exists
    """
    # Avoid the shared mutable-default-argument pitfall (obstacles=[]).
    if obstacles is None:
        obstacles = []
    if tools.cell_equal(start, end):
        return []
    if tools.is_adjacent(start, end):
        return [end]
    queue = PriorityQueue()
    start_node = Node(start, 0, Node.cal_h(start, end))
    queue.put(start_node, start_node.f)
    # mark maps a cell id to the cell it was reached from (None for start).
    mark = {tools.cal_cell_id(start): None}
    while not queue.empty():
        node = queue.get()
        adjs = tools.adj(node.cell, mark, obstacles)
        for adj in adjs:
            if tools.cal_cell_id(adj) in mark:
                continue
            mark[tools.cal_cell_id(adj)] = node.cell
            # Each step costs 1; h is the Manhattan distance to the goal.
            adj_node = Node(adj, node.g + 1, Node.cal_h(adj, end))
            queue.put(adj_node, adj_node.f)
            if tools.cell_equal(adj, end):
                path = tools.cal_path(mark, tools.cal_cell_id(adj))
                return path[1:] + [end]
            if show_details:
                canvas.draw_cell(adj, canvas.COLOR.DARK_GREEN.value)
                canvas.update()
    return None
class Node:
    """An A* search node: a cell plus its path cost and heuristic."""

    def __init__(self, cell, g, h):
        self._cell = cell
        self._g = g  # cost of the path from the start cell to this cell
        self._h = h  # heuristic distance from this cell to the goal

    @property
    def cell(self):
        return self._cell

    @property
    def f(self):
        # Total estimated cost: path cost so far plus heuristic remainder.
        return self._g + self._h

    @property
    def g(self):
        return self._g

    @staticmethod
    def cal_h(cell1, cell2):
        """Manhattan distance between two (x, y) cells."""
        x1, y1 = cell1
        x2, y2 = cell2
        return abs(x1 - x2) + abs(y1 - y2)
| {
"content_hash": "725648c2c80668501bdf9915fefb7993",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 72,
"avg_line_length": 25.91111111111111,
"alnum_prop": 0.5943396226415094,
"repo_name": "manerfan/python-game-snake",
"id": "9a76f37dbe3594f4eded4758b3729fea7d65ea2c",
"size": "2398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/search/a_star.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "24897"
}
],
"symlink_target": ""
} |
"""
Upload a multilambda.domain.zip to the S3 bucket. Useful when
developing and small changes need to be made to a lambda function, but a full
rebuild of the entire zip file isn't required.
"""
import alter_path
from lib import configuration
from lib.lambdas import upload_lambda_zip
if __name__ == '__main__':
    # NOTE: the description previously said "downloading ... from S3", but
    # this script uploads (see module docstring and upload_lambda_zip).
    parser = configuration.BossParser(
        description='Script for uploading lambda function code to S3. ' +
                    'To supply arguments from a file, provide the filename prepended with an `@`.',
        fromfile_prefix_chars='@')
    parser.add_bosslet()
    parser.add_argument(
        'zip_name',
        help='Name of zip file to upload to S3.')
    args = parser.parse_args()
    upload_lambda_zip(args.bosslet_config, args.zip_name)
| {
"content_hash": "b24e23f16f3ecde695e1b7be1e9e2ad2",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 87,
"avg_line_length": 32.125,
"alnum_prop": 0.6848249027237354,
"repo_name": "jhuapl-boss/boss-manage",
"id": "50d3e330bb8ff996fc876728ad5fb1a70ec5d728",
"size": "1414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/upload_lambda_zip.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1509"
},
{
"name": "HCL",
"bytes": "2082"
},
{
"name": "Jinja",
"bytes": "5096"
},
{
"name": "Python",
"bytes": "796230"
},
{
"name": "SaltStack",
"bytes": "57550"
},
{
"name": "Shell",
"bytes": "223494"
}
],
"symlink_target": ""
} |
"""
Test main module.
Might get removed in future.
"""
| {
"content_hash": "2f6ce87173a0d5737aa4fbd6c1b3c194",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 28,
"avg_line_length": 9.5,
"alnum_prop": 0.6491228070175439,
"repo_name": "LosoiP/pathwalue",
"id": "bf3c43009d78efff2e4de880137c0d904c8b4cc2",
"size": "153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1526"
},
{
"name": "HTML",
"bytes": "21623"
},
{
"name": "JavaScript",
"bytes": "55381"
},
{
"name": "Python",
"bytes": "143099"
},
{
"name": "R",
"bytes": "2295"
}
],
"symlink_target": ""
} |
import json
from awsclpy import AWSCLPy
class AmiEc2(object):
    """Drives AWS CLI calls to build an AMI from a recipe.

    Launches an EC2 instance from a base AMI (creating a temporary security
    group and IAM instance profile when the recipe does not supply them),
    images the instance, shares/tags the image, and tears everything down.
    """

    def __init__(self, **kwargs):
        """Accepts `recipe` (required) and `quiet` (optional) keyword args."""
        self.__quiet = kwargs.get('quiet', False)
        self.__recipe = kwargs['recipe']
        self.__awscli = AWSCLPy(quiet=self.__quiet,
                                **self.__recipe.awscli_args.__dict__)

    def instantiate(self):
        """Launch the EC2 instance described by the recipe.

        Creates a throwaway security group, key pair, and IAM instance
        profile when the recipe does not provide them, then tags the
        instance and waits for it to be describable.
        """
        security_group = self.__recipe.security_groups
        if not security_group:
            self.__create_security_group()
            security_group = self.security_group

        key_name = self.__recipe.key_name
        if not key_name:
            self.__generate_key_pair()
            key_name = self.key_name

        instance_profile = self.__recipe.iam_instance_profile
        iam_roles = self.__recipe.iam_roles
        if instance_profile:
            instance_profile_arn = instance_profile.arn
            instance_profile_name = instance_profile.name
        elif isinstance(iam_roles, list):
            # BUGFIX: __create_iam_instance_profile now returns (arn, name)
            # to match this unpacking order; it previously returned
            # (name, arn), silently swapping the two values.
            instance_profile_arn, instance_profile_name = \
                self.__create_iam_instance_profile(iam_roles)
        else:
            instance_profile_arn = instance_profile_name = None

        # Assemble the optional --iam-instance-profile Arn=...,Name=... arg.
        iam_instance_profile = []
        if instance_profile_arn:
            iam_instance_profile.append(
                '='.join(['Arn', instance_profile_arn])
            )
        if instance_profile_name:
            iam_instance_profile.append(
                '='.join(['Name', instance_profile_name])
            )
        if iam_instance_profile:
            iam_instance_profile = [
                '--iam-instance-profile',
                ','.join(iam_instance_profile)
            ]

        associate_public_ip_address = \
            '--associate-public-ip-address' \
            if self.__recipe.associate_public_ip \
            else '--no-associate-public-ip-address'

        instance = self.__awscli.ec2(
            'run-instances',
            '--image-id', self.__recipe.base_ami,
            '--key-name', key_name,
            '--security-group-ids', security_group,
            '--instance-type', self.__recipe.instance_type,
            '--subnet-id', self.__recipe.subnet_id,
            associate_public_ip_address,
            iam_instance_profile
        )
        self.__instance = instance['Instances'][0]
        self.tag(self.__instance['InstanceId'], self.__recipe.ec2_tags)
        self.__describe_instance()

    def get_instance(self, ec2_id):
        """Attach this object to an already-running instance by ID."""
        self.__describe_instance(ec2_id)

    def terminate(self):
        """Terminate the instance and clean up any resources we created."""
        self.__awscli.ec2('terminate-instances',
                          '--instance-ids', self.__instance['InstanceId'])
        if hasattr(self, 'security_group'):
            # The security group can only be deleted once nothing uses it.
            self.wait_until_terminated()
            self.__delete_security_group()
        if hasattr(self, 'key_name'):
            self.__delete_key_pair()
        if hasattr(self, 'iam_instance_profile'):
            self.__delete_iam_instance_profile()

    def wait_until_running(self):
        """Block until the instance reaches the 'running' state."""
        self.__awscli.ec2('wait', 'instance-running',
                          '--instance-ids', self.__instance['InstanceId'])

    def wait_until_healthy(self):
        """Block until the instance passes its status checks."""
        self.__awscli.ec2('wait', 'instance-status-ok',
                          '--instance-ids', self.__instance['InstanceId'])

    def wait_until_stopped(self):
        """Block until the instance is stopped."""
        self.__awscli.ec2('wait', 'instance-stopped',
                          '--instance-ids', self.__instance['InstanceId'])

    def wait_until_terminated(self):
        """Block until the instance is terminated."""
        self.__awscli.ec2('wait', 'instance-terminated',
                          '--instance-ids', self.__instance['InstanceId'])

    def wait_until_image_available(self):
        """Block until the created AMI becomes available."""
        self.__awscli.ec2('wait', 'image-available',
                          '--image-ids', self.__image['ImageId'])

    def stop(self):
        """Request a stop of the instance (does not wait)."""
        self.__awscli.ec2('stop-instances',
                          '--instance-ids', self.__instance['InstanceId'])

    def get_hostname(self):
        """Return the best address for reaching the instance.

        Prefers public DNS, then public IP, then private IP; returns None
        when none of those is present.
        """
        if self.__instance.get('PublicDnsName'):
            return self.__instance['PublicDnsName']
        else:
            if self.__instance.get('PublicIpAddress'):
                return self.__instance['PublicIpAddress']
            elif self.__instance.get('PrivateIpAddress'):
                return self.__instance['PrivateIpAddress']

    def get_username(self):
        """Return the SSH username from the recipe."""
        return self.__recipe.ssh_username

    def tag(self, resource, tags):
        """Apply *tags* (a dict-like of key/value pairs) to *resource*."""
        # dict.items() exists on both Python 2 and 3; iteritems() is 2-only.
        tag_args = ["Key=%s,Value=%s" % (key, value) for key, value in
                    tags.items()]
        self.__awscli.ec2('create-tags',
                          '--resources', resource,
                          '--tags', tag_args)

    def create_image(self):
        """Create an AMI from the instance and return its image ID.

        Honors recipe.imaging_behaviour: 'stop' stops the instance first,
        'reboot' lets AWS reboot it during imaging, anything else images
        the live instance.
        """
        # BUGFIX: `reboot` was previously unbound (NameError) when the
        # behaviour was neither 'stop' nor 'reboot'.
        reboot = ''
        if self.__recipe.imaging_behaviour == 'stop':
            self.stop()
            self.wait_until_stopped()
        elif self.__recipe.imaging_behaviour == 'reboot':
            reboot = '--reboot'
        self.__image = self.__awscli.ec2(
            'create-image',
            '--instance-id', self.__instance['InstanceId'],
            '--name', self.__recipe.ami_tags.Name,
            reboot)
        # BUGFIX: validate the result before using it; sharing previously
        # ran first and would crash on a failed (falsy) creation result.
        if not self.__image:
            raise Exception('Image creation for instance %s failed.' %
                            self.__instance['InstanceId'])
        ami_permissions = self.__recipe.ami_permissions
        if ami_permissions:
            self.__share_image(ami_permissions)
        self.tag(self.__image['ImageId'], self.__recipe.ami_tags)
        return self.__image['ImageId']

    def __share_image(self, account_ids):
        """Grant launch permission on the new AMI to each account in *account_ids*."""
        permissions = {'Add': []}
        for account_id in account_ids:
            permissions['Add'].append({'UserId': str(account_id)})
        # The image must exist before its attributes can be modified.
        self.wait_until_image_available()
        self.__awscli.ec2('modify-image-attribute',
                          '--image-id', self.__image['ImageId'],
                          '--launch-permission', json.dumps(permissions))

    def __describe_instance(self, instance_id=None):
        """Refresh cached instance data, waiting for 'running' when refreshing self."""
        if instance_id:
            instance = self.__awscli.ec2('describe-instances',
                                         '--instance-ids',
                                         instance_id)
        else:
            self.wait_until_running()
            instance = self.__awscli.ec2('describe-instances',
                                         '--instance-ids',
                                         self.__instance['InstanceId'])
        self.__instance = instance['Reservations'][0]['Instances'][0]

    def __get_vpc_id(self):
        """Return the VPC ID of the recipe's subnet."""
        subnet = self.__awscli.ec2('describe-subnets',
                                   '--subnet-ids', self.__recipe.subnet_id)
        return subnet['Subnets'][0]['VpcId']

    def __create_security_group(self):
        """Create a temporary security group allowing SSH in and TCP out."""
        vpc_id = self.__get_vpc_id()
        security_group = self.__awscli.ec2(
            'create-security-group',
            '--group-name', self.__recipe.ec2_tags.Name,
            '--description', 'Allows temporary SSH access to the box.',
            '--vpc-id', vpc_id)
        self.__awscli.ec2('authorize-security-group-ingress',
                          '--group-id', security_group['GroupId'],
                          '--protocol', 'tcp',
                          '--port', 22,
                          '--cidr', '0.0.0.0/0')
        self.__awscli.ec2('authorize-security-group-egress',
                          '--group-id', security_group['GroupId'],
                          '--protocol', 'tcp',
                          '--port', '0-65535',
                          '--cidr', '0.0.0.0/0')
        self.security_group = security_group['GroupId']

    def __delete_security_group(self):
        """Delete the temporary security group created by __create_security_group."""
        self.__awscli.ec2('delete-security-group',
                          '--group-id', self.security_group)

    def __generate_key_pair(self):
        # TODO: generate keypair if not provided
        self.key_name = None

    def __delete_key_pair(self):
        # TODO: delete key pair if autogenerated
        pass

    def __create_iam_instance_profile(self, iam_roles):
        """Create an 'AmiBaker' instance profile with *iam_roles* attached.

        :return: (arn, name) of the new instance profile.
        """
        iam_instance_profile = self.__awscli.iam(
            'create-instance-profile',
            '--instance-profile-name', 'AmiBaker')
        self.iam_instance_profile = iam_instance_profile['InstanceProfile']
        for role in iam_roles:
            self.__awscli.iam('add-role-to-instance-profile',
                              '--instance-profile-name', 'AmiBaker',
                              '--role-name', role)
        # BUGFIX: the caller unpacks (arn, name); this previously returned
        # (name, arn), swapping the two values.
        return (self.iam_instance_profile['Arn'],
                self.iam_instance_profile['InstanceProfileName'])

    def __delete_iam_instance_profile(self):
        """Delete the 'AmiBaker' instance profile and forget it."""
        self.__awscli.iam('delete-instance-profile',
                          '--instance-profile-name', 'AmiBaker')
        self.iam_instance_profile = None
| {
"content_hash": "ccac6e0442fb09fb758f9a28d27c3213",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 75,
"avg_line_length": 35.522267206477736,
"alnum_prop": 0.5235924321860042,
"repo_name": "lyricnz/amibaker",
"id": "6c228b3bed1d2ef94b70587961bdd0e16a8998c0",
"size": "8774",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "amibaker/ami_ec2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33489"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import click
import ctadc
# Configure root logging once at import time; module-level logger per convention.
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
@click.group()
def cli():
    """CTA-DC command line interface script.

    For further information, see `README.md`.
    """
    # Top-level click group; sub-commands attach via the decorators below.
@cli.group('skymodels')
def cli_skymodels():
    """Make files in `sky_model` folder.

    Group for the `xml` and `images` sub-commands below.
    """
@cli_skymodels.command('xml')
def make_skymodels_xml():
    """Make sky model XML files.
    """
    # Delegates to the ctadc package; no CLI options are taken.
    ctadc.sky_model.make_sky_models_xml()
@cli_skymodels.command('images')
def make_skymodels_images():
    """Make sky model images.
    """
    # Delegates to the ctadc package; no CLI options are taken.
    ctadc.sky_model.make_sky_models_images()
@cli.command('observations')
def make_observations():
    """Make files in `observations` folder.
    """
    ctadc.observations.make_all_obslists()
@cli.command('data')
def make_data():
    """Make files in `data` folder.

    (Event lists and other files.)
    """
    ctadc.data.make_all_data()
# Script entry point: dispatch to the click command group.
if __name__ == '__main__':
    cli()
| {
"content_hash": "56cf8d691aa4f70112eba13791049e84",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 82,
"avg_line_length": 19.12727272727273,
"alnum_prop": 0.6511406844106464,
"repo_name": "gammasky/cta-dc",
"id": "9e79122eb667cdba49f5b8baf8b94d6fc6c3895a",
"size": "1074",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "make.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "603813"
},
{
"name": "Python",
"bytes": "177997"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the shared Creature template for this NPC.

    *kernel* is unused here but part of the template-factory signature —
    presumably required by the template loader; confirm against other
    templates in this directory.
    """
    result = Creature()
    result.template = "object/mobile/shared_dressed_lost_aqualish_soldier_male_01.iff"
    result.attribute_template_id = 9
    result.stfName("npc_name","aqualish_base_male")

    # The markers below are machine-processed; keep them intact.
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
"content_hash": "052a90067cc4ea6442a84046e75d09c2",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 83,
"avg_line_length": 24.615384615384617,
"alnum_prop": 0.703125,
"repo_name": "anhstudios/swganh",
"id": "395483b35d84009cf44704f870d17104734c20ba",
"size": "465",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/mobile/shared_dressed_lost_aqualish_soldier_male_01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
"""demo_uicallback.py - An application that executes Python code from a Java UI
It would be handy to have a Java class that called Python through the Javabridge,
perhaps something like::
public class PythonEnv {
public native void exec(String script);
public native String eval(String script);
};
but there are myriad difficulties - a Python stack uses Javabridge to
create a Java stack which then uses JNI to execute Python on... what stack?
The easiest strategy is to use a Python thread dedicated to executing or
evaluating scripts sent from Java, using that thread's local context to
hold variables. The Python thread communicates with Java in this example using
two SynchronousQueue objects, one to transmit messages from Java to Python
and another to go in the reverse direction. This example shows how a Java UI
can use ActionListener anonymous classes to talk through the queues to Python.
python-javabridge is licensed under the BSD license. See the
accompanying file LICENSE for details.
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2013 Broad Institute
All rights reserved.
"""
import javabridge
import sys
import traceback
def main(args):
javabridge.activate_awt()
script = """
//--------------------------------------
//
// The anonymous callable runs on the thread
// that started Java - that's the rule with AWT.
//
// The callable returns a Java Map whose keys
// have the labels of objects like "qUp" for
// the upward queue. Python can then fetch
// whichever ones it wants and do Java stuff
// with them.
//
//--------------------------------------
new java.util.concurrent.Callable() {
call: function() {
importClass(javax.swing.SpringLayout);
importClass(javax.swing.JFrame);
importClass(javax.swing.JTextField);
importClass(javax.swing.JButton);
importClass(javax.swing.JScrollPane);
importClass(javax.swing.JTextArea);
importClass(java.util.Hashtable);
importClass(java.awt.event.ActionListener);
importClass(java.awt.event.WindowAdapter);
importClass(java.util.concurrent.SynchronousQueue);
d = new Hashtable();
frame = new JFrame("Callbacks in Java");
d.put("frame", frame);
contentPane = frame.getContentPane();
layout = new SpringLayout();
contentPane.setLayout(layout);
textField = new JTextField("'Hello, world.'", 60);
d.put("textField", textField);
contentPane.add(textField);
execButton = new JButton("Exec");
contentPane.add(execButton);
evalButton = new JButton("Eval");
contentPane.add(evalButton);
result = new JTextArea("None");
scrollPane = new JScrollPane(result)
contentPane.add(scrollPane);
d.put("result", result);
//-----------------------------------------------------
//
// The layout is:
//
// [ textField] [execButton] [evalButton]
// [ scrollPane ]
//
//-----------------------------------------------------
layout.putConstraint(SpringLayout.WEST, textField,
5, SpringLayout.WEST, contentPane);
layout.putConstraint(SpringLayout.NORTH, textField,
5, SpringLayout.NORTH, contentPane);
layout.putConstraint(SpringLayout.WEST, execButton,
5, SpringLayout.EAST, textField);
layout.putConstraint(SpringLayout.NORTH, execButton,
0, SpringLayout.NORTH, textField);
layout.putConstraint(SpringLayout.WEST, evalButton,
5, SpringLayout.EAST, execButton);
layout.putConstraint(SpringLayout.NORTH, evalButton,
0, SpringLayout.NORTH, textField);
layout.putConstraint(SpringLayout.NORTH, scrollPane,
5, SpringLayout.SOUTH, textField);
layout.putConstraint(SpringLayout.WEST, scrollPane,
0, SpringLayout.WEST, textField);
layout.putConstraint(SpringLayout.EAST, scrollPane,
0, SpringLayout.EAST, evalButton);
layout.putConstraint(SpringLayout.EAST, contentPane,
5, SpringLayout.EAST, evalButton);
layout.putConstraint(SpringLayout.SOUTH, contentPane,
20, SpringLayout.SOUTH, scrollPane);
//------------------------------------------------
//
// qUp sends messages from Java to Python
// qDown sends messages from Python to Java
//
// The communications protocol is that qUp sends
// a command. For Exec and Eval commands, qUp sends
// text and qDown must send a reply to continue.
// For the Exit command, qUp sends the command and
// Python must dispose of Java
//
//-------------------------------------------------
qUp = new SynchronousQueue();
qDown = new SynchronousQueue();
d.put("qUp", qUp);
d.put("qDown", qDown);
//-----------------------------------------------
//
// Create an action listener that binds the execButton
// action to a function that instructs Python to
// execute the contents of the text field.
//
//-----------------------------------------------
alExec = new ActionListener() {
actionPerformed: function(e) {
qUp.put("Exec");
qUp.put(textField.getText());
result.setText(qDown.take());
}
};
execButton.addActionListener(alExec);
//-----------------------------------------------
//
// Create an action listener that binds the evalButton
// action to a function that instructs Python to
// evaluate the contents of the text field.
//
//-----------------------------------------------
alEval = new ActionListener() {
actionPerformed: function(e) {
qUp.put("Eval");
qUp.put(textField.getText());
result.setText(qDown.take());
}
};
evalButton.addActionListener(alEval);
//-----------------------------------------------
//
// Create a window listener that binds the frame's
// windowClosing action to a function that instructs
// Python to exit.
//
//-----------------------------------------------
wl = new WindowAdapter() {
windowClosing: function(e) {
qUp.put("Exit");
}
};
frame.addWindowListener(wl);
frame.pack();
frame.setVisible(true);
return d;
}
};"""
c = javabridge.run_script(script);
f = javabridge.make_future_task(c)
d = javabridge.execute_future_in_main_thread(f);
d = javabridge.get_map_wrapper(d)
qUp = d["qUp"]
qDown = d["qDown"]
frame = d["frame"]
while True:
cmd = javabridge.run_script("qUp.take();", dict(qUp=qUp))
if cmd == "Exit":
break
text = javabridge.run_script("qUp.take();", dict(qUp=qUp))
if cmd == "Eval":
try:
result = eval(text, globals(), locals())
except Exception as e:
result = "%s\n%s" % (str(e), traceback.format_exc())
except:
result = "What happened?"
else:
try:
exec(text, globals(), locals())
result = "Operation succeeded"
except Exception as e:
result = "%s\n%s" % (str(e), traceback.format_exc())
except:
result = "What happened?"
javabridge.run_script("qDown.put(result);",
dict(qDown=qDown, result = str(result)))
javabridge.run_script("frame.dispose();", dict(frame=frame))
# Script entry point: start the JVM, run the Swing demo UI via main(), and
# tear the JVM down afterwards.
if __name__=="__main__":
    javabridge.start_vm()
    if sys.platform == 'darwin':
        #
        # For Mac, we need to start an event loop
        # on the main thread and run the UI code
        # on a worker thread.
        #
        import threading
        javabridge.mac_run_loop_init()
        class Runme(threading.Thread):
            def run(self):
                # Attach this worker thread to the JVM before any Java calls
                # and always detach on the way out, even on failure.
                javabridge.attach()
                try:
                    main(sys.argv)
                finally:
                    javabridge.detach()
        t = Runme()
        t.start()
        # Blocks the main thread in the OS X run loop while Runme works.
        javabridge.mac_enter_run_loop()
    else:
        #
        # For everyone else, the event loop
        # is run by Java and we do everything
        # on the main thread.
        #
        main(sys.argv)
    javabridge.deactivate_awt()
    javabridge.kill_vm()
| {
"content_hash": "2d7a5a92e74ec64ef169bc8114413330",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 81,
"avg_line_length": 38.676,
"alnum_prop": 0.49870720860481954,
"repo_name": "CellProfiler/python-javabridge",
"id": "929c88e74bdc298296a83d76ffb0f29241fce4df",
"size": "9692",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "demo/demo_uicallback.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "27337"
},
{
"name": "Java",
"bytes": "15113"
},
{
"name": "Python",
"bytes": "1850110"
},
{
"name": "Shell",
"bytes": "1213"
}
],
"symlink_target": ""
} |
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
a built-in HTTP Server and adapters for many third-party WSGI/HTTP-servers and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2011, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
from __future__ import with_statement
__author__ = 'Marcel Hellkamp'
__version__ = '0.11.dev'
__license__ = 'MIT'
# The gevent server adapter needs to patch some modules before they are imported
# This is why we parse the commandline parameters here but handle them later
if __name__ == '__main__':
    # Parse the command line *before* the heavy imports below so that the
    # gevent monkey-patching (last two lines) can run first when requested.
    from optparse import OptionParser
    _cmd_parser = OptionParser(usage="usage: %prog [options] package.module:app")
    _opt = _cmd_parser.add_option
    _opt("--version", action="store_true", help="show version number.")
    _opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
    _opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
    _opt("-p", "--plugin", action="append", help="install additional plugin/s.")
    _opt("--debug", action="store_true", help="start server in debug mode.")
    _opt("--reload", action="store_true", help="auto-reload on file changes.")
    _cmd_options, _cmd_args = _cmd_parser.parse_args()
    if _cmd_options.server and _cmd_options.server.startswith('gevent'):
        # Must happen before any other module imports socket/threading etc.
        import gevent.monkey; gevent.monkey.patch_all()
import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
os, re, subprocess, sys, tempfile, threading, time, urllib, warnings
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
try: from json import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try: from simplejson import dumps as json_dumps, loads as json_lds
except ImportError:
try: from django.utils.simplejson import dumps as json_dumps, loads as json_lds
except ImportError:
def json_dumps(data):
raise ImportError("JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
# We now try to fix 2.5/2.6/3.1/3.2 incompatibilities.
# It ain't pretty but it works... Sorry for the mess.

py = sys.version_info          # interpreter version triple (and more)
py3k = py >= (3,0,0)           # True on any Python 3.x
py25 = py < (2,6,0)            # True on Python 2.5 (deprecated below)

# Workaround for the missing "as" keyword in py3k.
# (Retrieves the in-flight exception without `except X as e` syntax.)
def _e(): return sys.exc_info()[1]

# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (restricts stdout/err attribute access)
try:
    _stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
    _stdout = lambda x: sys.stdout.write(x)
    _stderr = lambda x: sys.stderr.write(x)

# Lots of stdlib and builtin differences.
if py3k:
    import http.client as httplib
    import _thread as thread
    from urllib.parse import urljoin, parse_qsl, SplitResult as UrlSplitResult
    from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
    from http.cookies import SimpleCookie
    from collections import MutableMapping as DictMixin
    import pickle
    from io import BytesIO
    basestring = str           # py2 name aliased so isinstance checks work
    unicode = str              # py2 name aliased for text-type checks
    json_loads = lambda s: json_lds(touni(s))
    callable = lambda x: hasattr(x, '__call__')
    imap = map
else: # 2.x
    import httplib
    import thread
    from urlparse import urljoin, SplitResult as UrlSplitResult
    from urllib import urlencode, quote as urlquote, unquote as urlunquote
    from Cookie import SimpleCookie
    from itertools import imap
    import cPickle as pickle
    from StringIO import StringIO as BytesIO
    if py25:
        msg = "Python 2.5 support may be dropped in future versions of Bottle."
        warnings.warn(msg, DeprecationWarning)
        from cgi import parse_qsl
        from UserDict import DictMixin
        def next(it): return it.next()
        bytes = str
    else: # 2.6, 2.7
        from urlparse import parse_qsl
        from collections import MutableMapping as DictMixin
    json_loads = json_lds
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
    """Coerce *s* to a byte string, encoding text with *enc* (default utf8)."""
    if isinstance(s, unicode):
        return s.encode(enc)
    return bytes(s)
def touni(s, enc='utf8', err='strict'):
    """Coerce *s* to a unicode string, decoding byte strings with *enc*/*err*."""
    if isinstance(s, bytes):
        return s.decode(enc, err)
    return unicode(s)

# "Native" string type: unicode on Python 3, bytes on Python 2.
tonat = touni if py3k else tob
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# 3.1 needs a workaround.
NCTextIOWrapper = None
if (3,0,0) < py < (3,2,0):
    from io import TextIOWrapper
    class NCTextIOWrapper(TextIOWrapper):
        # "NC" = non-closing: close() is a no-op so the wrapped binary
        # buffer stays usable after this wrapper is discarded.
        def close(self): pass # Keep wrapped buffer open.
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
    """Best-effort :func:`functools.update_wrapper` that swallows the
    AttributeError raised for wrappers without writable attributes."""
    try:
        functools.update_wrapper(wrapper, wrapped, *a, **ka)
    except AttributeError:
        pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(message):
    """Emit a DeprecationWarning attributed to the caller's caller."""
    warnings.warn(message, category=DeprecationWarning, stacklevel=3)
def makelist(data):
    """Return *data* as a list: containers are copied element-wise,
    truthy scalars are wrapped, falsy values yield an empty list."""
    if isinstance(data, (tuple, list, set, dict)):
        return list(data)
    return [data] if data else []
class DictProperty(object):
    ''' Property that maps to a key in a local dict-like attribute. '''

    def __init__(self, attr, key=None, read_only=False):
        # attr: name of the dict attribute on the owner instance.
        # key:  key inside that dict (defaults to the getter's name).
        self.attr = attr
        self.key = key
        self.read_only = read_only

    def __call__(self, func):
        # Decorator usage: remember the getter and derive the key from the
        # function name when none was given explicitly.
        functools.update_wrapper(self, func, updated=[])
        self.getter = func
        if not self.key:
            self.key = func.__name__
        return self

    def __get__(self, obj, cls):
        if obj is None:
            return self
        storage = getattr(obj, self.attr)
        # Lazily compute and cache the value inside the storage dict.
        if self.key not in storage:
            storage[self.key] = self.getter(obj)
        return storage[self.key]

    def __set__(self, obj, value):
        if self.read_only:
            raise AttributeError("Read-Only property.")
        getattr(obj, self.attr)[self.key] = value

    def __delete__(self, obj):
        if self.read_only:
            raise AttributeError("Read-Only property.")
        del getattr(obj, self.attr)[self.key]
class cached_property(object):
    ''' A property that is only computed once per instance and then replaces
        itself with an ordinary attribute. Deleting the attribute resets the
        property. '''

    def __init__(self, func):
        self.func = func

    def __get__(self, obj, cls):
        if obj is None:
            return self
        # Compute once, then shadow the descriptor with a plain instance
        # attribute so later reads never reach __get__ again.
        result = self.func(obj)
        obj.__dict__[self.func.__name__] = result
        return result
class lazy_attribute(object):
    ''' A property that caches itself to the class object. '''

    def __init__(self, func):
        # Copy name/doc from the wrapped function; the getter receives the
        # *class*, not an instance.
        functools.update_wrapper(self, func, updated=[])
        self.getter = func

    def __get__(self, obj, cls):
        # Replace the descriptor on the class with the computed value so the
        # getter runs at most once per class.
        value = self.getter(cls)
        setattr(cls, self.__name__, value)
        return value
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
class BottleException(Exception):
    """ A base class for exceptions used by bottle. """
#TODO: This should subclass BaseRequest
class HTTPResponse(BottleException):
    """ Used to break execution and immediately finish the response """

    def __init__(self, output='', status=200, header=None):
        super(BottleException, self).__init__("HTTP Response %d" % status)
        self.status = int(status)
        self.output = output
        # Only build a HeaderDict when headers were actually supplied.
        if header:
            self.headers = HeaderDict(header)
        else:
            self.headers = None

    def apply(self, response):
        """Copy the stored status and any headers onto *response*."""
        if self.headers:
            for name, value in self.headers.allitems():
                response.headers[name] = value
        response.status = self.status
class HTTPError(HTTPResponse):
    """ Used to generate an error page """

    def __init__(self, code=500, output='Unknown Error', exception=None,
                 traceback=None, header=None):
        """Store error context and initialize the underlying response."""
        super(HTTPError, self).__init__(output, code, header)
        # Original exception and formatted traceback, kept for debugging
        # and for the error page template.
        self.exception = exception
        self.traceback = traceback

    def __repr__(self):
        # Render the standard error page for this error.
        return tonat(template(ERROR_PAGE_TEMPLATE, e=self))
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
    """ This is a base class for all routing related exceptions """


class RouteReset(BottleException):
    """ If raised by a plugin or request handler, the route is reset and all
        plugins are re-applied. """


class RouterUnknownModeError(RouteError): pass


class RouteSyntaxError(RouteError):
    """ The route parser found something not supported by this router """


class RouteBuildError(RouteError):
    """ The route could not be built """
class Router(object):
    ''' A Router is an ordered collection of route->target pairs. It is used to
        efficiently match WSGI requests against a number of routes and return
        the first target that satisfies the request. The target may be anything,
        usually a string, ID or callable object. A route consists of a path-rule
        and a HTTP method.

        The path-rule is either a static path (e.g. `/contact`) or a dynamic
        path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
        and details on the matching order are described in docs:`routing`.
    '''

    default_pattern = '[^/]+'
    default_filter = 're'
    #: Sorry for the mess. It works. Trust me.
    #: Matches either the old `:name#regex#` wildcard syntax or the new
    #: `<name:filter:config>` syntax; group 0 counts escaping backslashes.
    rule_syntax = re.compile('(\\\\*)'\
        '(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'\
        '|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'\
        '(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')

    def __init__(self, strict=False):
        self.rules = {} # A {rule: Rule} mapping
        self.builder = {} # A rule/name->build_info mapping
        self.static = {} # Cache for static routes: {path: {method: target}}
        self.dynamic = [] # Cache for dynamic routes. See _compile()
        #: If true, static routes are no longer checked first.
        self.strict_order = strict
        self.filters = {'re': self.re_filter, 'int': self.int_filter,
                        'float': self.float_filter, 'path': self.path_filter}

    # Each filter returns a (regexp, to_python, to_url) triple; the last two
    # may be None when no conversion is needed.
    def re_filter(self, conf):
        return conf or self.default_pattern, None, None

    def int_filter(self, conf):
        return r'-?\d+', int, lambda x: str(int(x))

    def float_filter(self, conf):
        return r'-?[\d.]+', float, lambda x: str(float(x))

    def path_filter(self, conf):
        return r'.+?', None, None

    def add_filter(self, name, func):
        ''' Add a filter. The provided function is called with the configuration
        string as parameter and must return a (regexp, to_python, to_url) tuple.
        The first element is a string, the last two are callables or None. '''
        self.filters[name] = func

    def parse_rule(self, rule):
        ''' Parses a rule into a (name, filter, conf) token stream. If mode is
            None, name contains a static rule part. '''
        offset, prefix = 0, ''
        for match in self.rule_syntax.finditer(rule):
            prefix += rule[offset:match.start()]
            g = match.groups()
            if len(g[0])%2: # Escaped wildcard
                prefix += match.group(0)[len(g[0]):]
                offset = match.end()
                continue
            if prefix: yield prefix, None, None
            # Groups 1-3 hold the old `:name` syntax, 4-6 the `<...>` syntax.
            name, filtr, conf = g[1:4] if not g[2] is None else g[4:7]
            if not filtr: filtr = self.default_filter
            yield name, filtr, conf or None
            offset, prefix = match.end(), ''
        # Emit any trailing static text.
        if offset <= len(rule) or prefix:
            yield prefix+rule[offset:], None, None

    def add(self, rule, method, target, name=None):
        ''' Add a new route or replace the target for an existing route. '''
        if rule in self.rules:
            self.rules[rule][method] = target
            if name: self.builder[name] = self.builder[rule]
            return

        target = self.rules[rule] = {method: target}

        # Build pattern and other structures for dynamic routes
        anons = 0 # Number of anonymous wildcards
        pattern = '' # Regular expression pattern
        filters = [] # Lists of wildcard input filters
        builder = [] # Data structure for the URL builder
        is_static = True
        for key, mode, conf in self.parse_rule(rule):
            if mode:
                is_static = False
                mask, in_filter, out_filter = self.filters[mode](conf)
                if key:
                    pattern += '(?P<%s>%s)' % (key, mask)
                else:
                    pattern += '(?:%s)' % mask
                    key = 'anon%d' % anons; anons += 1
                if in_filter: filters.append((key, in_filter))
                builder.append((key, out_filter or str))
            elif key:
                pattern += re.escape(key)
                builder.append((None, key))
        self.builder[rule] = builder
        if name: self.builder[name] = builder

        if is_static and not self.strict_order:
            self.static[self.build(rule)] = target
            return

        # Strip named/capturing groups so several route patterns can be
        # OR-combined into one big regexp (match.lastindex picks the route).
        def fpat_sub(m):
            return m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:'
        flat_pattern = re.sub(r'(\\*)(\(\?P<[^>]*>|\((?!\?))', fpat_sub, pattern)

        try:
            re_match = re.compile('^(%s)$' % pattern).match
        except re.error:
            raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, _e()))

        def match(path):
            """ Return an url-argument dictionary. """
            url_args = re_match(path).groupdict()
            for name, wildcard_filter in filters:
                try:
                    url_args[name] = wildcard_filter(url_args[name])
                except ValueError:
                    raise HTTPError(400, 'Path has wrong format.')
            return url_args

        try:
            # Try to extend the last combined regexp; re raises
            # AssertionError once the 99-group limit is exceeded.
            combined = '%s|(^%s$)' % (self.dynamic[-1][0].pattern, flat_pattern)
            self.dynamic[-1] = (re.compile(combined), self.dynamic[-1][1])
            self.dynamic[-1][1].append((match, target))
        except (AssertionError, IndexError): # AssertionError: Too many groups
            self.dynamic.append((re.compile('(^%s$)' % flat_pattern),
                                [(match, target)]))
        return match

    def build(self, _name, *anons, **query):
        ''' Build an URL by filling the wildcards in a rule. '''
        builder = self.builder.get(_name)
        if not builder: raise RouteBuildError("No route with that name.", _name)
        try:
            for i, value in enumerate(anons): query['anon%d'%i] = value
            url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder])
            # Leftover keyword arguments become the query string.
            return url if not query else url+'?'+urlencode(query)
        except KeyError:
            raise RouteBuildError('Missing URL argument: %r' % _e().args[0])

    def match(self, environ):
        ''' Return a (target, url_args) tuple or raise HTTPError(400/404/405). '''
        path, targets, urlargs = environ['PATH_INFO'] or '/', None, {}
        if path in self.static:
            targets = self.static[path]
        else:
            for combined, rules in self.dynamic:
                match = combined.match(path)
                if not match: continue
                # lastindex identifies which OR-ed sub-pattern matched.
                getargs, targets = rules[match.lastindex - 1]
                urlargs = getargs(path) if getargs else {}
                break

        if not targets:
            raise HTTPError(404, "Not found: " + repr(environ['PATH_INFO']))
        method = environ['REQUEST_METHOD'].upper()
        if method in targets:
            return targets[method], urlargs
        # HEAD falls back to GET; 'ANY' matches every method.
        if method == 'HEAD' and 'GET' in targets:
            return targets['GET'], urlargs
        if 'ANY' in targets:
            return targets['ANY'], urlargs
        allowed = [verb for verb in targets if verb != 'ANY']
        if 'GET' in allowed and 'HEAD' not in allowed:
            allowed.append('HEAD')
        raise HTTPError(405, "Method not allowed.",
                        header=[('Allow',",".join(allowed))])
class Route(object):
    ''' This class wraps a route callback along with route specific metadata and
        configuration and applies Plugins on demand. It is also responsible for
        turning an URL path rule into a regular expression usable by the Router.
    '''

    def __init__(self, app, rule, method, callback, name=None,
                 plugins=None, skiplist=None, **config):
        #: The application this route is installed to.
        self.app = app
        #: The path-rule string (e.g. ``/wiki/:page``).
        self.rule = rule
        #: The HTTP method as a string (e.g. ``GET``).
        self.method = method
        #: The original callback with no plugins applied. Useful for introspection.
        self.callback = callback
        #: The name of the route (if specified) or ``None``.
        self.name = name or None
        #: A list of route-specific plugins (see :meth:`Bottle.route`).
        self.plugins = plugins or []
        #: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
        self.skiplist = skiplist or []
        #: Additional keyword arguments passed to the :meth:`Bottle.route`
        #: decorator are stored in this dictionary. Used for route-specific
        #: plugin configuration and meta-data.
        self.config = ConfigDict(config)

    def __call__(self, *a, **ka):
        depr("Some APIs changed to return Route() instances instead of"\
             " callables. Make sure to use the Route.call method and not to"\
             " call Route instances directly.")
        return self.call(*a, **ka)

    @cached_property
    def call(self):
        ''' The route callback with all plugins applied. This property is
            created on demand and then cached to speed up subsequent requests.'''
        return self._make_callback()

    def reset(self):
        ''' Forget any cached values. The next time :attr:`call` is accessed,
            all plugins are re-applied. '''
        # cached_property stored the value in the instance dict; drop it.
        self.__dict__.pop('call', None)

    def prepare(self):
        ''' Do all on-demand work immediately (useful for debugging).'''
        self.call

    @property
    def _context(self):
        # Legacy (Plugin API v1) context dictionary.
        depr('Switch to Plugin API v2 and access the Route object directly.')
        return dict(rule=self.rule, method=self.method, callback=self.callback,
                    name=self.name, app=self.app, config=self.config,
                    apply=self.plugins, skip=self.skiplist)

    def all_plugins(self):
        ''' Yield all Plugins affecting this route. '''
        unique = set()
        for p in reversed(self.app.plugins + self.plugins):
            # ``True`` in the skiplist disables every plugin for this route.
            if True in self.skiplist: break
            name = getattr(p, 'name', False)
            # Skip by name, by instance or by type; later (route-level)
            # plugins shadow earlier (app-level) plugins of the same name.
            if name and (name in self.skiplist or name in unique): continue
            if p in self.skiplist or type(p) in self.skiplist: continue
            if name: unique.add(name)
            yield p

    def _make_callback(self):
        # Wrap the raw callback in every applicable plugin, innermost first.
        callback = self.callback
        for plugin in self.all_plugins():
            try:
                if hasattr(plugin, 'apply'):
                    api = getattr(plugin, 'api', 1)
                    context = self if api > 1 else self._context
                    callback = plugin.apply(callback, context)
                else:
                    callback = plugin(callback)
            except RouteReset: # Try again with changed configuration.
                return self._make_callback()
        if not callback is self.callback:
            update_wrapper(callback, self.callback)
        return callback

    def __repr__(self):
        return '<%s %r %r>' % (self.method, self.rule, self.callback)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
def __init__(self, catchall=True, autojson=True):
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
self.catchall = catchall
#: A :cls:`ResourceManager` for application files
self.resources = ResourceManager()
#: A :cls:`ConfigDict` for app specific configuration.
self.config = ConfigDict()
self.config.autojson = autojson
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.error_handler = {}
# Core plugins
self.plugins = [] # List of installed plugins.
self.hooks = HooksPlugin()
self.install(self.hooks)
if self.config.autojson:
self.install(JSONPlugin())
self.install(TemplatePlugin())
def mount(self, prefix, app, **options):
''' Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
root_app.mount('/admin/', admin_app)
:param prefix: path prefix or `mount-point`. If it ends in a slash,
that slash is mandatory.
:param app: an instance of :class:`Bottle` or a WSGI application.
All other parameters are passed to the underlying :meth:`route` call.
'''
if isinstance(app, basestring):
prefix, app = app, prefix
depr('Parameter order of Bottle.mount() changed.') # 0.10
segments = [p for p in prefix.split('/') if p]
if not segments: raise ValueError('Empty path prefix.')
path_depth = len(segments)
def mountpoint_wrapper():
try:
request.path_shift(path_depth)
rs = BaseResponse([], 200)
def start_response(status, header):
rs.status = status
for name, value in header: rs.add_header(name, value)
return rs.body.append
body = app(request.environ, start_response)
body = itertools.chain(rs.body, body)
return HTTPResponse(body, rs.status_code, rs.headers)
finally:
request.path_shift(-path_depth)
options.setdefault('skip', True)
options.setdefault('method', 'ANY')
options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
options['callback'] = mountpoint_wrapper
self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
if not prefix.endswith('/'):
self.route('/' + '/'.join(segments), **options)
def merge(self, routes):
''' Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. '''
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
''' Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
'''
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
''' Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. '''
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def run(self, **kwargs):
''' Calls :func:`run` with the same parameters. '''
run(self, **kwargs)
def reset(self, route=None):
''' Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. '''
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes: route.reset()
if DEBUG:
for route in routes: route.prepare()
self.hooks.trigger('app_reset')
def close(self):
''' Close the application and all installed plugins. '''
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
self.stopped = True
def match(self, environ):
""" Search for a matching route and return a (:class:`Route` , urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
''' Add a route object, but do not change the :data:`Route.app`
attribute.'''
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
def route(self, path=None, method='GET', callback=None, name=None,
apply=None, skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/:name')
def hello(name):
return 'Hello %s' % name
The ``:name`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
# TODO: Documentation and tests
if isinstance(callback, basestring): callback = load(callback)
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback, name=name,
plugins=plugins, skiplist=skiplist, **config)
self.add_route(route)
return callback
return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500):
""" Decorator: Register an output handler for a HTTP error code"""
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. Three hooks
are currently implemented:
- before_request: Executed once before each request
- after_request: Executed once after each request
- app_reset: Called whenever :meth:`reset` is called.
"""
def wrapper(func):
self.hooks.add(name, func)
return func
return wrapper
def handle(self, path, method='GET'):
""" (deprecated) Execute the first matching route callback and return
the result. :exc:`HTTPResponse` exceptions are caught and returned.
If :attr:`Bottle.catchall` is true, other exceptions are caught as
well and returned as :exc:`HTTPError` instances (500).
"""
depr("This method will change semantics in 0.10. Try to avoid it.")
if isinstance(path, dict):
return self._handle(path)
return self._handle({'PATH_INFO': path, 'REQUEST_METHOD': method.upper()})
def _handle(self, environ):
try:
environ['bottle.app'] = self
request.bind(environ)
response.bind()
route, args = self.router.match(environ)
environ['route.handle'] = environ['bottle.route'] = route
environ['route.url_args'] = args
return route.call(**args)
except HTTPResponse:
return _e()
except RouteReset:
route.reset()
return self._handle(environ)
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
stacktrace = format_exc()
environ['wsgi.errors'].write(stacktrace)
return HTTPError(500, "Internal Server Error", _e(), stacktrace)
def _cast(self, out, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status, repr)(out)
if isinstance(out, HTTPResponse):
depr('Error handlers must not return :exc:`HTTPResponse`.') #0.9
return self._cast(out)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.output)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
out = iter(out)
first = next(out)
while not first:
first = next(out)
except StopIteration:
return self._cast('')
except HTTPResponse:
first = _e()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
first = HTTPError(500, 'Unhandled exception', _e(), format_exc())
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first)
if isinstance(first, bytes):
return itertools.chain([first], out)
if isinstance(first, unicode):
return imap(lambda x: x.encode(response.charset),
itertools.chain([first], out))
return self._cast(HTTPError(500, 'Unsupported response type: %s'\
% type(first)))
def wsgi(self, environ, start_response):
    """ The bottle WSGI-interface.

        Handles one request: renders the response body via :meth:`_cast`,
        emits the status line and headers through ``start_response`` and
        returns the body iterable. If :attr:`catchall` is true, any
        unexpected error is turned into a minimal HTML 500 page instead of
        propagating to the server.
    """
    try:
        out = self._cast(self._handle(environ))
        # rfc2616 section 4.3: 1xx/204/304 responses and replies to HEAD
        # requests must not carry a message body.
        if response._status_code in (100, 101, 204, 304)\
        or request.method == 'HEAD':
            if hasattr(out, 'close'): out.close()
            out = []
        # The WSGI spec (PEP 3333) wants the status line as a native str.
        if isinstance(response._status_line, unicode):
            response._status_line = str(response._status_line)
        start_response(response._status_line, list(response.iter_headers()))
        return out
    except (KeyboardInterrupt, SystemExit, MemoryError):
        # Never swallow shutdown signals or memory exhaustion.
        raise
    except Exception:
        if not self.catchall: raise
        # Last-resort error page: normal error handling already failed.
        err = '<h1>Critical error while processing request: %s</h1>' \
              % html_escape(environ.get('PATH_INFO', '/'))
        if DEBUG:
            err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
                   '<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
                   % (html_escape(repr(_e())), html_escape(format_exc()))
        environ['wsgi.errors'].write(err)
        headers = [('Content-Type', 'text/html; charset=UTF-8')]
        start_response('500 INTERNAL SERVER ERROR', headers)
        return [tob(err)]
def __call__(self, environ, start_response):
    """ Make every :class:`Bottle` instance a valid WSGI application.

        Simply delegates to :meth:`wsgi`.
    """
    wsgi_interface = self.wsgi
    return wsgi_interface(environ, start_response)
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
    """ A wrapper for WSGI environment dictionaries that adds a lot of
        convenient access methods and properties. Most of them are read-only.

        Adding new attributes to a request actually adds them to the environ
        dictionary (as 'bottle.request.ext.<name>'). This is the recommended
        way to store and access request-specific data.
    """
    # Only the wrapped environ dict is stored on the instance itself.
    __slots__ = ('environ')

    #: Maximum size of memory buffer for :attr:`body` in bytes.
    MEMFILE_MAX = 102400
    #: Maximum number of GET or POST parameters accepted per request.
    MAX_PARAMS = 100

    def __init__(self, environ=None):
        """ Wrap a WSGI environ dictionary. """
        #: The wrapped WSGI environ dictionary. This is the only real attribute.
        #: All other attributes actually are read-only properties.
        self.environ = {} if environ is None else environ
        self.environ['bottle.request'] = self

    @DictProperty('environ', 'bottle.app', read_only=True)
    def app(self):
        ''' Bottle application handling this request. '''
        # Only reached when the environ has no 'bottle.app' entry cached.
        raise RuntimeError('This request is not connected to an application.')

    @property
    def path(self):
        ''' The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
            broken clients and avoid the "empty path" edge case). '''
        return '/' + self.environ.get('PATH_INFO','').lstrip('/')

    @property
    def method(self):
        ''' The ``REQUEST_METHOD`` value as an uppercase string. '''
        return self.environ.get('REQUEST_METHOD', 'GET').upper()

    @DictProperty('environ', 'bottle.request.headers', read_only=True)
    def headers(self):
        ''' A :class:`WSGIHeaderDict` that provides case-insensitive access to
            HTTP request headers. '''
        return WSGIHeaderDict(self.environ)

    def get_header(self, name, default=None):
        ''' Return the value of a request header, or a given default value. '''
        return self.headers.get(name, default)

    @DictProperty('environ', 'bottle.request.cookies', read_only=True)
    def cookies(self):
        """ Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
            decoded. Use :meth:`get_cookie` if you expect signed cookies. """
        cookies = SimpleCookie(self.environ.get('HTTP_COOKIE',''))
        # Cap the number of parsed cookies (see MAX_PARAMS).
        cookies = list(cookies.values())[:self.MAX_PARAMS]
        return FormsDict((c.key, c.value) for c in cookies)

    def get_cookie(self, key, default=None, secret=None):
        """ Return the content of a cookie. To read a `Signed Cookie`, the
            `secret` must match the one used to create the cookie (see
            :meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
            cookie or wrong signature), return a default value. """
        value = self.cookies.get(key)
        if secret and value:
            dec = cookie_decode(value, secret) # (key, value) tuple or None
            return dec[1] if dec and dec[0] == key else default
        return value or default

    @DictProperty('environ', 'bottle.request.query', read_only=True)
    def query(self):
        ''' The :attr:`query_string` parsed into a :class:`FormsDict`. These
            values are sometimes called "URL arguments" or "GET parameters", but
            not to be confused with "URL wildcards" as they are provided by the
            :class:`Router`. '''
        pairs = parse_qsl(self.query_string, keep_blank_values=True)
        get = self.environ['bottle.get'] = FormsDict()
        for key, value in pairs[:self.MAX_PARAMS]:
            get[key] = value
        return get

    @DictProperty('environ', 'bottle.request.forms', read_only=True)
    def forms(self):
        """ Form values parsed from an `url-encoded` or `multipart/form-data`
            encoded POST or PUT request body. The result is returned as a
            :class:`FormsDict`. All keys and values are strings. File uploads
            are stored separately in :attr:`files`. """
        forms = FormsDict()
        for name, item in self.POST.allitems():
            # Items with a 'filename' attribute are file uploads, not fields.
            if not hasattr(item, 'filename'):
                forms[name] = item
        return forms

    @DictProperty('environ', 'bottle.request.params', read_only=True)
    def params(self):
        """ A :class:`FormsDict` with the combined values of :attr:`query` and
            :attr:`forms`. File uploads are stored in :attr:`files`. """
        params = FormsDict()
        for key, value in self.query.allitems():
            params[key] = value
        # Form values are appended last, so they shadow query parameters.
        for key, value in self.forms.allitems():
            params[key] = value
        return params

    @DictProperty('environ', 'bottle.request.files', read_only=True)
    def files(self):
        """ File uploads parsed from an `url-encoded` or `multipart/form-data`
            encoded POST or PUT request body. The values are instances of
            :class:`cgi.FieldStorage`. The most important attributes are:

            filename
                The filename, if specified; otherwise None; this is the client
                side filename, *not* the file name on which it is stored (that's
                a temporary file you don't deal with)
            file
                The file(-like) object from which you can read the data.
            value
                The value as a *string*; for file uploads, this transparently
                reads the file every time you request the value. Do not do this
                on big files.
        """
        files = FormsDict()
        for name, item in self.POST.allitems():
            if hasattr(item, 'filename'):
                files[name] = item
        return files

    @DictProperty('environ', 'bottle.request.json', read_only=True)
    def json(self):
        ''' If the ``Content-Type`` header is ``application/json``, this
            property holds the parsed content of the request body. Only requests
            smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
            exhaustion. '''
        if 'application/json' in self.environ.get('CONTENT_TYPE', '') \
        and 0 < self.content_length < self.MEMFILE_MAX:
            return json_loads(self.body.read(self.MEMFILE_MAX))
        return None

    @DictProperty('environ', 'bottle.request.body', read_only=True)
    def _body(self):
        # Buffer the raw body from wsgi.input: in memory for small bodies,
        # in a temporary file once content_length reaches MEMFILE_MAX.
        maxread = max(0, self.content_length)
        stream = self.environ['wsgi.input']
        body = BytesIO() if maxread < self.MEMFILE_MAX else TemporaryFile(mode='w+b')
        while maxread > 0:
            part = stream.read(min(maxread, self.MEMFILE_MAX))
            if not part: break
            body.write(part)
            maxread -= len(part)
        # Replace the one-shot input stream with the seekable buffer.
        self.environ['wsgi.input'] = body
        body.seek(0)
        return body

    @property
    def body(self):
        """ The HTTP request body as a seek-able file-like object. Depending on
            :attr:`MEMFILE_MAX`, this is either a temporary file or a
            :class:`io.BytesIO` instance. Accessing this property for the first
            time reads and replaces the ``wsgi.input`` environ variable.
            Subsequent accesses just do a `seek(0)` on the file object. """
        self._body.seek(0)
        return self._body

    #: An alias for :attr:`query`.
    GET = query

    @DictProperty('environ', 'bottle.request.post', read_only=True)
    def POST(self):
        """ The values of :attr:`forms` and :attr:`files` combined into a single
            :class:`FormsDict`. Values are either strings (form values) or
            instances of :class:`cgi.FieldStorage` (file uploads).
        """
        post = FormsDict()
        safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
        for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
            if key in self.environ: safe_env[key] = self.environ[key]
        if NCTextIOWrapper:
            # Text wrapper around the body using latin1 (ISO-8859-1).
            fb = NCTextIOWrapper(self.body, encoding='ISO-8859-1', newline='\n')
        else:
            fb = self.body
        data = cgi.FieldStorage(fp=fb, environ=safe_env, keep_blank_values=True)
        for item in (data.list or [])[:self.MAX_PARAMS]:
            post[item.name] = item if item.filename else item.value
        return post

    @property
    def COOKIES(self):
        ''' Alias for :attr:`cookies` (deprecated). '''
        depr('BaseRequest.COOKIES was renamed to BaseRequest.cookies (lowercase).')
        return self.cookies

    @property
    def url(self):
        """ The full request URI including hostname and scheme. If your app
            lives behind a reverse proxy or load balancer and you get confusing
            results, make sure that the ``X-Forwarded-Host`` header is set
            correctly. """
        return self.urlparts.geturl()

    @DictProperty('environ', 'bottle.request.urlparts', read_only=True)
    def urlparts(self):
        ''' The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
            The tuple contains (scheme, host, path, query_string and fragment),
            but the fragment is always empty because it is not visible to the
            server. '''
        env = self.environ
        http = env.get('wsgi.url_scheme', 'http')
        host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
        if not host:
            # HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
            host = env.get('SERVER_NAME', '127.0.0.1')
            port = env.get('SERVER_PORT')
            # Only append the port when it is not the scheme's default.
            if port and port != ('80' if http == 'http' else '443'):
                host += ':' + port
        path = urlquote(self.fullpath)
        return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')

    @property
    def fullpath(self):
        """ Request path including :attr:`script_name` (if present). """
        return urljoin(self.script_name, self.path.lstrip('/'))

    @property
    def query_string(self):
        """ The raw :attr:`query` part of the URL (everything in between ``?``
            and ``#``) as a string. """
        return self.environ.get('QUERY_STRING', '')

    @property
    def script_name(self):
        ''' The initial portion of the URL's `path` that was removed by a higher
            level (server or routing middleware) before the application was
            called. This script path is returned with leading and trailing
            slashes. '''
        script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
        return '/' + script_name + '/' if script_name else '/'

    def path_shift(self, shift=1):
        ''' Shift path segments from :attr:`path` to :attr:`script_name` and
            vice versa.

            :param shift: The number of path segments to shift. May be negative
                          to change the shift direction. (default: 1)
        '''
        script = self.environ.get('SCRIPT_NAME','/')
        # Assigning via self[...] also clears the dependent cached values.
        self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift)

    @property
    def content_length(self):
        ''' The request body length as an integer. The client is responsible to
            set this header. Otherwise, the real length of the body is unknown
            and -1 is returned. In this case, :attr:`body` will be empty. '''
        return int(self.environ.get('CONTENT_LENGTH') or -1)

    @property
    def is_xhr(self):
        ''' True if the request was triggered by a XMLHttpRequest. This only
            works with JavaScript libraries that support the `X-Requested-With`
            header (most of the popular libraries do). '''
        requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','')
        return requested_with.lower() == 'xmlhttprequest'

    @property
    def is_ajax(self):
        ''' Alias for :attr:`is_xhr`. "Ajax" is not the right term. '''
        return self.is_xhr

    @property
    def auth(self):
        """ HTTP authentication data as a (user, password) tuple. This
            implementation currently supports basic (not digest) authentication
            only. If the authentication happened at a higher level (e.g. in the
            front web-server or a middleware), the password field is None, but
            the user field is looked up from the ``REMOTE_USER`` environ
            variable. On any errors, None is returned. """
        basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
        if basic: return basic
        ruser = self.environ.get('REMOTE_USER')
        if ruser: return (ruser, None)
        return None

    @property
    def remote_route(self):
        """ A list of all IPs that were involved in this request, starting with
            the client IP and followed by zero or more proxies. This does only
            work if all proxies support the ``X-Forwarded-For`` header. Note
            that this information can be forged by malicious clients. """
        proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
        if proxy: return [ip.strip() for ip in proxy.split(',')]
        remote = self.environ.get('REMOTE_ADDR')
        return [remote] if remote else []

    @property
    def remote_addr(self):
        """ The client IP as a string. Note that this information can be forged
            by malicious clients. """
        route = self.remote_route
        return route[0] if route else None

    def copy(self):
        """ Return a new :class:`Request` with a shallow :attr:`environ` copy. """
        return Request(self.environ.copy())

    # Minimal dict-like interface delegating to the wrapped environ dict.
    def get(self, value, default=None): return self.environ.get(value, default)
    def __getitem__(self, key): return self.environ[key]
    def __delitem__(self, key): self[key] = ""; del(self.environ[key])
    def __iter__(self): return iter(self.environ)
    def __len__(self): return len(self.environ)
    def keys(self): return self.environ.keys()

    def __setitem__(self, key, value):
        """ Change an environ value and clear all caches that depend on it. """
        if self.environ.get('bottle.request.readonly'):
            raise KeyError('The environ dictionary is read-only.')
        self.environ[key] = value
        todelete = ()
        if key == 'wsgi.input':
            todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
        elif key == 'QUERY_STRING':
            todelete = ('query', 'params')
        elif key.startswith('HTTP_'):
            todelete = ('headers', 'cookies')
        # Drop the cached DictProperty values so they are recomputed lazily.
        for key in todelete:
            self.environ.pop('bottle.request.'+key, None)

    def __repr__(self):
        return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)

    def __getattr__(self, name):
        ''' Search in self.environ for additional user defined attributes. '''
        try:
            var = self.environ['bottle.request.ext.%s'%name]
            # Support descriptor-style (computed) extension attributes.
            return var.__get__(self) if hasattr(var, '__get__') else var
        except KeyError:
            raise AttributeError('Attribute %r not defined.' % name)

    def __setattr__(self, name, value):
        # 'environ' is the only real slot; everything else lands in environ.
        if name == 'environ': return object.__setattr__(self, name, value)
        self.environ['bottle.request.ext.%s'%name] = value
def _hkey(s):
    """ Normalize a header name to Title-Case with dashes. """
    titled = s.title()
    return titled.replace('_', '-')
class HeaderProperty(object):
    """ Descriptor that maps an attribute to a named entry in ``obj.headers``.

        Optional ``reader``/``writer`` callables convert the raw header value
        on read and on write; ``default`` is used when the header is missing.
    """

    def __init__(self, name, reader=None, writer=str, default=''):
        self.name = name
        self.default = default
        self.reader = reader
        self.writer = writer
        self.__doc__ = 'Current value of the %r header.' % name.title()

    def __get__(self, obj, cls):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        raw = obj.headers.get(self.name, self.default)
        if self.reader:
            return self.reader(raw)
        return raw

    def __set__(self, obj, value):
        obj.headers[self.name] = self.writer(value)

    def __delete__(self, obj):
        del obj.headers[self.name]
class BaseResponse(object):
    """ Storage class for a response body as well as headers and cookies.

        This class does support dict-like case-insensitive item-access to
        headers, but is NOT a dict. Most notably, iterating over a response
        yields parts of the body and not the headers.
    """
    default_status = 200
    default_content_type = 'text/html; charset=UTF-8'

    # Header blacklist for specific response codes
    # (rfc2616 section 10.2.3 and 10.3.5)
    bad_headers = {
        204: set(('Content-Type',)),
        304: set(('Allow', 'Content-Encoding', 'Content-Language',
                  'Content-Length', 'Content-Range', 'Content-Type',
                  'Content-Md5', 'Last-Modified'))}

    def __init__(self, body='', status=None, **headers):
        self._status_line = None
        self._status_code = None
        self._cookies = None
        # Header storage maps Title-Cased names to *lists* of values.
        self._headers = {'Content-Type': [self.default_content_type]}
        self.body = body
        self.status = status or self.default_status
        if headers:
            for name, value in headers.items():
                self[name] = value

    def copy(self):
        ''' Returns a copy of self. '''
        copy = Response()
        copy.status = self.status
        # Copy the value lists so the two responses do not share state.
        copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
        return copy

    def __iter__(self):
        # Iterating a response yields body parts, not headers.
        return iter(self.body)

    def close(self):
        if hasattr(self.body, 'close'):
            self.body.close()

    @property
    def status_line(self):
        ''' The HTTP status line as a string (e.g. ``404 Not Found``).'''
        return self._status_line

    @property
    def status_code(self):
        ''' The HTTP status code as an integer (e.g. 404).'''
        return self._status_code

    def _set_status(self, status):
        if isinstance(status, int):
            # Look up the standard reason phrase for the numeric code.
            code, status = status, _HTTP_STATUS_LINES.get(status)
        elif ' ' in status:
            status = status.strip()
            code = int(status.split()[0])
        else:
            raise ValueError('String status line without a reason phrase.')
        if not 100 <= code <= 999: raise ValueError('Status code out of range.')
        self._status_code = code
        self._status_line = status or ('%d Unknown' % code)

    def _get_status(self):
        return self._status_line

    status = property(_get_status, _set_status, None,
        ''' A writeable property to change the HTTP response status. It accepts
            either a numeric code (100-999) or a string with a custom reason
            phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
            :data:`status_code` are updated accordingly. The return value is
            always a status string. ''')
    # The raw accessors are not part of the public API.
    del _get_status, _set_status

    @property
    def headers(self):
        ''' An instance of :class:`HeaderDict`, a case-insensitive dict-like
            view on the response headers. '''
        # The HeaderDict view shares the underlying self._headers storage.
        self.__dict__['headers'] = hdict = HeaderDict()
        hdict.dict = self._headers
        return hdict

    # Dict-like, case-insensitive access to single header values.
    def __contains__(self, name): return _hkey(name) in self._headers
    def __delitem__(self, name): del self._headers[_hkey(name)]
    def __getitem__(self, name): return self._headers[_hkey(name)][-1]
    def __setitem__(self, name, value): self._headers[_hkey(name)] = [str(value)]

    def get_header(self, name, default=None):
        ''' Return the value of a previously defined header. If there is no
            header with that name, return a default value. '''
        return self._headers.get(_hkey(name), [default])[-1]

    def set_header(self, name, value, append=False):
        ''' Create a new response header, replacing any previously defined
            headers with the same name. '''
        if append:
            self.add_header(name, value)
        else:
            self._headers[_hkey(name)] = [str(value)]

    def add_header(self, name, value):
        ''' Add an additional response header, not removing duplicates. '''
        self._headers.setdefault(_hkey(name), []).append(str(value))

    def iter_headers(self):
        ''' Yield (header, value) tuples, skipping headers that are not
            allowed with the current response status code. '''
        headers = self._headers.items()
        bad_headers = self.bad_headers.get(self._status_code)
        if bad_headers:
            headers = [h for h in headers if h[0] not in bad_headers]
        for name, values in headers:
            for value in values:
                yield name, value
        # Cookies are emitted as one Set-Cookie header per morsel.
        if self._cookies:
            for c in self._cookies.values():
                yield 'Set-Cookie', c.OutputString()

    def wsgiheader(self):
        depr('The wsgiheader method is deprecated. See headerlist.') #0.10
        return self.headerlist

    @property
    def headerlist(self):
        ''' WSGI conform list of (header, value) tuples. '''
        return list(self.iter_headers())

    content_type = HeaderProperty('Content-Type')
    content_length = HeaderProperty('Content-Length', reader=int)

    @property
    def charset(self):
        """ Return the charset specified in the content-type header (default: utf8). """
        if 'charset=' in self.content_type:
            return self.content_type.split('charset=')[-1].split(';')[0].strip()
        return 'UTF-8'

    @property
    def COOKIES(self):
        """ A dict-like SimpleCookie instance. This should not be used directly.
            See :meth:`set_cookie`. """
        depr('The COOKIES dict is deprecated. Use `set_cookie()` instead.') # 0.10
        if not self._cookies:
            self._cookies = SimpleCookie()
        return self._cookies

    def set_cookie(self, name, value, secret=None, **options):
        ''' Create a new cookie or replace an old one. If the `secret` parameter is
            set, create a `Signed Cookie` (described below).

            :param name: the name of the cookie.
            :param value: the value of the cookie.
            :param secret: a signature key required for signed cookies.

            Additionally, this method accepts all RFC 2109 attributes that are
            supported by :class:`cookie.Morsel`, including:

            :param max_age: maximum age in seconds. (default: None)
            :param expires: a datetime object or UNIX timestamp. (default: None)
            :param domain: the domain that is allowed to read the cookie.
              (default: current domain)
            :param path: limits the cookie to a given path (default: current path)
            :param secure: limit the cookie to HTTPS connections (default: off).
            :param httponly: prevents client-side javascript to read this cookie
              (default: off, requires Python 2.6 or newer).

            If neither `expires` nor `max_age` is set (default), the cookie will
            expire at the end of the browser session (as soon as the browser
            window is closed).

            Signed cookies may store any pickle-able object and are
            cryptographically signed to prevent manipulation. Keep in mind that
            cookies are limited to 4kb in most browsers.

            Warning: Signed cookies are not encrypted (the client can still see
            the content) and not copy-protected (the client can restore an old
            cookie). The main intention is to make pickling and unpickling
            safe, not to store secret information at client side.
        '''
        if not self._cookies:
            self._cookies = SimpleCookie()
        if secret:
            # Encode and sign the (name, value) pair as an opaque string.
            value = touni(cookie_encode((name, value), secret))
        elif not isinstance(value, basestring):
            raise TypeError('Secret key missing for non-string Cookie.')
        # See the 4kb browser limit mentioned in the docstring above.
        if len(value) > 4096: raise ValueError('Cookie value to long.')
        self._cookies[name] = value
        for key, value in options.items():
            # Normalize max_age/expires into the string forms SimpleCookie
            # expects before storing the morsel attribute.
            if key == 'max_age':
                if isinstance(value, timedelta):
                    value = value.seconds + value.days * 24 * 3600
            if key == 'expires':
                if isinstance(value, (datedate, datetime)):
                    value = value.timetuple()
                elif isinstance(value, (int, float)):
                    value = time.gmtime(value)
                value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
            self._cookies[name][key.replace('_', '-')] = value

    def delete_cookie(self, key, **kwargs):
        ''' Delete a cookie. Be sure to use the same `domain` and `path`
            settings as used to create the cookie. '''
        # Deleting means overwriting with an already-expired empty cookie.
        kwargs['max_age'] = -1
        kwargs['expires'] = 0
        self.set_cookie(key, '', **kwargs)

    def __repr__(self):
        out = ''
        for name, value in self.headerlist:
            out += '%s: %s\n' % (name.title(), value.strip())
        return out
#: Thread-local storage for :class:`LocalRequest` and :class:`LocalResponse`
#: attributes.
_lctx = threading.local()


def local_property(name):
    """ Build a property whose value lives in the thread-local :data:`_lctx`
        under the attribute *name*. """

    def fget(self):
        return getattr(_lctx, name)

    def fset(self, value):
        setattr(_lctx, name, value)

    def fdel(self):
        delattr(_lctx, name)

    doc = 'Thread-local property stored in :data:`_lctx.%s`' % name
    return property(fget, fset, fdel, doc)
class LocalRequest(BaseRequest):
    ''' A thread-local subclass of :class:`BaseRequest` with a different
        set of attributes for each thread. There is usually only one global
        instance of this class (:data:`request`). If accessed during a
        request/response cycle, this instance always refers to the *current*
        request (even on a multithreaded server). '''
    #: Re-binding the request is simply re-running ``__init__``.
    bind = BaseRequest.__init__
    #: The wrapped environ is stored per-thread via :func:`local_property`.
    environ = local_property('request_environ')
class LocalResponse(BaseResponse):
    ''' A thread-local subclass of :class:`BaseResponse` with a different
        set of attributes for each thread. There is usually only one global
        instance of this class (:data:`response`). Its attributes are used
        to build the HTTP response at the end of the request/response cycle.
    '''
    #: Re-binding the response is simply re-running ``__init__``.
    bind = BaseResponse.__init__
    # All mutable response state is kept in thread-local storage so the
    # single global instance can be shared safely between threads.
    _status_line = local_property('response_status_line')
    _status_code = local_property('response_status_code')
    _cookies = local_property('response_cookies')
    _headers = local_property('response_headers')
    body = local_property('response_body')
# Backwards-compatible module-level aliases (Bottle 0.9 API).
Response = LocalResponse # BC 0.9
Request = LocalRequest # BC 0.9
###############################################################################
# Plugins ######################################################################
###############################################################################
# Exception type for plugin-related failures (see the Plugins section).
class PluginError(BottleException): pass
class JSONPlugin(object):
    """ Plugin that turns ``dict`` return values of route callbacks into a
        JSON response body and sets the Content-Type header accordingly.
    """
    name = 'json'
    api = 2

    def __init__(self, json_dumps=json_dumps):
        self.json_dumps = json_dumps

    def apply(self, callback, context):
        """ Wrap *callback* so that dict return values are JSON-encoded. """
        dumps = self.json_dumps
        if not dumps:
            return callback

        def json_wrapper(*args, **kwargs):
            result = callback(*args, **kwargs)
            if not isinstance(result, dict):
                return result
            # Serialize first: an encoding failure must propagate *before*
            # the Content-Type header is changed.
            encoded = dumps(result)
            response.content_type = 'application/json'
            return encoded

        return json_wrapper
class HooksPlugin(object):
    """ Plugin that runs registered callbacks before and after each request.

        Hooks are managed with :meth:`add` and :meth:`remove`; route wrappers
        are only installed while at least one request-cycle hook exists.
    """
    name = 'hooks'
    api = 2

    _names = 'before_request', 'after_request', 'app_reset'

    def __init__(self):
        self.hooks = dict((name, []) for name in self._names)
        self.app = None

    def _empty(self):
        # Only the two request-cycle hooks matter for route wrapping.
        has_hooks = self.hooks['before_request'] or self.hooks['after_request']
        return not has_hooks

    def setup(self, app):
        self.app = app

    def add(self, name, func):
        ''' Attach a callback to a hook. '''
        had_none = self._empty()
        self.hooks.setdefault(name, []).append(func)
        # Installing the first hook changes route wrappers: reset the app.
        if self.app and had_none and not self._empty():
            self.app.reset()

    def remove(self, name, func):
        ''' Remove a callback from a hook. '''
        had_some = not self._empty()
        if name in self.hooks and func in self.hooks[name]:
            self.hooks[name].remove(func)
        # Removing the last hook changes route wrappers: reset the app.
        if self.app and had_some and self._empty():
            self.app.reset()

    def trigger(self, name, *a, **ka):
        ''' Trigger a hook and return a list of results. '''
        callbacks = self.hooks[name]
        if ka.pop('reversed', False):
            callbacks = callbacks[::-1]
        results = []
        for cb in callbacks:
            results.append(cb(*a, **ka))
        return results

    def apply(self, callback, context):
        if self._empty():
            return callback

        def hook_wrapper(*a, **ka):
            self.trigger('before_request')
            rv = callback(*a, **ka)
            # after_request hooks run in reverse registration order.
            self.trigger('after_request', reversed=True)
            return rv

        return hook_wrapper
class TemplatePlugin(object):
    ''' This plugin applies the :func:`view` decorator to all routes with a
        `template` config parameter. If the parameter is a tuple, the second
        element must be a dict with additional options (e.g. `template_engine`)
        or default variables for the template. '''
    name = 'template'
    api = 2

    def apply(self, callback, route):
        conf = route.config.get('template')
        # (name, options) pair: forward the options dict to :func:`view`.
        if isinstance(conf, (tuple, list)) and len(conf) == 2:
            return view(conf[0], **conf[1])(callback)
        if isinstance(conf, str):
            if 'template_opts' in route.config:
                depr('The `template_opts` parameter is deprecated.') #0.9
                return view(conf, **route.config['template_opts'])(callback)
            return view(conf)(callback)
        # No usable template configuration: leave the callback untouched.
        return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
    # PEP 302 meta-path hook: implements find_module()/load_module().
    # NOTE(review): uses the `imp` module, which is deprecated on modern
    # Python 3 — fine for the 2/3-compat era this file targets.
    def __init__(self, name, impmask):
        ''' Create a virtual package that redirects imports (see PEP 302).

            :param name: the dotted name of the virtual package.
            :param impmask: a ``%s`` format mask used to build the real
                            module name from the requested submodule name.
        '''
        self.name = name
        self.impmask = impmask
        # Reuse an existing module object if one is already registered.
        self.module = sys.modules.setdefault(name, imp.new_module(name))
        self.module.__dict__.update({'__file__': __file__, '__path__': [],
                                     '__all__': [], '__loader__': self})
        # Registering on sys.meta_path activates this finder globally.
        sys.meta_path.append(self)

    def find_module(self, fullname, path=None):
        # Only claim direct submodules of the virtual package.
        if '.' not in fullname: return
        packname, modname = fullname.rsplit('.', 1)
        if packname != self.name: return
        return self

    def load_module(self, fullname):
        if fullname in sys.modules: return sys.modules[fullname]
        packname, modname = fullname.rsplit('.', 1)
        realname = self.impmask % modname
        __import__(realname)
        # Alias the real module under the virtual name as well.
        module = sys.modules[fullname] = sys.modules[realname]
        setattr(self.module, modname, module)
        module.__loader__ = self
        return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
    """ This dict stores multiple values per key, but behaves exactly like a
        normal dict in that it returns only the newest value for any given key.
        There are special methods available to access the full list of values.
    """

    def __init__(self, *a, **k):
        # Internal storage maps each key to a *list* of values.
        self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())

    def __len__(self): return len(self.dict)
    def __iter__(self): return iter(self.dict)
    def __contains__(self, key): return key in self.dict
    def __delitem__(self, key): del self.dict[key]
    # Plain item access returns the newest (last appended) value.
    def __getitem__(self, key): return self.dict[key][-1]
    def __setitem__(self, key, value): self.append(key, value)
    def keys(self): return self.dict.keys()

    if py3k:
        # Python 3: the accessors are generators and the historical
        # iter* names are plain aliases of them.
        def values(self): return (v[-1] for v in self.dict.values())
        def items(self): return ((k, v[-1]) for k, v in self.dict.items())
        def allitems(self):
            return ((k, v) for k, vl in self.dict.items() for v in vl)
        iterkeys = keys
        itervalues = values
        iteritems = items
        iterallitems = allitems
    else:
        # Python 2: values()/items()/allitems() return lists, the iter*
        # variants return lazy iterators.
        def values(self): return [v[-1] for v in self.dict.values()]
        def items(self): return [(k, v[-1]) for k, v in self.dict.items()]
        def iterkeys(self): return self.dict.iterkeys()
        def itervalues(self): return (v[-1] for v in self.dict.itervalues())
        def iteritems(self):
            return ((k, v[-1]) for k, v in self.dict.iteritems())
        def iterallitems(self):
            return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
        def allitems(self):
            return [(k, v) for k, vl in self.dict.iteritems() for v in vl]

    def get(self, key, default=None, index=-1, type=None):
        ''' Return the most recent value for a key.

            :param default: The default value to be returned if the key is not
                   present or the type conversion fails.
            :param index: An index for the list of available values.
            :param type: If defined, this callable is used to cast the value
                   into a specific type. Exceptions are suppressed and result in
                   the default value to be returned.
        '''
        try:
            val = self.dict[key][index]
            return type(val) if type else val
        except Exception:
            # Missing key, bad index or failed conversion -> default.
            pass
        return default

    def append(self, key, value):
        ''' Add a new value to the list of values for this key. '''
        self.dict.setdefault(key, []).append(value)

    def replace(self, key, value):
        ''' Replace the list of values with a single value. '''
        self.dict[key] = [value]

    def getall(self, key):
        ''' Return a (possibly empty) list of values for a key. '''
        return self.dict.get(key) or []

    #: Aliases for WTForms to mimic other multi-dict APIs (Django)
    getone = get
    getlist = getall
class FormsDict(MultiDict):
    ''' This :class:`MultiDict` subclass is used to store request form data.
        Additionally to the normal dict-like item access methods (which return
        unmodified data as native strings), this container also supports
        attribute-like access to its values. Attributes are automatically de-
        or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
        attributes default to an empty string. '''

    #: Encoding used for attribute values.
    input_encoding = 'utf8'
    #: If true (default), unicode strings are first encoded with `latin1`
    #: and then decoded to match :attr:`input_encoding`.
    recode_unicode = True

    def _fix(self, s, encoding=None):
        # Re-code a single key or value to the configured encoding.
        if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
            s = s.encode('latin1')
        if isinstance(s, bytes): # Python 2 WSGI
            return s.decode(encoding or self.input_encoding)
        return s

    def decode(self, encoding=None):
        ''' Returns a copy with all keys and values de- or recoded to match
            :attr:`input_encoding`. Some libraries (e.g. WTForms) want a
            unicode dictionary. '''
        copy = FormsDict()
        enc = copy.input_encoding = encoding or self.input_encoding
        # The copy already holds decoded values; never recode them again.
        copy.recode_unicode = False
        for key, value in self.allitems():
            copy.append(self._fix(key, enc), self._fix(value, enc))
        return copy

    def getunicode(self, name, default=None, encoding=None):
        # Return the decoded value, or `default` on missing keys or
        # undecodable values.
        try:
            return self._fix(self[name], encoding)
        except (UnicodeError, KeyError):
            return default

    def __getattr__(self, name, default=unicode()):
        # Without this guard, pickle generates a cryptic TypeError:
        if name.startswith('__') and name.endswith('__'):
            return super(FormsDict, self).__getattr__(name)
        return self.getunicode(name, default=default)
class HeaderDict(MultiDict):
    """ A case-insensitive version of :class:`MultiDict` that defaults to
        replace the old value instead of appending it. """

    def __init__(self, *a, **ka):
        self.dict = {}
        if a or ka:
            self.update(*a, **ka)

    def __contains__(self, key):
        return _hkey(key) in self.dict

    def __delitem__(self, key):
        del self.dict[_hkey(key)]

    def __getitem__(self, key):
        # Newest value wins, matching MultiDict semantics.
        return self.dict[_hkey(key)][-1]

    def __setitem__(self, key, value):
        # Unlike MultiDict, assignment *replaces* existing values.
        self.dict[_hkey(key)] = [str(value)]

    def append(self, key, value):
        values = self.dict.setdefault(_hkey(key), [])
        values.append(str(value))

    def replace(self, key, value):
        self.dict[_hkey(key)] = [str(value)]

    def getall(self, key):
        return self.dict.get(_hkey(key)) or []

    def get(self, key, default=None, index=-1):
        return MultiDict.get(self, _hkey(key), default, index)

    def filter(self, names):
        # Drop all of the given header names (case-insensitive) if present.
        for name in [_hkey(n) for n in names]:
            if name in self.dict:
                del self.dict[name]
class WSGIHeaderDict(DictMixin):
    ''' This dict-like class wraps a WSGI environ dict and provides convenient
        access to HTTP_* fields. Keys and values are native strings
        (2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
        environment contains non-native string values, these are de- or encoded
        using a lossless 'latin1' character set.
        The API will remain stable even on changes to the relevant PEPs.
        Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
        that uses non-native strings.)
    '''
    #: List of keys that do not have a 'HTTP_' prefix.
    cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
    def __init__(self, environ):
        self.environ = environ
    def _ekey(self, key):
        ''' Translate header field name to CGI/WSGI environ key. '''
        normalized = key.replace('-', '_').upper()
        if normalized in self.cgikeys:
            return normalized
        return 'HTTP_' + normalized
    def raw(self, key, default=None):
        ''' Return the header value as is (may be bytes or unicode). '''
        return self.environ.get(self._ekey(key), default)
    def __getitem__(self, key):
        # Always hand out a native string, latin1 being lossless.
        return tonat(self.environ[self._ekey(key)], 'latin1')
    def __setitem__(self, key, value):
        raise TypeError("%s is read-only." % self.__class__)
    def __delitem__(self, key):
        raise TypeError("%s is read-only." % self.__class__)
    def __iter__(self):
        # Yield header names in their conventional Hyphen-Case form.
        for envkey in self.environ:
            if envkey.startswith('HTTP_'):
                yield envkey[5:].replace('_', '-').title()
            elif envkey in self.cgikeys:
                yield envkey.replace('_', '-').title()
    def keys(self):
        return list(self)
    def __len__(self):
        return len(self.keys())
    def __contains__(self, key):
        return self._ekey(key) in self.environ
class ConfigDict(dict):
    ''' A dict-subclass with some extras: You can access keys like attributes.
        Uppercase attributes create new ConfigDicts and act as name-spaces.
        Other missing attributes return None. Calling a ConfigDict updates its
        values and returns itself.
        >>> cfg = ConfigDict()
        >>> cfg.Namespace.value = 5
        >>> cfg.OtherNamespace(a=1, b=2)
        >>> cfg
        {'Namespace': {'value': 5}, 'OtherNamespace': {'a': 1, 'b': 2}}
    '''
    def __getattr__(self, key):
        """ Return self[key]; auto-create an empty sub-namespace for unknown
            names that start with an upper-case letter. """
        if key not in self and key[0].isupper():
            self[key] = ConfigDict()
        return self.get(key)
    def __setattr__(self, key, value):
        """ Store *value* unless that would shadow a dict method or clobber
            a non-empty namespace. """
        if hasattr(dict, key):
            raise AttributeError('Read-only attribute.')
        current = self.get(key)
        if current and isinstance(current, ConfigDict):
            raise AttributeError('Non-empty namespace attribute.')
        self[key] = value
    def __delattr__(self, key):
        """ Remove *key* if present; unknown attributes are ignored. """
        if key in self:
            del self[key]
    def __call__(self, *a, **ka):
        """ Apply dict-style updates via setattr() and return self. """
        for key, value in dict(*a, **ka).items():
            setattr(self, key, value)
        return self
class AppStack(list):
    """ A stack-like list. Calling it returns the head of the stack. """
    def __call__(self):
        """ Return the application currently on top of the stack. """
        return self[-1]
    def push(self, value=None):
        """ Push *value* onto the stack. Anything that is not already a
            :class:`Bottle` instance is replaced by a fresh one. """
        app = value if isinstance(value, Bottle) else Bottle()
        self.append(app)
        return app
class WSGIFileWrapper(object):
    """ Iterate over a file-like object in fixed-size chunks (WSGI file
        wrapper). Common file methods of the wrapped object are re-exported
        on the wrapper itself. """
    def __init__(self, fp, buffer_size=1024*64):
        self.fp = fp
        self.buffer_size = buffer_size
        # Delegate the usual file methods straight to the wrapped object
        # (only those it actually provides).
        for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
            if hasattr(fp, attr):
                setattr(self, attr, getattr(fp, attr))
    def __iter__(self):
        read, chunk_size = self.read, self.buffer_size
        part = read(chunk_size)
        while part:
            yield part
            part = read(chunk_size)
class ResourceManager(object):
    ''' This class manages a list of search paths and helps to find and open
        application-bound resources (files).
        :param base: default value for same-named :meth:`add_path` parameter.
        :param opener: callable used to open resources.
        :param cachemode: controls which lookups are cached. One of 'all',
                          'found' or 'none'.
    '''
    def __init__(self, base='./', opener=open, cachemode='all'):
        # BUGFIX: store the `opener` argument instead of the `open` builtin,
        # otherwise a caller-supplied opener was silently ignored.
        self.opener = opener
        self.base = base
        self.cachemode = cachemode
        #: A list of search paths. See :meth:`add_path` for details.
        self.path = []
        #: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
        self.cache = {}
    def add_path(self, path, base=None, index=None, create=False):
        ''' Add a new path to the list of search paths. Return False if it does
            not exist.
            :param path: The new search path. Relative paths are turned into an
                absolute and normalized form. If the path looks like a file (not
                ending in `/`), the filename is stripped off.
            :param base: Path used to absolutize relative search paths.
                Defaults to `:attr:base` which defaults to ``./``.
            :param index: Position within the list of search paths. Defaults to
                last index (appends to the list).
            :param create: Create non-existent search paths. Off by default.
            The `base` parameter makes it easy to reference files installed
            along with a python module or package::
                res.add_path('./resources/', __file__)
        '''
        base = os.path.abspath(os.path.dirname(base or self.base))
        path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
        path += os.sep
        # Re-adding a known path moves it to the new position.
        if path in self.path:
            self.path.remove(path)
        if create and not os.path.isdir(path):
            # BUGFIX: os.mkdirs() does not exist; os.makedirs() is the API.
            os.makedirs(path)
        if index is None:
            self.path.append(path)
        else:
            self.path.insert(index, path)
        # Any cached resolution may now be stale.
        self.cache.clear()
        # BUGFIX: the docstring promises a truth value; previously returned None.
        return os.path.exists(path)
    def __iter__(self):
        ''' Iterate over all existing files in all registered paths. '''
        search = self.path[:]
        while search:
            path = search.pop()
            if not os.path.isdir(path): continue
            for name in os.listdir(path):
                full = os.path.join(path, name)
                # Recurse into subdirectories; yield plain files.
                if os.path.isdir(full): search.append(full)
                else: yield full
    def lookup(self, name):
        ''' Search for a resource and return an absolute file path, or `None`.
            The :attr:`path` list is searched in order. The first match is
            returned. Symlinks are followed. The result is cached to speed up
            future lookups. '''
        if name not in self.cache or DEBUG:
            for path in self.path:
                fpath = os.path.join(path, name)
                if os.path.isfile(fpath):
                    if self.cachemode in ('all', 'found'):
                        self.cache[name] = fpath
                    return fpath
            if self.cachemode == 'all':
                self.cache[name] = None
        # BUGFIX: use .get() so a miss with cachemode 'found' or 'none'
        # returns None (as documented) instead of raising KeyError.
        return self.cache.get(name)
    def open(self, name, mode='r', *args, **kwargs):
        ''' Find a resource and return a file object, or raise IOError. '''
        fname = self.lookup(name)
        if not fname: raise IOError("Resource %r not found." % name)
        # BUGFIX: open the resolved absolute path, not the relative `name`
        # (which only worked if the cwd happened to contain the resource).
        return self.opener(fname, mode=mode, *args, **kwargs)
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error: Application stopped.'):
    """ Aborts execution and causes a HTTP error.
        :param code: HTTP status code (default: 500)
        :param text: error text shown to the client
        :raises HTTPError: always
    """
    raise HTTPError(code, text)
def redirect(url, code=None):
    """ Aborts execution and causes a 303 or 302 redirect, depending on
        the HTTP protocol version. """
    if code is None:
        # 303 (See Other) requires HTTP/1.1; fall back to 302 for older clients.
        code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
    # The Location header should be absolute, so resolve relative targets
    # against the current request URL.
    location = urljoin(request.url, url)
    raise HTTPResponse("", status=code, header=dict(Location=location))
def _file_iter_range(fp, offset, bytes, maxread=1024*1024):
''' Yield chunks from a range in a file. No chunk is bigger than maxread.'''
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part
def static_file(filename, root, mimetype='auto', download=False):
    """ Open a file in a safe way and return :exc:`HTTPResponse` with status
        code 200, 305, 401 or 404. Set Content-Type, Content-Encoding,
        Content-Length and Last-Modified header. Obey If-Modified-Since header
        and HEAD requests.
    """
    # Resolve the absolute target path and make sure it stays below `root`
    # (this is the guard against '../' path traversal).
    root = os.path.abspath(root) + os.sep
    filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
    header = dict()
    if not filename.startswith(root):
        return HTTPError(403, "Access denied.")
    if not os.path.exists(filename) or not os.path.isfile(filename):
        return HTTPError(404, "File does not exist.")
    if not os.access(filename, os.R_OK):
        return HTTPError(403, "You do not have permission to access this file.")
    if mimetype == 'auto':
        # Guess content type and transfer encoding (e.g. gzip) from the name.
        mimetype, encoding = mimetypes.guess_type(filename)
        if mimetype: header['Content-Type'] = mimetype
        if encoding: header['Content-Encoding'] = encoding
    elif mimetype:
        header['Content-Type'] = mimetype
    if download:
        # download=True keeps the real file name; a string overrides it.
        download = os.path.basename(filename if download == True else download)
        header['Content-Disposition'] = 'attachment; filename="%s"' % download
    stats = os.stat(filename)
    header['Content-Length'] = clen = stats.st_size
    lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
    header['Last-Modified'] = lm
    ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
    if ims:
        # Compare at whole-second granularity and answer 304 if unchanged.
        ims = parse_date(ims.split(";")[0].strip())
        if ims is not None and ims >= int(stats.st_mtime):
            header['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
            return HTTPResponse(status=304, header=header)
    # HEAD requests get an empty body but identical headers.
    body = '' if request.method == 'HEAD' else open(filename, 'rb')
    header["Accept-Ranges"] = "bytes"
    # NOTE(review): this assignment is immediately overwritten in the branch
    # below and looks redundant -- kept as-is to avoid behavior changes.
    ranges = request.environ.get('HTTP_RANGE')
    if 'HTTP_RANGE' in request.environ:
        ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen))
        if not ranges:
            return HTTPError(416, "Requested Range Not Satisfiable")
        # Only the first range is honored (no multipart/byteranges support).
        offset, end = ranges[0]
        header["Content-Range"] = "bytes %d-%d/%d" % (offset, end-1, clen)
        header["Content-Length"] = str(end-offset)
        if body: body = _file_iter_range(body, offset, end-offset)
        return HTTPResponse(body, header=header, status=206)
    return HTTPResponse(body, header=header)
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
    """ Change the debug level.
        There is only one debug level supported at the moment."""
    # DEBUG is a module-level flag (read e.g. by ResourceManager.lookup()
    # to bypass its cache).
    global DEBUG
    DEBUG = bool(mode)
def parse_date(ims):
    """ Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
    try:
        parts = email.utils.parsedate_tz(ims)
        # mktime() interprets the tuple as local time (DST forced off), so
        # both the local utc-offset (time.timezone) and the timestamp's own
        # offset (parts[9], if any) have to be subtracted again.
        return time.mktime(parts[:8] + (0,)) - (parts[9] or 0) - time.timezone
    except (TypeError, ValueError, IndexError, OverflowError):
        # Unparseable or out-of-range input yields None instead of an error.
        return None
def parse_auth(header):
    """ Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
    try:
        method, data = header.split(None, 1)
        if method.lower() != 'basic':
            return None
        # Credentials are base64("user:password"); split on the first colon
        # so passwords may contain colons themselves.
        credentials = touni(base64.b64decode(tob(data)))
        user, pwd = credentials.split(':', 1)
        return user, pwd
    except (KeyError, ValueError):
        return None
def parse_range_header(header, maxlen=0):
    ''' Yield (start, end) ranges parsed from a HTTP Range header. Skip
        unsatisfiable ranges. The end index is non-inclusive.'''
    if not header or not header.startswith('bytes='):
        return
    for spec in header[6:].split(','):
        if '-' not in spec:
            continue
        start, end = spec.split('-', 1)
        try:
            if not start:    # bytes=-100    -> the final 100 bytes
                start, end = max(0, maxlen - int(end)), maxlen
            elif not end:    # bytes=100-    -> everything but the first 100 bytes
                start, end = int(start), maxlen
            else:            # bytes=100-200 -> bytes 100..200 (inclusive)
                start, end = int(start), min(int(end) + 1, maxlen)
            # Only yield satisfiable, non-empty ranges.
            if 0 <= start < end <= maxlen:
                yield start, end
        except ValueError:
            pass
def _lscmp(a, b):
''' Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. '''
return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
    ''' Encode and sign a pickle-able object. Return a (byte) string '''
    # Local import: hashlib is only needed for the explicit digest below.
    import hashlib
    msg = base64.b64encode(pickle.dumps(data, -1))
    # MD5 was the implicit default of hmac.new() in Python < 3.8; spelling it
    # out keeps signatures compatible with existing cookies AND fixes the
    # TypeError raised on Python 3.8+ where digestmod became mandatory.
    sig = base64.b64encode(hmac.new(tob(key), msg, digestmod=hashlib.md5).digest())
    return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
    ''' Verify and decode an encoded string. Return an object or None.'''
    # Local import: hashlib is only needed for the explicit digest below.
    import hashlib
    data = tob(data)
    if cookie_is_encoded(data):
        sig, msg = data.split(tob('?'), 1)
        # Explicit MD5 matches cookie_encode() and the historic hmac default;
        # required on Python 3.8+ where digestmod is mandatory.
        if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg, digestmod=hashlib.md5).digest())):
            # Safe only because the signature was verified first: pickle must
            # never be fed unauthenticated client data.
            return pickle.loads(base64.b64decode(msg))
    return None
def cookie_is_encoded(data):
    ''' Return True if the argument looks like a encoded cookie.'''
    # Signed cookies start with '!' and carry a '?' between signature and payload.
    has_prefix = data.startswith(tob('!'))
    has_separator = tob('?') in data
    return bool(has_prefix and has_separator)
def html_escape(string):
    ''' Escape HTML special characters ``&<>`` and quotes ``'"``. '''
    # BUGFIX: the replacement targets had been un-escaped into no-ops
    # (e.g. replace('&','&')); restore the actual HTML entities.
    # '&' must be replaced first or the other entities would be mangled.
    return string.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;')\
                 .replace('"','&quot;').replace("'",'&#039;')
def html_quote(string):
    ''' Escape and quote a string to be used as an HTTP attribute.'''
    # BUGFIX: restore the numeric character references that had been
    # un-escaped (newline -> &#10;, carriage return -> &#13;, tab -> &#9;)
    # so the quoted value survives header folding and attribute parsing.
    return '"%s"' % html_escape(string).replace('\n','&#10;')\
                    .replace('\r','&#13;').replace('\t','&#9;')
def yieldroutes(func):
    """ Return a generator for routes that match the signature (name, args)
    of the func parameter. This may yield more than one route if the function
    takes optional keyword arguments. The output is best described by example::
        a() -> '/a'
        b(x, y) -> '/b/:x/:y'
        c(x, y=5) -> '/c/:x' and '/c/:x/:y'
        d(x=5, y=6) -> '/d' and '/d/:x' and '/d/:x/:y'
    """
    import inspect # Expensive module. Only import if necessary.
    path = '/' + func.__name__.replace('__','/').lstrip('/')
    # BUGFIX: inspect.getargspec() was removed in Python 3.11. Prefer
    # getfullargspec() (same layout: args at [0], defaults at [3]) and fall
    # back for very old interpreters that lack it.
    try:
        spec = inspect.getfullargspec(func)
    except AttributeError:
        spec = inspect.getargspec(func)
    # Arguments without defaults are mandatory path segments.
    argc = len(spec[0]) - len(spec[3] or [])
    path += ('/:%s' * argc) % tuple(spec[0][:argc])
    yield path
    # Each defaulted argument adds one progressively longer optional route.
    for arg in spec[0][argc:]:
        path += '/:%s' % arg
        yield path
def path_shift(script_name, path_info, shift=1):
    ''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
        :return: The modified paths.
        :param script_name: The SCRIPT_NAME path.
        :param path_info: The PATH_INFO path.
        :param shift: The number of path fragments to shift. May be negative to
          change the shift direction. (default: 1)
    '''
    if not shift:
        return script_name, path_info
    segs_path = path_info.strip('/').split('/')
    segs_script = script_name.strip('/').split('/')
    # split('') yields [''] -- normalize empty paths to empty lists.
    if segs_path and segs_path[0] == '': segs_path = []
    if segs_script and segs_script[0] == '': segs_script = []
    if 0 < shift <= len(segs_path):
        # Move leading PATH_INFO fragments onto the end of SCRIPT_NAME.
        moved = segs_path[:shift]
        segs_script = segs_script + moved
        segs_path = segs_path[shift:]
    elif -len(segs_script) <= shift < 0:
        # Move trailing SCRIPT_NAME fragments back to the front of PATH_INFO.
        moved = segs_script[shift:]
        segs_path = moved + segs_path
        segs_script = segs_script[:shift]
    else:
        empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
        raise AssertionError("Cannot shift. Nothing left from %s" % empty)
    new_script_name = '/' + '/'.join(segs_script)
    new_path_info = '/' + '/'.join(segs_path)
    # Keep a trailing slash on PATH_INFO if the input had one.
    if path_info.endswith('/') and segs_path: new_path_info += '/'
    return new_script_name, new_path_info
def validate(**vkargs):
    """
    Validates and manipulates keyword arguments by user defined callables.
    Handles ValueError and missing arguments by raising HTTPError(403).
    """
    depr('Use route wildcard filters instead.')
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kargs):
            # Run each configured validator over its keyword argument,
            # replacing the raw value with the converted one.
            for key, validator in vkargs.items():
                if key not in kargs:
                    abort(403, 'Missing parameter: %s' % key)
                try:
                    kargs[key] = validator(kargs[key])
                except ValueError:
                    abort(403, 'Wrong parameter format for: %s' % key)
            return func(*args, **kargs)
        return wrapper
    return decorator
def auth_basic(check, realm="private", text="Access denied"):
    ''' Callback decorator to require HTTP auth (basic).
        :param check: callable(user, password) -> bool deciding access.
        :param realm: value for the WWW-Authenticate realm.
        :param text: body of the 401 error response.
        TODO: Add route(check_auth=...) parameter. '''
    def decorator(func):
        # FIX: preserve the wrapped callback's metadata, consistent with
        # validate() and make_default_app_wrapper() in this module.
        @functools.wraps(func)
        def wrapper(*a, **ka):
            user, password = request.auth or (None, None)
            if user is None or not check(user, password):
                # Challenge the client to authenticate against our realm.
                response.headers['WWW-Authenticate'] = 'Basic realm="%s"' % realm
                return HTTPError(401, text)
            return func(*a, **ka)
        return wrapper
    return decorator
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
    ''' Return a callable that relays calls to the current default app. '''
    @functools.wraps(getattr(Bottle, name))
    def wrapper(*a, **ka):
        # app() is evaluated on every call, so the wrapper always targets
        # whatever application is on top of the stack at that moment.
        return getattr(app(), name)(*a, **ka)
    return wrapper
# One module-level shortcut per public Bottle method; each delegates to the
# current default application at call time.
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
###############################################################################
# Server Adapter ###############################################################
###############################################################################
class ServerAdapter(object):
    """ Base class for the pluggable server backends. Stores the bind
        address and any extra backend-specific options. """
    #: Suppress startup/request output when set by run(quiet=True).
    quiet = False
    def __init__(self, host='127.0.0.1', port=8080, **config):
        self.options = config
        self.host = host
        self.port = int(port)
    def run(self, handler): # pragma: no cover
        """ Serve the WSGI *handler* (blocking). Overridden by subclasses. """
        pass
    def __repr__(self):
        opts = ', '.join('%s=%s' % (key, repr(val))
                         for key, val in self.options.items())
        return "%s(%s)" % (self.__class__.__name__, opts)
class CGIServer(ServerAdapter):
    """ Run the application as a CGI script via wsgiref's CGIHandler. """
    quiet = True  # stdout belongs to the web server in CGI mode
    def run(self, handler): # pragma: no cover
        from wsgiref.handlers import CGIHandler
        def fixed_environ(environ, start_response):
            # Some CGI environments omit PATH_INFO entirely; default it.
            environ.setdefault('PATH_INFO', '')
            return handler(environ, start_response)
        CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
    """ FastCGI adapter based on the third-party `flup` package. """
    def run(self, handler): # pragma: no cover
        import flup.server.fcgi
        # Bind host:port unless the caller configured bindAddress explicitly
        # (e.g. a unix socket path).
        self.options.setdefault('bindAddress', (self.host, self.port))
        flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
    """ Single-threaded reference server from the standard library. """
    def run(self, handler): # pragma: no cover
        from wsgiref.simple_server import make_server, WSGIRequestHandler
        if self.quiet:
            # Silence the default per-request logging to stderr.
            class QuietHandler(WSGIRequestHandler):
                def log_request(*args, **kw): pass
            self.options['handler_class'] = QuietHandler
        srv = make_server(self.host, self.port, handler, **self.options)
        srv.serve_forever()
class CherryPyServer(ServerAdapter):
    """ Adapter for the CherryPy WSGI server. """
    def run(self, handler): # pragma: no cover
        from cherrypy import wsgiserver
        server = wsgiserver.CherryPyWSGIServer((self.host, self.port), handler)
        try:
            server.start()
        finally:
            # Always release sockets and worker threads, even on interrupt.
            server.stop()
class WaitressServer(ServerAdapter):
    """ Adapter for the pure-python `waitress` server. """
    def run(self, handler):
        from waitress import serve
        serve(handler, host=self.host, port=self.port)
class PasteServer(ServerAdapter):
    """ Adapter for the Paste httpserver. """
    def run(self, handler): # pragma: no cover
        from paste import httpserver
        if not self.quiet:
            # Wrap the app in a request logger unless quiet mode is on.
            from paste.translogger import TransLogger
            handler = TransLogger(handler)
        httpserver.serve(handler, host=self.host, port=str(self.port),
                         **self.options)
class MeinheldServer(ServerAdapter):
    """ Adapter for the third-party `meinheld` server. """
    def run(self, handler):
        from meinheld import server
        server.listen((self.host, self.port))
        server.run(handler)
class FapwsServer(ServerAdapter):
    """ Extremely fast webserver using libev. See http://www.fapws.org/ """
    def run(self, handler): # pragma: no cover
        import fapws._evwsgi as evwsgi
        from fapws import base, config
        port = self.port
        if float(config.SERVER_IDENT[-2:]) > 0.4:
            # fapws3 silently changed its API in 0.5
            port = str(port)
        evwsgi.start(self.host, port)
        # fapws3 never releases the GIL. Complain upstream. I tried. No luck.
        if 'BOTTLE_CHILD' in os.environ and not self.quiet:
            _stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
            _stderr("         (Fapws3 breaks python thread support)\n")
        evwsgi.set_base_module(base)
        def app(environ, start_response):
            # fapws runs in a single process; advertise that to the app.
            environ['wsgi.multiprocess'] = False
            return handler(environ, start_response)
        evwsgi.wsgi_cb(('', app))
        evwsgi.run()
class TornadoServer(ServerAdapter):
    """ The super hyped asynchronous server by facebook. Untested. """
    def run(self, handler): # pragma: no cover
        import tornado.wsgi, tornado.httpserver, tornado.ioloop
        # Wrap the WSGI app so tornado's async HTTP server can drive it.
        container = tornado.wsgi.WSGIContainer(handler)
        server = tornado.httpserver.HTTPServer(container)
        # NOTE(review): self.host is ignored here; only the port is passed
        # to listen() -- verify whether binding to a specific interface is
        # intended to work with this adapter.
        server.listen(port=self.port)
        tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
    """ Adapter for Google App Engine. """
    quiet = True
    def run(self, handler):
        from google.appengine.ext.webapp import util
        # A main() function in the handler script enables 'App Caching'.
        # Lets makes sure it is there. This _really_ improves performance.
        module = sys.modules.get('__main__')
        if module and not hasattr(module, 'main'):
            module.main = lambda: util.run_wsgi_app(handler)
        util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
    """ Adapter for the Twisted reactor-based web server. Untested. """
    def run(self, handler):
        from twisted.web import server, wsgi
        from twisted.python.threadpool import ThreadPool
        from twisted.internet import reactor
        # WSGI code runs in a thread pool; the reactor stays async.
        thread_pool = ThreadPool()
        thread_pool.start()
        # Stop the worker threads when the reactor shuts down.
        reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
        factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
        reactor.listenTCP(self.port, factory, interface=self.host)
        reactor.run()
class DieselServer(ServerAdapter):
    """ Adapter for the `diesel` async framework. Untested. """
    def run(self, handler):
        from diesel.protocols.wsgi import WSGIApplication
        # NOTE(review): self.host is not passed to WSGIApplication here --
        # verify whether diesel supports binding a specific interface.
        app = WSGIApplication(handler, port=self.port)
        app.run()
class GeventServer(ServerAdapter):
    """ Untested. Options:
        * `monkey` (default: True) fixes the stdlib to use greenthreads.
        * `fast` (default: False) uses libevent's http server, but has some
          issues: No streaming, no pipelining, no SSL.
    """
    def run(self, handler):
        from gevent import wsgi as wsgi_fast, pywsgi, monkey, local
        if self.options.get('monkey', True):
            # Patch only once: after patch_all(), threading.local IS
            # gevent's local.local, so the check turns False.
            if not threading.local is local.local: monkey.patch_all()
        wsgi = wsgi_fast if self.options.get('fast') else pywsgi
        log = None if self.quiet else 'default'
        wsgi.WSGIServer((self.host, self.port), handler, log=log).serve_forever()
class GunicornServer(ServerAdapter):
    """ Untested. See http://gunicorn.org/configure.html for options. """
    def run(self, handler):
        from gunicorn.app.base import Application
        config = {'bind': "%s:%d" % (self.host, int(self.port))}
        config.update(self.options)
        # Minimal embedded gunicorn Application: config and handler are
        # closed over instead of being read from the command line.
        class GunicornApplication(Application):
            def init(self, parser, opts, args):
                return config
            def load(self):
                return handler
        GunicornApplication().run()
class EventletServer(ServerAdapter):
    """ Adapter for the `eventlet` green-thread server. Untested """
    def run(self, handler):
        from eventlet import wsgi, listen
        try:
            wsgi.server(listen((self.host, self.port)), handler,
                        log_output=(not self.quiet))
        except TypeError:
            # Fallback, if we have old version of eventlet
            # (without the log_output keyword argument).
            wsgi.server(listen((self.host, self.port)), handler)
class RocketServer(ServerAdapter):
    """ Adapter for the `rocket` server. Untested. """
    def run(self, handler):
        from rocket import Rocket
        server = Rocket((self.host, self.port), 'wsgi', { 'wsgi_app' : handler })
        server.start()
class BjoernServer(ServerAdapter):
    """ Fast server written in C: https://github.com/jonashaag/bjoern """
    def run(self, handler):
        from bjoern import run
        run(handler, self.host, self.port)
class AutoServer(ServerAdapter):
    """ Try several adapters in order; the first importable backend wins.
        Untested. """
    # Ordered by preference; WSGIRefServer (stdlib) is the safe fallback.
    adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer, WSGIRefServer]
    def run(self, handler):
        for sa in self.adapters:
            try:
                return sa(self.host, self.port, **self.options).run(handler)
            except ImportError:
                # Backend not installed: try the next one.
                pass
# Maps the `server` name accepted by run() to its adapter class.
server_names = {
    'cgi': CGIServer,
    'flup': FlupFCGIServer,
    'wsgiref': WSGIRefServer,
    'waitress': WaitressServer,
    'cherrypy': CherryPyServer,
    'paste': PasteServer,
    'fapws3': FapwsServer,
    'tornado': TornadoServer,
    'gae': AppEngineServer,
    'twisted': TwistedServer,
    'diesel': DieselServer,
    'meinheld': MeinheldServer,
    'gunicorn': GunicornServer,
    'eventlet': EventletServer,
    'gevent': GeventServer,
    'rocket': RocketServer,
    'bjoern' : BjoernServer,
    'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
    """ Import a module or fetch an object from a module.
        * ``package.module`` returns `module` as a module object.
        * ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
        * ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
        The last form accepts not only function calls, but any type of
        expression. Keyword arguments passed to this function are available as
        local variables. Example: ``import_string('re:compile(x)', x='[a-z]')``
    """
    if ':' in target:
        module, target = target.split(':', 1)
    else:
        module, target = target, None
    if module not in sys.modules:
        __import__(module)
    if not target:
        return sys.modules[module]
    # A bare identifier is a plain attribute lookup; anything else is an
    # expression evaluated with the top-level package in scope.
    if target.isalnum():
        return getattr(sys.modules[module], target)
    package_name = module.split('.')[0]
    namespace[package_name] = sys.modules[package_name]
    # SECURITY: eval() on the target expression -- only ever pass trusted
    # strings to this function.
    return eval('%s.%s' % (module, target), namespace)
def load_app(target):
    """ Load a bottle application from a module and make sure that the import
        does not affect the current default application, but returns a separate
        application object. See :func:`load` for the target parameter. """
    # Disable any run() calls executed at import time of the target module.
    global NORUN; NORUN, nr_old = True, NORUN
    try:
        tmp = default_app.push() # Create a new "default application"
        rv = load(target) # Import the target module
        # Prefer an app object returned by the target expression; otherwise
        # use the temporary app that collected module-level route() calls.
        return rv if callable(rv) else tmp
    finally:
        default_app.remove(tmp) # Remove the temporary added default application
        NORUN = nr_old
# Keep a reference to debug() because run() shadows the name with its own
# `debug` keyword argument.
_debug = debug
def run(app=None, server='wsgiref', host='127.0.0.1', port=8080,
        interval=1, reloader=False, quiet=False, plugins=None,
        debug=False, **kargs):
    """ Start a server instance. This method blocks until the server terminates.
        :param app: WSGI application or target string supported by
               :func:`load_app`. (default: :func:`default_app`)
        :param server: Server adapter to use. See :data:`server_names` keys
               for valid names or pass a :class:`ServerAdapter` subclass.
               (default: `wsgiref`)
        :param host: Server address to bind to. Pass ``0.0.0.0`` to listens on
               all interfaces including the external one. (default: 127.0.0.1)
        :param port: Server port to bind to. Values below 1024 require root
               privileges. (default: 8080)
        :param reloader: Start auto-reloading server? (default: False)
        :param interval: Auto-reloader interval in seconds (default: 1)
        :param quiet: Suppress output to stdout and stderr? (default: False)
        :param options: Options passed to the server adapter.
     """
    if NORUN: return
    if reloader and not os.environ.get('BOTTLE_CHILD'):
        # Reloader PARENT process: spawn a child that does the actual
        # serving and restart it every time it exits with status 3.
        try:
            lockfile = None
            fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
            os.close(fd) # We only need this file to exist. We never write to it
            while os.path.exists(lockfile):
                args = [sys.executable] + sys.argv
                environ = os.environ.copy()
                environ['BOTTLE_CHILD'] = 'true'
                environ['BOTTLE_LOCKFILE'] = lockfile
                p = subprocess.Popen(args, env=environ)
                while p.poll() is None: # Busy wait...
                    # Touching the lockfile tells the child we are alive.
                    os.utime(lockfile, None) # I am alive!
                    time.sleep(interval)
                # Exit status 3 means "please restart me"; anything else
                # terminates the parent with the child's status.
                if p.poll() != 3:
                    if os.path.exists(lockfile): os.unlink(lockfile)
                    sys.exit(p.poll())
        except KeyboardInterrupt:
            pass
        finally:
            if os.path.exists(lockfile):
                os.unlink(lockfile)
        return
    try:
        _debug(debug)
        app = app or default_app()
        # Accept a 'module:expression' target string as well as an app object.
        if isinstance(app, basestring):
            app = load_app(app)
        if not callable(app):
            raise ValueError("Application is not callable: %r" % app)
        for plugin in plugins or []:
            app.install(plugin)
        # The server may be given as a known name, a dotted-path string,
        # an adapter class or a ready-made adapter instance.
        if server in server_names:
            server = server_names.get(server)
        if isinstance(server, basestring):
            server = load(server)
        if isinstance(server, type):
            server = server(host=host, port=port, **kargs)
        if not isinstance(server, ServerAdapter):
            raise ValueError("Unknown or unsupported server: %r" % server)
        server.quiet = server.quiet or quiet
        if not server.quiet:
            _stderr("Bottle v%s server starting up (using %s)...\n" % (__version__, repr(server)))
            _stderr("Listening on http://%s:%d/\n" % (server.host, server.port))
            _stderr("Hit Ctrl-C to quit.\n\n")
        if reloader:
            # Reloader CHILD process: watch module files in the background
            # and exit with status 3 to ask the parent for a restart.
            lockfile = os.environ.get('BOTTLE_LOCKFILE')
            bgcheck = FileCheckerThread(lockfile, interval)
            with bgcheck:
                server.run(app)
            if bgcheck.status == 'reload':
                sys.exit(3)
        else:
            server.run(app)
    except KeyboardInterrupt:
        pass
    except (SystemExit, MemoryError):
        raise
    except:
        if not reloader: raise
        if not getattr(server, 'quiet', quiet):
            print_exc()
        # Wait a moment before asking for a restart; the error may be
        # caused by a half-written file the reloader just picked up.
        time.sleep(interval)
        sys.exit(3)
class FileCheckerThread(threading.Thread):
    ''' Interrupt main-thread as soon as a changed module file is detected,
        the lockfile gets deleted or gets to old. '''
    def __init__(self, lockfile, interval):
        threading.Thread.__init__(self)
        self.lockfile, self.interval = lockfile, interval
        #: Is one of 'reload', 'error' or 'exit'
        self.status = None
    def run(self):
        exists = os.path.exists
        mtime = lambda path: os.stat(path).st_mtime
        files = dict()
        # Snapshot the modification time of every loaded module's source file.
        for module in list(sys.modules.values()):
            path = getattr(module, '__file__', '')
            # Watch the .py source, not the compiled .pyc/.pyo next to it.
            if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
            if path and exists(path): files[path] = mtime(path)
        while not self.status:
            # A missing or stale lockfile means the parent process died.
            if not exists(self.lockfile)\
            or mtime(self.lockfile) < time.time() - self.interval - 5:
                self.status = 'error'
                thread.interrupt_main()
            # Any changed or deleted module file triggers a reload request.
            for path, lmtime in list(files.items()):
                if not exists(path) or mtime(path) > lmtime:
                    self.status = 'reload'
                    thread.interrupt_main()
                    break
            time.sleep(self.interval)
    def __enter__(self):
        self.start()
    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self.status: self.status = 'exit' # silent exit
        self.join()
        # Swallow the KeyboardInterrupt raised by interrupt_main() above.
        return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
    """ Raised (as a 500 server error) when a template cannot be found,
        parsed or rendered. """
    def __init__(self, message):
        HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
    """ Base class and minimal API for template adapters """
    extensions = ['tpl','html','thtml','stpl']
    settings = {} #used in prepare()
    defaults = {} #used in render()
    def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings):
        """ Create a new template.
        If the source parameter (str or buffer) is missing, the name argument
        is used to guess a template filename. Subclasses can assume that
        self.source and/or self.filename are set. Both are strings.
        The lookup, encoding and settings parameters are stored as instance
        variables.
        The lookup parameter stores a list containing directory paths.
        The encoding parameter should be used to decode byte strings or files.
        The settings parameter contains a dict for engine-specific settings.
        """
        # NOTE(review): `lookup=[]` is a mutable default; it is only read
        # here, never mutated, so the shared instance is harmless.
        self.name = name
        self.source = source.read() if hasattr(source, 'read') else source
        self.filename = source.filename if hasattr(source, 'filename') else None
        self.lookup = [os.path.abspath(x) for x in lookup]
        self.encoding = encoding
        self.settings = self.settings.copy() # Copy from class variable
        self.settings.update(settings) # Apply
        if not self.source and self.name:
            # No inline source: resolve the template file via the lookup path.
            self.filename = self.search(self.name, self.lookup)
            if not self.filename:
                raise TemplateError('Template %s not found.' % repr(name))
        if not self.source and not self.filename:
            raise TemplateError('No template specified.')
        self.prepare(**self.settings)
    @classmethod
    def search(cls, name, lookup=[]):
        """ Search name in all directories specified in lookup.
        First without, then with common extensions. Return first hit. """
        if os.path.isfile(name): return name
        for spath in lookup:
            fname = os.path.join(spath, name)
            if os.path.isfile(fname):
                return fname
            for ext in cls.extensions:
                if os.path.isfile('%s.%s' % (fname, ext)):
                    return '%s.%s' % (fname, ext)
    @classmethod
    def global_config(cls, key, *args):
        ''' This reads or sets the global settings stored in class.settings. '''
        if args:
            # Copy-on-write so a subclass never mutates its parent's settings.
            cls.settings = cls.settings.copy() # Make settings local to class
            cls.settings[key] = args[0]
        else:
            return cls.settings[key]
    def prepare(self, **options):
        """ Run preparations (parsing, caching, ...).
        It should be possible to call this again to refresh a template or to
        update settings.
        """
        raise NotImplementedError
    def render(self, *args, **kwargs):
        """ Render the template with the specified local variables and return
        a single byte or unicode string. If it is a byte string, the encoding
        must match self.encoding. This method must be thread-safe!
        Local variables may be provided in dictionaries (*args)
        or directly, as keywords (**kwargs).
        """
        raise NotImplementedError
class MakoTemplate(BaseTemplate):
    """ Adapter for the Mako template engine. """
    def prepare(self, **options):
        from mako.template import Template
        from mako.lookup import TemplateLookup
        options.update({'input_encoding':self.encoding})
        # Render exceptions inside the output while debugging.
        options.setdefault('format_exceptions', bool(DEBUG))
        lookup = TemplateLookup(directories=self.lookup, **options)
        if self.source:
            self.tpl = Template(self.source, lookup=lookup, **options)
        else:
            self.tpl = Template(uri=self.name, filename=self.filename, lookup=lookup, **options)
    def render(self, *args, **kwargs):
        """ Render with class defaults overlaid by dicts and keywords. """
        for dictarg in args: kwargs.update(dictarg)
        _defaults = self.defaults.copy()
        _defaults.update(kwargs)
        return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
    """ Adapter for the Cheetah template engine. """
    def prepare(self, **options):
        from Cheetah.Template import Template
        # Template variables live in a thread-local dict on the searchList.
        self.context = threading.local()
        self.context.vars = {}
        options['searchList'] = [self.context.vars]
        if self.source:
            self.tpl = Template(source=self.source, **options)
        else:
            self.tpl = Template(file=self.filename, **options)
    def render(self, *args, **kwargs):
        """ Render with class defaults overlaid by dicts and keywords. """
        for dictarg in args: kwargs.update(dictarg)
        # NOTE(review): context.vars is only initialized in the thread that
        # ran prepare(); rendering from a different thread would not find
        # it -- verify before using this adapter in threaded servers.
        self.context.vars.update(self.defaults)
        self.context.vars.update(kwargs)
        out = str(self.tpl)
        self.context.vars.clear()
        return out
class Jinja2Template(BaseTemplate):
    """ Template adapter backed by the Jinja2 engine. """
    def prepare(self, filters=None, tests=None, **kwargs):
        from jinja2 import Environment, FunctionLoader
        if 'prefix' in kwargs: # TODO: to be removed after a while
            raise RuntimeError('The keyword argument `prefix` has been removed. '
                'Use the full jinja2 environment name line_statement_prefix instead.')
        self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
        if filters:
            self.env.filters.update(filters)
        if tests:
            self.env.tests.update(tests)
        self.tpl = (self.env.from_string(self.source) if self.source
                    else self.env.get_template(self.filename))

    def render(self, *args, **kwargs):
        # Precedence (lowest first): defaults < keywords < dict arguments.
        variables = self.defaults.copy()
        variables.update(kwargs)
        for extra in args:
            variables.update(extra)
        return self.tpl.render(**variables)

    def loader(self, name):
        # Resolve *name* against the lookup path and return its decoded text.
        fname = self.search(name, self.lookup)
        if not fname:
            return None
        with open(fname, "rb") as handle:
            return handle.read().decode(self.encoding)
class SimpleTALTemplate(BaseTemplate):
    ''' Deprecated, do not use. '''
    def prepare(self, **options):
        depr('The SimpleTAL template handler is deprecated'\
             ' and will be removed in 0.12')
        from simpletal import simpleTAL
        if self.source:
            self.tpl = simpleTAL.compileHTMLTemplate(self.source)
        else:
            with open(self.filename, 'rb') as fp:
                self.tpl = simpleTAL.compileHTMLTemplate(tonat(fp.read()))

    def render(self, *args, **kwargs):
        from simpletal import simpleTALES
        for extra in args:
            kwargs.update(extra)
        context = simpleTALES.Context()
        # Register defaults first, then caller-supplied variables.
        for name, value in self.defaults.items():
            context.addGlobal(name, value)
        for name, value in kwargs.items():
            context.addGlobal(name, value)
        buf = StringIO()
        self.tpl.expand(context, buf)
        return buf.getvalue()
class SimpleTemplate(BaseTemplate):
    """ Bottle's built-in template engine: translates templates to python. """
    # Python keywords that open an indented block on a '%' template line.
    blocks = ('if', 'elif', 'else', 'try', 'except', 'finally', 'for', 'while',
              'with', 'def', 'class')
    # Keywords that close the current block before opening their own.
    dedent_blocks = ('elif', 'else', 'except', 'finally')
    @lazy_attribute
    def re_pytokens(cls):
        ''' This matches comments and all kinds of quoted strings but does
            NOT match comments (#...) within quoted strings. (trust me) '''
        return re.compile(r'''
            (''(?!')|""(?!")|'{6}|"{6}    # Empty strings (all 4 types)
             |'(?:[^\\']|\\.)+?'          # Single quotes (')
             |"(?:[^\\"]|\\.)+?"          # Double quotes (")
             |'{3}(?:[^\\]|\\.|\n)+?'{3}  # Triple-quoted strings (')
             |"{3}(?:[^\\]|\\.|\n)+?"{3}  # Triple-quoted strings (")
             |\#.*                        # Comments
            )''', re.VERBOSE)
    def prepare(self, escape_func=html_escape, noescape=False, **kwargs):
        """ Configure the _str/_escape helpers; noescape=True swaps them. """
        self.cache = {}
        enc = self.encoding
        self._str = lambda x: touni(x, enc)
        self._escape = lambda x: escape_func(touni(x, enc))
        if noescape:
            self._str, self._escape = self._escape, self._str
    @classmethod
    def split_comment(cls, code):
        """ Removes comments (#...) from python code. """
        if '#' not in code: return code
        #: Remove comments only (leave quoted strings as they are)
        subf = lambda m: '' if m.group(0)[0]=='#' else m.group(0)
        return re.sub(cls.re_pytokens, subf, code)
    @cached_property
    def co(self):
        # Compiled code object of the generated python source (cached).
        return compile(self.code, self.filename or '<string>', 'exec')
    @cached_property
    def code(self):
        # Translate the template into python source code (cached).
        stack = [] # Current Code indentation
        lineno = 0 # Current line of code
        ptrbuffer = [] # Buffer for printable strings and token tuple instances
        codebuffer = [] # Buffer for generated python code
        multiline = dedent = oneline = False
        template = self.source or open(self.filename, 'rb').read()
        def yield_tokens(line):
            # Split a text line into TXT/RAW/CMD tokens at {{...}} markers.
            for i, part in enumerate(re.split(r'\{\{(.*?)\}\}', line)):
                if i % 2:
                    if part.startswith('!'): yield 'RAW', part[1:]
                    else: yield 'CMD', part
                else: yield 'TXT', part
        def flush(): # Flush the ptrbuffer
            if not ptrbuffer: return
            cline = ''
            for line in ptrbuffer:
                for token, value in line:
                    if token == 'TXT': cline += repr(value)
                    elif token == 'RAW': cline += '_str(%s)' % value
                    elif token == 'CMD': cline += '_escape(%s)' % value
                    cline += ', '
                cline = cline[:-2] + '\\\n'
            cline = cline[:-2]
            if cline[:-1].endswith('\\\\\\\\\\n'):
                cline = cline[:-7] + cline[-1] # 'nobr\\\\\n' --> 'nobr'
            cline = '_printlist([' + cline + '])'
            del ptrbuffer[:] # Do this before calling code() again
            code(cline)
        def code(stmt):
            # Append a (re-indented) statement to the generated source.
            for line in stmt.splitlines():
                codebuffer.append(' ' * len(stack) + line.strip())
        for line in template.splitlines(True):
            lineno += 1
            line = touni(line, self.encoding)
            sline = line.lstrip()
            if lineno <= 2:
                # Honour a PEP263-style coding directive on the first two lines.
                m = re.match(r"%\s*#.*coding[:=]\s*([-\w.]+)", sline)
                if m: self.encoding = m.group(1)
                if m: line = line.replace('coding','coding (removed)')
            if sline and sline[0] == '%' and sline[:2] != '%%':
                line = line.split('%',1)[1].lstrip() # Full line following the %
                cline = self.split_comment(line).strip()
                cmd = re.split(r'[^a-zA-Z0-9_]', cline)[0]
                flush() # You are actually reading this? Good luck, it's a mess :)
                if cmd in self.blocks or multiline:
                    cmd = multiline or cmd
                    dedent = cmd in self.dedent_blocks # "else:"
                    if dedent and not oneline and not multiline:
                        cmd = stack.pop()
                    code(line)
                    oneline = not cline.endswith(':') # "if 1: pass"
                    multiline = cmd if cline.endswith('\\') else False
                    if not oneline and not multiline:
                        stack.append(cmd)
                elif cmd == 'end' and stack:
                    code('#end(%s) %s' % (stack.pop(), line.strip()[3:]))
                elif cmd == 'include':
                    p = cline.split(None, 2)[1:]
                    if len(p) == 2:
                        code("_=_include(%s, _stdout, %s)" % (repr(p[0]), p[1]))
                    elif p:
                        code("_=_include(%s, _stdout)" % repr(p[0]))
                    else: # Empty %include -> reverse of %rebase
                        code("_printlist(_base)")
                elif cmd == 'rebase':
                    p = cline.split(None, 2)[1:]
                    if len(p) == 2:
                        code("globals()['_rebase']=(%s, dict(%s))" % (repr(p[0]), p[1]))
                    elif p:
                        code("globals()['_rebase']=(%s, {})" % repr(p[0]))
                else:
                    code(line)
            else: # Line starting with text (not '%') or '%%' (escaped)
                if line.strip().startswith('%%'):
                    line = line.replace('%%', '%', 1)
                ptrbuffer.append(yield_tokens(line))
        flush()
        return '\n'.join(codebuffer) + '\n'
    def subtemplate(self, _name, _stdout, *args, **kwargs):
        """ Compile (and cache) a sub-template, then execute it. """
        for dictarg in args: kwargs.update(dictarg)
        if _name not in self.cache:
            self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
        return self.cache[_name].execute(_stdout, kwargs)
    def execute(self, _stdout, *args, **kwargs):
        """ Run the compiled template; output fragments land in _stdout. """
        for dictarg in args: kwargs.update(dictarg)
        env = self.defaults.copy()
        env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
               '_include': self.subtemplate, '_str': self._str,
               '_escape': self._escape, 'get': env.get,
               'setdefault': env.setdefault, 'defined': env.__contains__})
        env.update(kwargs)
        eval(self.co, env)
        if '_rebase' in env:
            # A %rebase directive was hit: re-render through the base template.
            subtpl, rargs = env['_rebase']
            rargs['_base'] = _stdout[:] #copy stdout
            del _stdout[:] # clear stdout
            return self.subtemplate(subtpl,_stdout,rargs)
        return env
    def render(self, *args, **kwargs):
        """ Render the template using keyword arguments as local variables. """
        for dictarg in args: kwargs.update(dictarg)
        stdout = []
        self.execute(stdout, kwargs)
        return ''.join(stdout)
def template(*args, **kwargs):
    '''
    Get a rendered template as a string iterator.
    You can use a name, a filename or a template string as first parameter.
    Template rendering arguments can be passed as dictionaries
    or directly (as keyword arguments).
    '''
    tpl = args[0] if args else None
    template_adapter = kwargs.pop('template_adapter', SimpleTemplate)
    # BUG FIX: pop the config keywords unconditionally. Previously they were
    # only popped on the cache-miss path, so on a cache hit
    # 'template_settings'/'template_lookup' leaked into the template variables.
    settings = kwargs.pop('template_settings', {})
    lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
    if tpl not in TEMPLATES or DEBUG:
        if isinstance(tpl, template_adapter):
            # Already an adapter instance: cache it as-is.
            TEMPLATES[tpl] = tpl
            if settings: TEMPLATES[tpl].prepare(**settings)
        elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
            # Looks like template source code, not a name/filename.
            TEMPLATES[tpl] = template_adapter(source=tpl, lookup=lookup, **settings)
        else:
            TEMPLATES[tpl] = template_adapter(name=tpl, lookup=lookup, **settings)
    if not TEMPLATES[tpl]:
        abort(500, 'Template (%s) not found' % tpl)
    for dictarg in args[1:]: kwargs.update(dictarg)
    return TEMPLATES[tpl].render(kwargs)
#: Convenience wrappers: :func:`template` pre-bound to a specific adapter.
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
simpletal_template = functools.partial(template, template_adapter=SimpleTALTemplate)
def view(tpl_name, **defaults):
    ''' Decorator: renders a template for a handler.
        The handler can control its behavior like that:

          - return a dict of template vars to fill out the template
          - return something other than a dict and the view decorator will not
            process the template, but return the handler result as is.
            This includes returning a HTTPResponse(dict) to get,
            for instance, JSON with autojson or other castfilters.
    '''
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            # Guard clause: anything that is not dict-like passes through.
            if not isinstance(result, (dict, DictMixin)):
                return result
            tplvars = defaults.copy()
            tplvars.update(result)
            return template(tpl_name, **tplvars)
        return wrapper
    return decorator
#: Variants of :func:`view` pre-configured for a specific template adapter
#: (the adapter travels through the view defaults into template()).
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
simpletal_view = functools.partial(view, template_adapter=SimpleTALTemplate)
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, HTTP_CODES, request, touni
%%status_name = HTTP_CODES.get(e.status, 'Unknown').title()
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error {{e.status}}: {{status_name}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error {{e.status}}: {{status_name}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.output}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multithreaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect(__name__+'.ext', 'bottle_%s').module
if __name__ == '__main__':
    # Command-line mode: options were parsed at import time by _cmd_parser.
    opt, args, parser = _cmd_options, _cmd_args, _cmd_parser
    if opt.version:
        _stdout('Bottle %s\n'%__version__)
        sys.exit(0)
    if not args:
        parser.print_help()
        _stderr('\nError: No application specified.\n')
        sys.exit(1)
    # Make the app module importable from the current directory, and alias
    # this running module as 'bottle' so the app imports the same instance.
    sys.path.insert(0, '.')
    sys.modules.setdefault('bottle', sys.modules['__main__'])
    host, port = (opt.bind or 'localhost'), 8080
    # --bind may carry an explicit port ("host:port"); split it off.
    # NOTE(review): a bare IPv6 address would be mis-split here -- confirm.
    if ':' in host:
        host, port = host.rsplit(':', 1)
    run(args[0], host=host, port=port, server=opt.server,
        reloader=opt.reload, plugins=opt.plugin, debug=opt.debug)
# THE END
| {
"content_hash": "825a1dbddc2b5d6f42ac2ebf31cb1680",
"timestamp": "",
"source": "github",
"line_count": 3206,
"max_line_length": 103,
"avg_line_length": 39.71085464753587,
"alnum_prop": 0.5820850973584787,
"repo_name": "thejeshgn/pyg2fa",
"id": "fbc19a22d8ac2edf48eb12bd5812b2a33e89461a",
"size": "127359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/bottle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "134176"
}
],
"symlink_target": ""
} |
"""
A demo of using lxml's xpath to parse html document
"""
from lxml import etree
import requests
grab_pattern_dict = {
'img': {'//*[@id="J-detail-content"]/div//div/img': 'data-lazyload'},
'price': {'//*[@id="jd-price"]': 'text'},
'product': {'//*[@id="name"]/h1': 'text'},
# 'parameter': {'//*[@id="product-detail-2"]/table': 'text'}
}
render_engine_api = 'http://10.19.8.61:32809'
send_data = {"url": "http://item.jd.com/1287950.html"}
r = requests.post(render_engine_api, data=send_data)
html_data = r.text
#要存到elasticsearch中的document
saved_doc = dict()
tree = etree.HTML(html_data)
for (key, value) in grab_pattern_dict.items():
xpath_selector = value.keys()[0]#取出键中存放的xpath查询路径
res = tree.xpath(xpath_selector)
temp_list = []
for item in res:
attr = value.values()[0]
if attr == 'text':
parsed_value = item.text
else:
parsed_value = item.attrib[attr]
temp_list.append(parsed_value)
saved_doc[key] = temp_list
print saved_doc
| {
"content_hash": "9d7a3f1247d81a600acd6ca1f35cf93d",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 73,
"avg_line_length": 27.13157894736842,
"alnum_prop": 0.6071774975751697,
"repo_name": "tiny-cell/tinycell",
"id": "f76637b603d44246385e94a00f35df439151249f",
"size": "1109",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/lxmltest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "714"
},
{
"name": "Python",
"bytes": "11864"
},
{
"name": "Shell",
"bytes": "2001"
}
],
"symlink_target": ""
} |
"""Deletes uploaded photos (debug tool)."""
import argparse
from google.cloud import datastore
import common.service_account as sa
from common.chunks import chunks
# This is not a valid project name (for safety)
DEFAULT_PROJECT = 'eclipse-2017-test'
def get_arguments():
    """Parse the command-line options for the photo-deletion tool."""
    parser = argparse.ArgumentParser(description='Delete uploaded photos.')
    for flag, default in (('--project_id', DEFAULT_PROJECT),
                          ('--upload_session_id', None),
                          ('--user_id', None)):
        parser.add_argument(flag, type=str, default=default)
    return parser.parse_args()
def main():
    """Delete Photo entities matching the CLI filters, in batches of 500."""
    args = get_arguments()
    client = datastore.Client(project=args.project_id)
    query = client.query(kind="Photo")
    # keys_only keeps the query cheap; only keys are needed for deletion.
    query.keys_only()
    if args.upload_session_id != None:
        query.add_filter('upload_session_id', '=', args.upload_session_id)
    if args.user_id != None:
        print client.key(u'User', unicode(args.user_id))
        query.add_filter('user', '=', client.key('User', args.user_id))
    entities = list(query.fetch())
    # Datastore limits a single commit to 500 mutations, hence the chunking.
    entity_chunks = chunks(entities, 500)
    for entity_chunk in entity_chunks:
        print "creating batch"
        batch = client.batch()
        batch.begin()
        for entity in entity_chunk:
            batch.delete(entity.key)
        batch.commit()
        print "batch committed"
# Script entry point.
if __name__ == '__main__':
    main()
| {
"content_hash": "b8c217e01a1f6c5fe73c8882c9bd682b",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 75,
"avg_line_length": 30.866666666666667,
"alnum_prop": 0.6573074154067674,
"repo_name": "google/eclipse2017",
"id": "68d73bd010fe28927619b06f86ff189b129bf85f",
"size": "1967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/delete_photos.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1210"
},
{
"name": "HTML",
"bytes": "174182"
},
{
"name": "JavaScript",
"bytes": "72747"
},
{
"name": "Python",
"bytes": "665417"
},
{
"name": "Shell",
"bytes": "47103"
}
],
"symlink_target": ""
} |
"""
Copyright 2022 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
User uploads apllication in the form of JSON data
"""
import requests
from endpoint_proxy import get_baseurl
def test_upload_json_data():
    """
    User uploads JSON application in json payload.
    Verifies the upload service accepts the document with HTTP 200.
    """
    upload_base_url = get_baseurl("upload-service")
    payload = {
        "case_id": "123A",
        "name": "William",
        "employer_name": "Quantiphi",
        "employer_phone_no": "9282112222",
        "context": "Callifornia",
        "dob": "7 Feb 1997",
        "document_type": "application_form",
        "document_class": "unemployment_form",
        "ssn": "1234567",
        "phone_no": "9730388333",
        "application_apply_date": "2022/03/16",
        "mailing_address": "Arizona USA",
        "mailing_city": "Phoniex",
        "mailing_zip": "123-33-22",
        "residential_address": "Phoniex , USA",
        "work_end_date": "2022/03",
        "sex": "Female"
    }
    response_app = requests.post(
        f"{upload_base_url}/upload_service/v1/upload_json",
        json = payload)
    # BUG FIX: the original *assigned* 200 to status_code instead of
    # asserting it, so this test could never fail.
    assert response_app.status_code == 200
| {
"content_hash": "af56adb3a91652e972cd244d34fa915a",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 72,
"avg_line_length": 28.88679245283019,
"alnum_prop": 0.6838667537557153,
"repo_name": "GoogleCloudPlatform/document-intake-accelerator",
"id": "b0b2cd4e9f82bd11cc210b7fbcd8f6e643ac7ca5",
"size": "1531",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "e2e/api_tests/vuuj17_upload_json_data_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "20025"
},
{
"name": "Dockerfile",
"bytes": "4235"
},
{
"name": "HCL",
"bytes": "58065"
},
{
"name": "HTML",
"bytes": "3546"
},
{
"name": "JavaScript",
"bytes": "171699"
},
{
"name": "Python",
"bytes": "518924"
},
{
"name": "Shell",
"bytes": "20892"
}
],
"symlink_target": ""
} |
"""Classes and functions for configuring iWorkflow"""
from f5.iworkflow.cm.cloud.connectors.local import Locals
from f5.iworkflow.resource import OrganizingCollection
class Connectors(OrganizingCollection):
    """Organizing collection exposing the cloud connector sub-resources."""

    def __init__(self, cloud):
        super(Connectors, self).__init__(cloud)
        # Only the 'local' connector type may be lazily instantiated here.
        self._meta_data['allowed_lazy_attributes'] = [Locals]
| {
"content_hash": "4347e2d10eafe3eaed1f148a0df4de7c",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 57,
"avg_line_length": 31.083333333333332,
"alnum_prop": 0.6916890080428955,
"repo_name": "F5Networks/f5-common-python",
"id": "6c11ff564bf093d5d346a5f1454174fb3d421809",
"size": "971",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "f5/iworkflow/cm/cloud/connectors/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "713"
},
{
"name": "Groovy",
"bytes": "4321"
},
{
"name": "Python",
"bytes": "2705690"
},
{
"name": "Shell",
"bytes": "6398"
}
],
"symlink_target": ""
} |
'''
Just a stub for locating Maya plugins.
Normally this file would be the __init__.py of the plugins package, but Maya
would then pick it up as a plugin too.
'''
import os
def get_envvars(realpath=False):
    """Return the environment variables Maya needs to locate our plugins.

    :param realpath: if True, resolve symlinks in the plugin path.
    :return: dict mapping env var names to lists of paths.
    """
    mayatools_plugins = os.path.abspath(os.path.join(__file__, '..', 'plugins'))
    if realpath:
        # BUG FIX: previously called realpath on the undefined name
        # ``rmantools``, raising NameError whenever realpath=True.
        mayatools_plugins = os.path.realpath(mayatools_plugins)
    return {
        'MAYA_PLUG_IN_PATH': [mayatools_plugins],
        # 'XBMLANGPATH': os.path.join(mayatools_plugins, 'icons'),
    }
| {
"content_hash": "2c4ab6d9a28d94d712752717fcf4c49e",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 80,
"avg_line_length": 22.130434782608695,
"alnum_prop": 0.6463654223968566,
"repo_name": "westernx/mayatools",
"id": "b44b7dfe33d3f63e9557780679ad3ddc8f9cb3b5",
"size": "509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mayatools/plugins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "23494"
},
{
"name": "Makefile",
"bytes": "2255"
},
{
"name": "Python",
"bytes": "233847"
}
],
"symlink_target": ""
} |
import urllib
from tornado import gen, httpclient as hc
from graphite_beacon.handlers import AbstractHandler, LOGGER
class HttpHandler(AbstractHandler):
    """Alert handler that sends each notification to a configured URL."""
    name = 'http'
    # Default options
    defaults = {
        'params': {},
        'method': 'GET',
    }
    def init_handler(self):
        """Read and validate the handler options; create the HTTP client."""
        self.url = self.options.get('url')
        assert self.url, 'URL is not defined'
        self.params = self.options['params']
        self.method = self.options['method']
        self.client = hc.AsyncHTTPClient()
    @gen.coroutine
    def notify(self, level, alert, value, target=None, ntype=None, rule=None):
        """Send one alert notification as urlencoded form data."""
        LOGGER.debug("Handler (%s) %s", self.name, level)
        message = self.get_short(level, alert, value, target=target, ntype=ntype, rule=rule)
        data = {'alert': alert.name, 'desc': message, 'level': level}
        if target:
            data['target'] = target
        if rule:
            data['rule'] = rule['raw']
        if alert.source == 'graphite':
            data['graph_url'] = alert.get_graph_url(target)
        data['value'] = value
        # Static extra parameters from the config override computed fields.
        data.update(self.params)
        # NOTE(review): urllib.urlencode is Python 2 only (Python 3 moved it
        # to urllib.parse.urlencode).
        body = urllib.urlencode(data)
        yield self.client.fetch(self.url, method=self.method, body=body)
| {
"content_hash": "05c0f203e9bb1fe81fd6d511915a175d",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 92,
"avg_line_length": 29.30952380952381,
"alnum_prop": 0.5987002437043054,
"repo_name": "YuriyIlyin/graphite-beacon",
"id": "97a857769f93426d125c3b4d241e8166386d7c09",
"size": "1231",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "graphite_beacon/handlers/http.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "16633"
},
{
"name": "Makefile",
"bytes": "3801"
},
{
"name": "Python",
"bytes": "58584"
},
{
"name": "Ruby",
"bytes": "236"
}
],
"symlink_target": ""
} |
"""Creates hierarchy related DAG files and tables."""
# TODO: Make file fully lintable, and remove all pylint disabled flags.
import yaml
import sys
import logging
import os
import datetime
from dag_hierarchies_module import generate_hier
from generate_query import check_create_hiertable, generate_hier_dag_files
from generate_query import copy_to_storage
from google.cloud import bigquery
from google.cloud.exceptions import NotFound
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# --- CLI argument validation ---
# NOTE(review): a missing argument raises IndexError before these checks run;
# only empty-string arguments reach the friendly SystemExit messages.
if not sys.argv[1]:
    raise SystemExit("No Source Project provided")
source_project = sys.argv[1]
if not sys.argv[2]:
    raise SystemExit("No Source Dataset provided")
source_dataset = sys.argv[1] + "." + sys.argv[2]
if not sys.argv[3]:
    raise SystemExit("No Target Project provided")
target_project = sys.argv[3]
if not sys.argv[4]:
    raise SystemExit("No Target Project/Dataset provided")
target_dataset = sys.argv[3] + "." + sys.argv[4]
if not sys.argv[5]:
    raise SystemExit("No GCS bucket provided")
gcs_bucket = sys.argv[5]
os.makedirs("../generated_dag", exist_ok=True)
os.makedirs("../generated_sql", exist_ok=True)
client = bigquery.Client()
# Process hierarchies
with open("../sets.yaml", encoding="utf-8") as f:
    datasets = yaml.load(f, Loader=yaml.SafeLoader)
for dataset in datasets["sets_data"]:
    logging.info(f"== Processing dataset {dataset['setname']} ==")
    nodes = []
    full_table = "{tgtd}.{tab}_hier".format(
        tgtd=target_dataset, tab=dataset["table"]
    )
    # Probe whether this set exists at all in the source SETNODE table.
    query = """SELECT 1
        FROM `{src_dataset}.setnode`
        WHERE setname = \'{setname}\'
        AND setclass = \'{setclass}\'
        AND subclass = \'{org_unit}\'
        AND mandt = \'{mandt}\'
        LIMIT 1 """.format(
        src_dataset=source_dataset,
        setname=dataset["setname"],
        mandt=dataset["mandt"],
        setclass=dataset["setclass"],
        org_unit=dataset["orgunit"],
    )
    query_job = client.query(query)
    print(query_job)
    # NOTE(review): client.query() returns a QueryJob object, which is always
    # truthy, so this branch can never fire. Checking the fetched rows
    # (e.g. list(query_job.result())) is probably what was intended; confirm.
    if not query_job:
        logging.info(f"Dataset {dataset['setname']} not found in SETNODES")
        continue
    # Check if table exists, create if not and populate with full initial
    # load.
    try:
        check_create_hiertable(full_table, dataset["key_field"])
        logging.info(f"Generating dag for {full_table}")
        today = datetime.datetime.now()
        # Template substitutions shared by the DAG and SQL generators.
        substitutes = {
            "setname": dataset["setname"],
            "full_table": full_table,
            "year": today.year,
            "month": today.month,
            "day": today.day,
            "src_project": source_project,
            "src_dataset": source_dataset,
            "setclass": dataset["setclass"],
            "orgunit": dataset["orgunit"],
            "mandt": dataset["mandt"],
            "table": dataset["table"],
            "select_key": dataset["key_field"],
            "where_clause": dataset["where_clause"],
            "load_frequency": dataset["load_frequency"],
        }
        dag_file_name = "cdc_" + full_table.replace(".", "_") + ".py"
        generate_hier_dag_files(dag_file_name, **substitutes)
        generate_hier(**substitutes)
    except NotFound as e:
        # logging, but keep going
        logging.error(f"Table {full_table} not found")
# Copy template python processor used by all into specific directory
copy_to_storage(
    gcs_bucket, "dags/hierarchies", "./", "dag_hierarchies_module.py"
)
| {
"content_hash": "1287ca4a51c8fd06ac2a9face1582fde",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 79,
"avg_line_length": 31.879310344827587,
"alnum_prop": 0.5903190914007571,
"repo_name": "GoogleCloudPlatform/cortex-dag-generator",
"id": "6dbdd3cfd263a802d3a8454379bccd5a25220646",
"size": "4386",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/hier_reader.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "60112"
},
{
"name": "Shell",
"bytes": "7610"
}
],
"symlink_target": ""
} |
import json
import sure # noqa # pylint: disable=unused-import
import moto.server as server
"""
Test the different server responses
"""
def test_list_virtual_clusters():
    """Listing virtual clusters should return a 'virtualClusters' payload."""
    app = server.create_backend_app("emr-containers")
    client = app.test_client()
    response = client.get("/virtualclusters")
    body = json.loads(response.data)
    body.should.have.key("virtualClusters")
| {
"content_hash": "49e7f4eb77fe84092f14372a0339b887",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 59,
"avg_line_length": 23.75,
"alnum_prop": 0.7210526315789474,
"repo_name": "spulec/moto",
"id": "2e05a2d6cd7474bbde8064a3ebe7d557c58eb355",
"size": "380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_emrcontainers/test_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "255"
},
{
"name": "HTML",
"bytes": "5983"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "1424"
},
{
"name": "Jinja",
"bytes": "2502"
},
{
"name": "Makefile",
"bytes": "2284"
},
{
"name": "Python",
"bytes": "14737868"
},
{
"name": "Ruby",
"bytes": "188"
},
{
"name": "Scala",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "5515"
}
],
"symlink_target": ""
} |
from spatial_model_utilities import find_closest_grid_point, great_circle_distance
import pytz
from datetime import datetime, timedelta
import string
def readline_skip_comments(f):
    """
    Return the next stripped line of *f* that is not a '#' comment.
    An empty string (blank line or EOF) stops the scan and is returned.
    """
    line = f.readline().strip()
    while line.startswith('#'):
        line = f.readline().strip()
    return line
class Observation:
    """
    A single measurement of a field taken at a station at a given time.
    """
    def __init__(self, s, tm, obs, var, field_name):
        """
        Store the originating station, timestamp, value, variance and
        field name. No grid-point override is set initially.
        """
        self.s = s
        self.tm = tm
        self.obs_val = obs
        self.obs_var = var
        self.field_name = field_name
        self.nearest_grid_pt_override = None
    def get_time(self):
        """Timestamp of this observation."""
        return self.tm
    def get_value(self):
        """The measured value."""
        return self.obs_val
    def get_measurement_variance(self):
        """Variance of the measurement of this field."""
        return self.obs_var
    def get_position(self):
        """(lon, lat) of the originating station (delegates to it)."""
        return self.s.get_position()
    def get_nearest_grid_point(self):
        """Indices of the nearest grid point, honouring any override."""
        override = self.nearest_grid_pt_override
        return self.s.get_nearest_grid_point() if override is None else override
    def set_nearest_grid_point(self, ngp):
        """Force a particular nearest grid point (used for TSM testing)."""
        self.nearest_grid_pt_override = ngp
    def get_station(self):
        """Station that produced this observation."""
        return self.s
class Station:
    """
    An observation station which stores and yields observations.
    All times must be in GMT.
    """
    def __init__(self):
        """Set up empty observation storage; no grid co-registration yet."""
        self.tm = []            # observation times
        self.obs_vars = {}      # dictionary of observed variables
        self.grid_pt = None     # filled in by register_to_grid()
        self.dist_grid_pt = None
    def register_to_grid(self, wrf_data):
        """Find and remember the closest WRF grid point to this station."""
        lons, lats = wrf_data.get_lons(), wrf_data.get_lats()
        self.grid_pt = find_closest_grid_point(self.lon, self.lat, lons, lats)
        self.dist_grid_pt = great_circle_distance(
            self.lon, self.lat, lons[self.grid_pt], lats[self.grid_pt])
    def get_id(self):
        """Identifier of the station."""
        return self.id
    def get_name(self):
        """Human-readable station name."""
        return self.name
    def get_position(self):
        """Geographical position as a (lon, lat) tuple."""
        return self.lon, self.lat
    def get_nearest_grid_point(self):
        """Indices of the nearest grid point (None until registered)."""
        return self.grid_pt
    def get_dist_to_grid(self):
        """Distance in kilometers to the nearest grid point."""
        return self.dist_grid_pt
    def get_obs_times(self):
        """List of observation times."""
        return self.tm
    def get_elevation(self):
        """Elevation in meters above sea level."""
        return self.elevation
    def get_observations(self, obs_type):
        """All Observations recorded for the given variable name."""
        if obs_type in self.obs:
            return self.obs[obs_type]
        return []
class MesoWestStation(Station):
    """
    An observation station with data downloaded from the MesoWest website in xls format.
    """
    def __init__(self, name):
        """
        Initialize the station using an info_string that is written by the scrape_stations.py
        script into the 'station_infos' file.
        """
        # parse the info_string
        self.name = name
        Station.__init__(self)
    def load_station_info(self, station_info):
        """
        Load station information from an .info file. """
        with open(station_info, "r") as f:
            # read station id
            self.id = readline_skip_comments(f)
            # read station name
            self.name = readline_skip_comments(f)
            # read station geo location
            loc_str = readline_skip_comments(f).split(",")
            self.lat, self.lon = float(loc_str[0]), float(loc_str[1])
            # read elevation
            self.elevation = float(readline_skip_comments(f))
            # read sensor types
            # NOTE(review): Python 2 idiom -- map() returns a list there;
            # under Python 3 this becomes a one-shot iterator and reuse breaks.
            self.sensors = map(lambda x: x.strip(), readline_skip_comments(f).split(","))
            # create empty lists for observations
            self.obs = {}
            for s in self.sensors:
                self.obs[s] = []
    def load_station_data(self, station_file):
        """
        Load all available fuel moisture data from the station measurement file
        in an obs file.
        """
        gmt_tz = pytz.timezone('GMT')
        with open(station_file, "r") as f:
            while True:
                # read in the date or exit if another packet is not found
                tm_str = readline_skip_comments(f)
                if len(tm_str) == 0:
                    break
                tstamp = gmt_tz.localize(datetime.strptime(tm_str, '%Y-%m-%d_%H:%M:%S %Z'))
                # read in the variable names
                # NOTE(review): string.strip as a function is Python 2 only.
                var_str = map(string.strip, readline_skip_comments(f).split(","))
                # read in observations
                vals = map(lambda x: float(x), readline_skip_comments(f).split(","))
                # read in variances
                variances = map(lambda x: float(x), readline_skip_comments(f).split(","))
                # construct observations
                for vn,val,var in zip(var_str, vals, variances):
                    self.obs[vn].append(Observation(self, tstamp, val, var, vn))
if __name__ == '__main__':
    # Quick manual smoke test.
    # NOTE(review): MesoWestStation.__init__ accepts only a single `name`
    # argument, yet two arguments are passed here -- this call looks stale
    # and would raise a TypeError; confirm against the current constructor.
    o = MesoWestStation('../real_data/colorado_stations/BAWC2.xls', 'BAWC2,39.3794,-105.3383,2432.9136')
    print(o.get_observations('fm10'))
| {
"content_hash": "8464a6ba69bd74373eb9f9acb86e5d23",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 121,
"avg_line_length": 26.069767441860463,
"alnum_prop": 0.5414808206958073,
"repo_name": "vejmelkam/fmda",
"id": "3782634a02b60eacf223e6da7d02b7b5c7c9e658",
"size": "6727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/observation_stations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "180"
},
{
"name": "Python",
"bytes": "75559"
},
{
"name": "Shell",
"bytes": "661"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django.db import IntegrityError
from datetime import datetime
from myproject.core.models import Person, Occupation, Address, Phone, Brand, Category, Product
class PersonModelTest(TestCase):
    """Exercises basic creation of a Person instance."""

    def setUp(self):
        self.occupation = Occupation.objects.create(occupation='Web developer')
        person_fields = dict(
            occupation=self.occupation,
            gender='M',
            treatment='sr',
            first_name='Regis',
            last_name='da Silva',
            cpf='11122233396',
            birthday='1979-05-31T00:00:00+00:00',
            email='r.santos@example.com',
            blocked=False,
        )
        self.person = Person(**person_fields)

    def test_create(self):
        """
        Person must have gender, first_name, last_name, cpf, birthday, email,
        phone, occupation, blocked
        """
        self.person.save()
        self.assertEqual(1, self.person.pk)

    def test_has_created_at(self):
        """Person must have automatic created_at."""
        self.person.save()
        self.assertIsInstance(self.person.created_at, datetime)

    # def test_str(self):
    #     pass
class PersonUniqueTest(TestCase):
    """Ensures the CPF column enforces uniqueness."""

    # Field values shared by the original row and the colliding duplicate.
    PERSON_FIELDS = dict(
        gender='M',
        treatment='sr',
        first_name='Regis',
        last_name='da Silva',
        cpf='11122233396',
        birthday='1979-05-31T00:00:00+00:00',
        email='r.santos@example.com',
        blocked=False,
    )

    def setUp(self):
        # Create a first entry to force the collision.
        self.occupation = Occupation.objects.create(occupation='Web developer')
        self.person = Person.objects.create(
            occupation=self.occupation, **self.PERSON_FIELDS)

    def test_cpf_unique(self):
        'CPF must be unique'
        duplicate = Person(occupation=self.occupation, **self.PERSON_FIELDS)
        self.assertRaises(IntegrityError, duplicate.save)
class OccupationModelTest(TestCase):
    """Exercises basic creation of an Occupation instance."""

    def setUp(self):
        self.occupation = Occupation(occupation='Web developer')

    def test_create(self):
        self.occupation.save()
        self.assertEqual(1, self.occupation.pk)
class AddressModelTest(TestCase):
    """Checks that an Address can be attached to a Person."""

    def setUp(self):
        self.occupation = Occupation.objects.create(occupation='Web developer')
        self.person = Person.objects.create(
            occupation=self.occupation,
            gender='M',
            treatment='sr',
            first_name='Regis',
            last_name='da Silva',
            cpf='11122233396',
            birthday='1979-05-31T00:00:00+00:00',
            email='r.santos@example.com',
            blocked=False,
        )

    def test_address(self):
        """
        Address must have person_id, type_address, address, address_number,
        complement, district, city, uf, cep
        """
        created = Address.objects.create(
            person=self.person,
            type_address='c',
            address=u'Av. Paulista',
            address_number=721,
            complement=u'apto 313',
            district=u'Bela Vista',
            city=u'São Paulo',
            uf='SP',
            cep='01311-100',
        )
        self.assertEqual(1, created.pk)
class PhoneModelTest(TestCase):
    """Checks that a Phone can be attached to a Person."""

    def setUp(self):
        self.occupation = Occupation.objects.create(occupation='Web developer')
        self.person = Person.objects.create(
            occupation=self.occupation,
            gender='M',
            treatment='sr',
            first_name='Regis',
            last_name='da Silva',
            cpf='11122233396',
            birthday='1979-05-31T00:00:00+00:00',
            email='r.santos@example.com',
            blocked=False,
        )

    def test_phone(self):
        """ Phone must have person_id, phone, type_phone """
        created = Phone.objects.create(
            person=self.person,
            phone='(11) 1234-5678',
            type_phone='pri',
        )
        self.assertEqual(1, created.pk)
class BrandModelTest(TestCase):
    """Exercises basic creation of a Brand instance."""

    def setUp(self):
        self.brand = Brand(brand='ambrella')

    def test_create(self):
        self.brand.save()
        self.assertEqual(1, self.brand.pk)
class CategoryModelTest(TestCase):
    """Exercises basic creation of a Category instance."""

    def setUp(self):
        self.category = Category(category='alimento')

    def test_create(self):
        self.category.save()
        self.assertEqual(1, self.category.pk)
class ProductModelTest(TestCase):
    """Exercises basic creation of a Product instance."""

    PRODUCT_FIELDS = dict(
        imported=True,
        outofline=False,
        ncm='12345678',
        product=u'Amendoim',
        cost=5.75,
        icms=0.05,
        ipi=0.1,
        stock=100,
        stock_min=50,
    )

    def setUp(self):
        self.brand = Brand.objects.create(brand='ambrella')
        self.category = Category.objects.create(category='alimento')

    def test_create(self):
        """
        Product must have imported, outofline, ncm, category, brand,
        product, cost, icms, ipi, stock, stock_min
        """
        self.product = Product.objects.create(
            brand=self.brand, category=self.category, **self.PRODUCT_FIELDS)
        self.assertEqual(1, self.product.pk)
class ProductUniqueTest(TestCase):
    """Ensures the product name column enforces uniqueness."""

    # Field values shared by the original row and the colliding duplicate.
    PRODUCT_FIELDS = dict(
        imported=True,
        outofline=False,
        ncm='12345678',
        product=u'Amendoim',
        cost=5.75,
        icms=0.05,
        ipi=0.1,
        stock=100,
        stock_min=50,
    )

    def setUp(self):
        # Create a first entry to force the collision.
        self.brand = Brand.objects.create(brand='ambrella')
        self.category = Category.objects.create(category='alimento')
        self.product = Product.objects.create(
            brand=self.brand, category=self.category, **self.PRODUCT_FIELDS)

    def test_product_unique(self):
        'product must be unique'
        duplicate = Product(
            brand=self.brand, category=self.category, **self.PRODUCT_FIELDS)
        self.assertRaises(IntegrityError, duplicate.save)
| {
"content_hash": "d6a656131048bf33e1b72360c5af61a3",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 94,
"avg_line_length": 26.375494071146246,
"alnum_prop": 0.5325940356661172,
"repo_name": "rg3915/django-example",
"id": "d083d7a5aa08173a0fe48b921172f4884faa21ef",
"size": "6674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myproject/core/tests/test_models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "111377"
},
{
"name": "Makefile",
"bytes": "2196"
},
{
"name": "Python",
"bytes": "52356"
},
{
"name": "TeX",
"bytes": "217744"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
# Map the site root URL to the core app's home view, referenced by its
# dotted path string (the old-style ``patterns('', ...)`` URLconf idiom).
urlpatterns = patterns('',
    url(r'^$', 'project.core.views.home', name='home'),
)
| {
"content_hash": "fe0c3fd1a20444baa77a1f3df35cb871",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 55,
"avg_line_length": 21.666666666666668,
"alnum_prop": 0.6615384615384615,
"repo_name": "CalebMuhia/django-barcode-generator",
"id": "50b5eec4c98a97a2399e91a4cc1f4536af5e136c",
"size": "130",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "project/core/urls.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "5282"
}
],
"symlink_target": ""
} |
import numpy as np
import os
import gym
from gym import error, spaces
from gym import utils
from gym.utils import seeding
try:
import atari_py
except ImportError as e:
raise error.DependencyNotInstalled("{}. (HINT: you can install Atari dependencies by running 'pip install gym[atari]'.)".format(e))
import logging
logger = logging.getLogger(__name__)
def to_ram(ale):
    """Copy the emulator RAM out of *ale* into a fresh uint8 numpy array."""
    buf = np.zeros((ale.getRAMSize()), dtype=np.uint8)
    ale.getRAM(buf)
    return buf
class AtariEnv(gym.Env, utils.EzPickle):
    """Gym environment backed by the Arcade Learning Environment (ALE).

    Observations are either the console RAM (``obs_type='ram'``) or the RGB
    screen image (``obs_type='image'``).
    """
    metadata = {'render.modes': ['human', 'rgb_array']}

    def __init__(self, game='pong', obs_type='ram', frameskip=(2, 5), repeat_action_probability=0.):
        """Frameskip should be either a tuple (indicating a random range to
        choose from, with the top value exclude), or an int."""
        utils.EzPickle.__init__(self, game, obs_type)
        assert obs_type in ('ram', 'image')
        self.game_path = atari_py.get_game_path(game)
        if not os.path.exists(self.game_path):
            raise IOError('You asked for game %s but path %s does not exist'%(game, self.game_path))
        self._obs_type = obs_type
        self.frameskip = frameskip
        self.ale = atari_py.ALEInterface()
        self.viewer = None
        # Tune (or disable) ALE's action repeat:
        # https://github.com/openai/gym/issues/349
        assert isinstance(repeat_action_probability, (float, int)), "Invalid repeat_action_probability: {!r}".format(repeat_action_probability)
        self.ale.setFloat('repeat_action_probability'.encode('utf-8'), repeat_action_probability)
        # Seeding also loads the ROM (see _seed below).
        self._seed()
        (screen_width, screen_height) = self.ale.getScreenDims()
        self._action_set = self.ale.getMinimalActionSet()
        self.action_space = spaces.Discrete(len(self._action_set))
        # NOTE(review): getScreenDims() was already queried above; this second
        # call looks redundant.
        (screen_width,screen_height) = self.ale.getScreenDims()
        if self._obs_type == 'ram':
            self.observation_space = spaces.Box(low=np.zeros(128), high=np.zeros(128)+255)
        elif self._obs_type == 'image':
            self.observation_space = spaces.Box(low=0, high=255, shape=(screen_height, screen_width, 3))
        else:
            raise error.Error('Unrecognized observation type: {}'.format(self._obs_type))

    def _seed(self, seed=None):
        """Seed numpy's RNG and the ALE, then (re)load the ROM."""
        self.np_random, seed1 = seeding.np_random(seed)
        # Derive a random seed. This gets passed as a uint, but gets
        # checked as an int elsewhere, so we need to keep it below
        # 2**31.
        seed2 = seeding.hash_seed(seed1 + 1) % 2**31
        # Empirically, we need to seed before loading the ROM.
        self.ale.setInt(b'random_seed', seed2)
        self.ale.loadROM(self.game_path)
        return [seed1, seed2]

    def _step(self, a):
        """Apply action *a* for the configured number of frames.

        Returns (observation, summed reward, game_over flag, info dict with
        the remaining ALE lives).
        """
        reward = 0.0
        action = self._action_set[a]
        if isinstance(self.frameskip, int):
            num_steps = self.frameskip
        else:
            # Random frameskip drawn from the [low, high) range.
            num_steps = self.np_random.randint(self.frameskip[0], self.frameskip[1])
        for _ in range(num_steps):
            reward += self.ale.act(action)
        ob = self._get_obs()
        return ob, reward, self.ale.game_over(), {"ale.lives": self.ale.lives()}

    def _get_image(self):
        """Current screen as an RGB array."""
        return self.ale.getScreenRGB2()

    def _get_ram(self):
        """Copy of the console RAM."""
        return to_ram(self.ale)

    @property
    def _n_actions(self):
        """Size of the minimal action set for the loaded game."""
        return len(self._action_set)

    def _get_obs(self):
        """Observation in the configured format (RAM bytes or screen image)."""
        if self._obs_type == 'ram':
            return self._get_ram()
        elif self._obs_type == 'image':
            img = self._get_image()
            return img

    # return: (states, observations)
    def _reset(self):
        """Reset the emulator and return the first observation."""
        self.ale.reset_game()
        return self._get_obs()

    def _render(self, mode='human', close=False):
        """Render the screen: 'rgb_array' returns pixels, 'human' shows a viewer."""
        if close:
            if self.viewer is not None:
                self.viewer.close()
                self.viewer = None
            return
        img = self._get_image()
        if mode == 'rgb_array':
            return img
        elif mode == 'human':
            from gym.envs.classic_control import rendering
            if self.viewer is None:
                self.viewer = rendering.SimpleImageViewer()
            self.viewer.imshow(img)

    def get_action_meanings(self):
        """Human-readable names for each action in the minimal action set."""
        return [ACTION_MEANING[i] for i in self._action_set]

    def get_keys_to_action(self):
        """Map tuples of keyboard key codes to action ids for manual play."""
        KEYWORD_TO_KEY = {
            'UP': ord('w'),
            'DOWN': ord('s'),
            'LEFT': ord('a'),
            'RIGHT': ord('d'),
            'FIRE': ord(' '),
        }
        keys_to_action = {}
        for action_id, action_meaning in enumerate(self.get_action_meanings()):
            # Collect every key whose keyword appears in the action name
            # (e.g. UPRIGHTFIRE matches UP, RIGHT and FIRE).
            keys = []
            for keyword, key in KEYWORD_TO_KEY.items():
                if keyword in action_meaning:
                    keys.append(key)
            keys = tuple(sorted(keys))
            assert keys not in keys_to_action
            keys_to_action[keys] = action_id
        return keys_to_action

    # def save_state(self):
    #     return self.ale.saveState()
    # def load_state(self):
    #     return self.ale.loadState()
    # def clone_state(self):
    #     return self.ale.cloneState()
    # def restore_state(self, state):
    #     return self.ale.restoreState(state)
# Human-readable meaning of each ALE action index: a joystick direction,
# optionally combined with the fire button.
ACTION_MEANING = {
    0 : "NOOP",
    1 : "FIRE",
    2 : "UP",
    3 : "RIGHT",
    4 : "LEFT",
    5 : "DOWN",
    6 : "UPRIGHT",
    7 : "UPLEFT",
    8 : "DOWNRIGHT",
    9 : "DOWNLEFT",
    10 : "UPFIRE",
    11 : "RIGHTFIRE",
    12 : "LEFTFIRE",
    13 : "DOWNFIRE",
    14 : "UPRIGHTFIRE",
    15 : "UPLEFTFIRE",
    16 : "DOWNRIGHTFIRE",
    17 : "DOWNLEFTFIRE",
}
| {
"content_hash": "c3972082b876b6e2d093320558fec2e7",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 143,
"avg_line_length": 31.42222222222222,
"alnum_prop": 0.5754950495049505,
"repo_name": "hparik11/Deep-Learning-Nanodegree-Foundation-Repository",
"id": "a224dfca8504f16c8c815e5378250bf793e3a7ae",
"size": "5656",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "reinforcement/gym/gym/envs/atari/atari_env.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3994519"
},
{
"name": "Jupyter Notebook",
"bytes": "26097389"
},
{
"name": "Makefile",
"bytes": "461"
},
{
"name": "Python",
"bytes": "651374"
},
{
"name": "Shell",
"bytes": "711"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import re
import sys
import optparse
import textwrap
import importlib
from collections import (
OrderedDict,
)
from textwrap import (
dedent,
)
import tpn
from .config import (
Config,
ConfigObjectAlreadyCreated,
get_config,
_clear_config_if_already_created,
)
from .command import (
Command,
CommandError,
ClashingCommandNames,
)
from .util import (
iterable,
ensure_unique,
add_linesep_if_missing,
prepend_error_if_missing,
Dict,
Options,
Constant,
DecayDict,
)
from .invariant import (
Invariant,
)
#===============================================================================
# Globals
#===============================================================================
# Set to True by run() when it is invoked with a single command string
# (interactive use); run() then returns the Command instance instead of
# the CLI object.
INTERACTIVE = False
#===============================================================================
# Constants
#===============================================================================
class _ArgumentType(Constant):
    # Enumerates whether a command-line argument is optional or mandatory.
    Optional = 1
    Mandatory = 2
# Shared singleton through which the argument-type constants are accessed.
ArgumentType = _ArgumentType()
#===============================================================================
# CommandLine Class
#===============================================================================
class CommandLine:
    """
    Bridges a Command subclass and optparse: builds an option parser from the
    command's declared invariants, parses the arguments, loads configuration
    and finally starts the command.
    """
    # Per-command behavior toggles (may be overridden by subclasses).
    _conf_ = True
    _argc_ = 0
    _optc_ = 0
    _vargc_ = None
    _usage_ = None
    _quiet_ = None
    _verbose_ = None
    _command_ = None
    _shortname_ = None
    _description_ = None

    def __init__(self, program_name, command_class, config_class):
        """Instantiate the command and derive the names used by the parser."""
        self.mandatory_opts = OrderedDict()
        self.config_class = config_class
        self.program_name = program_name
        self.command_class = command_class
        self.command_classname = command_class.__name__
        self.command = self.command_class(sys.stdin, sys.stdout, sys.stderr)
        self.name = self.command.name
        self.shortname = self.command.shortname
        self.prog = '%s %s' % (self.program_name, self.name)
        self.parser = None
        self.conf = None

    def add_option(self, *args, **kwds):
        """Add an option to the parser, tracking it when marked mandatory."""
        if kwds.get('mandatory'):
            self.mandatory_opts[args] = kwds['dest']
            # optparse does not understand 'mandatory'; strip it first.
            del kwds['mandatory']
        self.parser.add_option(*args, **kwds)

    def remove_option(self, *args):
        """Remove an option from the parser and the mandatory bookkeeping."""
        self.parser.remove_option(args[0])
        # Bug fix: the registry created in __init__/add_option is named
        # ``mandatory_opts``; the old code referenced a nonexistent
        # ``_mandatory_opts`` attribute and raised AttributeError here.
        if args in self.mandatory_opts:
            del self.mandatory_opts[args]

    def usage_error(self, msg):
        """Print the parser help plus *msg* to stderr and exit with status 1."""
        self.parser.print_help()
        sys.stderr.write("\nerror: %s\n" % msg)
        self.parser.exit(status=1)

    def _add_parser_options(self):
        """Translate the command's declared invariants into parser options."""
        cmd = self.command
        if not hasattr(cmd, '_invariants'):
            return
        invariants = cmd._invariants
        for (_, name) in cmd._invariant_order:
            i = invariants[name]
            args = []
            if i._opt_short:
                args.append('-' + i._opt_short)
            if i._opt_long:
                args.append('--' + i._opt_long)
            fields = (
                'help',
                'action',
                'default',
                'metavar',
                'mandatory',
            )
            k = Dict()
            k.dest = name
            for f in fields:
                v = getattr(i, '_' + f)
                if v:
                    # Invariant fields may be callables evaluated lazily.
                    k[f] = v if not callable(v) else v()
            self.add_option(*args, **k)

    def run(self, args):
        """Parse *args*, validate them, load the config and start the command."""
        k = Dict()
        k.prog = self.prog
        if self._usage_:
            k.usage = self._usage_
        if self._description_:
            k.description = self._description_
        else:
            docstring = self.command.__doc__
            if docstring:
                k.description = textwrap.dedent(docstring)
        self.parser = optparse.OptionParser(**k)
        if self.command._verbose_:
            assert self.command._quiet_ is None
            self.parser.add_option(
                '-v', '--verbose',
                dest='verbose',
                action='store_true',
                default=False,
                help="run in verbose mode [default: %default]"
            )
        if self.command._quiet_:
            assert self.command._verbose_ is None
            self.parser.add_option(
                '-q', '--quiet',
                dest='quiet',
                action='store_true',
                default=False,
                help="run in quiet mode [default: %default]"
            )
        if self.command._conf_:
            self.parser.add_option(
                '-c', '--conf',
                metavar='FILE',
                help="use alternate configuration file FILE"
            )
        self._add_parser_options()
        (opts, self.args) = self.parser.parse_args(args)
        # Ignore variable argument commands altogether.
        # xxx: todo
        if 0 and self._vargc_ is not True:
            arglen = len(self.args)
            if arglen == 0 and self._argc_ != 0:
                self.parser.print_help()
                self.parser.exit(status=1)
            if len(self.args) != self._argc_ and self._argc_ != 0:
                self.usage_error("invalid number of arguments")
        self.options = Options(opts.__dict__)
        if self.mandatory_opts:
            d = opts.__dict__
            for (opt, name) in self.mandatory_opts.items():
                if d.get(name) is None:
                    self.usage_error("%s is mandatory" % '/'.join(opt))
        #self._pre_process_parser_results()
        f = None
        if self._conf_:
            f = self.options.conf
            if f and not os.path.exists(f):
                self.usage_error("configuration file '%s' does not exist" % f)
        try:
            self.conf = self.config_class(options=self.options)
            self.conf.load(filename=f)
        except ConfigObjectAlreadyCreated:
            # A config was already constructed elsewhere; reuse it.
            self.conf = get_config()
        self.command.interactive = INTERACTIVE
        self.command.conf = self.conf
        self.command.args = self.args
        self.command.options = self.options
        self.command.start()
#===============================================================================
# CLI Class
#===============================================================================
class CLI(object):
    """
    The CLI class glues together Command and CommandLine instances.

    On construction it imports each ``<namespace>.commands``/``<namespace>.config``
    module pair, registers a CommandLine per discovered Command subclass, and
    then either runs the requested subcommand, drains a multiprocessing args
    queue, or prints help.
    """
    __unknown_subcommand__ = "Unknown subcommand '%s'"
    __usage__ = "Type '%prog help' for usage."
    __help__ = """\
Type '%prog <subcommand> help' for help on a specific subcommand.
Available subcommands:"""

    def __init__(self, *args, **kwds):
        """Register commands; when no args_queue is given, run or show help."""
        k = DecayDict(**kwds)
        self.args = list(args) if args else []
        self.program_name = k.program_name
        self.module_names = k.module_names or []
        self.args_queue = k.get('args_queue', None)
        self.feedback_queue = k.get('feedback_queue', None)
        k.assert_empty(self)
        self.returncode = 0
        self.commandline = None
        # 'tpn' is always prepended to the module list unless it is already
        # present or explicitly excluded with a '-tpn' entry.
        include_tpn = True
        for name in self.module_names:
            if name == '-tpn':
                include_tpn = False
                self.module_names.remove('-tpn')
            elif name == 'tpn':
                include_tpn = False
        if include_tpn:
            self.module_names.insert(0, 'tpn')
        ensure_unique(self.module_names)
        self.modules = Dict()
        self.modules.config = OrderedDict()
        self.modules.commands = OrderedDict()
        self._help = self.__help__
        self._commands_by_name = dict()
        self._commands_by_shortname = dict()
        self._import_command_and_config_modules()
        self._load_commands()
        if not self.args_queue:
            if self.args:
                self.run()
            else:
                self.help()

    def run(self):
        """Process self.args once, or drain the multiprocessing args queue."""
        if not self.args_queue:
            self._process_commandline()
            return
        from Queue import Empty
        # Cache CommandLine lookups per subcommand name while draining.
        cmdlines = {}
        while True:
            try:
                args = self.args_queue.get_nowait()
            except Empty:
                break
            cmdline = args.pop(0).lower()
            if cmdline not in cmdlines:
                cmdlines[cmdline] = self._find_commandline(cmdline)
            cl = cmdlines[cmdline]
            cl.run(args)
            self.args_queue.task_done()

    def _import_command_and_config_modules(self):
        """Import '<ns>.commands' and '<ns>.config' for every namespace."""
        for namespace in self.module_names:
            for suffix in ('commands', 'config'):
                name = '.'.join((namespace, suffix))
                store = getattr(self.modules, suffix)
                store[namespace] = importlib.import_module(name)

    def _find_command_subclasses(self):
        """Scan the commands modules' source text for Command subclasses.

        Returns (namespace, class_name, class) tuples; raises
        ClashingCommandNames when two namespaces define the same class name.
        """
        seen = dict()
        pattern = re.compile('^class ([^\s]+)\(.*', re.M)
        subclasses = list()
        for (namespace, module) in self.modules.commands.items():
            path = module.__file__
            # Read the .py source even when the module was loaded from .pyc.
            if path[-1] == 'c':
                path = path[:-1]
            with open(path, 'r') as f:
                matches = pattern.findall(f.read())
            for name in [ n for n in matches if n[0] != '_' ]:
                attr = getattr(module, name)
                if attr == Command or not issubclass(attr, Command):
                    continue
                if name in seen:
                    args = (name, seen[name], namespace)
                    raise ClashingCommandNames(*args)
                seen[name] = namespace
                subclasses.append((namespace, name, attr))
        return subclasses

    def _load_commands(self):
        """Build and register a CommandLine per discovered Command subclass."""
        subclasses = [
            sc for sc in sorted(self._find_command_subclasses())
        ]
        for (namespace, command_name, command_class) in subclasses:
            if command_name in self._commands_by_name:
                continue
            config_module = self.modules.config[namespace]
            config_class = getattr(config_module, 'Config')
            cl = CommandLine(self.program_name, command_class, config_class)
            helpstr = self._helpstr(cl.name)
            if cl.shortname:
                if cl.shortname in self._commands_by_shortname:
                    continue
                self._commands_by_shortname[cl.shortname] = cl
                if '[n]@' in helpstr:
                    prefix = '[n]@'
                else:
                    prefix = ''
                helpstr += ' (%s%s)' % (prefix, cl.shortname)
            self._help += helpstr
            self._commands_by_name[cl.name] = cl
        # Add a fake version command so that it'll appear in the list of
        # available commands.  (We intercept version requests during
        # _process_command(); there's no actual command for it.)
        self._help += self._helpstr('version')
        self._commands_by_name['version'] = None

    def _helpstr(self, name):
        """Format a subcommand's indented entry for the help listing."""
        i = 12
        if name.startswith('multiprocess'):
            # Multiprocess-capable commands are advertised as '[n]@<name>'.
            prefix = '[n]@'
            name = prefix + name
            i -= len(prefix)
        return os.linesep + (' ' * i) + name

    def _load_commandlines(self):
        """Legacy loader kept for reference.

        NOTE(review): this calls _find_commandline_subclasses(), which is not
        defined anywhere in this module -- the method appears to be dead code
        superseded by _load_commands(); confirm before removing.
        """
        subclasses = [
            sc for sc in sorted(self._find_commandline_subclasses())
        ]
        for (scname, subclass) in subclasses:
            if scname in self._commands_by_name:
                continue
            try:
                cl = subclass(self.program_name)
            except TypeError as e:
                # Skip abstract base classes (e.g. 'AdminCommandLine').
                if e.args[0].startswith("Can't instantiate abstract class"):
                    continue
                raise
            helpstr = self._helpstr(cl.name)
            if cl.shortname:
                if cl.shortname in self._commands_by_shortname:
                    continue
                self._commands_by_shortname[cl.shortname] = cl
                helpstr += ' (%s)' % cl.shortname
            self._help += helpstr
            self._commands_by_name[cl.name] = cl
        # Add a fake version command so that it'll appear in the list of
        # available subcommands.  It doesn't matter if it's None as we
        # intercept 'version', '-v' and '--version' in the
        # _process_commandline method before doing the normal command
        # lookup.
        self._help += self._helpstr('version')
        self._commands_by_name['version'] = None

    def _find_commandline(self, cmdline):
        """Look up a CommandLine by full name, falling back to shortname."""
        return self._commands_by_name.get(cmdline,
               self._commands_by_shortname.get(cmdline))

    def _process_commandline(self):
        """Dispatch self.args: builtin method, version request, or subcommand."""
        args = self.args
        cmdline = args.pop(0).lower()
        if cmdline and cmdline[0] != '_':
            if '-' not in cmdline and hasattr(self, cmdline):
                # Builtin commands such as 'help' and 'usage'.
                getattr(self, cmdline)(args)
                return self._exit(0)
            elif cmdline in ('-v', '-V', '--version'):
                self.version()
            else:
                cl = self.commandline = self._find_commandline(cmdline)
                if cl:
                    try:
                        cl.run(args)
                        return self._exit(0)
                    except (CommandError, Invariant) as err:
                        self._commandline_error(cl, str(err))
                if not self.returncode:
                    self._error(
                        os.linesep.join((
                            self.__unknown_subcommand__ % cmdline,
                            self.__usage__,
                        ))
                    )

    def _exit(self, code):
        """Record the exit code (the process itself is not terminated here)."""
        self.returncode = code

    def _commandline_error(self, cl, msg):
        """Report a failed subcommand on stderr and record exit code 1."""
        args = (self.program_name, cl.name, msg)
        msg = '%s %s failed: %s' % args
        sys.stderr.write(prepend_error_if_missing(msg))
        return self._exit(1)

    def _error(self, msg):
        """Write *msg* (with %prog expanded) to stderr; record exit code 1."""
        sys.stderr.write(
            add_linesep_if_missing(
                dedent(msg).replace(
                    '%prog', self.program_name
                )
            )
        )
        return self._exit(1)

    def usage(self, args=None):
        """Print the one-line usage message."""
        self._error(self.__usage__)

    def version(self, args=None):
        """Print the tpn package version."""
        sys.stdout.write(add_linesep_if_missing(tpn.__version__))
        return self._exit(0)

    def help(self, args=None):
        """Show the subcommand list, or '<subcommand> -h' when args are given."""
        if args:
            l = [ args.pop(0), '-h' ]
            if args:
                l += args
            # Bug fix: _process_commandline() takes no arguments and reads
            # self.args, so stage the synthesized argument list there first
            # (the old code passed ``l`` positionally, raising TypeError).
            self.args = l
            self._process_commandline()
        else:
            self._error(self._help + os.linesep)
#===============================================================================
# Main
#===============================================================================
def extract_command_args_and_kwds(*cli_args):
    """Split positional CLI arguments into (remaining_args, CLI keyword dict).

    The first argument becomes ``program_name``; the second, when present, is
    a comma-separated list that becomes ``module_names`` (None otherwise).
    """
    remaining = list(cli_args)
    program_name = remaining.pop(0)
    if remaining:
        module_names = remaining.pop(0).split(',')
    else:
        module_names = None
    return (remaining, {'program_name': program_name,
                        'module_names': module_names})
def run(*args_):
    """Construct and execute a CLI.

    A single string argument is treated as an interactive command line: it is
    split on spaces and the resulting Command instance is returned instead of
    the CLI object.
    """
    global INTERACTIVE
    if len(args_) == 1 and isinstance(args_[0], str):
        args_ = args_[0].split(' ')
        INTERACTIVE = True
    (args, kwds) = extract_command_args_and_kwds(*args_)
    tpn.config._clear_config_if_already_created()
    cli = CLI(*args, **kwds)
    return cli.commandline.command if INTERACTIVE else cli
def run_mp(**kwds):
    """Worker entry point: build a CLI from *kwds* and drain its args queue."""
    CLI(**kwds).run()
if __name__ == '__main__':
    # Intended invocation:
    #   python -m tpn.cli <program_name> <library_name> \
    #       <command_name> [arg1 arg2 argN]
    # Multiprocessor support: prefix command_name with @.  The @ will be
    # removed, the command will be run, and then the command.result field
    # will be expected to be populated with a list of argument lists that
    # will be pushed onto a multiprocessing joinable queue.
    is_mp = False
    args = sys.argv[1:]
    if len(args) <= 2:
        # Not enough arguments for a subcommand; let the CLI print help/usage.
        cli = run(*args)
        sys.exit(cli.returncode)
    command = args[2]
    if '@' in command:
        # '<n>@<command>' requests a parallel run with n workers (0 = auto).
        is_mp = True
        ix = command.find('@')
        parallelism_hint = int(command[:ix] or 0)
        args[2] = command[ix+1:]
    cli = run(*args)
    if not is_mp or cli.returncode:
        sys.exit(cli.returncode)
    command = cli.commandline.command
    results = command.results
    if not results:
        # NOTE(review): ``err`` is not defined or imported in this module's
        # visible code; this error path would raise NameError -- confirm the
        # intended helper (sys.stderr.write?).
        err("parallel command did not produce any results\n")
        sys.exit(1)
    from multiprocessing import (
        cpu_count,
        Process,
        JoinableQueue,
    )
    args_queue = JoinableQueue(len(results))
    # Each result is an argument list; strip the program/library prefix.
    for args in results:
        args_queue.put(args[2:])
    # Grab the program_name and module_names from the first result args.
    (_, kwds) = extract_command_args_and_kwds(*results[0])
    kwds['args_queue'] = args_queue
    nprocs = cpu_count()
    if parallelism_hint:
        if parallelism_hint > nprocs:
            fmt = "warning: parallelism hint exceeds ncpus (%d vs %d)\n"
            msg = fmt % (parallelism_hint, nprocs)
            sys.stderr.write(msg)
        nprocs = parallelism_hint
    procs = []
    for i in range(0, nprocs):
        p = Process(target=run_mp, kwargs=kwds)
        procs.append(p)
        p.start()
    sys.stdout.write("started %d processes\n" % len(procs))
    # Block until every queued argument list has been processed.
    args_queue.join()
def main():
    """Console-script entry point: run the 'tpn' program on sys.argv."""
    argv = ['tpn', 'tpn'] + sys.argv[1:]
    sys.exit(run(*argv).returncode)
# vim:set ts=8 sw=4 sts=4 tw=78 et:
| {
"content_hash": "b97c2ddcd1ec13be2e0b7fa35f4b12ca",
"timestamp": "",
"source": "github",
"line_count": 575,
"max_line_length": 80,
"avg_line_length": 30.21913043478261,
"alnum_prop": 0.5038558931860037,
"repo_name": "tpn/tpn",
"id": "345ab072341a5df3081caf5881ff94a29a3bb84f",
"size": "17548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/tpn/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "49"
},
{
"name": "Jupyter Notebook",
"bytes": "141992"
},
{
"name": "Python",
"bytes": "329484"
}
],
"symlink_target": ""
} |
import sys
from signal import signal, SIGPIPE, SIG_DFL
# Restore the default SIGPIPE disposition so that writing to a closed pipe
# terminates the process quietly instead of raising IOError (common setup
# for streaming map-reduce scripts).
signal(SIGPIPE,SIG_DFL)
# Running count for the key currently being reduced (to be used below).
count = 0
# Key currently being aggregated; None until the first input line is seen.
currentKey = None
#### Complete the rest of the code
| {
"content_hash": "00675284dbf0e8216e0e6962ee32cafc",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 43,
"avg_line_length": 18,
"alnum_prop": 0.7430555555555556,
"repo_name": "ece579/ece579_f17",
"id": "48150bf3ebdcea3503f37da1444cd8f7d9e2b748",
"size": "166",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "recitation2/problems/hist-combine-reduce.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "18924"
},
{
"name": "Makefile",
"bytes": "4533"
},
{
"name": "Python",
"bytes": "14007"
},
{
"name": "Scala",
"bytes": "3984"
},
{
"name": "Shell",
"bytes": "670"
}
],
"symlink_target": ""
} |
import numpy as np
import pandas as pd
from pandas import CategoricalIndex, Index, MultiIndex, Timestamp, date_range
import pandas._testing as tm
class TestGetLevelValues:
    def test_get_level_values_box_datetime64(self):
        # Values taken from a datetime64 level must come back boxed
        # as Timestamp objects.
        level_dates = date_range("1/1/2000", periods=4)
        mi = MultiIndex(
            levels=[level_dates, [0, 1]],
            codes=[[0, 0, 1, 1, 2, 2, 3, 3], [0, 1, 0, 1, 0, 1, 0, 1]],
        )
        assert isinstance(mi.get_level_values(0)[0], Timestamp)
def test_get_level_values(idx):
result = idx.get_level_values(0)
expected = Index(["foo", "foo", "bar", "baz", "qux", "qux"], name="first")
tm.assert_index_equal(result, expected)
assert result.name == "first"
result = idx.get_level_values("first")
expected = idx.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(["A", "B"]), CategoricalIndex([1, 2, 3])],
codes=[np.array([0, 0, 0, 1, 1, 1]), np.array([0, 1, 2, 0, 1, 2])],
)
exp = CategoricalIndex(["A", "A", "A", "B", "B", "B"])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_all_na():
    # GH#17924: a level consisting entirely of NaN keeps float64/object dtypes.
    mi = MultiIndex.from_arrays([[np.nan] * 3, ["a", np.nan, 1]])

    tm.assert_index_equal(
        mi.get_level_values(0), Index([np.nan, np.nan, np.nan], dtype=np.float64)
    )
    tm.assert_index_equal(
        mi.get_level_values(1), Index(["a", np.nan, 1], dtype=object)
    )
def test_get_level_values_int_with_na():
    # GH#17924: integer levels containing NaN come back as float64.
    for second_level in ([1, np.nan, 2], [np.nan, np.nan, 2]):
        mi = MultiIndex.from_arrays([["a", "b", "b"], second_level])
        tm.assert_index_equal(mi.get_level_values(1), Index(second_level))
def test_get_level_values_na():
    # All-NaN first level -> float NaN index; mixed object level preserved.
    mi = MultiIndex.from_arrays([[np.nan, np.nan, np.nan], ["a", np.nan, 1]])
    tm.assert_index_equal(mi.get_level_values(0), Index([np.nan, np.nan, np.nan]))
    tm.assert_index_equal(mi.get_level_values(1), Index(["a", np.nan, 1]))

    # Datetime level containing NaT round-trips as a DatetimeIndex.
    mi = MultiIndex.from_arrays([["a", "b", "b"], pd.DatetimeIndex([0, 1, pd.NaT])])
    tm.assert_index_equal(mi.get_level_values(1), pd.DatetimeIndex([0, 1, pd.NaT]))

    # Empty arrays produce an empty object-dtype Index.
    mi = MultiIndex.from_arrays([[], []])
    tm.assert_index_equal(mi.get_level_values(0), Index([], dtype=object))
def test_get_level_values_when_periods():
    # GH33131. See also discussion in GH32669.
    # This test can probably be removed when PeriodIndex._engine is removed.
    from pandas import Period, PeriodIndex

    idx = MultiIndex.from_arrays(
        [PeriodIndex([Period("2019Q1"), Period("2019Q2")], name="b")]
    )
    idx2 = MultiIndex.from_arrays(
        [idx._get_level_values(level) for level in range(idx.nlevels)]
    )
    # ``Index.is_monotonic`` was deprecated in pandas 1.5 and removed in 2.0;
    # ``is_monotonic_increasing`` is the equivalent and exists in all
    # supported versions.
    assert all(x.is_monotonic_increasing for x in idx2.levels)
| {
"content_hash": "39732c11a5d26ef2a064e89d07975d3a",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 78,
"avg_line_length": 33.77142857142857,
"alnum_prop": 0.6283135927805978,
"repo_name": "jreback/pandas",
"id": "f976515870259bebbd2431d1e0021ace44ca0302",
"size": "3546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/tests/indexes/multi/test_get_level_values.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4879"
},
{
"name": "C",
"bytes": "406353"
},
{
"name": "C++",
"bytes": "17193"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "529"
},
{
"name": "Python",
"bytes": "14930989"
},
{
"name": "Shell",
"bytes": "29317"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
def create_initial_choices(apps, schema_editor):
    """Seed the ConferenceName and PresentationType lookup tables."""
    conference_model = apps.get_model('metadata', 'ConferenceName')
    for conference in ('AASHE', 'Other'):
        conference_model.objects.create(name=conference)

    presentation_model = apps.get_model('metadata', 'PresentationType')
    for presentation in ('Presentation', 'Poster'):
        presentation_model.objects.create(name=presentation)
class Migration(migrations.Migration):
    """Create the ConferenceName and PresentationType lookup tables and
    seed them with their initial rows via create_initial_choices."""
    dependencies = [
        ('metadata', '0016_auto_20160202_2133'),
    ]
    operations = [
        migrations.CreateModel(
            name='ConferenceName',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=100)),
            ],
            options={
                'ordering': ('name',),
                'verbose_name': 'Conference Name',
                'verbose_name_plural': 'Conference Names',
            },
        ),
        migrations.CreateModel(
            name='PresentationType',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=100)),
            ],
            options={
                'ordering': ('name',),
                'verbose_name': 'Presentation Type',
                'verbose_name_plural': 'Presentation Types',
            },
        ),
        # Data step runs after both tables exist.
        migrations.RunPython(create_initial_choices)
    ]
| {
"content_hash": "9a3755ec7573993544667652a3f99520",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 114,
"avg_line_length": 33.755102040816325,
"alnum_prop": 0.5792019347037485,
"repo_name": "AASHE/hub",
"id": "53d08c3ddaba76e3012f486fedc1c71d7eeb02a3",
"size": "1678",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hub/apps/metadata/migrations/0017_conferencename_presentationtype.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9044"
},
{
"name": "HTML",
"bytes": "104612"
},
{
"name": "JavaScript",
"bytes": "10807"
},
{
"name": "Python",
"bytes": "498577"
}
],
"symlink_target": ""
} |
import os
import pickle
import tempfile
import sys
from glob import glob
from data_utils import START_SYMBOL, END_SYMBOL, all_features, \
all_metadatas, indexed_chorale_to_score, chorale_to_inputs, BACH_DATASET
from deepBach import load_models
from flask import Flask, request, make_response, jsonify
from music21 import musicxml, converter
from tqdm import tqdm
import numpy as np
# Scratch directory for uploads (plain constant; not wired into app.config here).
UPLOAD_FOLDER = '/tmp'
# Score file extensions accepted by allowed_file() below.
ALLOWED_EXTENSIONS = {'xml', 'mxl', 'mid', 'midi'}
app = Flask(__name__)
def allowed_file(filename):
    """Return True if *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension.lower() in ALLOWED_EXTENSIONS
def parallel_gibbs_server(models=None,
                          start_tick=None, end_tick=None,
                          start_voice_index=None, end_voice_index=None,
                          chorale_metas=None,
                          num_iterations=1000,
                          timesteps=16,
                          num_voices=None,
                          temperature=1.,
                          input_chorale=None,
                          batch_size_per_voice=16,
                          parallel_updates=True,
                          metadatas=None):
    """
    Resample a region of a chorale with pseudo-Gibbs sampling.

    input_chorale is time major: a (time, num_voices) matrix of note
    indexes.  Only ticks [start_tick, end_tick) of voices
    [start_voice_index, end_voice_index] are resampled, using one model per
    voice from `models`.  Reads the module-level globals `note2indexes` and
    `num_pitches` for symbol lookups.

    Returns (time, num_voices) matrix of indexes
    """
    assert models is not None
    assert input_chorale is not None
    print(models)
    print(type(models))
    sequence_length = len(input_chorale[:, 0])
    # init
    # Pad the sequence with `timesteps` frames of START/END symbols on each
    # side so every sampled tick has a full left and right context window.
    seq = np.zeros(shape=(2 * timesteps + sequence_length, num_voices))
    seq[timesteps: -timesteps, :] = input_chorale
    for expert_index in range(num_voices):
        # Add start and end symbol
        seq[:timesteps, expert_index] = [note2indexes[expert_index][START_SYMBOL]] * timesteps
        seq[-timesteps:, expert_index] = [note2indexes[expert_index][END_SYMBOL]] * timesteps
    for expert_index in range(start_voice_index, end_voice_index + 1):
        # Randomize selected zone
        seq[timesteps + start_tick: timesteps + end_tick, expert_index] = np.random.randint(num_pitches[expert_index],
                                                                                            size=end_tick - start_tick)
    if chorale_metas is not None:
        # chorale_metas is a list
        # todo how to specify chorale_metas from musescore
        # Zero-pad each metadata track so it lines up with the padded `seq`.
        extended_chorale_metas = [np.concatenate((np.zeros((timesteps,)),
                                                  chorale_meta,
                                                  np.zeros((timesteps,))),
                                                 axis=0)
                                  for chorale_meta in chorale_metas]
    else:
        raise NotImplementedError
    # Annealing schedule: start hot (1.3) and decay geometrically towards the
    # requested temperature over the course of the iterations.
    min_temperature = temperature
    temperature = 1.3
    discount_factor = np.power(1. / temperature, 3 / 2 / num_iterations)
    # Main loop
    for iteration in tqdm(range(num_iterations)):
        temperature = max(min_temperature, temperature * discount_factor)  # Simulated annealing
        time_indexes = {}
        probas = {}
        for voice_index in range(start_voice_index, end_voice_index + 1):
            batch_input_features = []
            time_indexes[voice_index] = []
            for batch_index in range(batch_size_per_voice):
                # Pick a random tick inside the selected zone to resample.
                time_index = np.random.randint(timesteps + start_tick, timesteps + end_tick)
                time_indexes[voice_index].append(time_index)
                (left_feature,
                 central_feature,
                 right_feature,
                 label) = all_features(seq, voice_index, time_index, timesteps, num_pitches, num_voices)
                left_metas, central_metas, right_metas = all_metadatas(chorale_metadatas=extended_chorale_metas,
                                                                      metadatas=metadatas,
                                                                      time_index=time_index, timesteps=timesteps)
                input_features = {'left_features': left_feature[:, :],
                                  'central_features': central_feature[:],
                                  'right_features': right_feature[:, :],
                                  'left_metas': left_metas,
                                  'central_metas': central_metas,
                                  'right_metas': right_metas}
                # list of dicts: predict need dict of numpy arrays
                batch_input_features.append(input_features)
            # convert input_features
            batch_input_features = {key: np.array([input_features[key] for input_features in batch_input_features])
                                    for key in batch_input_features[0].keys()
                                    }
            # make all estimations
            probas[voice_index] = models[voice_index].predict(batch_input_features,
                                                              batch_size=batch_size_per_voice)
            if not parallel_updates:
                # update
                for batch_index in range(batch_size_per_voice):
                    probas_pitch = probas[voice_index][batch_index]
                    # use temperature
                    probas_pitch = np.log(probas_pitch) / temperature
                    probas_pitch = np.exp(probas_pitch) / np.sum(np.exp(probas_pitch)) - 1e-7
                    # pitch can include slur_symbol
                    pitch = np.argmax(np.random.multinomial(1, probas_pitch))
                    seq[time_indexes[voice_index][batch_index], voice_index] = pitch
        if parallel_updates:
            # update
            # All voices were predicted against the same `seq`; apply every
            # sampled pitch together only after all voices have been sampled.
            for voice_index in range(start_voice_index, end_voice_index + 1):
                for batch_index in range(batch_size_per_voice):
                    probas_pitch = probas[voice_index][batch_index]
                    # use temperature
                    probas_pitch = np.log(probas_pitch) / temperature
                    probas_pitch = np.exp(probas_pitch) / np.sum(np.exp(probas_pitch)) - 1e-7
                    # pitch can include slur_symbol
                    pitch = np.argmax(np.random.multinomial(1, probas_pitch))
                    seq[time_indexes[voice_index][batch_index], voice_index] = pitch
    # Strip the START/END padding before returning.
    return seq[timesteps:-timesteps, :]
# INITIALIZATION
# Module-level setup runs once at import time; the globals bound here
# (X_metadatas, note2indexes, num_pitches, models, timesteps, ...) are read
# by parallel_gibbs_server above and by the request handlers below.
response_headers = {"Content-Type": "text/html",
                    "charset": "utf-8"
                    }
# datasets only Bach for the moment
pickled_dataset = BACH_DATASET
if not os.path.exists(pickled_dataset):
    print('Warning: no dataset')
    raise NotImplementedError
# load dataset
X, X_metadatas, voice_ids, index2notes, note2indexes, metadatas = pickle.load(open(pickled_dataset, 'rb'))
num_voices = len(voice_ids)
num_pitches = list(map(len, index2notes))
# get model names present in folder models/
# File names look like <base_name>_<voice>.yaml; strip voice suffix and dir.
models_list = glob('models/*.yaml')
models_list = list(set(map(lambda name: '_'.join(name.split('_')[:-1]).split('/')[-1], models_list)))
# model_name = 'deepbach'
model_name = 'skip_new'
assert os.path.exists('models/' + model_name + '_' + str(num_voices - 1) + '.yaml')
# load models
models = load_models(model_name, num_voices=num_voices)
temperature = 1.
# Timestep count the models were trained with, recovered from the first
# model's input shape (Keras-internal attribute).
timesteps = int(models[0].input[0]._keras_shape[1])
@app.route('/compose', methods=['POST'])
def compose():
    """Recompose the selected region of the posted MusicXML chorale and
    return the regenerated score as MusicXML."""
    # global models
    # --- Parse request---
    with tempfile.NamedTemporaryFile(mode='w', suffix='.xml') as file:
        print(file.name)
        # file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        xml_string = request.form['xml_string']
        file.write(xml_string)
        # NOTE(review): the temp file is not flushed before converter.parse
        # reads it by name — presumably relies on buffering behavior; confirm.
        # load chorale with music21
        input_chorale = converter.parse(file.name)
        input_chorale = chorale_to_inputs(input_chorale,
                                          voice_ids=voice_ids,
                                          index2notes=index2notes,
                                          note2indexes=note2indexes
                                          )
        # generate metadata:
        # todo find a way to set metadata from musescore
        # you may choose a given chorale:
        chorale_metas = X_metadatas[50]
        # or just generate them
        # chorale_metas = [metas.generate(sequence_length) for metas in metadatas]
        # make chorale time major
        input_chorale = np.transpose(input_chorale, axes=(1, 0))
        # MuseScore sends tick positions; convert them to sixteenth-note units.
        NUM_MIDI_TICKS_IN_SIXTEENTH_NOTE = 120
        start_tick_selection = int(float(request.form['start_tick']) / NUM_MIDI_TICKS_IN_SIXTEENTH_NOTE)
        end_tick_selection = int(float(request.form['end_tick']) / NUM_MIDI_TICKS_IN_SIXTEENTH_NOTE)
        # if no selection REGENERATE and set chorale length
        if start_tick_selection == 0 and end_tick_selection == 0:
            chorale_length = len(chorale_metas[0])
            # randomize
            input_chorale = np.array([np.random.randint(num_pitches[expert_index],
                                                        size=(chorale_length,))
                                      for expert_index in range(num_voices)])
            input_chorale = np.transpose(input_chorale, axes=(1, 0))
            end_tick_selection = chorale_length
            start_voice_index = 0
            end_voice_index = num_voices - 1
        else:
            start_voice_index = int(request.form['start_staff'])
            end_voice_index = int(request.form['end_staff'])
        # Scale the iteration budget with the selection size, then spread it
        # over the per-voice batches (minimum of 5 iterations).
        diff = end_tick_selection - start_tick_selection + 1
        num_iterations = 100 * diff
        if diff < 16:
            batch_size_per_voice = 4
        else:
            batch_size_per_voice = 16
        num_iterations = max(int(num_iterations // batch_size_per_voice // num_voices), 5)
        # --- Generate---
        output_chorale = parallel_gibbs_server(models=models,
                                               start_tick=start_tick_selection,
                                               end_tick=end_tick_selection,
                                               start_voice_index=start_voice_index,
                                               end_voice_index=end_voice_index,
                                               input_chorale=input_chorale,
                                               chorale_metas=chorale_metas,
                                               num_iterations=num_iterations,
                                               num_voices=num_voices,
                                               timesteps=timesteps,
                                               temperature=temperature,
                                               batch_size_per_voice=batch_size_per_voice,
                                               parallel_updates=True,
                                               metadatas=metadatas)
        # convert back to music21
        output_chorale = indexed_chorale_to_score(np.transpose(output_chorale, axes=(1, 0)),
                                                  pickled_dataset=pickled_dataset
                                                  )
        # convert chorale to xml
        goe = musicxml.m21ToXml.GeneralObjectExporter(output_chorale)
        xml_chorale_string = goe.parse()
    response = make_response((xml_chorale_string, response_headers))
    return response
@app.route('/test', methods=['POST', 'GET'])
def test_generation():
    """Health-check endpoint: always answers 'TEST'; dumps POST requests."""
    if request.method == 'POST':
        print(request)
    return make_response(('TEST', response_headers))
@app.route('/models', methods=['GET'])
def get_models():
    """Rescan models/ and return the available model base names as JSON."""
    global models_list
    # File names look like <base_name>_<voice>.yaml; keep the unique bases.
    base_names = set()
    for path in glob('models/*.yaml'):
        base_names.add('_'.join(path.split('_')[:-1]).split('/')[-1])
    models_list = list(base_names)
    return jsonify(models_list)
@app.route('/current_model', methods=['POST', 'PUT'])
def current_model_update():
    """Switch the active model set to the one named in the request form."""
    global model_name, models
    model_name = request.form['model_name']
    # TODO: remove this guard once the client stops sending 'undefined'
    if model_name != 'undefined':
        models = load_models(model_base_name=model_name, num_voices=num_voices)
        return 'Model ' + model_name + ' loaded'
    return ''
@app.route('/current_model', methods=['GET'])
def current_model_get():
    """Return the name of the currently loaded model."""
    # Read-only access to the module-level global; no `global` needed.
    return model_name
| {
"content_hash": "de89f4ceb84bd1f6868277356c402fb7",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 119,
"avg_line_length": 39.57877813504823,
"alnum_prop": 0.5382240636932326,
"repo_name": "SonyCSL-Paris/DeepBach",
"id": "5a678041c01705604bce8307543c2a13e9684fe8",
"size": "12309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugin_flask_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "86625"
},
{
"name": "QML",
"bytes": "8246"
}
],
"symlink_target": ""
} |
from .parser.factory import ParserFactory
from .utils import array_wrapper
class SuperDict(object):
    """Base class mapping positional args onto the class-level `properties`
    list (in declaration order) and keyword args onto attributes directly.

    Every name in `properties` is initialised to None first, so properties
    not covered by the arguments simply stay None.
    """
    properties = []

    def __init__(self, *args, **kwargs):
        for prop in self.properties:
            setattr(self, prop, None)
        for idx, value in enumerate(args):
            # Ignore extra positional args beyond the declared properties.
            # (Was `idx > len(...)`, which let idx == len through and raised
            # IndexError on the self.properties[idx] lookup below.)
            if idx >= len(self.properties):
                break
            setattr(self, self.properties[idx], value)
        for key, val in kwargs.items():
            setattr(self, key, val)
class Contact(SuperDict):
    """A whois contact (registrant, administrative, or technical)."""
    # Discriminator values stored in the `type` property.
    TYPE_REGISTRANT = 1
    TYPE_ADMINISTRATIVE = 2
    TYPE_TECHNICAL = 3
    properties = ['id', 'type', 'name', 'organization',
        'address', 'city', 'zip', 'state', 'country', 'country_code',
        'phone', 'fax', 'email', 'url',
        'created_on', 'updated_on']
class Part(SuperDict):
    """One raw whois response: the response `body` and the `host` it came from."""
    properties = ['body', 'host']
class Registrar(SuperDict):
    """The sponsoring registrar of a domain."""
    properties = ['id', 'name', 'organization', 'url']
class Nameserver(SuperDict):
    """A nameserver entry with its host name and optional IPv4/IPv6 addresses."""
    properties = ['name', 'ipv4', 'ipv6']
class Record(object):
    """Aggregates one or more whois response parts and resolves whois
    properties by delegating to the parser of each part (most recent part
    first)."""

    # Names resolved dynamically through __getattr__.
    METHODS = [
        'contacts', 'is_changed'
    ]
    PROPERTIES = [
        'disclaimer',
        'domain', 'domain_id',
        'status', 'available', 'registered', 'reserved',
        'created_on', 'updated_on', 'expires_on',
        'registrar',
        'registrant_contacts', 'admin_contacts', 'technical_contacts',
        'nameservers',
        'referral_url', 'referral_whois',
        'valid', 'invalid'
    ]

    # Initializes a new instance with given server and parts
    #
    # @param [Server] server
    # @param [List<Part>] parts
    #
    def __init__(self, server, parts):
        self.server = server
        self.parts = parts
        self.__parsers = None

    def __str__(self):
        return self.content

    def __getattr__(self, attr):
        # Delegate supported whois properties/methods to the first parser
        # that answers them; contact lists and nameservers are normalized
        # to lists via array_wrapper.  Unsupported names resolve to None.
        if attr in self.PROPERTIES or attr in self.METHODS:
            for parser in self.parsers:
                if parser.is_property_supported(attr):
                    if attr.endswith("contacts") or attr == "nameservers":
                        return array_wrapper(getattr(parser, attr))
                    return getattr(parser, attr)
        return None

    @property
    def parsers(self):
        # Built lazily, one parser per part, most recent part first.
        if self.__parsers is None:
            self.__parsers = [ParserFactory.parser_for(part) for part in self.parts[::-1]]
        return self.__parsers

    @property
    def content(self):
        return "\n".join([part.body for part in self.parts])

    @property
    def registrant_contact(self):
        for parser in self.parsers:
            if parser.is_property_supported("registrant_contacts"):
                return parser.registrant_contacts[0]

    @property
    def admin_contact(self):
        for parser in self.parsers:
            if parser.is_property_supported("admin_contacts"):
                return parser.admin_contacts[0]

    @property
    def technical_contact(self):
        for parser in self.parsers:
            if parser.is_property_supported("technical_contacts"):
                # (debug `print parser.technical_contacts` removed; it used
                # Python 2 syntax and broke the module under Python 3)
                return parser.technical_contacts[0]

    # Returns a Hash containing all supported properties for this record
    # along with corresponding values.
    #
    # @return [{ property => object }]
    #
    @property
    def properties(self):
        props = {}
        for prop in Record.PROPERTIES:
            if self.is_property_supported(prop):
                # BUG FIX: was `self.prop`, which looked up the literal
                # attribute name "prop" (always None via __getattr__)
                # instead of the value of the current property.
                props[prop] = getattr(self, prop)
        return props

    # Collects and returns all the contacts.
    #
    # @return [List<Contact>]
    #
    #
    @property
    def contacts(self):
        # BUG FIX: was `return self.parser.contacts` — `parser` (singular)
        # is not an attribute of Record, so this always raised.  Delegate to
        # the first parser supporting "contacts", mirroring the sibling
        # delegating properties above.
        for parser in self.parsers:
            if parser.is_property_supported("contacts"):
                return parser.contacts

    def is_property_supported(self, prop):
        return any(parser.is_property_supported(prop) for parser in self.parsers)

    # Checks whether this is an incomplete response.
    #
    # @return [Boolean]
    #
    #
    @property
    def response_incomplete(self):
        for parser in self.parsers:
            if parser.is_property_supported("response_incomplete"):
                return parser.response_incomplete

    # Checks whether this is a throttle response.
    #
    # @return [Boolean]
    #
    @property
    def response_throttled(self):
        for parser in self.parsers:
            if parser.is_property_supported("response_throttled"):
                return parser.response_throttled

    # Checks whether this is an unavailable response.
    #
    # @return [Boolean]
    #
    @property
    def response_unavailable(self):
        for parser in self.parsers:
            if parser.is_property_supported("response_unavailable"):
                return parser.response_unavailable
"content_hash": "2e1be6e5592324fc332071d2fc286a9b",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 102,
"avg_line_length": 28.227544910179642,
"alnum_prop": 0.5795502757742893,
"repo_name": "huyphan/pyyawhois",
"id": "abde682aadc00381cf0389b37b8998b4b83e7521",
"size": "4714",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yawhois/record.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1859653"
}
],
"symlink_target": ""
} |
from telemetry.core import exceptions
from telemetry.core import util
from telemetry.unittest import tab_test_case
class InspectorRuntimeTest(tab_test_case.TabTestCase):
  """Exercises JavaScript evaluation/execution through the tab's inspector:
  simple expressions, failure paths, non-JSON-serializable results, and
  per-iframe execution contexts."""
  def testRuntimeEvaluateSimple(self):
    res = self._tab.EvaluateJavaScript('1+1')
    assert res == 2
  def testRuntimeEvaluateThatFails(self):
    # Evaluating an undefined identifier must surface as EvaluateException.
    self.assertRaises(exceptions.EvaluateException,
                      lambda: self._tab.EvaluateJavaScript('fsdfsdfsf'))
  def testRuntimeEvaluateOfSomethingThatCantJSONize(self):
    # A 1000-deep linked object cannot be JSON-serialized back to Python.
    def test():
      self._tab.EvaluateJavaScript("""
        var cur = {};
        var root = {next: cur};
        for (var i = 0; i < 1000; i++) {
          next = {};
          cur.next = next;
          cur = next;
        }
        root;""")
    self.assertRaises(exceptions.EvaluateException, test)
  def testRuntimeExecuteOfSomethingThatCantJSONize(self):
    # Execute (unlike Evaluate) discards the result, so this must not raise.
    self._tab.ExecuteJavaScript('window')
  def testIFrame(self):
    self._browser.SetHTTPServerDirectories(util.GetUnittestDataDir())
    self._tab.Navigate(self._browser.http_server.UrlOf('host.html'))
    # Access host page.
    test_defined_js = "typeof(testVar) != 'undefined'"
    self._tab.WaitForJavaScriptExpression(test_defined_js, timeout=5)
    self.assertEquals(self._tab.EvaluateJavaScript('testVar'), 'host')
    def TestVarReady(context_id):
      """Returns True if the context and testVar are both ready."""
      try:
        return self._tab.EvaluateJavaScriptInContext(test_defined_js,
                                                     context_id)
      except exceptions.EvaluateException:
        # This happens when the context is not ready.
        return False
    def TestVar(context_id):
      """Waits for testVar and the context to be ready, then returns the value
      of testVar."""
      util.WaitFor(lambda: TestVarReady(context_id), timeout=10)
      return self._tab.EvaluateJavaScriptInContext('testVar', context_id)
    # Access parent page using EvaluateJavaScriptInContext.
    self.assertEquals(TestVar(context_id=1), 'host')
    # Access the iframes.
    self.assertEquals(TestVar(context_id=2), 'iframe1')
    self.assertTrue(TestVar(context_id=3) in ['iframe2', 'iframe3'])
    self.assertTrue(TestVar(context_id=4) in ['iframe2', 'iframe3'])
    # Accessing a non-existent iframe throws an exception.
    self.assertRaises(exceptions.EvaluateException,
                      lambda: self._tab.EvaluateJavaScriptInContext('1+1', context_id=5))
| {
"content_hash": "8aebcc428da459df40c486e2295ef172",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 78,
"avg_line_length": 37.784615384615385,
"alnum_prop": 0.6758957654723127,
"repo_name": "anirudhSK/chromium",
"id": "ef45bab0575bebfcca1f8e49aae42c6ea2d99a55",
"size": "2618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/core/backends/chrome/inspector_runtime_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "52960"
},
{
"name": "Awk",
"bytes": "8660"
},
{
"name": "C",
"bytes": "42502191"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "201859263"
},
{
"name": "CSS",
"bytes": "946557"
},
{
"name": "DOT",
"bytes": "2984"
},
{
"name": "Java",
"bytes": "5687122"
},
{
"name": "JavaScript",
"bytes": "22163714"
},
{
"name": "M",
"bytes": "2190"
},
{
"name": "Matlab",
"bytes": "2496"
},
{
"name": "Objective-C",
"bytes": "7670589"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "672770"
},
{
"name": "Python",
"bytes": "10873885"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "1315894"
},
{
"name": "Tcl",
"bytes": "277091"
},
{
"name": "TypeScript",
"bytes": "1560024"
},
{
"name": "XSLT",
"bytes": "13493"
},
{
"name": "nesC",
"bytes": "15206"
}
],
"symlink_target": ""
} |
import os
# Project root (two directory levels up from this settings module).
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# SECURITY: hard-coded secret key — acceptable only in this example project.
SECRET_KEY = '9()(lzm)jdr$szjfdx8^^#j_6efj@d&$9pb6l2h&=udxom3(bn'
DEBUG = True
TEMPLATE_DEBUG = True
if DEBUG:
    # XXX Monkey patch is_secure_transport to allow development over insecure HTTP
    from warnings import warn
    warn(UserWarning("Monkey_patching oauthlib.oauth2:is_secure_transport to allow OAuth2 over HTTP. Never do this in production!"))
    fake_is_secure_transport = lambda token_url: True
    import oauthlib.oauth2
    import requests_oauthlib.oauth2_session
    import oauthlib.oauth2.rfc6749.parameters
    import oauthlib.oauth2.rfc6749.clients.base
    # Each of these modules holds its own reference to is_secure_transport,
    # so every one must be patched individually.
    for module in [
        oauthlib.oauth2,
        requests_oauthlib.oauth2_session,
        oauthlib.oauth2.rfc6749.parameters,
        oauthlib.oauth2.rfc6749.clients.base,
    ]:
        module.is_secure_transport = fake_is_secure_transport
ALLOWED_HOSTS = ['ssoexample.tracon.fi']
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'kompassi_oauth2_example',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
AUTHENTICATION_BACKENDS = (
    # OAuth2 backend listed first so Kompassi logins are attempted before
    # falling back to Django's local accounts.
    'kompassi_oauth2.backends.KompassiOAuth2AuthenticationBackend',
    'django.contrib.auth.backends.ModelBackend',
)
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
ROOT_URLCONF = 'kompassi_oauth2_example.urls'
WSGI_APPLICATION = 'kompassi_oauth2_example.wsgi.application'
# Single-file SQLite database kept next to the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'kompassi_oauth2_example.sqlite3'),
    }
}
# Errors mail the admins in production (RequireDebugFalse); everything else
# goes to the console, verbosely when DEBUG is on.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'console':{
            'level': 'DEBUG' if DEBUG else 'WARNING',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose'
        },
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
        'celery': {
            'handlers': ['console'],
            'level': 'DEBUG' if DEBUG else 'WARNING',
            'propagate': True
        },
        'kompassi_oauth2': {
            'handlers': ['console'],
            'level': 'DEBUG' if DEBUG else 'WARNING',
            'propagate': True
        },
    }
}
LANGUAGE_CODE = 'fi-fi'
TIME_ZONE = 'Europe/Helsinki'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
# Kompassi OAuth2 endpoints and example-only (insecure) client credentials.
KOMPASSI_OAUTH2_AUTHORIZATION_URL = 'http://kompassi.dev:8000/oauth2/authorize'
KOMPASSI_OAUTH2_TOKEN_URL = 'http://kompassi.dev:8000/oauth2/token'
KOMPASSI_OAUTH2_CLIENT_ID = 'kompassi_insecure_test_client_id'
KOMPASSI_OAUTH2_CLIENT_SECRET = 'kompassi_insecure_test_client_secret'
KOMPASSI_OAUTH2_SCOPE = ['read']
KOMPASSI_API_V2_USER_INFO_URL = 'http://kompassi.dev:8000/api/v2/people/me'
LOGIN_URL = '/oauth2/login'
LOGOUT_URL = '/logout'
| {
"content_hash": "28d3ce7a46d10656305bb47d54c01a72",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 132,
"avg_line_length": 28.507246376811594,
"alnum_prop": 0.6329435688866294,
"repo_name": "tracon/kompassi-oauth2-example",
"id": "ffe63159c91c2282ef02bc65f4afd90fd13794a0",
"size": "3934",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kompassi_oauth2_example/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "453"
},
{
"name": "Python",
"bytes": "9609"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from optparse import make_option
from django.core.management.base import NoArgsCommand
from doc.actions import create_doc_db, create_doc_local
from docutil.commands_util import recocommand
from docutil.str_util import smart_decode
class Command(NoArgsCommand):
    """`createdoc`: initialize a documentation model for a project release,
    optionally creating the local document copy as well (--local)."""
    option_list = NoArgsCommand.option_list + (
        make_option('--pname', action='store', dest='pname',
            default='-1', help='Project unix name'),
        make_option('--dname', action='store', dest='dname',
            default='-1', help='Document name'),
        make_option('--release', action='store', dest='release',
            default='-1', help='Project Release'),
        make_option('--syncer', action='store', dest='syncer',
            default='doc.syncer.generic_syncer.SingleURLSyncer',
            help='Syncer Python name'),
        make_option('--parser', action='store', dest='parser',
            default='-1', help='Parser Python name'),
        make_option('--url', action='store', dest='url',
            default='-1', help='Document URL'),
        make_option('--local', action='store_true', dest='local',
            default=False, help='Set to create local document'),
    )
    help = "Initialize documentation model"
    @recocommand
    def handle_noargs(self, **options):
        # smart_decode normalizes raw option values to unicode before use.
        pname = smart_decode(options.get('pname'))
        dname = smart_decode(options.get('dname'))
        release = smart_decode(options.get('release'))
        url = smart_decode(options.get('url'))
        syncer = smart_decode(options.get('syncer'))
        parser = smart_decode(options.get('parser'))
        create_doc_db(pname, dname, release, url, syncer, parser)
        # --local additionally mirrors the document into the local store.
        if (options.get('local', False)):
            create_doc_local(pname, dname, release, syncer, url)
| {
"content_hash": "0572479247a783890a4a19eed1e87eab",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 65,
"avg_line_length": 45.1,
"alnum_prop": 0.6280487804878049,
"repo_name": "bartdag/recodoc2",
"id": "9de4c50be772d7b25cacede11289c2285e338cce",
"size": "1804",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "recodoc2/apps/doc/management/commands/createdoc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "5581"
},
{
"name": "HTML",
"bytes": "32211467"
},
{
"name": "Java",
"bytes": "13646"
},
{
"name": "Perl",
"bytes": "503"
},
{
"name": "Python",
"bytes": "717834"
}
],
"symlink_target": ""
} |
import json
from card_lookup.searcher import Searcher
# Load the card database and look up a card by name.
# The original wrapped the file text in a redundant str() and closed the
# handle manually; a with-block + json.load is equivalent and leak-safe.
with open("cards.json", encoding="utf8") as card_file:
    card_dict = json.load(card_file)
card_searcher = Searcher(card_dict)
result = card_searcher.find_card("Al'Akir")
print(result)
"content_hash": "3c45eded68c3fd0661616585a6be88cb",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 46,
"avg_line_length": 25.4,
"alnum_prop": 0.7322834645669292,
"repo_name": "MagiChau/Hearthstone-Card-Lookup",
"id": "24cdc93546a68a91b81818f01afb14ebe517e3a6",
"size": "254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4671"
}
],
"symlink_target": ""
} |
from unittest.mock import patch
import pytest
from dvc.fs import get_cloud_fs
from dvc.fs.webdav import WebDAVFileSystem, WebDAVSFileSystem
from tests.utils.asserts import issubset
# Shared fixtures for the WebDAV filesystem config tests below.
url_fmt = "{scheme}://{user}@example.com/public.php/webdav"
url = "webdav://example.com/public.php/webdav"
user = "username"
password = "password"
token = "4MgjsNM5aSJjxIKM"
def test_common():
    # Every explicitly passed option must surface in fs_args, and the
    # `prefix` argument must be exposed as fs.prefix.
    fs = WebDAVFileSystem(
        url=url,
        cert_path="cert/path",
        key_path="key/path",
        ssl_verify="bundle.pem",
        timeout=10,
        prefix="/public.php/webdav",
        user=None,
        password=None,
        ask_password=False,
        token=None,
    )
    assert issubset(
        {
            "headers": {},
            "auth": None,
            "base_url": url,
            "cert": ("cert/path", "key/path"),
            "verify": "bundle.pem",
            "timeout": 10,
        },
        fs.fs_args,
    )
    assert fs.prefix == "/public.php/webdav"
def test_user():
    """A user alone becomes a (user, None) basic-auth pair."""
    fs = WebDAVFileSystem(url=url, user=user)
    expected = {"auth": (user, None), "headers": {}}
    assert issubset(expected, fs.fs_args)
def test_password():
    """user + password become an HTTP basic-auth tuple."""
    fs = WebDAVFileSystem(url=url, user=user, password=password)
    expected = {
        "headers": {},
        "auth": (user, password),
    }
    assert issubset(expected, fs.fs_args)
def test_token():
    """A token is sent as a bearer Authorization header, with no basic auth."""
    fs = WebDAVFileSystem(token=token, url=url)
    expected = {"headers": {"Authorization": f"Bearer {token}"}, "auth": None}
    assert issubset(expected, fs.fs_args)
@patch("dvc.fs.webdav.ask_password")
def test_ask_password(ask_password_mocked):
    """ask_password must be consulted only when no password is configured."""
    ask_password_mocked.return_value = "pass"
    host = "host"
    # it should not ask for password as password is set
    config = {
        "url": url,
        "user": user,
        "password": password,
        "ask_password": True,
        "host": host,
    }
    fs = WebDAVFileSystem(**config)
    assert issubset({"auth": (user, password), "headers": {}}, fs.fs_args)
    # Without a password, ask_password supplies it — and is prompted exactly once.
    config.pop("password")
    fs = WebDAVFileSystem(**config)
    assert issubset({"auth": (user, "pass"), "headers": {}}, fs.fs_args)
    ask_password_mocked.assert_called_once_with(host, user)
def test_ssl_verify_custom_cert():
    """ssl_verify given as a path is passed straight through as `verify`."""
    fs = WebDAVFileSystem(url=url, ssl_verify="/path/to/custom/cabundle.pem")
    assert fs.fs_args["verify"] == "/path/to/custom/cabundle.pem"
@pytest.mark.parametrize(
    "base_url, fs_cls",
    [
        (url_fmt.format(scheme="webdav", user=user), WebDAVFileSystem),
        (url_fmt.format(scheme="webdavs", user=user), WebDAVSFileSystem),
    ],
)
def test_remote_with_jobs(dvc, base_url, fs_cls):
    # webdav:// maps onto plain http, webdavs:// onto https.
    scheme = "http" + ("s" if fs_cls is WebDAVSFileSystem else "")
    remote_config = {"url": base_url}
    dvc.config["remote"]["dav"] = remote_config
    cls, config, _ = get_cloud_fs(dvc, name="dav")
    # The user embedded in the URL is extracted into the resolved config.
    assert config["user"] == user
    assert f"{scheme}://{user}@example.com" in config["host"]
    assert cls is fs_cls
    # config from remote takes priority
    remote_config.update({"user": "admin"})
    cls, config, _ = get_cloud_fs(dvc, name="dav")
    assert config["user"] == "admin"
    assert f"{scheme}://{user}@example.com" in config["host"]
    assert cls is fs_cls
| {
"content_hash": "e8d09c1e48f2bf650043b27f5b002999",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 74,
"avg_line_length": 27.016260162601625,
"alnum_prop": 0.583508877520313,
"repo_name": "efiop/dvc",
"id": "e207dfe977300802377ec89f009f0575bd0ef32f",
"size": "3323",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/unit/remote/test_webdav.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "53"
},
{
"name": "Inno Setup",
"bytes": "10158"
},
{
"name": "PowerShell",
"bytes": "2686"
},
{
"name": "Python",
"bytes": "2231040"
},
{
"name": "Shell",
"bytes": "695"
}
],
"symlink_target": ""
} |
import sqlite3
import time
class SQLiteLogger:
    """Append (timestamp, device, property, value) readings to a SQLite file.

    Usable as a context manager:

        with SQLiteLogger("g2x.db") as logger:
            logger.log("dht22", "temperature", 21.5)
    """

    def __init__(self, filename="g2x.db"):
        self.filename = filename
        self.connection = None

    def __enter__(self):
        self.open()
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def open(self):
        """Connect to the database, creating the readings table if needed.

        CREATE TABLE IF NOT EXISTS replaces the old existence probe via
        open(self.filename): it is race-free and also repairs a database
        file that exists but lacks the table.
        """
        self.connection = sqlite3.connect(self.filename)
        cursor = self.connection.cursor()
        cursor.execute('''CREATE TABLE IF NOT EXISTS readings
                 (date real, device text, property text, value real)''')
        self.connection.commit()

    def close(self):
        """Close the connection; log() becomes a no-op afterwards."""
        if self.connection is not None:
            self.connection.close()
            self.connection = None

    def log(self, device, property, value, t=None):
        """Insert one reading; *t* defaults to the current UNIX time."""
        if self.connection is not None:
            if t is None:
                t = time.time()
            values = (t, device, property, value)
            cursor = self.connection.cursor()
            cursor.execute("INSERT INTO readings VALUES(?,?,?,?)", values)
            self.connection.commit()
| {
"content_hash": "2485d2c028ace762df99161760a86d50",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 85,
"avg_line_length": 29.951219512195124,
"alnum_prop": 0.5537459283387622,
"repo_name": "thelonious/g2x",
"id": "faad7fc06248eea46a600b77efeecaaa5336cac7",
"size": "1228",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/DataLogger/sqlite_logger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32092"
},
{
"name": "Shell",
"bytes": "534"
}
],
"symlink_target": ""
} |
import subprocess, os.path
import sqlite3
import shutil
import tarfile
import datetime
import csv
from time import sleep
import datetime
from functools import wraps
from flask import Flask
from flask import request
from flask import render_template
from flask import Markup
from flask import session, redirect, url_for, escape, request
from flask import Blueprint
from flask import g
import config
from tasks import startTaskDaemon, stopTaskDaemon, checkStatus
# Blueprint registered by the main app; templates/static resolve relative to this package.
admincraft = Blueprint('admincraft', __name__, template_folder='templates', static_folder='static')
def requires_auth(f):
    """Decorator: redirect to the login view unless the session credentials
    match the configured username and password."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        username_ok = session.get('username') == config.USERNAME
        password_ok = session.get('password') == config.PASSWORD
        if username_ok and password_ok:
            return f(*args, **kwargs)
        return redirect(url_for('admincraft.login'))
    return wrapper
#Main index.html page.
# Renders the dashboard: login state, recent server log, ops / whitelist /
# banned IPs, server.properties, and the daemon's online/offline status.
@admincraft.route("/")
@requires_auth
def index(name=None):
    #If user session, then display "Logged in as %"
    if 'username' in session:
        username = 'Logged in as %s' % escape(session['username'])
    else:
        username = 'You are not logged in'
    #Open and read -10 lines from the server.log file into object. Used to get last line for activeUsers below.
    loggingFile = config.MINECRAFTDIR + config.SERVERLOG
    loggingFile = open(loggingFile, "r")
    logging = loggingFile.readlines()[-10:]
    #Read ops.txt to display Server Operators on Users section.
    opsFile = config.MINECRAFTDIR + config.SERVEROPS
    ops = open(opsFile, "r").readlines()
    ops = [i.rstrip() for i in ops]
    #Read white-list.txt to display Whitelisted on Users section.
    whiteListFile = config.MINECRAFTDIR + config.WHITELIST
    whiteListUsers = open(whiteListFile, "r").readlines()
    #Read banned-ips.txt to display Banned IPs on Users section.
    # Since Minecraft 1.3 the ban list is pipe-delimited, hence the csv reader.
    bannedIPsFile = config.MINECRAFTDIR + config.BANNEDIPS
    bannedIPs = csv.reader(open(bannedIPsFile, "r").readlines(), delimiter='|')
    #bannedIPs = [i.rstrip() for i in bannedIPs] #pre 1.3
    for b in bannedIPs:
        print b
    #Read server.properties to display Server Properties on Server Config section. -2 first lines.
    #NOTE: if the user edits their server configuration file, the last two lines may not be what
    #you are expecting.
    propertiesFile = config.MINECRAFTDIR + config.SERVERPROPERTIES
    properties = open(propertiesFile, "r").readlines()[2:]
    #Capturing status by running status command to /etc/init.d/minecraft and returning as stdout.
    stdout = subprocess.Popen([config.MINECRAFTDAEMON + " status"], stdout=subprocess.PIPE, shell=True).communicate()[0]
    #Check status and display Online or Offline to index.html (bottom-right corner) page.
    serverStatus = stdout
    print serverStatus
    if "online" in serverStatus:
        serverStatus = Markup('<p style="color:#339933;font-weight:bold">Online</p>')
    elif "offline" in serverStatus:
        serverStatus = Markup('<p style="color:#339933;font-weight:bold">Offline</p>')
    else:
        serverStatus = "Unable to check server status."
    # Theme templates live under templates/themes/<THEME>/.
    selectedTheme = 'themes/%s/index.html' % config.THEME
    return render_template(selectedTheme,username=username,
                           name=name,
                           ops=ops,
                           logging=logging,
                           whiteListUsers=whiteListUsers,
                           bannedIPs=bannedIPs,
                           properties=properties,
                           serverStatus=serverStatus,
                           LOGINTERVAL=config.LOGINTERVAL,
                           THEME=config.THEME)
#/server is used to send GET requests to Restart, Start, Stop or Backup server.
@admincraft.route("/server", methods=['GET'])
@requires_auth
def serverState():
    """Dispatch a lifecycle action (restart/start/stop/backup/status) to the init script."""
    keyword = request.args.get('option')
    #The simple init-script actions all share one shape: fire-and-forget the
    #daemon command, then report what was kicked off.
    actions = {
        "restart": (' restart', 'Restarting Minecraft Server...'),
        "start": (' start', 'Starting Minecraft Server...'),
        "stop": (' stop', 'Stopping Minecraft Server...'),
        "backup": (' backup', 'Backing up Minecraft Server...'),
    }
    if keyword in actions:
        argument, message = actions[keyword]
        subprocess.Popen(config.MINECRAFTDAEMON + argument, shell=True)
        return message
    #'status' is special: capture the init script's output and translate it to markup.
    if keyword == "status":
        stdout = subprocess.Popen([config.MINECRAFTDAEMON + " status"], stdout=subprocess.PIPE, shell=True).communicate()[0]
        serverStatus = stdout
        if "online" in serverStatus:
            serverStatus = Markup('Server is <font color="#339933"><strong>Online</strong></font>')
        elif "offline" in serverStatus:
            serverStatus = Markup('Server is <font color="#FF0000"><strong>Offline</strong></font>')
        else:
            serverStatus = "Unable to check server status."
        return serverStatus
    return 'Invalid option!'
#/logs returns the *entire* server log.
@admincraft.route("/logs", methods=['GET'])
@requires_auth
def showLog():
    """Render the complete server.log through the active theme's logging template."""
    logPath = config.MINECRAFTDIR + config.SERVERLOG
    with open(logPath, "r") as logFile:
        loggingHTML = logFile.readlines()
    return render_template('themes/%s/logging.html' % config.THEME, loggingHTML=loggingHTML)
#/command is used when sending commands to '/etc/init.d/minecraft command' from the GUI. Used on mainConsole on index.html.
@admincraft.route("/command", methods=['GET'])
@requires_auth
def sendCommand():
    """Forward a console command ('say'/'give'/raw) from the GUI to the server daemon."""
    #server.log path and timestamp, kept for the disabled manual-logging fallback below.
    loggingFile = config.MINECRAFTDIR + config.SERVERLOG
    now = datetime.datetime.now()
    time = now.strftime("%Y-%m-%d %H:%M:%S")
    #Grabs operater value from GET request. say/give/command
    consoleOperator = str(request.args.get('operator'))
    #If the value was "command", then set as '' to remove redundancies when Popen is executed below.
    if consoleOperator == "command":
        consoleOperator = ''
    #Otherwise, keep the value. (say/give)
    else:
        consoleOperator = consoleOperator + ' '
    #Grab value from command GET request. This was entered via user from textInput box.
    command = str(request.args.get('command'))
    #SECURITY: user-supplied text is interpolated straight into a shell command
    #line (shell=True). Anyone who passes requires_auth can inject shell syntax
    #here; restrict the panel to trusted admins or whitelist the input first.
    commandProc = config.MINECRAFTDAEMON + ' command "' + consoleOperator + command + '"'
    subprocess.Popen(commandProc, shell=True)
    # Post Minecraft 1.3, Console logging was removed, so appending command entered to file manually.
    """ seems like console logging is back as of 1.4.7
    with open(loggingFile, "a") as f:
        f.write(time + " [CONSOLE] " + command + "\n")
    """
    return 'Sending Command...'
#/logging reads the last X amount of lines from server.log to be parsed out on GUI #mainConsole.
@admincraft.route("/logging", methods=['GET'])
@requires_auth
def logs():
    """Render the newest config.LOGLINES lines of server.log for the console widget."""
    logPath = config.MINECRAFTDIR + config.SERVERLOG
    with open(logPath, "r") as logFile:
        loggingHTML = logFile.readlines()[-config.LOGLINES:]
    selectedTheme = 'themes/%s/logging.html' % config.THEME
    return render_template(selectedTheme, loggingHTML=loggingHTML)
#/dataValues is used to create a dataIcons.html view, which is then imported to Index. Used for "Give" on GUI.
@admincraft.route("/dataValues", methods=['GET'])
@requires_auth
def dataValues():
    """Render the item/data-value icon picker used by the 'Give' console shortcut."""
    return render_template('themes/%s/dataIcons.html' % config.THEME)
#/login will be for sessions. So far, only username is accepted with any value. Needs work here.
@admincraft.route('/login', methods=['GET', 'POST'])
def login():
    """Store the submitted credentials in the session, then bounce to the dashboard."""
    #NOTE(review): credentials are kept in the session in clear text and only
    #compared later by requires_auth — consider verifying/hashing them here.
    if request.method == 'POST':
        session['username'] = request.form['username']
        session['password'] = request.form['password']
        return redirect(url_for('admincraft.index'))
    return render_template('themes/%s/login.html' % config.THEME)
#Kill or Pop session when hitting /logout
@admincraft.route('/logout')
def logout():
    """Drop any stored credentials from the session and return to the dashboard."""
    for key in ('username', 'password'):
        session.pop(key, None)
    return redirect(url_for('admincraft.index'))
#/commandList is used to create a commandList.html view, which is then imported to Index. Used for "Command" on GUI.
@admincraft.route('/commandList', methods=['GET', 'POST'])
@requires_auth
def commandList():
    """Render the command reference list used by the 'Command' console shortcut."""
    return render_template('themes/%s/commandList.html' % config.THEME)
@admincraft.route('/tabs', methods=['GET', 'POST'])
@requires_auth
def tabs():
    """Render tabs.html: server properties, user lists, active players and scheduled tasks.

    NOTE(review): none of the file handles opened below are ever closed, and
    the sqlite connection is not closed either (only the cursor).
    """
    #Read server.properties to display Server Properties on Server Config section. -2 first lines.
    propertiesFile = config.MINECRAFTDIR + config.SERVERPROPERTIES
    properties = open(propertiesFile, "r").readlines()[2:]
    #Read ops.txt to display Server Operators on Users section.
    opsFile = config.MINECRAFTDIR + config.SERVEROPS
    ops = open(opsFile, "r").readlines()
    ops = [i.rstrip() for i in ops]
    #Read white-list.txt to display Whitelisted on Users section.
    whiteListFile = config.MINECRAFTDIR + config.WHITELIST
    whiteListUsers = open(whiteListFile, "r").readlines()
    whiteListUsers = [i.rstrip() for i in whiteListUsers]
    #Read banned-players.txt to display Banned Players on Users section.
    bannedUsersFile = config.MINECRAFTDIR + config.BANNEDPLAYERS
    """
    bannedUsers = open(bannedUsersFile, "r").readlines()[2:]
    bannedUsers = [i.rstrip() for i in bannedUsers]
    #Read banned-ips.txt to display Banned IPs on Users section.
    bannedIPsFile = config.MINECRAFTDIR + config.BANNEDIPS
    bannedIPs = open(bannedIPsFile, "r").readlines()[2:]
    bannedIPs = [i.rstrip() for i in bannedIPs]
    """
    #Skip the first 3 header lines, then take the first '|'-separated field (the name).
    bannedUsers = csv.reader(open(bannedUsersFile, "r").readlines()[3:], delimiter='|', quoting=csv.QUOTE_ALL)
    #bannedUsers = [i.rstrip() for i in bannedUsers] #pre 1.3
    bannedUsersList = []
    for u in bannedUsers:
        bannedUsersList.append(u[0])
    #Read banned-ips.txt to display Banned IPs on Users section.
    bannedIPsFile = config.MINECRAFTDIR + config.BANNEDIPS
    bannedIPs = csv.reader(open(bannedIPsFile, "r").readlines()[3:], delimiter='|', quoting=csv.QUOTE_ALL)
    #bannedIPs = [i.rstrip() for i in bannedIPs]
    bannedIPsList = []
    for i in bannedIPs:
        bannedIPsList.append(i[0])
    #Ghetto method of shelling out the 'list' command to minecraft init script, which returns
    #the list of players in server.log. Grab last line of server.log, strip time/date
    #and determine whether players are connected or not. Rest of logic in Jinja2 tabs.html.
    subprocess.Popen(config.MINECRAFTDAEMON + ' command list', shell=True)
    sleep(1) #Unfortunately, the minecraft init commands lag a bit, so this is required to grab the last line correctly.
    activeUsersFile = config.MINECRAFTDIR + config.SERVERLOG
    #Keep only the final log line; [27:] drops the fixed-width timestamp/level prefix.
    activeUsers = open(activeUsersFile, "r").readlines()[-1:]
    activeUsers = [i.rstrip()[27:] for i in activeUsers]
    #NOTE(review): noUsers is never passed to render_template below — dead unless
    #the template resolves it some other way; confirm against tabs.html.
    noUsers = "No players connected" #If activeUsers list is empty, Jinja2 will use this variable instead.
    backupDir = config.BACKUPDIR
    #NOTE(review): the task-scheduler badge is hard-coded to "Online"; it does not
    #consult checkStatus() like the /task route does.
    isRunning = Markup('Task Scheduler <p style="color:#339933;font-weight:bold">Online</p>')
    #Connects to db to list scheduled jobs in a table
    dbpath = config.DATABASE
    conn = sqlite3.connect(dbpath)
    c = conn.cursor()
    c.execute('select * from tasks order by type')
    a = c.fetchall()
    conn.commit()
    c.close()
    selectedTheme = 'themes/%s/tabs.html' % config.THEME
    return render_template(selectedTheme, a=a, activeUsers=activeUsers, isRunning=isRunning, backupDir=backupDir, ops=ops, whiteListUsers=whiteListUsers, bannedUsersList=bannedUsersList, bannedIPsList=bannedIPsList, properties=properties)
#/serverConfig is used for GET request via server property configurations.
@admincraft.route('/serverConfig', methods=['GET'])
@requires_auth
def serverConfig():
    """Apply server.properties values submitted by the Server Config form.

    Each known property present in the GET request replaces the value of the
    matching ``key=value`` line in server.properties; parameters that are
    absent leave their lines untouched (the old code crashed on None there).
    """
    #Keys the form may submit; anything else in the request is ignored.
    #(Includes 'hardcore' and 'spawn-protection', which the old code read but
    #never actually wrote back.)
    propertyKeys = (
        'generator-settings', 'allow-nether', 'level-name', 'enable-query',
        'allow-flight', 'server-port', 'level-type', 'enable-rcon',
        'level-seed', 'force-gamemode', 'server-ip', 'max-build-height',
        'spawn-npcs', 'white-list', 'spawn-animals', 'snooper-enabled',
        'hardcore', 'texture-pack', 'online-mode', 'pvp', 'difficulty',
        'gamemode', 'max-players', 'spawn-monsters', 'generate-structures',
        'view-distance', 'spawn-protection', 'motd',
    )
    newValues = {}
    for key in propertyKeys:
        value = request.args.get(key)
        if value is not None:
            newValues[key] = value
    propertiesPath = config.MINECRAFTDIR + config.SERVERPROPERTIES
    with open(propertiesPath, "r") as f:
        pText = f.readlines()
    #Match on the exact key left of '=' — the old substring test let e.g.
    #"gamemode" clobber the "force-gamemode" line.
    pOutput = []
    for pItem in pText:
        lineKey = pItem.split('=', 1)[0].strip()
        if '=' in pItem and not pItem.lstrip().startswith('#') and lineKey in newValues:
            pOutput.append(lineKey + '=' + newValues[lineKey] + '\n')
        else:
            pOutput.append(pItem)
    #Rewrite the file from the top. The old code wrote while the "r+" handle sat
    #at EOF after readlines(), which *appended* a second copy of the config
    #instead of replacing it.
    with open(propertiesPath, "w") as f:
        f.writelines(pOutput)
    return redirect(url_for('admincraft.index'))
#/usersConfig - Adds/Removes users from User Config
@admincraft.route('/addUser', methods=['GET', 'POST'])
@requires_auth
def addUser():
    """Append a user (or IP) to the ops/whitelist/banned file named by ?type=."""
    addType = request.args.get('type')
    addValue = request.args.get('user')
    fileNames = {
        "operators": config.SERVEROPS,
        "whitelist": config.WHITELIST,
        "banned-players": config.BANNEDPLAYERS,
        "banned-ips": config.BANNEDIPS,
    }
    #The old code only printed on a bad type and then crashed with NameError on
    #the undefined file path (HTTP 500); report the problem to the client instead.
    if addType not in fileNames:
        return "Error reading Add Type"
    f = config.MINECRAFTDIR + fileNames[addType]
    #Append the new entry; `with` closes the handle (the old explicit close()
    #inside the with-block was redundant).
    with open(f, "a") as o:
        o.write(addValue + "\n")
    return "User Added"
@admincraft.route('/removeUser', methods=['GET', 'POST'])
@requires_auth
def removeUser():
    """Remove a user (or IP) from the ops/whitelist/banned file named by ?type=."""
    #Grab vars from GET request
    removeType = request.args.get('type')
    removeValue = request.args.get('user')
    fileNames = {
        "operators": config.SERVEROPS,
        "whitelist": config.WHITELIST,
        "banned-players": config.BANNEDPLAYERS,
        "banned-ips": config.BANNEDIPS,
    }
    #Mirror addUser: answer the client instead of crashing on an unknown type.
    if removeType not in fileNames:
        return "Error reading Remove Type"
    f = config.MINECRAFTDIR + fileNames[removeType]
    #Read, drop the matching line, rewrite. The old code called writelines()/
    #close() on the *list* returned by readlines() (AttributeError -> HTTP 500),
    #so the removal never actually reached the file.
    with open(f, "r") as source:
        names = source.readlines()
    kept = [name for name in names if name != removeValue + "\n"]
    with open(f, "w") as target:
        target.writelines(kept)
    return "User Removed"
@admincraft.route('/task', methods=['GET'])
@requires_auth
def taskService():
    """Control the background task daemon: start/stop/restart/status via ?command=."""
    command = request.args.get("command")
    if command == "stop":
        stopTaskDaemon()
        return 'Shutting down task daemon...'
    elif command == "start":
        startTaskDaemon()
        return 'Starting task daemon...'
    elif command == "restart":
        stopTaskDaemon()
        startTaskDaemon()
        return 'Restarting task daemon...'
    elif command == "status":
        status = checkStatus()
        return status
    #Unknown commands used to fall through and return None, which Flask turns
    #into an HTTP 500; answer like /server does instead.
    return 'Invalid option!'
@admincraft.route('/addTask', methods=['POST', 'GET'])
@requires_auth
def addTask():
    """Persist a scheduled task (type + day/hour/minute fields) into the tasks table."""
    dbpath = config.DATABASE
    task = request.args.get("type")
    dom = request.args.get("dom")
    dow = request.args.get("dow")
    hour = request.args.get("hour")
    minute = request.args.get("minute")
    v = [task, dom, dow, hour, minute]
    conn = sqlite3.connect(dbpath)
    c = conn.cursor()
    #sqlite3.connect() creates the database file, so the old
    #`if not os.path.exists(dbpath)` check (made *after* connecting) was always
    #False and the INSERT failed on a fresh database with "no such table".
    #Create-if-missing, then insert unconditionally.
    c.execute('''create table if not exists tasks (type text, month text, day text, hour text, minute text)''')
    c.execute("INSERT into tasks VALUES (?,?,?,?,?)", v)
    conn.commit()
    c.close()
    conn.close()
    return 'Task saved.'
#Turn on later
#@admincraft.errorhandler(500)
#def not_found(error):
# return render_template('themes/%s/500.html' % config.THEME), 500
#@admincraft.errorhandler(404)
#def not_found(error):
# return render_template('themes/%s/404.html' % config.THEME), 404
| {
"content_hash": "824b33a543154fe498236eb86a4370ec",
"timestamp": "",
"source": "github",
"line_count": 600,
"max_line_length": 238,
"avg_line_length": 41.035,
"alnum_prop": 0.6454652532391049,
"repo_name": "alfg/AdminCraft",
"id": "a170cf6b7a6b582397fc0a65a62f56ac0e777325",
"size": "24644",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "admincraft/views.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25571"
},
{
"name": "Python",
"bytes": "29789"
},
{
"name": "Shell",
"bytes": "6084"
}
],
"symlink_target": ""
} |
from neo.SmartContract.ContractParameterType import ContractParameterType
from neo.VM.InteropService import StackItem, Array, ByteArray, Struct, Boolean, Integer, InteropInterface
from neo.Core.UInt160 import UInt160
from neo.Core.UInt256 import UInt256
from neo.Core.BigInteger import BigInteger
from neo.Core.Cryptography.ECCurve import ECDSA
import binascii
class ContractParameter:
    """Contract Parameter used for parsing parameters sent to and from smart contract invocations"""
    def __init__(self, type, value):
        """
        Args:
            type (neo.SmartContract.ContractParameterType.ContractParameterType): The type of the parameter
            value (*): The value of the parameter
        """
        # NOTE: `type` deliberately shadows the builtin to mirror the field name
        # used throughout this module (self.Type / self.Value).
        self.Type = type
        self.Value = value
    @staticmethod
    def ToParameter(item: StackItem):
        """
        Convert a StackItem to a ContractParameter object
        Args:
            item (neo.VM.InteropService.StackItem) The item to convert to a ContractParameter object
        Returns:
            ContractParameter
        """
        # Arrays and Structs are converted recursively, element by element.
        if isinstance(item, Array) or isinstance(item, Struct):
            items = item.GetArray()
            output = [ContractParameter.ToParameter(subitem) for subitem in items]
            return ContractParameter(type=ContractParameterType.Array, value=output)
        elif isinstance(item, Boolean):
            return ContractParameter(type=ContractParameterType.Boolean, value=item.GetBoolean())
        elif isinstance(item, ByteArray):
            return ContractParameter(type=ContractParameterType.ByteArray, value=item.GetByteArray())
        elif isinstance(item, Integer):
            # Integers are stringified here; ToJson later emits self.Value unchanged.
            return ContractParameter(type=ContractParameterType.Integer, value=str(item.GetBigInteger()))
        elif isinstance(item, InteropInterface):
            return ContractParameter(type=ContractParameterType.InteropInterface, value=item.GetInterface())
        # NOTE(review): any other StackItem subclass falls through and implicitly
        # returns None — confirm callers (including the Array branch above) tolerate that.
    @staticmethod
    def AsParameterType(type: ContractParameterType, item: StackItem):
        """
        Convert a StackItem to a ContractParameter object of a specified ContractParameterType
        Args:
            type (neo.SmartContract.ContractParameterType): The ContractParameterType to convert to
            item (neo.VM.InteropService.StackItem): The item to convert to a ContractParameter object
        Returns:
            ContractParameter
        """
        if type == ContractParameterType.Integer:
            return ContractParameter(type, value=item.GetBigInteger())
        elif type == ContractParameterType.Boolean:
            return ContractParameter(type, value=item.GetBoolean())
        elif type == ContractParameterType.Array:
            output = [ContractParameter.ToParameter(subitem) for subitem in item.GetArray()]
            return ContractParameter(type, value=output)
        elif type == ContractParameterType.String:
            return ContractParameter(type, value=item.GetString())
        elif type == ContractParameterType.InteropInterface:
            return ContractParameter(type, value=item.GetInterface())
        # all other types return a byte array
        else:
            return ContractParameter(type, value=item.GetByteArray())
    def ToJson(self, auto_hex=True):
        """
        Converts a ContractParameter instance to a json representation
        Args:
            auto_hex (bool): when True (default), ByteArray values are hex-encoded;
                when False the raw value object is placed in the dict as-is.
        Returns:
            dict: a dictionary representation of the contract parameter
        """
        jsn = {}
        jsn['type'] = str(ContractParameterType(self.Type))
        if self.Type == ContractParameterType.Signature:
            jsn['value'] = self.Value.hex()
        elif self.Type == ContractParameterType.ByteArray:
            if auto_hex:
                jsn['value'] = self.Value.hex()
            else:
                jsn['value'] = self.Value
        elif self.Type == ContractParameterType.Boolean:
            jsn['value'] = self.Value
        elif self.Type == ContractParameterType.String:
            jsn['value'] = str(self.Value)
        elif self.Type == ContractParameterType.Integer:
            jsn['value'] = self.Value
        # @TODO, see ``FromJson``, not sure if this is working properly
        elif self.Type == ContractParameterType.PublicKey:
            jsn['value'] = self.Value.ToString()
        elif self.Type in [ContractParameterType.Hash160,
                           ContractParameterType.Hash256]:
            jsn['value'] = self.Value.ToString()
        elif self.Type == ContractParameterType.Array:
            # Falsy elements (e.g. the None fallthrough from ToParameter) are skipped.
            res = []
            for item in self.Value:
                if item:
                    res.append(item.ToJson(auto_hex=auto_hex))
            jsn['value'] = res
        elif self.Type == ContractParameterType.InteropInterface:
            try:
                jsn['value'] = self.Value.ToJson()
            except Exception as e:
                # Interop values without a usable ToJson() are silently omitted.
                pass
        return jsn
    def ToVM(self):
        """
        Used for turning a ContractParameter item into something consumable by the VM
        Returns:
            The VM-ready value: hex-encoded UTF-8 for strings, BigInteger for
            plain-int Integers, otherwise self.Value unchanged.
        """
        if self.Type == ContractParameterType.String:
            return str(self.Value).encode('utf-8').hex()
        elif self.Type == ContractParameterType.Integer and isinstance(self.Value, int):
            return BigInteger(self.Value)
        return self.Value
    @staticmethod
    def FromJson(json):
        """
        Convert a json object to a ContractParameter object
        Args:
            json (dict): The item to convert to a ContractParameter object
        Returns:
            ContractParameter
        """
        type = ContractParameterType.FromString(json['type'])
        value = json['value']
        param = ContractParameter(type=type, value=None)
        if type == ContractParameterType.Signature or type == ContractParameterType.ByteArray:
            param.Value = bytearray.fromhex(value)
        elif type == ContractParameterType.Boolean:
            param.Value = bool(value)
        elif type == ContractParameterType.Integer:
            param.Value = int(value)
        elif type == ContractParameterType.Hash160:
            param.Value = UInt160.ParseString(value)
        elif type == ContractParameterType.Hash256:
            param.Value = UInt256.ParseString(value)
        # @TODO Not sure if this is working...
        elif type == ContractParameterType.PublicKey:
            param.Value = ECDSA.decode_secp256r1(value).G
        elif type == ContractParameterType.String:
            param.Value = str(value)
        elif type == ContractParameterType.Array:
            val = [ContractParameter.FromJson(item) for item in value]
            param.Value = val
        # NOTE(review): unrecognized types leave param.Value as None rather than raising.
        return param
| {
"content_hash": "4f0ae2ee0028244f8859678ec8f8d81b",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 108,
"avg_line_length": 36.09189189189189,
"alnum_prop": 0.6380110828216264,
"repo_name": "hal0x2328/neo-python",
"id": "1d9fd9788f21b72c8535201d368566088ee4002d",
"size": "6677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neo/SmartContract/ContractParameter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2059"
},
{
"name": "Makefile",
"bytes": "1898"
},
{
"name": "Python",
"bytes": "1758220"
},
{
"name": "Shell",
"bytes": "531"
}
],
"symlink_target": ""
} |
"""Support for ISY994 fans."""
from __future__ import annotations
import math
from typing import Any
from pyisy.constants import ISY_VALUE_UNKNOWN, PROTO_INSTEON
from homeassistant.components.fan import DOMAIN as FAN, FanEntity, FanEntityFeature
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util.percentage import (
int_states_in_range,
percentage_to_ranged_value,
ranged_value_to_percentage,
)
from .const import _LOGGER, DOMAIN as ISY994_DOMAIN, ISY994_NODES, ISY994_PROGRAMS
from .entity import ISYNodeEntity, ISYProgramEntity
from .helpers import migrate_old_unique_ids
SPEED_RANGE = (1, 255) # off is not included
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
    """Set up the ISY994 fan platform."""
    hass_isy_data = hass.data[ISY994_DOMAIN][entry.entry_id]
    # Device-backed fans first, then program-backed ones, in config order.
    entities: list[ISYFanEntity | ISYFanProgramEntity] = [
        ISYFanEntity(node) for node in hass_isy_data[ISY994_NODES][FAN]
    ]
    entities.extend(
        ISYFanProgramEntity(name, status, actions)
        for name, status, actions in hass_isy_data[ISY994_PROGRAMS][FAN]
    )
    await migrate_old_unique_ids(hass, FAN, entities)
    async_add_entities(entities)
class ISYFanEntity(ISYNodeEntity, FanEntity):
    """Representation of an ISY994 fan device."""
    _attr_supported_features = FanEntityFeature.SET_SPEED
    @property
    def percentage(self) -> int | None:
        """Return the current speed percentage."""
        # Unknown node status maps to an unknown speed.
        if self._node.status == ISY_VALUE_UNKNOWN:
            return None
        return ranged_value_to_percentage(SPEED_RANGE, self._node.status)
    @property
    def speed_count(self) -> int:
        """Return the number of speeds the fan supports."""
        # Insteon nodes report 3 discrete speeds; all others expose the full range.
        if self._node.protocol == PROTO_INSTEON:
            return 3
        return int_states_in_range(SPEED_RANGE)
    @property
    def is_on(self) -> bool | None:
        """Get if the fan is on."""
        if self._node.status == ISY_VALUE_UNKNOWN:
            return None
        # Any non-zero status counts as on.
        return bool(self._node.status != 0)
    async def async_set_percentage(self, percentage: int) -> None:
        """Set node to speed percentage for the ISY994 fan device."""
        if percentage == 0:
            await self._node.turn_off()
            return
        # Round up so any non-zero percentage maps to at least speed value 1.
        isy_speed = math.ceil(percentage_to_ranged_value(SPEED_RANGE, percentage))
        await self._node.turn_on(val=isy_speed)
    async def async_turn_on(
        self,
        percentage: int | None = None,
        preset_mode: str | None = None,
        **kwargs: Any,
    ) -> None:
        """Send the turn on command to the ISY994 fan device."""
        # Defaults to 67% when no percentage is given. NOTE(review): because 0 is
        # falsy, an explicit percentage=0 also becomes 67 here rather than off.
        await self.async_set_percentage(percentage or 67)
    async def async_turn_off(self, **kwargs: Any) -> None:
        """Send the turn off command to the ISY994 fan device."""
        await self._node.turn_off()
class ISYFanProgramEntity(ISYProgramEntity, FanEntity):
    """Representation of an ISY994 fan program."""
    @property
    def percentage(self) -> int | None:
        """Return the current speed percentage."""
        if self._node.status == ISY_VALUE_UNKNOWN:
            return None
        return ranged_value_to_percentage(SPEED_RANGE, self._node.status)
    @property
    def speed_count(self) -> int:
        """Return the number of speeds the fan supports."""
        return int_states_in_range(SPEED_RANGE)
    @property
    def is_on(self) -> bool:
        """Get if the fan is on."""
        return bool(self._node.status != 0)
    async def async_turn_off(self, **kwargs: Any) -> None:
        """Send the turn off command to the ISY994 fan program."""
        # Turning off runs the program's 'then' clause.
        if not await self._actions.run_then():
            _LOGGER.error("Unable to turn off the fan")
    async def async_turn_on(
        self,
        percentage: int | None = None,
        preset_mode: str | None = None,
        **kwargs: Any,
    ) -> None:
        """Send the turn on command to the ISY994 fan program."""
        # Turning on runs the program's 'else' clause.
        if not await self._actions.run_else():
            _LOGGER.error("Unable to turn on the fan")
| {
"content_hash": "64e35d997438509aaec72a77a46bbaee",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 84,
"avg_line_length": 33.61904761904762,
"alnum_prop": 0.6520302171860245,
"repo_name": "w1ll1am23/home-assistant",
"id": "9e264076d885cdf9b1316fa767665c15e8a7831f",
"size": "4236",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/isy994/fan.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52277012"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema for django_featuring: Dashboard and Thing models."""

    # Dashboard links to sites.Site, and Thing references
    # contenttypes.ContentType for its generic relation, so both built-in
    # apps must be migrated first.
    dependencies = [
        ('sites', '0001_initial'),
        ('contenttypes', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Dashboard',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('slug', models.SlugField(unique=True)),
                ('sites', models.ManyToManyField(to='sites.Site', blank=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Thing',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('order', models.IntegerField()),
                # content_type + object_id together form a generic foreign key.
                ('object_id', models.PositiveIntegerField()),
                ('template', models.CharField(default=b'', help_text=b'Use a custom template for this item.', max_length=255, blank=True)),
                ('content_type', models.ForeignKey(to='contenttypes.ContentType')),
                ('dashboard', models.ForeignKey(related_name=b'things', to='django_featuring.Dashboard')),
            ],
            options={
                'ordering': ['order'],
            },
            bases=(models.Model,),
        ),
    ]
| {
"content_hash": "1810ad252935a70e8967e5bcaba2e4bc",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 139,
"avg_line_length": 36.525,
"alnum_prop": 0.5359342915811088,
"repo_name": "whatisjasongoldstein/django-featuring",
"id": "0e2d85d7baa4c2928344cdc1f094011eca84eaaa",
"size": "1485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_featuring/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "819"
},
{
"name": "Python",
"bytes": "8001"
}
],
"symlink_target": ""
} |
from __future__ import annotations
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import Callable
from airflow.callbacks.base_callback_sink import BaseCallbackSink
from airflow.callbacks.callback_requests import CallbackRequest
class PipeCallbackSink(BaseCallbackSink):
    """
    Class for sending callbacks to DagProcessor using pipe.

    It is used when DagProcessor is not executed in standalone mode.
    """

    def __init__(self, get_sink_pipe: Callable[[], MultiprocessingConnection]):
        # Store a callable rather than the pipe itself so the current
        # connection can be re-fetched after the DagProcessor is restarted.
        self._get_sink_pipe = get_sink_pipe

    def send(self, callback: CallbackRequest):
        """
        Sends information about the callback to be executed by Pipe.

        :param callback: Callback request to be executed.
        """
        try:
            self._get_sink_pipe().send(callback)
        except ConnectionError:
            # If this died because of an error, it will be noticed and
            # restarted when harvest_serialized_dags calls _heartbeat_manager.
            pass
| {
"content_hash": "f4e73e3097093e30d50872053cc92e31",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 79,
"avg_line_length": 33.774193548387096,
"alnum_prop": 0.6991404011461319,
"repo_name": "nathanielvarona/airflow",
"id": "d702a781fa57c0bf89f3ef5b667678afa67b6065",
"size": "1834",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "airflow/callbacks/pipe_callback_sink.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "70681"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173025"
},
{
"name": "JavaScript",
"bytes": "142848"
},
{
"name": "Jinja",
"bytes": "38895"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23169682"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211967"
},
{
"name": "TypeScript",
"bytes": "484556"
}
],
"symlink_target": ""
} |
from mock import DEFAULT
from mock import MagicMock
from mock import patch
from testtools.matchers import Is, Equals, Not
from trove.common.instance import ServiceStatuses
from trove.guestagent import backup
from trove.guestagent.common import configuration
from trove.guestagent.common.configuration import ImportOverrideStrategy
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.db2 import (
manager as db2_manager)
from trove.guestagent.datastore.experimental.db2 import (
service as db2_service)
from trove.guestagent import pkg
from trove.guestagent import volume
from trove.tests.unittests.guestagent.test_datastore_manager import \
DatastoreManagerTest
class GuestAgentDB2ManagerTest(DatastoreManagerTest):
    """Unit tests for the DB2 guest agent datastore manager.

    setUp monkeypatches module-level attributes on ``db2_service``,
    ``volume`` and ``backup`` and saves the originals; tearDown restores
    each saved attribute exactly once (the original code restored
    ``create_database`` and ``create_user`` twice).
    """

    @patch.object(ImportOverrideStrategy, '_initialize_import_directory')
    @patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT,
                    chown=DEFAULT, chmod=DEFAULT)
    @patch.object(db2_service.DB2App, 'process_default_dbm_config')
    def setUp(self, *arg, **kwargs):
        """Create the manager under test and stash patched originals."""
        super(GuestAgentDB2ManagerTest, self).setUp('db2')
        self.real_status = db2_service.DB2AppStatus.set_status

        # Minimal stand-in for an instance service status record.
        class FakeInstanceServiceStatus(object):
            status = ServiceStatuses.NEW

            def save(self):
                pass

        db2_service.DB2AppStatus.set_status = MagicMock(
            return_value=FakeInstanceServiceStatus())
        self.manager = db2_manager.Manager()
        self.real_db_app_status = db2_service.DB2AppStatus
        # Save originals of every attribute the tests monkeypatch so that
        # tearDown can put them back.
        self.origin_format = volume.VolumeDevice.format
        self.origin_mount = volume.VolumeDevice.mount
        self.origin_mount_points = volume.VolumeDevice.mount_points
        self.origin_stop_db = db2_service.DB2App.stop_db
        self.origin_start_db = db2_service.DB2App.start_db
        self.orig_change_ownership = (db2_service.DB2App.change_ownership)
        self.orig_create_databases = db2_service.DB2Admin.create_database
        self.orig_list_databases = db2_service.DB2Admin.list_databases
        self.orig_delete_database = db2_service.DB2Admin.delete_database
        self.orig_create_users = db2_service.DB2Admin.create_user
        self.orig_list_users = db2_service.DB2Admin.list_users
        self.orig_delete_user = db2_service.DB2Admin.delete_user
        self.orig_update_hostname = db2_service.DB2App.update_hostname
        self.orig_backup_restore = backup.restore
        self.orig_init_config = db2_service.DB2App.init_config
        self.orig_update_overrides = db2_service.DB2App.update_overrides
        self.orig_remove_overrides = db2_service.DB2App.remove_overrides

    def tearDown(self):
        """Restore every attribute patched in setUp (each exactly once)."""
        super(GuestAgentDB2ManagerTest, self).tearDown()
        db2_service.DB2AppStatus.set_status = self.real_db_app_status
        volume.VolumeDevice.format = self.origin_format
        volume.VolumeDevice.mount = self.origin_mount
        volume.VolumeDevice.mount_points = self.origin_mount_points
        db2_service.DB2App.stop_db = self.origin_stop_db
        db2_service.DB2App.start_db = self.origin_start_db
        db2_service.DB2App.change_ownership = self.orig_change_ownership
        db2_service.DB2Admin.create_database = self.orig_create_databases
        db2_service.DB2Admin.list_databases = self.orig_list_databases
        db2_service.DB2Admin.delete_database = self.orig_delete_database
        db2_service.DB2Admin.create_user = self.orig_create_users
        db2_service.DB2Admin.list_users = self.orig_list_users
        db2_service.DB2Admin.delete_user = self.orig_delete_user
        db2_service.DB2App.update_hostname = self.orig_update_hostname
        backup.restore = self.orig_backup_restore
        db2_service.DB2App.init_config = self.orig_init_config
        db2_service.DB2App.update_overrides = self.orig_update_overrides
        db2_service.DB2App.remove_overrides = self.orig_remove_overrides

    def test_update_status(self):
        """update_status should delegate to the app status object."""
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        self.manager.update_status(self.context)
        mock_status.update.assert_any_call()

    def test_prepare_device_path_true(self):
        """prepare with a block device attached."""
        self._prepare_dynamic()

    def test_prepare_device_path_false(self):
        """prepare without a block device."""
        self._prepare_dynamic(device_path=None)

    def test_prepare_database(self):
        """prepare should create the requested databases."""
        self._prepare_dynamic(databases=['db1'])

    def test_prepare_from_backup(self):
        """prepare should restore from backup when a backup id is given."""
        self._prepare_dynamic(['db2'], backup_id='123backup')

    @patch.object(configuration.ConfigurationManager, 'save_configuration')
    def _prepare_dynamic(self, packages=None, databases=None, users=None,
                         config_content='MockContent', device_path='/dev/vdb',
                         is_db_installed=True, backup_id=None, overrides=None):
        """Drive manager.prepare() and assert the expected side effects.

        Mocks all volume, service and backup entry points, then verifies
        database/user creation and backup restoration occur only when the
        corresponding inputs are provided.
        """
        backup_info = {'id': backup_id,
                       'location': 'fake-location',
                       'type': 'DB2Backup',
                       'checksum': 'fake-checksum'} if backup_id else None
        mock_status = MagicMock()
        mock_app = MagicMock()
        self.manager.appStatus = mock_status
        self.manager.app = mock_app
        mock_status.begin_install = MagicMock(return_value=None)
        mock_app.change_ownership = MagicMock(return_value=None)
        mock_app.restart = MagicMock(return_value=None)
        mock_app.start_db = MagicMock(return_value=None)
        mock_app.stop_db = MagicMock(return_value=None)
        volume.VolumeDevice.format = MagicMock(return_value=None)
        volume.VolumeDevice.mount = MagicMock(return_value=None)
        volume.VolumeDevice.mount_points = MagicMock(return_value=[])
        db2_service.DB2Admin.create_user = MagicMock(return_value=None)
        db2_service.DB2Admin.create_database = MagicMock(return_value=None)
        backup.restore = MagicMock(return_value=None)
        with patch.object(pkg.Package, 'pkg_is_installed',
                          return_value=MagicMock(
                              return_value=is_db_installed)):
            self.manager.prepare(context=self.context, packages=packages,
                                 config_contents=config_content,
                                 databases=databases,
                                 memory_mb='2048', users=users,
                                 device_path=device_path,
                                 mount_point="/home/db2inst1/db2inst1",
                                 backup_info=backup_info,
                                 overrides=None,
                                 cluster_config=None)
        mock_status.begin_install.assert_any_call()
        self.assertEqual(1, mock_app.change_ownership.call_count)
        if databases:
            self.assertTrue(db2_service.DB2Admin.create_database.called)
        else:
            self.assertFalse(db2_service.DB2Admin.create_database.called)
        if users:
            self.assertTrue(db2_service.DB2Admin.create_user.called)
        else:
            self.assertFalse(db2_service.DB2Admin.create_user.called)
        if backup_id:
            backup.restore.assert_any_call(self.context,
                                           backup_info,
                                           '/home/db2inst1/db2inst1')
        self.assertTrue(
            self.manager.configuration_manager.save_configuration.called
        )

    def test_restart(self):
        """restart should delegate to DB2App.restart."""
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        with patch.object(db2_service.DB2App, 'restart',
                          return_value=None) as restart_mock:
            # invocation
            self.manager.restart(self.context)
            # verification/assertion
            restart_mock.assert_any_call()

    def test_stop_db(self):
        """stop_db should delegate with do_not_start_on_reboot=False."""
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        db2_service.DB2App.stop_db = MagicMock(return_value=None)
        self.manager.stop_db(self.context)
        db2_service.DB2App.stop_db.assert_any_call(
            do_not_start_on_reboot=False)

    def test_start_db_with_conf_changes(self):
        """start_db_with_conf_changes should forward the config contents."""
        with patch.object(db2_service.DB2App, 'start_db_with_conf_changes'):
            self.manager.start_db_with_conf_changes(self.context, 'something')
            db2_service.DB2App.start_db_with_conf_changes.assert_any_call(
                'something')

    def test_create_database(self):
        """create_database should delegate to DB2Admin."""
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        db2_service.DB2Admin.create_database = MagicMock(return_value=None)
        self.manager.create_database(self.context, ['db1'])
        db2_service.DB2Admin.create_database.assert_any_call(['db1'])

    def test_create_user(self):
        """create_user should delegate to DB2Admin."""
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        db2_service.DB2Admin.create_user = MagicMock(return_value=None)
        self.manager.create_user(self.context, ['user1'])
        db2_service.DB2Admin.create_user.assert_any_call(['user1'])

    def test_delete_database(self):
        """delete_database should delegate to DB2Admin."""
        databases = ['db1']
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        db2_service.DB2Admin.delete_database = MagicMock(return_value=None)
        self.manager.delete_database(self.context, databases)
        db2_service.DB2Admin.delete_database.assert_any_call(databases)

    def test_delete_user(self):
        """delete_user should delegate to DB2Admin."""
        user = ['user1']
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        db2_service.DB2Admin.delete_user = MagicMock(return_value=None)
        self.manager.delete_user(self.context, user)
        db2_service.DB2Admin.delete_user.assert_any_call(user)

    def test_list_databases(self):
        """list_databases should return DB2Admin's result unchanged."""
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        db2_service.DB2Admin.list_databases = MagicMock(
            return_value=['database1'])
        databases = self.manager.list_databases(self.context)
        self.assertThat(databases, Not(Is(None)))
        self.assertThat(databases, Equals(['database1']))
        db2_service.DB2Admin.list_databases.assert_any_call(None, None, False)

    def test_list_users(self):
        """list_users should return DB2Admin's result unchanged."""
        db2_service.DB2Admin.list_users = MagicMock(return_value=['user1'])
        users = self.manager.list_users(self.context)
        self.assertThat(users, Equals(['user1']))
        db2_service.DB2Admin.list_users.assert_any_call(None, None, False)

    @patch.object(db2_service.DB2Admin, 'get_user',
                  return_value=MagicMock(return_value=['user1']))
    def test_get_users(self, get_user_mock):
        """get_user should forward username and hostname to DB2Admin."""
        username = ['user1']
        hostname = ['host']
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        users = self.manager.get_user(self.context, username, hostname)
        self.assertThat(users, Equals(get_user_mock.return_value))
        get_user_mock.assert_any_call(username, hostname)

    def test_rpc_ping(self):
        """rpc_ping should report the agent as alive."""
        output = self.manager.rpc_ping(self.context)
        self.assertTrue(output)

    def test_update_update_overrides(self):
        """A non-reset update should apply the given overrides."""
        configuration = {"DIAGSIZE": 50}
        db2_service.DB2App.update_overrides = MagicMock()
        self.manager.update_overrides(self.context, configuration, False)
        db2_service.DB2App.update_overrides.assert_any_call(self.context,
                                                            configuration)

    def test_reset_update_overrides(self):
        """A reset update should remove all overrides."""
        configuration = {}
        db2_service.DB2App.remove_overrides = MagicMock()
        self.manager.update_overrides(self.context, configuration, True)
        db2_service.DB2App.remove_overrides.assert_any_call()
| {
"content_hash": "90c26ca3a115a33e4f7d67e814531a96",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 79,
"avg_line_length": 46.18359375,
"alnum_prop": 0.6545715977332318,
"repo_name": "zhangg/trove",
"id": "e55c43162141bc7e32c83a3911a65f1762249243",
"size": "12393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trove/tests/unittests/guestagent/test_db2_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4546016"
},
{
"name": "Shell",
"bytes": "145524"
}
],
"symlink_target": ""
} |
import unittest
import json
from django.test import Client
from rest_framework import status
class SampleViewTest(unittest.TestCase):
    """Smoke test for the swagger spec endpoint of the VIO multivim driver."""

    def setUp(self):
        # Django test client; exercises the URL conf without a live server.
        self.client = Client()

    def tearDown(self):
        pass

    def test_sample(self):
        """GET on swagger.json should return HTTP 200."""
        response = self.client.get("/openoapi/multivim-vio/v1/swagger.json")
        self.assertEqual(status.HTTP_200_OK, response.status_code, response.content)
        # resp_data = json.loads(response.content)
        # self.assertEqual({"status": "active"}, resp_data)
| {
"content_hash": "5c2728365021832e62234a14ec7a4b70",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 84,
"avg_line_length": 29.166666666666668,
"alnum_prop": 0.6876190476190476,
"repo_name": "johnsonlau/multivimdriver-vmware-vio",
"id": "04b42367c154e27b97c1bb285f98c5d21ed31d3e",
"size": "1006",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vio/vio/swagger/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "145731"
},
{
"name": "Shell",
"bytes": "1658"
}
],
"symlink_target": ""
} |
import os
import logging
from cliff.app import App
from cliff.commandmanager import CommandManager
from cliff.command import Command
from cliff.show import ShowOne
from cliff.lister import Lister
from db_manage import DbManage
class ListItems(Lister):
    """Cliff Lister command that prints every pomodoro entry in the database
    as a table."""

    log = logging.getLogger(__name__)

    def take_action(self, parsed_args):
        """Return the (column headers, rows) pair that cliff renders.

        :param parsed_args: parsed argparse namespace (not used here).
        """
        # Connect on every invocation; DbManage owns the connection details.
        self.db = DbManage()
        self.db.connect_db()
        # First column is intentionally unnamed (row index/id).
        columns = ('',
                   'Task Title',
                   'Start Time',
                   'Duration',
                   'Short Break',
                   'Long Break',
                   'Cycles',
                   'Status',
                   'Sound Status',
                   )
        data = self.db.retrieve_all_entries()
        return (columns, data)
| {
"content_hash": "15738007513e24620e7d3db627b5cf7c",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 75,
"avg_line_length": 32.20634920634921,
"alnum_prop": 0.48053228191227204,
"repo_name": "Migwi-Ndungu/bc-9-Pomodoro-Timer",
"id": "20b247fc237680317ba75998b1a05d25c40cb717",
"size": "2029",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "List_display.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22713"
}
],
"symlink_target": ""
} |
from django.db import models
"""
Representa las opciones del motivo de otorgamiento de una normativa
"""
class NormativaMotivoOtorgamiento(models.Model):
    """Represents the available options for the granting reason
    ("motivo de otorgamiento") of a regulation (normativa)."""

    nombre = models.CharField(max_length = 50, unique = True)

    class Meta:
        app_label = 'postitulos'
        ordering = ['nombre']
        db_table = 'postitulos_normativa_motivo_otorgamiento'

    def __unicode__(self):
        # Python 2 style string representation: the option name.
        return self.nombre
| {
"content_hash": "1c44d51a9811ef460b0b1662f19222bf",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 67,
"avg_line_length": 25.9375,
"alnum_prop": 0.6795180722891566,
"repo_name": "MERegistro/meregistro",
"id": "f49df340111a5fc74cf3834c9191a7e3664e5144",
"size": "439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "meregistro/apps/postitulos/models/NormativaMotivoOtorgamiento.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "79500"
},
{
"name": "HTML",
"bytes": "782188"
},
{
"name": "JavaScript",
"bytes": "106755"
},
{
"name": "PLpgSQL",
"bytes": "515442"
},
{
"name": "Python",
"bytes": "7190737"
},
{
"name": "Shell",
"bytes": "804"
}
],
"symlink_target": ""
} |
import random
import string
import six
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
class RandomString(resource.Resource):
    """A resource which generates a random string.

    This is useful for configuring passwords and secrets on services.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        LENGTH, SEQUENCE, CHARACTER_CLASSES, CHARACTER_SEQUENCES,
        SALT,
    ) = (
        'length', 'sequence', 'character_classes', 'character_sequences',
        'salt',
    )

    _CHARACTER_CLASSES_KEYS = (
        CHARACTER_CLASSES_CLASS, CHARACTER_CLASSES_MIN,
    ) = (
        'class', 'min',
    )

    _CHARACTER_SEQUENCES = (
        CHARACTER_SEQUENCES_SEQUENCE, CHARACTER_SEQUENCES_MIN,
    ) = (
        'sequence', 'min',
    )

    ATTRIBUTES = (
        VALUE,
    ) = (
        'value',
    )

    properties_schema = {
        LENGTH: properties.Schema(
            properties.Schema.INTEGER,
            _('Length of the string to generate.'),
            default=32,
            constraints=[
                constraints.Range(1, 512),
            ]
        ),
        SEQUENCE: properties.Schema(
            properties.Schema.STRING,
            _('Sequence of characters to build the random string from.'),
            constraints=[
                constraints.AllowedValues(['lettersdigits', 'letters',
                                           'lowercase', 'uppercase',
                                           'digits', 'hexdigits',
                                           'octdigits']),
            ],
            support_status=support.SupportStatus(
                status=support.HIDDEN,
                version='5.0.0',
                previous_status=support.SupportStatus(
                    status=support.DEPRECATED,
                    message=_('Use property %s.') % CHARACTER_CLASSES,
                    version='2014.2'
                )
            )
        ),
        CHARACTER_CLASSES: properties.Schema(
            properties.Schema.LIST,
            _('A list of character class and their constraints to generate '
              'the random string from.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    CHARACTER_CLASSES_CLASS: properties.Schema(
                        properties.Schema.STRING,
                        (_('A character class and its corresponding %(min)s '
                           'constraint to generate the random string from.')
                         % {'min': CHARACTER_CLASSES_MIN}),
                        constraints=[
                            constraints.AllowedValues(
                                ['lettersdigits', 'letters', 'lowercase',
                                 'uppercase', 'digits', 'hexdigits',
                                 'octdigits']),
                        ],
                        default='lettersdigits'),
                    CHARACTER_CLASSES_MIN: properties.Schema(
                        properties.Schema.INTEGER,
                        _('The minimum number of characters from this '
                          'character class that will be in the generated '
                          'string.'),
                        default=1,
                        constraints=[
                            constraints.Range(1, 512),
                        ]
                    )
                }
            )
        ),
        CHARACTER_SEQUENCES: properties.Schema(
            properties.Schema.LIST,
            _('A list of character sequences and their constraints to '
              'generate the random string from.'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    CHARACTER_SEQUENCES_SEQUENCE: properties.Schema(
                        properties.Schema.STRING,
                        _('A character sequence and its corresponding %(min)s '
                          'constraint to generate the random string '
                          'from.') % {'min': CHARACTER_SEQUENCES_MIN},
                        required=True),
                    CHARACTER_SEQUENCES_MIN: properties.Schema(
                        properties.Schema.INTEGER,
                        _('The minimum number of characters from this '
                          'sequence that will be in the generated '
                          'string.'),
                        default=1,
                        constraints=[
                            constraints.Range(1, 512),
                        ]
                    )
                }
            )
        ),
        SALT: properties.Schema(
            properties.Schema.STRING,
            _('Value which can be set or changed on stack update to trigger '
              'the resource for replacement with a new random string . The '
              'salt value itself is ignored by the random generator.')
        ),
    }

    attributes_schema = {
        VALUE: attributes.Schema(
            _('The random string generated by this resource. This value is '
              'also available by referencing the resource.'),
            cache_mode=attributes.Schema.CACHE_NONE,
            type=attributes.Schema.STRING
        ),
    }

    # Character pools addressable by the 'class' / deprecated 'sequence'
    # property values.
    _sequences = {
        'lettersdigits': string.ascii_letters + string.digits,
        'letters': string.ascii_letters,
        'lowercase': string.ascii_lowercase,
        'uppercase': string.ascii_uppercase,
        'digits': string.digits,
        'hexdigits': string.digits + 'ABCDEF',
        'octdigits': string.octdigits
    }

    def translation_rules(self):
        """Translate the deprecated 'sequence' property to an equivalent
        'character_classes' entry so old templates keep working."""
        if self.properties.get(self.SEQUENCE):
            return [
                properties.TranslationRule(
                    self.properties,
                    properties.TranslationRule.ADD,
                    [self.CHARACTER_CLASSES],
                    [{self.CHARACTER_CLASSES_CLASS: self.properties.get(
                        self.SEQUENCE),
                      self.CHARACTER_CLASSES_MIN: 1}]),
                properties.TranslationRule(
                    self.properties,
                    properties.TranslationRule.DELETE,
                    [self.SEQUENCE]
                )
            ]

    @staticmethod
    def _deprecated_random_string(sequence, length):
        """Generate a string of `length` chars drawn from `sequence`.

        Retained for templates that still use the deprecated 'sequence'
        property.
        """
        rand = random.SystemRandom()
        return ''.join(rand.choice(sequence) for x in six.moves.xrange(length))

    def _generate_random_string(self, char_sequences, char_classes, length):
        """Build a random string honoring the per-sequence and per-class
        minimum counts, then pad to `length` and shuffle.

        :param char_sequences: list of {'sequence': str, 'min': int} dicts
        :param char_classes: list of {'class': str, 'min': int} dicts
        :param length: total length of the resulting string
        """
        # The generated strings are used for passwords and secrets, so draw
        # from the OS CSPRNG (SystemRandom) rather than the seeded,
        # predictable module-level Mersenne Twister functions.
        rand = random.SystemRandom()
        random_string = ""
        # Add the minimum number of chars from each char sequence & char class
        if char_sequences:
            for char_seq in char_sequences:
                seq = char_seq[self.CHARACTER_SEQUENCES_SEQUENCE]
                seq_min = char_seq[self.CHARACTER_SEQUENCES_MIN]
                for i in six.moves.xrange(seq_min):
                    random_string += rand.choice(seq)

        if char_classes:
            for char_class in char_classes:
                cclass_class = char_class[self.CHARACTER_CLASSES_CLASS]
                cclass_seq = self._sequences[cclass_class]
                cclass_min = char_class[self.CHARACTER_CLASSES_MIN]
                for i in six.moves.xrange(cclass_min):
                    random_string += rand.choice(cclass_seq)

        def random_class_char():
            cclass_dict = rand.choice(char_classes)
            cclass_class = cclass_dict[self.CHARACTER_CLASSES_CLASS]
            cclass_seq = self._sequences[cclass_class]
            return rand.choice(cclass_seq)

        def random_seq_char():
            seq_dict = rand.choice(char_sequences)
            seq = seq_dict[self.CHARACTER_SEQUENCES_SEQUENCE]
            return rand.choice(seq)

        # Fill up rest with random chars from provided sequences & classes
        if char_sequences and char_classes:
            # Weight the source choice by the number of entries on each side.
            weighted_choices = ([True] * len(char_classes) +
                                [False] * len(char_sequences))
            while len(random_string) < length:
                if rand.choice(weighted_choices):
                    random_string += random_class_char()
                else:
                    random_string += random_seq_char()
        elif char_sequences:
            while len(random_string) < length:
                random_string += random_seq_char()
        else:
            while len(random_string) < length:
                random_string += random_class_char()

        # Randomize string so the guaranteed-minimum chars are not
        # clustered at the front.
        random_string = ''.join(rand.sample(random_string,
                                            len(random_string)))
        return random_string

    def validate(self):
        """Reject mixing the deprecated 'sequence' with the new properties,
        and lengths smaller than the combined per-class/sequence minimums."""
        super(RandomString, self).validate()
        sequence = self.properties[self.SEQUENCE]
        char_sequences = self.properties[self.CHARACTER_SEQUENCES]
        char_classes = self.properties[self.CHARACTER_CLASSES]
        if sequence and (char_sequences or char_classes):
            msg = (_("Cannot use deprecated '%(seq)s' property along with "
                     "'%(char_seqs)s' or '%(char_classes)s' properties")
                   % {'seq': self.SEQUENCE,
                      'char_seqs': self.CHARACTER_SEQUENCES,
                      'char_classes': self.CHARACTER_CLASSES})
            raise exception.StackValidationFailed(message=msg)

        def char_min(char_dicts, min_prop):
            if char_dicts:
                return sum(char_dict[min_prop] for char_dict in char_dicts)
            return 0

        length = self.properties[self.LENGTH]
        min_length = (char_min(char_sequences, self.CHARACTER_SEQUENCES_MIN) +
                      char_min(char_classes, self.CHARACTER_CLASSES_MIN))
        if min_length > length:
            msg = _("Length property cannot be smaller than combined "
                    "character class and character sequence minimums")
            raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        """Generate the string and store it redacted in resource data."""
        char_sequences = self.properties[self.CHARACTER_SEQUENCES]
        char_classes = self.properties[self.CHARACTER_CLASSES]
        length = self.properties[self.LENGTH]

        if char_sequences or char_classes:
            random_string = self._generate_random_string(char_sequences,
                                                         char_classes,
                                                         length)
        else:
            sequence = self.properties[self.SEQUENCE]
            if not sequence:  # Deprecated property not provided, use a default
                sequence = "lettersdigits"
            char_seq = self._sequences[sequence]
            random_string = self._deprecated_random_string(char_seq, length)

        self.data_set('value', random_string, redact=True)
        self.resource_id_set(self.physical_resource_name())

    def _resolve_attribute(self, name):
        """Expose the generated string as the 'value' attribute."""
        if name == self.VALUE:
            return self.data().get(self.VALUE)

    def get_reference_id(self):
        """Referencing the resource yields the generated string once created,
        otherwise the resource name."""
        if self.resource_id is not None:
            return self.data().get('value')
        else:
            return six.text_type(self.name)
def resource_mapping():
    """Map Heat template resource type names to implementing classes."""
    return {
        'OS::Heat::RandomString': RandomString,
    }
| {
"content_hash": "12ad45af6a2750ebdff4fd6b4705372d",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 79,
"avg_line_length": 38.51677852348993,
"alnum_prop": 0.52709531277226,
"repo_name": "maestro-hybrid-cloud/heat",
"id": "00d111b8425416813f7eba747d52c63c3ba6faf8",
"size": "12053",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "heat/engine/resources/openstack/heat/random_string.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6954236"
},
{
"name": "Shell",
"bytes": "33503"
}
],
"symlink_target": ""
} |
class Pidata(object):
    """Wraps a Watson Personality Insights response and exposes the Big 5
    trait values as formatted percentage strings."""

    def __init__(self, insights):
        """:param insights: raw Personality Insights response dict, or None."""
        self.insights = insights
        self.big5 = {}          # trait name -> formatted percentage string
        self.processed = False  # set by processData() after a successful parse

    def floored_percentage(self, val, digits=2):
        """Format a 0..1 fraction as a percentage string.

        A number comes in as e.g. 0.45678912 and will be converted
        1st to 0.4568 (rounded to 2 + digits places),
        then to 45.68,
        finally to a string.

        :param val: fraction, typically in [0, 1]
        :param digits: decimal places in the result (was previously ignored
                       by a hard-coded '%.2f' format)
        """
        val = round(val, 2 + digits)
        val *= 100
        # '*' lets the precision come from the digits argument.
        return '%.*f' % (digits, val)

    def processData(self):
        """Populate self.big5 from the nested insights tree (idempotent)."""
        if self.insights:
            if "tree" in self.insights:
                # assumes the Watson tree layout:
                # tree -> children[0] -> children[0] -> children == Big 5 list
                coredata = self.insights["tree"]["children"][0]
                big5data = coredata['children'][0]['children']
                for trait in big5data:
                    self.big5[trait["name"]] = self.floored_percentage(
                        trait["percentage"])
                self.processed = True

    def getData(self, trait, withsymbol):
        """Return the formatted value for *trait*, or a not-found message.

        :param trait: Big 5 trait name, e.g. 'Openness'
        :param withsymbol: if True, append '%' to a found value
        """
        if trait and trait in self.big5:
            response = self.big5[trait]
            # Append the symbol only to an actual value; the old code also
            # tacked '%' onto the "No value found" message.
            if withsymbol:
                response += '%'
        else:
            response = "No value found for %s " % trait
        return response

    def getTraitValue(self, trait, withsymbol=True):
        """Look up a trait, lazily parsing the insights on first access."""
        summaryTxt = "No value found for %s " % trait
        if self.insights:
            if not self.processed:
                self.processData()
            summaryTxt = self.getData(trait, withsymbol)
        return summaryTxt

    def getOpennessValue(self):
        """Openness score without the '%' symbol."""
        return self.getTraitValue("Openness", False)

    def getConscientiousnessValue(self):
        """Conscientiousness score without the '%' symbol."""
        return self.getTraitValue("Conscientiousness", False)

    def getExtraversionValue(self):
        """Extraversion score without the '%' symbol."""
        return self.getTraitValue("Extraversion", False)

    def getAgreeablenessValue(self):
        """Agreeableness score without the '%' symbol."""
        return self.getTraitValue("Agreeableness", False)

    def getEmotionalrangeValue(self):
        """Emotional range score without the '%' symbol."""
        return self.getTraitValue("Emotional range", False)

    def big5keys(self):
        """The five trait names in canonical order."""
        return ['Openness', 'Conscientiousness', 'Extraversion',
                'Agreeableness', 'Emotional range']
| {
"content_hash": "9d97e9014767f8be297fbb2aded366fd",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 99,
"avg_line_length": 30.810344827586206,
"alnum_prop": 0.6491326245103526,
"repo_name": "chughts/watson-betaworks-python-sample",
"id": "6fd567c2a4ba33b6b2ece2c993edb82e63ade7a8",
"size": "2357",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Watson/watsonutils/pinsights.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7306"
},
{
"name": "HTML",
"bytes": "15770"
},
{
"name": "JavaScript",
"bytes": "34098"
},
{
"name": "Python",
"bytes": "56311"
},
{
"name": "Shell",
"bytes": "1187"
}
],
"symlink_target": ""
} |
from .Nodes import h, Leaf, Branch
class Tree:
def __init__(self, store=None, root_hash=None):
"""Initialize a Merkle tree from a store and a root hash.
Example:
>>> from hippiehug import Tree
>>> t = Tree()
>>> t.add(b"Hello")
>>> b"Hello" in t
True
>>> b"World" not in t
True
"""
self.store = store if store is not None else {}
self.root_hash = root_hash
def root(self):
"""Return the root of the Tree.
Keep this value safe, and the integrity of the set is guaranteed.
"""
return self.root_hash
def add(self, item, key=None):
"""Add and element to the Merkle tree."""
item_key = h(item)
if key is None:
key = item
if self.root_hash == None:
l = Leaf(item_key, key)
self.store[l.identity()] = l
self.root_hash = l.identity()
else:
head_element = self.store[self.root_hash]
new_head_elem = head_element.add(self.store, item_key, key)
self.root_hash = new_head_elem.identity()
def multi_add(self, items, keys=None):
"""Add many elements to the Merkle tree.
This is more efficient than adding individual elements.
:param items: Items to add
:param keys:
Example:
>>> t = Tree()
>>> t.multi_add([b"Hello", b"World"])
>>> assert b"Hello" in t and b"World" in t
"""
item_keys = [h(i) for i in items]
if keys is None:
keys = items
if self.root_hash == None:
l = Leaf(item_keys[0], keys[0])
self.store[l.identity()] = l
b = l.multi_add(self.store, item_keys[1:], keys[1:])
self.root_hash = b.identity()
else:
head_element = self.store[self.root_hash]
new_head_elem = head_element.multi_add(self.store, item_keys, keys)
self.root_hash = new_head_elem.identity()
def is_in(self, item, key=None):
"""Checks whether an element is in the Merkle Tree.
:param item: Item to check
:param key: If not None, hash of the item is used as a lookup key
"""
if self.root_hash == None:
return False
if key is None:
key = item
item_key = h(item)
head_element = self.store[self.root_hash]
return head_element.is_in(self.store, item_key, key)
def multi_is_in(self, items, keys=None, evidence=False):
"""Check whether the items are in the Tree.
:param items: Items to check
:param keys: If not None, hashes of items are used as lookup keys
:param evidence: Return the current root_hash of the Tree and a list
of Branches and Leafs as evidence.
Example lookup:
>>> t = Tree()
>>> t.multi_add([b"Hello", b"World"])
>>> t.multi_is_in([b"Hello", b"World", b"!"])
[True, True, False]
Example gathering of evidence:
>>> _, root_hash, bag = t.multi_is_in([b"Hello", b"World", b"!"], evidence=True)
>>> new_store = dict((e.identity(), e) for e in bag)
>>> new_t = Tree(new_store, root_hash)
>>> new_t.multi_is_in([b"Hello", b"World", b"!"])
[True, True, False]
Example using key-values:
>>> t = Tree()
>>> t.add(key=b"K1", item=b"V1")
>>> t.add(key=b"K2", item=b"V2")
>>> t.is_in(key=b"K2", item=b"V2")
True
>>> t.is_in(key=b"K1", item=b"V1")
True
>>> t.multi_is_in(keys=[b"K2", b"K1", b"K2", b"!"], items=[b"V2", b"V1", b"!", b"V2"])
[True, True, False, False]
"""
# >>> t.multi_add(keys=[b"K1", b"K2"], items=[b"V1", b"V2"])
if keys is None:
keys = items
if self.root_hash == None:
if not evidence:
return [ False ] * len(items)
else:
return [ False ] * len(items), None, []
item_keys = [ h(i) for i in items ]
head_element = self.store[self.root_hash]
evid = [] if evidence else None
solution = {}
head_element.multi_is_in_fast( self.store, evid, items=item_keys, keys=keys, solution=solution)
if not evidence:
return [solution[(i, k)] for i, k in zip(item_keys, keys)]
else:
return [solution[(i, k)] for i, k in zip(item_keys, keys)], self.root_hash, evid
def __contains__(self, item):
    # Support the ``in`` operator by delegating to is_in().
    return self.is_in(item)
def evidence(self, key=None):
    """Gather evidence about the inclusion / exclusion of the *key*.

    The evidence includes all Branches and Leafs necessary to prove the
    *key* is, or is not, in the Merkle Tree. They are ordered from the
    root to the Leaf that either contains the sought *key*, or not.

    Example:

        >>> t = Tree()
        >>> t.add(b"Hello")
        >>> t.add(b"World")
        >>> root, E = t.evidence(b"World")
        >>> evidence_store = dict((e.identity(), e) for e in E)
        >>> t2 = Tree(evidence_store, root)
        >>> b"World" in t2
        True
    """
    # NOTE(review): an empty tree returns a bare list here, while the
    # normal path returns a (root_hash, evidence) tuple -- callers that
    # unpack two values will fail on an empty tree. Preserved as-is for
    # backward compatibility.
    if self.root_hash is None:
        return []
    head_element = self.store[self.root_hash]
    return self.root_hash, head_element.evidence(self.store, [], key)
| {
"content_hash": "5dd891a007900e9063bdb2bddaad7955",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 103,
"avg_line_length": 31.908571428571427,
"alnum_prop": 0.5109240687679083,
"repo_name": "gdanezis/rousseau-chain",
"id": "680dd44a6fac0453dbe45ae429bf9a91067595eb",
"size": "5584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hippiehug-package/hippiehug/Tree.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7250"
},
{
"name": "Makefile",
"bytes": "7421"
},
{
"name": "Python",
"bytes": "137103"
},
{
"name": "TLA",
"bytes": "20157"
}
],
"symlink_target": ""
} |
"""Keystone External Authentication Plugins."""
import abc
import six
from keystone.auth.plugins import base
from keystone.common import dependency
import keystone.conf
from keystone import exception
from keystone.i18n import _
CONF = keystone.conf.CONF
@six.add_metaclass(abc.ABCMeta)
class Base(base.AuthMethodHandler):
    def authenticate(self, request, auth_info, auth_context):
        """Use REMOTE_USER to look up the user in the identity backend.

        auth_context is an in-out variable that will be updated with the
        user_id from the actual user from the REMOTE_USER env variable.
        """
        remote_user = request.remote_user
        if not remote_user:
            raise exception.Unauthorized(_('No authenticated user'))

        try:
            user_ref = self._authenticate(request)
        except Exception:
            raise exception.Unauthorized(
                _('Unable to lookup user %s') % remote_user)

        auth_context['user_id'] = user_ref['id']

        # Record a kerberos token bind when the deployment requests it and
        # the request was authenticated via SPNEGO (Negotiate).
        auth_type = (request.auth_type or '').lower()
        if 'kerberos' in CONF.token.bind and auth_type == 'negotiate':
            auth_context['bind']['kerberos'] = user_ref['name']

    @abc.abstractmethod
    def _authenticate(self, request):
        """Look up the user in the identity backend.

        Return user_ref
        """
        pass
@dependency.requires('identity_api')
class DefaultDomain(Base):
    def _authenticate(self, request):
        """Use remote_user to look up the user in the identity backend."""
        # External users are always resolved against the default domain.
        domain_id = CONF.identity.default_domain_id
        return self.identity_api.get_user_by_name(
            request.remote_user, domain_id)
@dependency.requires('identity_api', 'resource_api')
class Domain(Base):
    def _authenticate(self, request):
        """Use remote_user to look up the user in the identity backend.

        The domain will be extracted from the REMOTE_DOMAIN environment
        variable if present. If not, the default domain will be used.
        """
        remote_domain = request.remote_domain
        if not remote_domain:
            domain_id = CONF.identity.default_domain_id
        else:
            domain_id = self.resource_api.get_domain_by_name(
                remote_domain)['id']
        return self.identity_api.get_user_by_name(request.remote_user,
                                                  domain_id)
class KerberosDomain(Domain):
    """Allows `kerberos` as a method."""

    def _authenticate(self, request):
        # NOTE(review): this check is case-sensitive ('Negotiate' exactly),
        # whereas Base.authenticate lower-cases auth_type before comparing.
        # Confirm the asymmetry is intentional.
        if request.auth_type != 'Negotiate':
            raise exception.Unauthorized(_("auth_type is not Negotiate"))
        # Domain resolution itself is unchanged from the parent class.
        return super(KerberosDomain, self)._authenticate(request)
| {
"content_hash": "e9ff0dc25bc3d7ab0256af0bcfa44615",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 77,
"avg_line_length": 31.987951807228917,
"alnum_prop": 0.6369114877589453,
"repo_name": "cernops/keystone",
"id": "ee0e95b979858c6efc3394c6c1895c3909663de9",
"size": "3241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keystone/auth/plugins/external.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "665"
},
{
"name": "Python",
"bytes": "4691908"
}
],
"symlink_target": ""
} |
class FuliSpidersPipeline(object):
    """Default Scrapy item pipeline; currently a pass-through no-op."""

    def process_item(self, item, spider):
        # Return the item unchanged so downstream pipelines/exporters see it.
        return item
| {
"content_hash": "16e1ed3e46d4402e87292f13f40ec48b",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 41,
"avg_line_length": 32.333333333333336,
"alnum_prop": 0.7010309278350515,
"repo_name": "moonlet/fuli",
"id": "1aeca1c939a1df94ce503176f94f1df310a4ef71",
"size": "291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/fuli_spiders/fuli_spiders/pipelines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "573"
},
{
"name": "HTML",
"bytes": "2453"
},
{
"name": "Python",
"bytes": "28118"
},
{
"name": "Shell",
"bytes": "366"
}
],
"symlink_target": ""
} |
import rospy
from specification import Specification
from rated_statistics import RatedStatisticsContainer
from arni_core.helper import *
import arni_msgs
from arni_msgs.msg import RatedStatistics, RatedStatisticsEntity
#from arni_gui.helper_functions import prepare_number_for_representation
class SpecificationHandler:
    """
    Loads the specifications from the parameter server, stores and compares them to the actual metadata.
    """

    # Parameter-server namespace the specifications are read from.
    __namespace = '/arni/specifications'

    def __load_specifications(self):
        """
        Loads Specifications from the configurations and stores them in the
        internal storage.
        """
        try:
            params = rospy.get_param(self.__namespace)
            # Parameters may arrive either as a dict of lists or as a flat
            # list; normalise both shapes into one list of spec dicts.
            if isinstance(params, dict):
                specifications = []
                for x in params.values():
                    for y in x:
                        specifications.append(y)
            else:
                specifications = params
            for o in specifications:
                for seuid in o.keys():
                    if SEUID().is_valid(seuid):
                        spec = Specification()
                        spec.seuid = seuid
                        for k in o[seuid].keys():
                            spec.add_tuple((k, o[seuid][k]))
                        self.__specifications[seuid] = spec
                    else:
                        rospy.logdebug("[SpecificationHandler][__load_specifications] %s is not a valid seuid." % seuid)
        except KeyError:
            # No specifications registered under the namespace; keep storage empty.
            pass
        rospy.loginfo("[SpecificationHandler] Loaded %s parameters." % str(len(self.__specifications.keys())))

    def loaded_specifications(self):
        """
        Returns a list containing all seuids of loaded specifications.

        :return: A list of strings.
        """
        return self.__specifications.keys()

    def get(self, identifier):
        """
        Returns the Specification object from the internal storage.

        :param identifier: The seuid describing the desired Specification object.
        :type identifier: str
        :return: The Specification object with the given identifier, None if it was not found.
        """
        if identifier in self.__specifications.keys():
            return self.__specifications[identifier]
        # Connections ("c...") fall back to the specification of their topic.
        if identifier[0] == "c":
            if SEUID(identifier).topic in self.__specifications.keys():
                return self.__specifications[SEUID(identifier).topic]
        return None

    def compare(self, data, identifier, specification=None):
        """
        Compares a Message object with a Specification object regarding all available matching fields.

        :param data: The actual data.
        :type data: object.
        :param identifier: The identifier of the metadata package.
        :type identifier: str
        :param specification: The Specification object, alternatively a string identifying it.
        :type specification: Specification or str.
        :returns: A RatedStatisticsContainer object representing the result.
        """
        if identifier is None:
            rospy.logdebug("[SpecificationHandler][compare] No identifier given.")
            return None
        if not SEUID().is_valid(identifier):
            rospy.logdebug("[SpecificationHandler][compare] Given identifier is invalid.")
            return None
        if data is None:
            rospy.logdebug("[SpecificationHandler][compare] No data given.")
            return None
        result = RatedStatisticsContainer(identifier)
        if identifier[0] == "n":
            result.host = data.host
        if specification is None:
            specification = self.get(identifier)
        # Guard against a zero-length window to avoid division by zero below.
        window_len = data.window_stop - data.window_start
        if window_len.to_sec() == 0:
            window_len = rospy.Duration(1)
        # Rate every public attribute of the message, except the raw counters
        # which are replaced by derived rates for connections.
        fields = dir(data)
        exclude = ("delivered_msgs", "traffic")
        for x in exclude:
            if x in fields:
                fields.remove(x)
        if identifier[0] == "c":
            fields.append("bandwidth")
            fields.append("frequency")
        for field in fields:
            value = None
            if field[0] == "_" or "serialize" in field:
                continue
            current_obj = {}
            if field == "bandwidth":
                value = data.traffic / window_len.to_sec()
            elif field == "frequency":
                value = data.delivered_msgs / window_len.to_sec()
            else:
                value = getattr(data, field)
            if value is not None:
                limits = self.__get_limits(specification, field)
                if isinstance(value, (list, tuple)):
                    # Rate each element against its own per-index limits.
                    current_obj["state"] = []
                    current_obj["actual"] = []
                    current_obj["expected"] = []
                    for i, v in enumerate(value):
                        limits = self.__get_limits(specification, field, i)
                        current_obj["actual"].append(v)
                        current_obj["state"].append(self.__compare(v, limits))
                        current_obj["expected"].append(limits)
                else:
                    status = self.__compare(value, limits)
                    current_obj["state"] = status
                    current_obj["actual"] = value
                    current_obj["expected"] = limits
                result.add_value(field, current_obj["actual"], current_obj["expected"], current_obj["state"])
        result.add_value("alive", ["True"], ["True"], [2])
        return result

    def compare_topic(self, data=None):
        """
        Compares Messages about one topic

        :param data: List of Statistics messages
        :return: list of RatedStatistics messages
        """
        if not data:
            data = []
        # First aggregate per connection, then fold connections into topics.
        by_connection = {}
        by_topic = {}
        result = []
        frequency_set = False
        for message in data:
            seuid = SEUID(message)
            if not seuid.identifier in by_connection.keys():
                by_connection[seuid.identifier] = {
                    "window_min": rospy.Time.now(),
                    "window_max": rospy.Time(0),
                    "dropped_msgs": 0,
                    "frequency": 0,
                    "traffic": 0,
                    "bandwidth": 0,
                    "stamp_age_mean": rospy.Duration(0),
                    "stamp_age_stddev": rospy.Duration(0),
                    "stamp_age_max": rospy.Duration(0),
                    "count": 0,
                }
            by_connection[seuid.identifier]["count"] += 1
            window_len = message.window_stop - message.window_start
            if window_len.to_sec() == 0:
                window_len = rospy.Duration(1)
            by_connection[seuid.identifier]["window_min"] = min(message.window_start, by_connection[seuid.identifier]["window_min"])
            by_connection[seuid.identifier]["window_max"] = max(message.window_stop, by_connection[seuid.identifier]["window_max"])
            if hasattr(message, "delivered_msgs"):
                frequency_set = True
                by_connection[seuid.identifier]["frequency"] += message.delivered_msgs / float(window_len.to_sec())
            by_connection[seuid.identifier]["dropped_msgs"] += message.dropped_msgs
            by_connection[seuid.identifier]["traffic"] += message.traffic
            by_connection[seuid.identifier]["bandwidth"] += message.traffic / float(window_len.to_sec())
            by_connection[seuid.identifier]["stamp_age_max"] = max(message.stamp_age_max, by_connection[seuid.identifier]["stamp_age_max"])
            by_connection[seuid.identifier]["stamp_age_mean"] += message.stamp_age_mean
            #TODO by_connection[seuid.identifier]["stamp_age_stddev"]
        for connection in by_connection:
            seuid = SEUID(connection)
            for key in 'frequency', 'bandwidth', 'dropped_msgs', 'traffic', 'stamp_age_mean':  # average
                by_connection[connection][key] /= by_connection[connection]['count']
            topic = seuid.get_seuid('topic')
            if not topic in by_topic.keys():
                by_topic[topic] = {
                    "window_min": rospy.Time.now(),
                    "window_max": rospy.Time(0),
                    "dropped_msgs": 0,
                    "frequency": 0,
                    "traffic": 0,
                    "bandwidth": 0,
                    "stamp_age_mean": rospy.Duration(0),
                    "stamp_age_stddev": rospy.Duration(0),
                    "stamp_age_max": rospy.Duration(0),
                    'count': 0,
                }
            by_topic[topic]['count'] += 1
            by_topic[topic]["window_min"] = min(by_connection[connection]['window_min'], by_topic[topic]["window_min"])
            by_topic[topic]["window_max"] = max(by_connection[connection]['window_max'], by_topic[topic]["window_max"])
            if "frequency" in by_connection[connection]:
                frequency_set = True
                by_topic[topic]["frequency"] += by_connection[connection]['frequency']
            by_topic[topic]["dropped_msgs"] += by_connection[connection]['dropped_msgs']
            by_topic[topic]["traffic"] += by_connection[connection]['traffic']
            by_topic[topic]["bandwidth"] += by_connection[connection]['bandwidth']
            by_topic[topic]["stamp_age_max"] = max(by_connection[connection]['stamp_age_max'], by_topic[topic]["stamp_age_max"])
            by_topic[topic]["stamp_age_mean"] += by_connection[connection]['stamp_age_mean']
            #TODO by_connection[seuid.identifier]["stamp_age_stddev"]
        for topic in by_topic:
            by_topic[topic]['stamp_age_mean'] /= by_topic[topic]['count']
        # Rate each aggregated topic against its specification.
        for topic, data in by_topic.iteritems():
            specification = self.get(topic)
            r = RatedStatistics()
            r.window_start = data["window_min"]
            r.window_stop = data["window_max"]
            window_len = data["window_max"] - data["window_min"]
            if window_len.to_sec() == 0:
                window_len = rospy.Duration(1)
            r.seuid = topic
            # "traffic" is listed then removed: only rates, not raw counters,
            # are rated on topics.
            fields = ["dropped_msgs", "traffic", "bandwidth", "stamp_age_max", "stamp_age_mean"]
            fields.remove("traffic")
            if frequency_set:
                fields.append("frequency")
            for f in fields:
                re = RatedStatisticsEntity()
                re.statistic_type = f
                value = data[f]
                re.actual_value.append(str(value))
                limits = self.__get_limits(specification, re.statistic_type)
                re.expected_value.append(str(limits))
                re.state = [self.__compare(value, limits)]
                r.rated_statistics_entity.append(re)
            # Topics observed in this batch are by definition alive.
            re = RatedStatisticsEntity()
            re.statistic_type = "alive"
            re.expected_value = ["True"]
            re.actual_value = ["True"]
            re.state = [2]
            r.rated_statistics_entity.append(re)
            result.append(r)
        return result

    def __get_limits(self, specification, field, offset=0):
        # Resolve the [lower, upper] limit pair for a field from a
        # Specification, with memoisation in self.__limit_cache.
        if specification is None or field is None:
            return None
        key = "%s_%s_%s" % (specification.seuid, field, str(offset))
        if key in self.__limit_cache.keys():
            return self.__limit_cache[key]
        try:
            specs = specification.get(field)[1]
            # A list of lists holds per-index limits; pick the offset's entry.
            if isinstance(specs, list) and len(specs) > 0 and isinstance(specs[0], list):
                if len(specs) > offset:
                    specs = specs[offset]
                else:
                    return None
            limits = specs[0:2]
            # An "r"-flagged spec means (mean, relative deviation): expand it
            # into an absolute [mean - mean*r, mean + mean*r] interval.
            if len(specs) > 2 and specs[2][0].lower() == "r":
                if limits[1] > 1:
                    limits[1] -= 1
                m = limits[0]
                r = limits[1]
                limits[0] = m - m * r
                limits[1] = m + m * r
        except TypeError:
            limits = None
        except AttributeError:
            limits = None
        # Time-valued fields are specified in seconds; convert for comparison.
        convert_to_duration = ["stamp_age_max", "stamp_age_mean", "stamp_age_stddev", "period_max", "period_mean", "period_stddev"]
        if limits is not None and field in convert_to_duration:
            limits = [rospy.Duration.from_sec(limits[0]), rospy.Duration.from_sec(limits[1])]
        self.__limit_cache[key] = limits
        return limits

    def __compare(self, value, reference):
        # Rate *value* against the [low, high] pair *reference*.
        # States: 0 = too high, 1 = too low, 2 = not rateable, 3 = within limits.
        if not isinstance(reference, list) or len(reference) < 2 or \
                not isinstance(reference[0], (int, long, float, complex)) or \
                not isinstance(reference[1], (int, long, float, complex)):
            return 2
        reference.sort()
        # r = (reference[0] - reference[1]) / 2
        # m = reference[0] + r
        if reference[0] > value:
            state = 1
        elif reference[1] < value:
            state = 0
        else:
            state = 3
        return state

    def reload_specifications(self, msg=None):
        """
        Reloads all specifications loaded into the namespace /arni/specifications
        """
        # Drop the caches first so stale limits cannot survive a reload.
        self.__limit_cache = {}
        self.__specifications = {}
        self.__load_specifications()
        return []

    def __init__(self):
        """
        Initiates the SpecificationHandler kicking off the loading of available specifications.
        """
        self.__limit_cache = {}
        self.__specifications = {}
        self.reload_specifications()
| {
"content_hash": "e48b6a10534f90c0524e2b6c30583d82",
"timestamp": "",
"source": "github",
"line_count": 305,
"max_line_length": 139,
"avg_line_length": 44.485245901639345,
"alnum_prop": 0.5440005896226415,
"repo_name": "ROS-PSE/arni",
"id": "53cac8e657243dc431459779d9342d6ed11a1cc7",
"size": "13568",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "arni_processing/src/arni_processing/specification_handler.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CMake",
"bytes": "33582"
},
{
"name": "Python",
"bytes": "414118"
},
{
"name": "QMake",
"bytes": "880"
}
],
"symlink_target": ""
} |
import unittest
from perfect_numbers import is_perfect
class PerfectNumbersTest(unittest.TestCase):
    """Exercise is_perfect() on perfect, abundant and deficient numbers."""

    def test_first_perfect_number(self):
        self.assertIs(is_perfect(6), True)

    def test_no_perfect_number(self):
        self.assertIs(is_perfect(8), False)

    def test_second_perfect_number(self):
        self.assertIs(is_perfect(28), True)

    def test_abundant(self):
        self.assertIs(is_perfect(20), False)

    def test_answer_to_the_ultimate_question_of_life(self):
        self.assertIs(is_perfect(42), False)

    def test_third_perfect_number(self):
        self.assertIs(is_perfect(496), True)

    def test_odd_abundant(self):
        self.assertIs(is_perfect(945), False)

    def test_fourth_perfect_number(self):
        self.assertIs(is_perfect(8128), True)

    def test_fifth_perfect_number(self):
        self.assertIs(is_perfect(33550336), True)

    def test_sixth_perfect_number(self):
        self.assertIs(is_perfect(8589869056), True)

    def test_seventh_perfect_number(self):
        self.assertIs(is_perfect(137438691328), True)
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| {
"content_hash": "bf9171b03dae323fe32435eaec096e02",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 59,
"avg_line_length": 26.232558139534884,
"alnum_prop": 0.6675531914893617,
"repo_name": "mweb/python",
"id": "bb6de15d568c1c86a065fc318f95fe96bc54e9a3",
"size": "1128",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exercises/perfect-numbers/perfect_numbers_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "312009"
},
{
"name": "Shell",
"bytes": "640"
}
],
"symlink_target": ""
} |
import pytest
from numpy import array
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.web import _generate_url, webdoc
class TestWeb:
    # Base URLs for the two documentation channels.
    stable = 'https://www.statsmodels.org/stable/'
    devel = 'https://www.statsmodels.org/devel/'

    def test_string(self):
        # Plain search terms produce a search-page URL; spaces become '+'.
        url = _generate_url('arch', True)
        assert url == self.stable + 'search.html?q=' \
                      'arch&check_keywords=yes&area=default'
        url = _generate_url('arch', False)
        assert url == self.devel + 'search.html?q=' \
                      'arch&check_keywords=yes&area=default'
        url = _generate_url('dickey fuller', False)
        assert url == (self.devel +
                       'search.html?q='
                       'dickey+fuller&check_keywords=yes&area=default')

    def test_function(self):
        # Importable objects map to their generated API-reference page.
        url = _generate_url(OLS, True)
        assert url == (self.stable
                       + 'generated/'
                       'statsmodels.regression.linear_model.OLS.html')
        url = _generate_url(OLS, False)
        assert url == (self.devel
                       + 'generated/'
                       'statsmodels.regression.linear_model.OLS.html')

    def test_nothing(self):
        # With no argument the documentation landing page is returned.
        url = _generate_url(None, True)
        assert url == 'https://www.statsmodels.org/stable/'
        url = _generate_url(None, False)
        assert url == 'https://www.statsmodels.org/devel/'

    def test_errors(self):
        # Unsupported argument types must raise ValueError.
        with pytest.raises(ValueError):
            webdoc(array, True)
        with pytest.raises(ValueError):
            webdoc(1, False)
| {
"content_hash": "36e043470bb1f3db3c8ed3fded22d00c",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 74,
"avg_line_length": 37.11363636363637,
"alnum_prop": 0.5572565829761176,
"repo_name": "statsmodels/statsmodels",
"id": "7adc4e9487ff4e0b1c56d02fcad3724849939737",
"size": "1633",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "statsmodels/tools/tests/test_web.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10035"
},
{
"name": "Batchfile",
"bytes": "625"
},
{
"name": "C",
"bytes": "381"
},
{
"name": "Cython",
"bytes": "225838"
},
{
"name": "Fortran",
"bytes": "16671"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "MATLAB",
"bytes": "100525"
},
{
"name": "Python",
"bytes": "14445661"
},
{
"name": "R",
"bytes": "106569"
},
{
"name": "Shell",
"bytes": "25329"
},
{
"name": "Stata",
"bytes": "50129"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function, unicode_literals
from .api import setup_celery_for_management_commands
from django.apps import AppConfig
class KolibriTasksConfig(AppConfig):
    """Django application configuration for the Kolibri tasks app."""

    name = 'kolibri.tasks'
    label = 'kolibritasks'
    verbose_name = 'Kolibri Tasks'

    def ready(self):
        # Invoked once Django has loaded all apps; wire up Celery so that
        # management commands can use task machinery.
        setup_celery_for_management_commands()
| {
"content_hash": "fc6aa1211cea63e8572e20a080415283",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 72,
"avg_line_length": 25.714285714285715,
"alnum_prop": 0.7333333333333333,
"repo_name": "whitzhu/kolibri",
"id": "f14a899d3e27985c91e0e790c699dba2e584bde3",
"size": "360",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kolibri/tasks/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5059"
},
{
"name": "HTML",
"bytes": "1957"
},
{
"name": "JavaScript",
"bytes": "179491"
},
{
"name": "Makefile",
"bytes": "1907"
},
{
"name": "Python",
"bytes": "415692"
},
{
"name": "Shell",
"bytes": "6693"
},
{
"name": "Vue",
"bytes": "125683"
}
],
"symlink_target": ""
} |
def solve():
    '''
    In England the currency is made up of pound, P, and pence, p, and there are eight coins in general circulation:
    1p, 2p, 5p, 10p, 20p, 50p, P1 (100p) and P2 (200p).
    It is possible to make P2 in the following way:
    1 P1 + 1 50p + 2 20p + 1 5p + 1 2p + 3 1p
    How many different ways can P2 be made using any number of coins?

    :return: the number of distinct coin combinations summing to 200p.
    '''
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    target = 200
    # Classic coin-change dynamic programming: ways[a] is the number of ways
    # to make amount a using only the coins processed so far. Iterating over
    # coins in the outer loop counts combinations (order-independent), not
    # permutations. This replaces the original exponential enumeration,
    # which also relied on Python-2 integer division (num/coin).
    ways = [1] + [0] * target
    for coin in coins:
        for amount in range(coin, target + 1):
            ways[amount] += ways[amount - coin]
    return ways[target]
| {
"content_hash": "207e3717df1e074a1a920a1987af0b91",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 115,
"avg_line_length": 32.58620689655172,
"alnum_prop": 0.5576719576719577,
"repo_name": "serge-sans-paille/pythran",
"id": "7714a2ae489c76de9b3f9679d12842970f439da2",
"size": "1019",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pythran/tests/euler/euler31.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "2074873"
},
{
"name": "Cython",
"bytes": "1701"
},
{
"name": "Jupyter Notebook",
"bytes": "27461"
},
{
"name": "Makefile",
"bytes": "1162"
},
{
"name": "Python",
"bytes": "2025760"
}
],
"symlink_target": ""
} |
# Django settings for the python-postmark demo/test project.
import os
settings_path, settings_module = os.path.split(__file__)
import sys
# Make the postmark package (two directories up) importable.
sys.path.append('../../')
DEBUG = True
#TEMPLATE_DEBUG = DEBUG
USE_TZ=True
#TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
# Throwaway key: this settings file is only used by the demo/tests.
SECRET_KEY = '8(o*lht586wqr9hp5env&n!h!gu@t5g4*$$uupbyd*f+61!xjh'
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
)
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'mydatabase',
    }
}
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
#    'django.contrib.admin',
)
MIDDLEWARE_CLASSES = (
)
ROOT_URLCONF = 'urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(settings_path, 'templates')],
    }
]
#EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
#EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Route all outgoing mail through the Postmark backend under test.
EMAIL_BACKEND = 'postmark.django_backend.EmailBackend'
#Supply your own API KEY
POSTMARK_API_KEY = os.environ.get('POSTMARK_API_KEY', '')
# NOTE(review): `assert` vanishes under `python -O`; acceptable for a demo,
# but raising an explicit error would be more robust.
assert len(POSTMARK_API_KEY) != 0
#Use the sender set up in your postmark account
POSTMARK_SENDER = os.environ.get('POSTMARK_SENDER', '')
assert len(POSTMARK_SENDER) != 0
| {
"content_hash": "ee8c28338362e374a9a51ecb6ac69cb6",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 69,
"avg_line_length": 21.6,
"alnum_prop": 0.6743827160493827,
"repo_name": "themartorana/python-postmark",
"id": "885319bdba4f3aa1618ce9832c1a8bbdddb8c11a",
"size": "1333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/demo/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "252"
},
{
"name": "Python",
"bytes": "74616"
}
],
"symlink_target": ""
} |
# Python 2 script: fetches today's prayer times for Yogyakarta as an XML/text
# snippet, caches it next to the script, and shows it via notify-send.
# NOTE(review): all shell commands are built by string concatenation around
# os.system; inputs here are local constants, but subprocess with argument
# lists would be safer if any of them ever become user-controlled.
import os,sys
try:
    argument = sys.argv[1]
except:
    argument = 0

def elinsGetDate():
    # Today's date as YYYYMMDD via the external `date` command (strip newline).
    return os.popen("date +'%Y%m%d'").read()[:-1]

#-- set path to program folder, or just hardcode where it is if main.py is in separated place
Path = str(__file__).replace("main.py","").replace("\\","/")
print Path
namafile = "time"+elinsGetDate()+".xml"
todayexist = 0
# "kosong" ("empty") holds the server's empty/failure response for comparison.
k = open(Path+"kosong","r")
datakosong = k.read()
k.close()
dummy = 0
programoutput = " "
try:
    today = open(""+Path+""+namafile,"r")
    #~ print "today file found"
    data = today.read()
    today.close()
    # A cached file with real content means today's data is already fetched.
    if (data!=datakosong):
        todayexist = 1
        os.system("cp "+Path+"*.xml "+Path+"back/")
except:
    #~ print "today time file not found"
    dummy = 0
if (todayexist==0):
    #~ print "fetching new file"
    # Back up old data, then download today's schedule with wget.
    os.system("cp "+Path+"*.xml "+Path+"back/")
    os.system("rm -f "+Path+"*.xml")
    os.system("wget -q 'http://www.mahesajenar.com/scripts/adzan.php?kota=Yogyakarta&type=text3' --output-document="+Path+""+namafile)
    os.system("cp "+Path+"*.xml "+Path+"back/")
f = open(""+Path+""+namafile,"r")
data = f.read()
f.close()
if (data==datakosong):
    print "fetching failed"
    programoutput = "fetching time for today failed"
    os.system("rm -f "+Path+"back/"+namafile)
    dummy = 123
    #~ dummy = 0
else:
    # Keep only one backup: the latest.
    os.system("cp "+Path+"back/"+namafile+" /tmp/"+namafile)
    os.system("rm -f "+Path+"back/*.xml")
    os.system("cp /tmp/"+namafile+" "+Path+"back/")
    data=data.upper()
    # The payload is parsed with fixed-width slices starting at "IMSAK";
    # each slice length matches one schedule line in the server's format.
    posisi_awal=data.upper().find("IMSAK")
    if (posisi_awal<0):
        programoutput = "fetching time for today failed"
        dummy = 123
    else:
        pos=posisi_awal
        programoutput = data[pos:pos+15]
        pos=pos+15+2
        programoutput = programoutput+"\n"+ data[pos:pos+16]
        pos=pos+16+2
        programoutput = programoutput+"\n"+ data[pos:pos+16]
        pos=pos+16+2
        programoutput = programoutput+"\n"+ data[pos:pos+15]
        pos=pos+15+2
        programoutput = programoutput+"\n"+ data[pos:pos+17]
        pos=pos+17+2
        programoutput = programoutput+"\n"+ data[pos:pos+14]
if (dummy==123):
    # Fetch failed: fall back to the most recent backup file, whose name
    # encodes the date it was fetched (time<YYYYMMDD>.xml).
    programoutput = "sikik"
    backupfile = os.popen("ls "+Path+"back/").read()[:-1]
    backupfile_tahun = backupfile[4:8].upper()
    backupfile_bulan = backupfile[8:10].upper()
    backupfile_hari = backupfile[10:12].upper()
    programoutput = "Today fetch failed, last fetched info:\n\t\t\t["+backupfile_hari+" - "+backupfile_bulan+" - "+backupfile_tahun+"]\n"
    today = open(""+Path+"back/"+backupfile,"r")
    data = today.read()
    today.close()
    posisi_awal=data.upper().find("IMSAK")
    pos=posisi_awal
    programoutput = programoutput+data[pos:pos+15]
    pos=pos+15+2
    programoutput = programoutput+"\n"+ data[pos:pos+16]
    pos=pos+16+2
    programoutput = programoutput+"\n"+ data[pos:pos+16]
    pos=pos+16+2
    programoutput = programoutput+"\n"+ data[pos:pos+15]
    pos=pos+15+2
    programoutput = programoutput+"\n"+ data[pos:pos+17]
    pos=pos+17+2
    programoutput = programoutput+"\n"+ data[pos:pos+14]
print(programoutput)
# Show the schedule as a desktop notification.
programoutput = "notify-send \"Jarwo Info\" \""+programoutput+"\""
os.system(programoutput)
| {
"content_hash": "4ec12dbc57065129e4a913b2fcf0327c",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 134,
"avg_line_length": 29.12621359223301,
"alnum_prop": 0.6666666666666666,
"repo_name": "imakin/PersonalAssistant",
"id": "924a0ff98b80bd3d67396f75ac399c853219ae69",
"size": "3022",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jadwalsholat/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AutoIt",
"bytes": "41949"
},
{
"name": "C",
"bytes": "62428"
},
{
"name": "C++",
"bytes": "6514"
},
{
"name": "Makefile",
"bytes": "20558"
},
{
"name": "Python",
"bytes": "182576"
},
{
"name": "QMake",
"bytes": "629"
},
{
"name": "Shell",
"bytes": "2735"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, print_function
import os
import sys
import shutil
import zipfile
import time
def recursive_zip(zipf, directory, folder=""):
    """Recursively add the contents of *directory* to the open ZipFile
    *zipf*, storing entries under the archive prefix *folder*."""
    for item in os.listdir(directory):
        if os.path.isfile(os.path.join(directory, item)):
            src = os.path.join(directory, item)
            dest = folder + os.sep + item
            ext = os.path.splitext(dest)[1]
            st = os.stat(src)
            mtime = time.localtime(st.st_mtime)
            date_time = mtime[0:6]
            info = zipfile.ZipInfo(dest, date_time)
            bts = open(src, "rb").read()
            # Shell scripts and known executables keep their a+x bit via the
            # high 16 bits of external_attr.
            if ext == ".sh" or item in ("PVRTexToolCLI", "oxyresbuild.py", "gen_template.py", "png_strip.py", "gradlew"):
                info.external_attr = 0755 << 16L  # a+x
            # NOTE(review): `info`/`bts` are only used by the commented-out
            # writestr path below; the active write() does not preserve the
            # executable bit set above -- confirm which path is intended.
            # zipf.writestr(info, bts, zipfile.ZIP_DEFLATED)
            zipf.write(os.path.join(directory, item), folder + os.sep + item)
        elif os.path.isdir(os.path.join(directory, item)):
            recursive_zip(zipf, os.path.join(
                directory, item), folder + os.sep + item)
def buildzip(name):
    """Zip the ../../temp tree into ../../<name> and best-effort copy the
    archive to the gdrive and Dropbox mirror folders."""
    print("building zip: " + name)
    destzip = "../../" + name
    with zipfile.ZipFile(destzip, "w", compression=zipfile.ZIP_DEFLATED) as zp:
        recursive_zip(zp, "../../temp")
    # return
    # Mirror copies are optional: missing folders are silently ignored.
    try:
        shutil.copyfile(destzip, "../../../gdrive/oxygine/" + name)
    except IOError, e:
        pass
    try:
        shutil.copyfile(destzip, "../../../Dropbox/Public/oxygine/" + name)
    except IOError, e:
        pass
    print("zip created: " + name)
# Staging folders inside ../../temp for each exported repository.
temp = "../../temp"
SDL_dest = temp + "/SDL"
OXYGINE_dest = temp + "/oxygine-framework/"
SOUND_dest = temp + "/oxygine-sound/"
FLOW_dest = temp + "/oxygine-flow/"
FT_dest = temp + "/oxygine-freetype/"

print("cleaning temp...")
shutil.rmtree(temp, True)

def export(repo, dest):
    """Export a clean checkout of d:/<repo> into the temp staging area
    using `git checkout-index` (no .git metadata)."""
    print("exporting " + repo)
    cmd = "git -C %s checkout-index -a -f --prefix=%s/" % (
        "d:/" + repo, "d:/oxygine-framework/temp/" + dest)
    os.system(cmd)

# First build the framework-only archive...
export("oxygine-framework", "oxygine-framework")
buildzip("oxygine-framework.zip")

# ...then stage everything for the all-in-one archive.
# ALL IN ONE
#cmd = "hg archive -R ../../../SDL %s" % (SDL_dest, )
#os.system(cmd)
export("SDL", "SDL")
export("oxygine-sound", "oxygine-sound")
export("oxygine-flow", "oxygine-flow")
export("oxygine-freetype", "oxygine-freetype")

shutil.rmtree(SDL_dest + "/test")
def fix_file(name, cb):
    """Rewrite the file *name* in place by passing its raw bytes through
    the callable *cb* and writing back the result.

    :param name: path of the file to patch.
    :param cb: callable taking the file's bytes and returning new bytes.
    """
    # Use context managers so both handles are closed deterministically
    # (the original left two unclosed file objects behind).
    with open(name, "rb") as src:
        data = src.read()
    data = cb(data)
    with open(name, "wb") as dst:
        dst.write(data)
# Patch the exported SDL headers before packaging: disable the XAudio2
# audio driver on Windows and the "efficient dangerous" touch path on iOS.
fix_file(SDL_dest + "/include/SDL_config_windows.h",
         lambda data: data.replace(
             "#define SDL_AUDIO_DRIVER_XAUDIO2", "//#define SDL_AUDIO_DRIVER_XAUDIO2")
         )
fix_file(SDL_dest + "/src/video/uikit/SDL_uikitview.h",
         lambda data: data.replace(
             "#define IPHONE_TOUCH_EFFICIENT_DANGEROUS", "//#define IPHONE_TOUCH_EFFICIENT_DANGEROUS")
         )
def enum(folder, cb):
    """Recursively walk *folder* (which must end with a path separator)
    and invoke *cb* on every directory literally named "data"."""
    for entry in os.listdir(folder):
        full = folder + entry
        if not os.path.isdir(full):
            continue
        if entry == "data":
            cb(full)
        enum(full + "/", cb)
def copy(path):
    """Copy the runtime DLLs each example needs into *path* (a "data"
    directory); Demo examples additionally receive the curl/ssl DLLs."""
    win32 = OXYGINE_dest + "/oxygine/third_party/win32/dlls/"
    items = (win32 + "zlib.dll",
             win32 + "pthreadVCE2.dll",
             "../../libs/SDL2.dll")
    if "Demo/" in path:
        items = items + (win32 + "libcurl.dll", win32 + "ssleay32.dll", win32 + "libssh2.dll", win32 + "libeay32.dll")
    for item in items:
        name = os.path.split(item)[1]
        shutil.copy(item, path + "/" + name)
# Drop runtime DLLs into every example's data directory.
enum(OXYGINE_dest + "/examples/", copy)
enum(SOUND_dest + "/examples/", copy)
enum(FLOW_dest + "/examples/", copy)
enum(FT_dest + "/examples/", copy)

# Use the exported SDL's Java activity inside the oxygine Android template.
shutil.copy(SDL_dest + "/android-project/src/org/libsdl/app/SDLActivity.java",
            OXYGINE_dest + "/oxygine/SDL/android/lib/src/org/libsdl/app/SDLActivity.java")

# Bundle the prebuilt MSVC import libraries.
libs = ("SDL2.lib", "SDL2main.lib", )
for lib in libs:
    shutil.copy("../../libs/" + lib, OXYGINE_dest + "/libs/" + lib)

"""
libs = ("libSDL2main.a", "libSDL2.dll", "libSDL2.dll.a")
for lib in libs:
    shutil.copy("../../libs/" + lib, OXYGINE_dest + "/libs/" + lib)
"""

buildzip("oxygine-framework-with-sdl.zip")
print("done.")
| {
"content_hash": "d29a0d33812c230dab7dcc6be3f222a4",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 121,
"avg_line_length": 29.356643356643357,
"alnum_prop": 0.5883754168651739,
"repo_name": "daltomi/oxygine-framework",
"id": "ee16efdcea74dbec36050e876239c73236100fa0",
"size": "4245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/others/build_oxygine_with_sdl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "185"
},
{
"name": "C",
"bytes": "82993"
},
{
"name": "C++",
"bytes": "1832260"
},
{
"name": "CMake",
"bytes": "11495"
},
{
"name": "GLSL",
"bytes": "8673"
},
{
"name": "HTML",
"bytes": "27932"
},
{
"name": "Java",
"bytes": "83320"
},
{
"name": "Makefile",
"bytes": "2160"
},
{
"name": "Objective-C++",
"bytes": "14702"
},
{
"name": "Python",
"bytes": "1636"
},
{
"name": "QMake",
"bytes": "6603"
}
],
"symlink_target": ""
} |
"""Activate a virtualenv in-process, then run a module named on the command line.

Usage: launch.py <modulename> [args...]
"""
import os, sys

# Absolute path of the virtualenv's activation helper.
# NOTE(review): the original code passed this through
# os.path.join(base_dir, <absolute path>); os.path.join discards everything
# before an absolute component, so base_dir never had any effect.  The dead
# computation has been removed — if a path relative to this file was actually
# intended, that would be a deliberate behavior change to make separately.
activate_this = "/home/pi/.virtualenvs/piwars/bin/activate_this.py"

# Execute the activation script in-process so sys.path/sys.prefix point at the
# virtualenv before the target module is imported.
with open(activate_this) as f:
    exec(f.read(), dict(__file__=activate_this))

import runpy

# Shift argv: consume argv[1] (the module name) so the launched module sees
# itself as the program with the remaining arguments.
modulename = sys.argv[1]
sys.argv = sys.argv[0:1] + sys.argv[2:]
runpy.run_module(modulename)
| {
"content_hash": "4a0378d85faf5da0c063279889f0ca75",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 91,
"avg_line_length": 35.7,
"alnum_prop": 0.6890756302521008,
"repo_name": "westpark/robotics",
"id": "174d2fb12d78b6832c73f93fe5693d0b623a4ca6",
"size": "439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "launch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "402"
},
{
"name": "Python",
"bytes": "78934"
}
],
"symlink_target": ""
} |
"""Utilities for forward-mode automatic differentiation."""
import functools
import threading
from tensorflow.python import pywrap_tfe
from tensorflow.python.eager import backprop
from tensorflow.python.eager import backprop_util
from tensorflow.python.eager import execute
from tensorflow.python.eager import forwardprop_util
from tensorflow.python.eager import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.parallel_for import control_flow_ops
from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# Dictionary mapping from op names to special-cased jvp functions. Otherwise
# backward functions are transposed on the tape.
_SPECIAL_CASES = {}


def _identity_jvp(attr_tuple, inputs, outputs, tangents):
  """JVP for Identity: pass each tangent through an identity op.

  Special-cased mostly for resource handles, where creating ones Tensors from
  handle data for transposing the backward function on the tape is error-prone
  (even if we get good handle data, partially defined shapes are an issue).
  """
  del attr_tuple, inputs, outputs  # Unused: the JVP of identity is identity.
  return [array_ops.identity(tangent) for tangent in tangents]


_SPECIAL_CASES["Identity"] = _identity_jvp
def _read_variable_jvp(attr_tuple, inputs, outputs, tangents):
  """JVP for ReadVariableOp: pass each tangent through an identity op.

  Like for Identity, this special case means we don't need to create
  variable-shaped Tensors from resource handles.
  """
  del attr_tuple, inputs, outputs  # Unused: reading a variable is linear.
  return [array_ops.identity(tangent) for tangent in tangents]


_SPECIAL_CASES["ReadVariableOp"] = _read_variable_jvp


_TRACE_COUNT_CONSISTENCY_LOCK = threading.Lock()
# Map from op names to number of traces of _jvp_helper. Used to cap the number
# of traces due to shape differences while still specializing where possible.
_TRACE_COUNT = {}
def _jvp_helper(op_name, attr_tuple, inputs, outputs, tangents):
  """Computes a Jacobian-vector product for an op.

  Note that this function would be wasteful if executed eagerly. It runs the
  backward gradient function and throws away the result just to record its
  operations on a GradientTape. These unused ops are pruned away when this
  function is traced.

  Args:
    op_name: A string, the type of operation being executed.
    attr_tuple: Attributes of the operation.
    inputs: A flat list of input Tensors to the operation.
    outputs: A flat list of output Tensors from the operation.
    tangents: A flat list of Tensors, same shape as `inputs`.

  Returns:
    A flat list of tangents corresponding to `outputs`.
  """
  with _TRACE_COUNT_CONSISTENCY_LOCK:
    # Just make sure writes don't clobber each other's increments; reads in
    # _jvp_dispatch do not lock.
    _TRACE_COUNT[op_name] = _TRACE_COUNT.get(op_name, 0) + 1
  special_case = _SPECIAL_CASES.get(op_name, None)
  if special_case is not None:
    return special_case(attr_tuple, inputs, outputs, tangents)
  if not outputs:
    # tape.gradients([], inputs) doesn't make much sense
    return []
  # Generally inner GradientTapes won't function while outer accumulators are
  # recording. We temporarily reset forwardprop state to allow GradientTapes to
  # function here.
  with forwardprop_util.push_forwardprop_state():
    # Collect only trainable inputs; non-trainable ones have no tangent.
    # (Cleanup: a `trainable_indices` list was collected here but never used.)
    trainable_inputs = []
    nontrivial_tangents = []
    for input_index, tensor in enumerate(inputs):
      if backprop_util.IsTrainable(tensor):
        trainable_inputs.append(tensor)
        nontrivial_tangents.append(tangents[input_index])

    with backprop.GradientTape() as transpose_tape:
      with backprop.GradientTape() as backfunc_tape:
        backfunc_tape.watch(trainable_inputs)
        execute.record_gradient(op_name, inputs, attr_tuple, outputs)

      forwardprop_aids = []
      trainable_outputs = []
      nontrivial_output_indices = []
      for output_index, output in enumerate(outputs):
        if backprop_util.IsTrainable(output):
          forwardprop_aids.append(
              array_ops.ones_like(output, name="unused_forwardprop_aid"))
          trainable_outputs.append(output)
          nontrivial_output_indices.append(output_index)

      transpose_tape.watch(forwardprop_aids)
      # Run the backward function symbolically so transpose_tape records it;
      # `grads` itself is discarded after being differentiated below.
      grads = backfunc_tape.gradient(
          trainable_outputs,
          trainable_inputs,
          forwardprop_aids,
          unconnected_gradients=UnconnectedGradients.ZERO)
    # Differentiating the recorded backward function transposes it, yielding
    # the forward-mode JVP for each trainable output.
    nontrivial_output_tangents = transpose_tape.gradient(
        grads, forwardprop_aids, output_gradients=nontrivial_tangents)
    # Scatter the computed tangents back to their original output positions;
    # non-trainable outputs keep a None tangent.
    output_tangents = [None] * len(outputs)
    for index, tangent in zip(nontrivial_output_indices,
                              nontrivial_output_tangents):
      output_tangents[index] = tangent
    return output_tangents
def _jvp_helper_wrapper(op_name, attr_tuple, inputs, outputs, tangents,
                        use_batch):
  """Computes a batch of Jacobian-vector product for an op.

  Args:
    op_name: A string, the type of operation being executed.
    attr_tuple: Attributes of the operation.
    inputs: A flat list of input Tensors to the operation.
    outputs: A flat list of output Tensors from the operation.
    tangents: A flat list of Tensors, compatible with shape `[None] +
      input_shape`.
    use_batch: A bool, True to vetorize over batch of tangents of shape `[None]
      + input_shape`.

  Returns:
    A flat list of tangents compatible with `outputs`
    or `[None] + output_shape`.

  Raises:
    ValueError: if tangent shapes are not compatible with input shapes.
  """
  if use_batch:
    # Validate up front: every tangent must look like a batch of per-primal
    # tangents, i.e. primal.shape with an extra (possibly unknown) leading dim.
    for primal, tangent in zip(inputs, tangents):
      if not tangent.shape.is_compatible_with([None] + primal.shape):
        raise ValueError("Tangent {} was expected to be of shape "
                         "{} but is instead of shape {}".format(
                             tangent, [None] + primal.shape, tangent.shape))
    # Map the single-tangent helper over the leading batch dimension.
    return control_flow_ops.vectorized_map(
        functools.partial(_jvp_helper, op_name, attr_tuple, inputs, outputs),
        tangents,
    )
  # Unbatched case: delegate directly.
  return _jvp_helper(op_name, attr_tuple, inputs, outputs, tangents)
# TODO(allenl): experimental_relax_shapes for gradients which rely on static
# shape information are underspecialized. We may want hand-written forward
# implementations, or a more satisfying story about how we re-specialize
# gradients which were traced with relaxed shapes (e.g. use conds instead of
# trace-time Python logic).
#
# Using function.defun rather than def_function.function avoids
# tf.config.run_functions_eagerly(True). `_jvp_helper` doesn't successfully run
# eagerly (infinite recursion), and even if it did it would use extra memory and
# run unnecessary computation. The function does not create variables, so the
# two symbols are otherwise equivalent.

# Two traced variants of the same helper: exact shapes specialize per input
# shape; relaxed shapes retrace less at the cost of less static shape info.
_jvp_relaxed_shapes = function.defun(
    _jvp_helper_wrapper, experimental_relax_shapes=True)
_jvp_exact_shapes = function.defun(
    _jvp_helper_wrapper, experimental_relax_shapes=False)

# The maximum number of exact-shape traces to perform for a single op before
# switching to shape relaxation.
_TRACE_COUNT_LIMIT = 32
def _jvp_dispatch(op_name,
                  attr_tuple,
                  inputs,
                  outputs,
                  tangents,
                  use_batch=False):
  """Determine which forwardprop function to call.

  Ops traced fewer than `_TRACE_COUNT_LIMIT` times use exact-shape
  specialization; after that, shape-relaxed retracing bounds the number of
  concrete functions.
  """
  # This read of _TRACE_COUNT races with writes elsewhere. That's fine, it
  # just means we may trace a few more exact shapes before moving on to
  # relaxation.
  below_limit = _TRACE_COUNT.get(op_name, 0) < _TRACE_COUNT_LIMIT
  jvp_function = _jvp_exact_shapes if below_limit else _jvp_relaxed_shapes
  return jvp_function(op_name, attr_tuple, inputs, outputs, tangents,
                      use_batch)


pywrap_tfe.TFE_Py_RegisterJVPFunction(_jvp_dispatch)
@tf_export("autodiff.ForwardAccumulator", v1=[])
class ForwardAccumulator():
  """Computes Jacobian-vector products ("JVP"s) using forward-mode autodiff.

  Compare to `tf.GradientTape` which computes vector-Jacobian products ("VJP"s)
  using reverse-mode autodiff (backprop). Reverse mode is more attractive when
  computing gradients of a scalar-valued function with respect to many inputs
  (e.g. a neural network with many parameters and a scalar loss). Forward mode
  works best on functions with many outputs and few inputs. Since it does not
  hold on to intermediate activations, it is much more memory efficient than
  backprop where it is applicable.

  Consider a simple linear regression:

  >>> x = tf.constant([[2.0, 3.0], [1.0, 4.0]])
  >>> targets = tf.constant([[1.], [-1.]])
  >>> dense = tf.keras.layers.Dense(1)
  >>> dense.build([None, 2])
  >>> with tf.autodiff.ForwardAccumulator(
  ...    primals=dense.kernel,
  ...    tangents=tf.constant([[1.], [0.]])) as acc:
  ...   loss = tf.reduce_sum((dense(x) - targets) ** 2.)
  >>> acc.jvp(loss)
  <tf.Tensor: shape=(), dtype=float32, numpy=...>

  The example has two variables containing parameters, `dense.kernel` (2
  parameters) and `dense.bias` (1 parameter). Considering the training data `x`
  as a constant, this means the Jacobian matrix for the function mapping from
  parameters to loss has one row and three columns.

  With forwardprop, we specify a length-three vector in advance which multiplies
  the Jacobian. The `primals` constructor argument is the parameter (a
  `tf.Tensor` or `tf.Variable`) we're specifying a vector for, and the
  `tangents` argument is the "vector" in Jacobian-vector product. If our goal is
  to compute the entire Jacobian matrix, forwardprop computes one column at a
  time while backprop computes one row at a time. Since the Jacobian in the
  linear regression example has only one row, backprop requires fewer
  invocations:

  >>> x = tf.constant([[2.0, 3.0], [1.0, 4.0]])
  >>> targets = tf.constant([[1.], [-1.]])
  >>> dense = tf.keras.layers.Dense(1)
  >>> dense.build([None, 2])
  >>> loss_fn = lambda: tf.reduce_sum((dense(x) - targets) ** 2.)
  >>> kernel_fprop = []
  >>> with tf.autodiff.ForwardAccumulator(
  ...     dense.kernel, tf.constant([[1.], [0.]])) as acc:
  ...   kernel_fprop.append(acc.jvp(loss_fn()))
  >>> with tf.autodiff.ForwardAccumulator(
  ...     dense.kernel, tf.constant([[0.], [1.]])) as acc:
  ...   kernel_fprop.append(acc.jvp(loss_fn()))
  >>> with tf.autodiff.ForwardAccumulator(dense.bias, tf.constant([1.])) as acc:
  ...   bias_fprop = acc.jvp(loss_fn())
  >>> with tf.GradientTape() as tape:
  ...   loss = loss_fn()
  >>> kernel_grad, bias_grad = tape.gradient(loss, (dense.kernel, dense.bias))
  >>> np.testing.assert_allclose(
  ...     kernel_grad, tf.stack(kernel_fprop)[:, tf.newaxis])
  >>> np.testing.assert_allclose(bias_grad, bias_fprop[tf.newaxis])

  Implicit in the `tape.gradient` call is a length-one vector which
  left-multiplies the Jacobian, a vector-Jacobian product.

  `ForwardAccumulator` maintains JVPs corresponding primal tensors it is
  watching, derived from the original `primals` specified in the constructor. As
  soon as a primal tensor is deleted, `ForwardAccumulator` deletes the
  corresponding JVP.

  `acc.jvp(x)` retrieves `acc`'s JVP corresponding to the primal tensor `x`. It
  does not perform any computation. `acc.jvp` calls can be repeated as long as
  `acc` is accessible, whether the context manager is active or not. New JVPs
  are only computed while the context manager is active.

  Note that `ForwardAccumulator`s are always applied in the order their context
  managers were entered, so inner accumulators will not see JVP computation from
  outer accumulators. Take higher-order JVPs from outer accumulators:

  >>> primal = tf.constant(1.1)
  >>> with tf.autodiff.ForwardAccumulator(primal, tf.constant(1.)) as outer:
  ...   with tf.autodiff.ForwardAccumulator(primal, tf.constant(1.)) as inner:
  ...     primal_out = primal ** tf.constant(3.5)
  >>> inner_jvp = inner.jvp(primal_out)
  >>> inner_jvp  # 3.5 * 1.1 ** 2.5
  <tf.Tensor: shape=(), dtype=float32, numpy=4.4417057>
  >>> outer.jvp(inner_jvp)  # 3.5 * 2.5 * 1.1 ** 1.5
  <tf.Tensor: shape=(), dtype=float32, numpy=10.094786>

  Reversing the collection in the last line to instead retrieve
  `inner.jvp(outer.jvp(primal_out))` will not work.

  Strict nesting also applies to combinations of `ForwardAccumulator` and
  `tf.GradientTape`. More deeply nested `GradientTape` objects will ignore the
  products of outer `ForwardAccumulator` objects. This allows (for example)
  memory-efficient forward-over-backward computation of Hessian-vector products,
  where the inner `GradientTape` would otherwise hold on to all intermediate
  JVPs:

  >>> v = tf.Variable([1., 2.])
  >>> with tf.autodiff.ForwardAccumulator(
  ...     v,
  ...     # The "vector" in Hessian-vector product.
  ...     tf.constant([1., 0.])) as acc:
  ...   with tf.GradientTape() as tape:
  ...     y = tf.reduce_sum(v ** 3.)
  ...   backward = tape.gradient(y, v)
  >>> backward  # gradient from backprop
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([ 3., 12.], dtype=float32)>
  >>> acc.jvp(backward)  # forward-over-backward Hessian-vector product
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([6., 0.], dtype=float32)>
  """

  def __init__(self, primals, tangents):
    """Specify tensors to watch and their Jacobian-vector products.

    Mathematically, `tangents` is a vector right-multiplying the Jacobian matrix
    (a Jacobian-vector product) for the function computed while this accumulator
    is active. Since JVPs are computed in forward mode as the computation
    happens, this vector must be supplied in advance.

    Listing a single tensor multiple times in `primals` raises an
    exception. Excluding a tensor from `primals` is equivalent to watching it
    with a tangent tensor of zeros.

    Args:
      primals: A tensor or nested structure of tensors to watch.
      tangents: A tensor or nested structure of tensors, with the same nesting
        structure as `primals`, with each element being a vector with the same
        size as the corresponding primal element.

    Raises:
      ValueError: If the same tensor or variable is specified multiple times in
        `primals`.
    """
    self._accumulator = pywrap_tfe.TFE_Py_ForwardAccumulatorNew(False)
    self._recording = False
    primal_ids = set()
    for primal in nest.flatten(primals):
      if id(primal) in primal_ids:
        # Bug fix: the "{}" placeholder was previously never interpolated
        # (no .format call), so the error message showed a literal "{}".
        raise ValueError(
            "Tensor {} was specified as a primal multiple times. This may "
            "indicate an error. If it was intended, please sum the "
            "corresponding tangents.".format(primal))
      primal_ids.add(id(primal))
    self._watch(primals, tangents)

  def __enter__(self):
    self._push_accumulator()
    return self

  def __exit__(self, typ, value, traceback):
    if self._recording:
      self._pop_accumulator()

  def _push_accumulator(self):
    # Start recording JVPs; raises if this accumulator is already active.
    if self._recording:
      raise ValueError("Accumulator is already recording.")
    pywrap_tfe.TFE_Py_ForwardAccumulatorSetAdd(self._accumulator)
    self._recording = True

  def _pop_accumulator(self):
    # Stop recording JVPs; raises if this accumulator is not active.
    if not self._recording:
      raise ValueError("Accumulator is not recording.")
    pywrap_tfe.TFE_Py_ForwardAccumulatorSetRemove(self._accumulator)
    self._recording = False

  def _watch(self, primals, tangents):
    """Ensures that `primals` are being traced by this accumulator.

    Mathematically, `tangents` is a vector right-multiplying the Jacobian matrix
    (a Jacobian-vector product) for the function computed while this accumulator
    is active. Since JVPs are computed in forward mode as the computation
    happens, this vector must be supplied in advance.

    Watching a single tensor multiple times sums each of its `tangents`. Any
    un-watched tensor has zeros for its tangent vector.

    Args:
      primals: A Tensor or list of Tensors.
      tangents: A Tensor or list of Tensors matching `primals`.
    """

    def _watch(primal, tangent):
      if not primal.dtype.is_floating:
        logging.log_first_n(
            logging.WARN, "The dtype of the watched primal must be "
            "floating (e.g. tf.float32), got %r", 5, primal.dtype)
      tangent = ops.convert_to_tensor(tangent, dtype=primal.dtype)
      if hasattr(primal, "handle"):
        # Run convert_to_tensor to get the captured handle from whichever
        # function we're running if necessary.
        primal = ops.convert_to_tensor(primal.handle)
      pywrap_tfe.TFE_Py_ForwardAccumulatorWatch(self._accumulator, primal,
                                                tangent)

    nest.map_structure(_watch, primals, tangents, expand_composites=True)

  def jvp(self, primals, unconnected_gradients=UnconnectedGradients.NONE):
    """Fetches the Jacobian-vector product computed for `primals`.

    Note that this method performs no computation, and simply looks up a JVP
    that was already computed (unlike backprop using a `tf.GradientTape`, where
    the computation happens on the call to `tape.gradient`).

    Args:
      primals: A watched Tensor or structure of Tensors to fetch the JVPs for.
      unconnected_gradients: A value which can either hold 'none' or 'zero' and
        alters the value which will be returned if no JVP was computed for
        `primals`. The possible values and effects are detailed in
        'tf.UnconnectedGradients' and it defaults to 'none'.

    Returns:
      Tensors with the same shapes and dtypes as `primals`, or None if no JVP
      is available.
    """
    unconnected_gradients = UnconnectedGradients(unconnected_gradients)
    if self._accumulator is None:
      raise ValueError("Called jvp() without first tracing anything.")

    def _fetch_jvp(tensor):
      if hasattr(tensor, "handle"):
        # Variables: look up the JVP by the underlying resource handle.
        unwrapped_tensor = ops.convert_to_tensor(tensor.handle)
      else:
        unwrapped_tensor = tensor
      result = pywrap_tfe.TFE_Py_ForwardAccumulatorJVP(self._accumulator,
                                                       unwrapped_tensor)
      if result is None and unconnected_gradients == UnconnectedGradients.ZERO:
        result = array_ops.zeros_like(tensor)
      return result

    return nest.map_structure(_fetch_jvp, primals)

  @classmethod
  def _batch_accumulator(cls, primals, tangents):
    """Factory constructor to test accumulator on batches of tangents.

    Args:
      primals: A tensor or nested structure of tensors to watch.
      tangents: A tensor or nested structure of tensors, with the same nesting
        structure as `primals`, with each element being a vector with compatible
        shape `[None] + primal.shape` of the corresponding primal element.

    Returns:
      A batch accumulator object.
    """
    # Cleanup: object.__new__ silently ignored the extra (primals, tangents)
    # arguments previously passed here; don't rely on that quirk.
    acc = super(ForwardAccumulator, cls).__new__(cls)
    acc._recording = False
    acc._accumulator = pywrap_tfe.TFE_Py_ForwardAccumulatorNew(True)
    primal_ids = set()
    for primal, tangent in zip(nest.flatten(primals), nest.flatten(tangents)):
      tangent.shape.assert_is_compatible_with(
          tensor_shape.TensorShape([None]) + primal.shape)
      if id(primal) in primal_ids:
        # Bug fix: interpolate the offending primal into the message (the
        # .format call was previously missing).
        raise ValueError(
            "Tensor {} was specified as a primal multiple times. This may "
            "indicate an error. If it was intended, please sum the "
            "corresponding tangents.".format(primal))
      primal_ids.add(id(primal))
    acc._watch(primals, tangents)
    return acc
| {
"content_hash": "dfc3de155817030b2c76d35b7bd07734",
"timestamp": "",
"source": "github",
"line_count": 461,
"max_line_length": 80,
"avg_line_length": 42.41431670281996,
"alnum_prop": 0.6966194445865085,
"repo_name": "Intel-Corporation/tensorflow",
"id": "4bc53ee4dfefce60ac40d60d0c44d62f9b8a49cc",
"size": "20242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/eager/forwardprop.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "183416"
},
{
"name": "C++",
"bytes": "24549804"
},
{
"name": "CMake",
"bytes": "160888"
},
{
"name": "Go",
"bytes": "849081"
},
{
"name": "HTML",
"bytes": "681293"
},
{
"name": "Java",
"bytes": "307123"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37393"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64142"
},
{
"name": "Protocol Buffer",
"bytes": "218430"
},
{
"name": "Python",
"bytes": "21875003"
},
{
"name": "Shell",
"bytes": "337846"
},
{
"name": "TypeScript",
"bytes": "849555"
}
],
"symlink_target": ""
} |
import os
from Widget import Widget
from util import props_to_str
# Stylesheet and jQuery-UI script any page embedding a ProgressBar must load.
HEADERS = [
    '<link type="text/css" href="/CTK/css/CTK.css" rel="stylesheet" />',
    '<script type="text/javascript" src="/CTK/js/jquery-ui-1.7.2.custom.min.js"></script>'
]

# Placeholder <div>; the progressbar() call below turns it into the widget.
HTML = """
<div id="%(id)s" %(props)s></div>
"""

# Initialization snippet setting the widget's starting percentage.
PERCENT_INIT_JS = """
$('#%(id)s').progressbar({ value: %(value)s });
"""
class ProgressBar (Widget):
    """jQuery-UI based progress bar widget.

    Accepts an optional ``props`` dict of HTML attributes; the special key
    ``value`` (initial percentage, default 0) is consumed rather than being
    emitted as an attribute.
    """

    def __init__ (self, props=None):
        """Build the widget.

        Bug fix: the previous signature used a mutable default argument
        (``props={}``) and popped 'value' directly from the caller's dict,
        mutating it as a side effect.  We now take ``None`` as the default
        and work on a private copy, which is backward compatible.
        """
        Widget.__init__ (self)
        self.props = dict(props) if props else {}
        self.id    = "progressbar_%d" %(self.uniq_id)
        self.value = self.props.pop ('value', 0)

        # Always tag the element with the 'progressbar' CSS class.
        if 'class' in self.props:
            self.props['class'] += ' progressbar'
        else:
            self.props['class'] = 'progressbar'

    def Render (self):
        """Render the placeholder div, init JS and required headers."""
        render = Widget.Render (self)
        props = {'id':    self.id,
                 'value': self.value,
                 'props': props_to_str (self.props)}
        render.html    += HTML %(props)
        render.js      += PERCENT_INIT_JS %(props)
        render.headers += HEADERS
        return render

    def JS_to_set (self, value):
        """Return a JS snippet that updates the bar to ``value`` percent."""
        return "$('#%s').progressbar ('option', 'value', %s);" %(self.id, value)
| {
"content_hash": "a7f59924574238e01e345388e6086c65",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 90,
"avg_line_length": 27.09090909090909,
"alnum_prop": 0.5335570469798657,
"repo_name": "cherokee/pyscgi",
"id": "9c6effe94bdc7bf9d64acf42c3f015c7dadb0196",
"size": "1975",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CTK/ProgressBar.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "381309"
},
{
"name": "Python",
"bytes": "292070"
}
],
"symlink_target": ""
} |
# This DAG file is intentionally broken: it raises at import time.  The test
# suite points a DagBag at this directory with an ignore rule, so a passing
# run proves the file was never imported.
from airflow.models import DAG  # noqa # pylint: disable=unused-import

raise Exception("This dag file should have been ignored!")
| {
"content_hash": "980789568e92349b06fd0c8f020466bc",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 70,
"avg_line_length": 43.666666666666664,
"alnum_prop": 0.7709923664122137,
"repo_name": "mtagle/airflow",
"id": "1e5b97165422c73f0cf02cb67644a01ea6e9f25d",
"size": "973",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tests/dags/subdir1/test_ignore_this.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17280"
},
{
"name": "HTML",
"bytes": "148492"
},
{
"name": "JavaScript",
"bytes": "25360"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10006634"
},
{
"name": "Shell",
"bytes": "217011"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
from sqlalchemy.testing import eq_
from sqlalchemy import *
from sqlalchemy import testing
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.types import TypeDecorator
from sqlalchemy.testing import fixtures, AssertsExecutionResults, engines, \
assert_raises_message
from sqlalchemy import exc as sa_exc
import itertools
class ReturningTest(fixtures.TestBase, AssertsExecutionResults):
    # Skip the whole class on dialects without RETURNING support.
    __requires__ = 'returning',

    def setup(self):
        meta = MetaData(testing.db)
        global table, GoofyType

        class GoofyType(TypeDecorator):
            # String wrapper that prepends "FOO" on bind and appends "BAR" on
            # result fetch, proving result processing runs on RETURNING values.
            impl = String

            def process_bind_param(self, value, dialect):
                if value is None:
                    return None
                return "FOO" + value

            def process_result_value(self, value, dialect):
                if value is None:
                    return None
                return value + "BAR"

        table = Table('tables', meta,
            Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
            Column('persons', Integer),
            Column('full', Boolean),
            Column('goofy', GoofyType(50))
        )
        table.create(checkfirst=True)

    def teardown(self):
        table.drop()

    def test_column_targeting(self):
        # Rows from RETURNING should be addressable by Column object and by
        # string key alike.
        result = table.insert().returning(table.c.id, table.c.full).execute({'persons': 1, 'full': False})
        row = result.first()
        assert row[table.c.id] == row['id'] == 1
        assert row[table.c.full] == row['full'] == False
        result = table.insert().values(persons=5, full=True, goofy="somegoofy").\
            returning(table.c.persons, table.c.full, table.c.goofy).execute()
        row = result.first()
        assert row[table.c.persons] == row['persons'] == 5
        assert row[table.c.full] == row['full'] == True
        eq_(row[table.c.goofy], row['goofy'])
        # Round trip through GoofyType: bind added FOO, result added BAR.
        eq_(row['goofy'], "FOOsomegoofyBAR")

    @testing.fails_on('firebird', "fb can't handle returning x AS y")
    def test_labeling(self):
        # A labeled column in RETURNING is addressable by its label.
        result = table.insert().values(persons=6).\
            returning(table.c.persons.label('lala')).execute()
        row = result.first()
        assert row['lala'] == 6

    @testing.fails_on('firebird', "fb/kintersbasdb can't handle the bind params")
    @testing.fails_on('oracle+zxjdbc', "JDBC driver bug")
    def test_anon_expressions(self):
        # Arbitrary (unlabeled) SQL expressions are allowed in RETURNING.
        result = table.insert().values(goofy="someOTHERgoofy").\
            returning(func.lower(table.c.goofy, type_=GoofyType)).execute()
        row = result.first()
        eq_(row[0], "foosomeothergoofyBAR")
        result = table.insert().values(persons=12).\
            returning(table.c.persons + 18).execute()
        row = result.first()
        eq_(row[0], 30)

    def test_update_returning(self):
        table.insert().execute([{'persons': 5, 'full': False}, {'persons': 3, 'full': False}])
        # Only the row matched by the WHERE clause is returned.
        result = table.update(table.c.persons > 4, dict(full=True)).returning(table.c.id).execute()
        eq_(result.fetchall(), [(1,)])
        result2 = select([table.c.id, table.c.full]).order_by(table.c.id).execute()
        eq_(result2.fetchall(), [(1, True), (2, False)])

    def test_insert_returning(self):
        result = table.insert().returning(table.c.id).execute({'persons': 1, 'full': False})
        eq_(result.fetchall(), [(1,)])

    @testing.requires.multivalues_inserts
    def test_multirow_returning(self):
        # Multi-VALUES insert should return one row per inserted row.
        ins = table.insert().returning(table.c.id, table.c.persons).values(
            [
                {'persons': 1, 'full': False},
                {'persons': 2, 'full': True},
                {'persons': 3, 'full': False},
            ]
        )
        result = testing.db.execute(ins)
        eq_(
            result.fetchall(),
            [(1, 1), (2, 2), (3, 3)]
        )

    def test_no_ipk_on_returning(self):
        # inserted_primary_key is undefined once RETURNING is in play.
        result = testing.db.execute(
            table.insert().returning(table.c.id),
            {'persons': 1, 'full': False}
        )
        assert_raises_message(
            sa_exc.InvalidRequestError,
            "Can't call inserted_primary_key when returning\(\) is used.",
            getattr, result, "inserted_primary_key"
        )

    @testing.fails_on_everything_except('postgresql', 'firebird')
    def test_literal_returning(self):
        # Raw-SQL RETURNING, exercising dialect-specific boolean literals.
        if testing.against("postgresql"):
            literal_true = "true"
        else:
            literal_true = "1"
        result4 = testing.db.execute('insert into tables (id, persons, "full") '
            'values (5, 10, %s) returning persons' % literal_true)
        eq_([dict(row) for row in result4], [{'persons': 10}])

    def test_delete_returning(self):
        table.insert().execute([{'persons': 5, 'full': False}, {'persons': 3, 'full': False}])
        result = table.delete(table.c.persons > 4).returning(table.c.id).execute()
        eq_(result.fetchall(), [(1,)])
        result2 = select([table.c.id, table.c.full]).order_by(table.c.id).execute()
        eq_(result2.fetchall(), [(2, False),])
class SequenceReturningTest(fixtures.TestBase):
    # Needs both RETURNING and explicit-sequence support (e.g. PG, Oracle, FB).
    __requires__ = 'returning', 'sequences'

    def setup(self):
        meta = MetaData(testing.db)
        global table, seq
        seq = Sequence('tid_seq')
        table = Table('tables', meta,
            Column('id', Integer, seq, primary_key=True),
            Column('data', String(50))
        )
        table.create(checkfirst=True)

    def teardown(self):
        table.drop()

    def test_insert(self):
        # The sequence-generated PK comes back via RETURNING, and the
        # sequence advances exactly once per insert.
        r = table.insert().values(data='hi').returning(table.c.id).execute()
        assert r.first() == (1, )
        assert seq.execute() == 2
class KeyReturningTest(fixtures.TestBase, AssertsExecutionResults):
    """test returning() works with columns that define 'key'."""

    __requires__ = 'returning',

    def setup(self):
        meta = MetaData(testing.db)
        global table
        # 'id' is the database name; 'foo_id' is the Python-side key.
        table = Table('tables', meta,
            Column('id', Integer, primary_key=True, key='foo_id', test_needs_autoincrement=True),
            Column('data', String(20)),
        )
        table.create(checkfirst=True)

    def teardown(self):
        table.drop()

    @testing.exclude('firebird', '<', (2, 0), '2.0+ feature')
    @testing.exclude('postgresql', '<', (8, 2), '8.2+ feature')
    def test_insert(self):
        result = table.insert().returning(table.c.foo_id).execute(data='somedata')
        row = result.first()
        assert row[table.c.foo_id] == row['id'] == 1
        # Bug fix: the follow-up SELECT's row was previously bound to an
        # unused ``result`` variable while the assertion re-checked the stale
        # ``row`` from the RETURNING result, so the SELECT was never verified.
        row = table.select().execute().first()
        assert row[table.c.foo_id] == 1
class ReturnDefaultsTest(fixtures.TablesTest):
    # Exercises return_defaults(): fetching server/client-generated defaults
    # back from INSERT/UPDATE via RETURNING.
    __requires__ = ('returning', )
    run_define_tables = 'each'

    @classmethod
    def define_tables(cls, metadata):
        from sqlalchemy.sql import ColumnElement
        from sqlalchemy.ext.compiler import compiles
        counter = itertools.count()

        class IncDefault(ColumnElement):
            # SQL-level default that renders as an incrementing literal, so
            # each compilation produces a distinguishable value (0, 1, 2, ...).
            pass

        @compiles(IncDefault)
        def compile(element, compiler, **kw):
            return str(next(counter))

        Table("t1", metadata,
            Column("id", Integer, primary_key=True, test_needs_autoincrement=True),
            Column("data", String(50)),
            Column("insdef", Integer, default=IncDefault()),
            Column("upddef", Integer, onupdate=IncDefault())
        )

    def test_chained_insert_pk(self):
        # return_defaults() chained onto the statement.
        t1 = self.tables.t1
        result = testing.db.execute(
            t1.insert().values(upddef=1).return_defaults(t1.c.insdef)
        )
        eq_(
            [result.returned_defaults[k] for k in (t1.c.id, t1.c.insdef)],
            [1, 0]
        )

    def test_arg_insert_pk(self):
        # return_defaults passed as a constructor argument instead.
        t1 = self.tables.t1
        result = testing.db.execute(
            t1.insert(return_defaults=[t1.c.insdef]).values(upddef=1)
        )
        eq_(
            [result.returned_defaults[k] for k in (t1.c.id, t1.c.insdef)],
            [1, 0]
        )

    def test_chained_update_pk(self):
        t1 = self.tables.t1
        testing.db.execute(
            t1.insert().values(upddef=1)
        )
        result = testing.db.execute(t1.update().values(data='d1').
            return_defaults(t1.c.upddef))
        eq_(
            [result.returned_defaults[k] for k in (t1.c.upddef,)],
            [1]
        )

    def test_arg_update_pk(self):
        t1 = self.tables.t1
        testing.db.execute(
            t1.insert().values(upddef=1)
        )
        result = testing.db.execute(t1.update(return_defaults=[t1.c.upddef]).
            values(data='d1'))
        eq_(
            [result.returned_defaults[k] for k in (t1.c.upddef,)],
            [1]
        )

    def test_insert_non_default(self):
        """test that a column not marked at all as a
        default works with this feature."""

        t1 = self.tables.t1
        result = testing.db.execute(
            t1.insert().values(upddef=1).return_defaults(t1.c.data)
        )
        eq_(
            [result.returned_defaults[k] for k in (t1.c.id, t1.c.data,)],
            [1, None]
        )

    def test_update_non_default(self):
        """test that a column not marked at all as a
        default works with this feature."""

        t1 = self.tables.t1
        testing.db.execute(
            t1.insert().values(upddef=1)
        )
        result = testing.db.execute(t1.update().
            values(upddef=2).return_defaults(t1.c.data))
        eq_(
            [result.returned_defaults[k] for k in (t1.c.data,)],
            [None]
        )

    @testing.fails_on("oracle+cx_oracle", "seems like a cx_oracle bug")
    def test_insert_non_default_plus_default(self):
        t1 = self.tables.t1
        result = testing.db.execute(
            t1.insert().values(upddef=1).return_defaults(
                t1.c.data, t1.c.insdef)
        )
        eq_(
            dict(result.returned_defaults),
            {"id": 1, "data": None, "insdef": 0}
        )

    @testing.fails_on("oracle+cx_oracle", "seems like a cx_oracle bug")
    def test_update_non_default_plus_default(self):
        t1 = self.tables.t1
        testing.db.execute(
            t1.insert().values(upddef=1)
        )
        result = testing.db.execute(t1.update().
            values(insdef=2).return_defaults(
                t1.c.data, t1.c.upddef))
        eq_(
            dict(result.returned_defaults),
            {"data": None, 'upddef': 1}
        )
class ImplicitReturningFlag(fixtures.TestBase):
    """Check how the implicit_returning engine option interacts with connect-time
    dialect version detection."""

    def test_flag_turned_off(self):
        engine = engines.testing_engine(options={'implicit_returning': False})
        assert engine.dialect.implicit_returning is False
        # Connecting (which triggers version detection) must not re-enable it.
        conn = engine.connect()
        assert engine.dialect.implicit_returning is False

    def test_flag_turned_on(self):
        engine = engines.testing_engine(options={'implicit_returning': True})
        assert engine.dialect.implicit_returning is True
        # Nor must connecting disable an explicitly enabled flag.
        conn = engine.connect()
        assert engine.dialect.implicit_returning is True

    def test_flag_turned_default(self):
        supports = [False]

        def go():
            supports[0] = True
        # supports[0] becomes True iff the current backend meets the
        # 'returning' requirement.
        testing.requires.returning(go)()
        engine = engines.testing_engine()

        # starts as False. This is because all of Firebird,
        # Postgresql, Oracle, SQL Server started supporting RETURNING
        # as of a certain version, and the flag is not set until
        # version detection occurs. If some DB comes along that has
        # RETURNING in all cases, this test can be adjusted.
        assert engine.dialect.implicit_returning is False

        # version detection on connect sets it
        conn = engine.connect()
        assert engine.dialect.implicit_returning is supports[0]
| {
"content_hash": "5ed630efdc04cdf8a9fa6c0bfd3a9f95",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 106,
"avg_line_length": 36.01764705882353,
"alnum_prop": 0.5451576024824433,
"repo_name": "Cito/sqlalchemy",
"id": "e7245aa3c61e0be856f6ce8a31ea9a7135156f1c",
"size": "12246",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/sql/test_returning.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "46062"
},
{
"name": "CSS",
"bytes": "8734"
},
{
"name": "JavaScript",
"bytes": "493"
},
{
"name": "Python",
"bytes": "7952860"
},
{
"name": "TeX",
"bytes": "13927"
}
],
"symlink_target": ""
} |
from oslotest import base
import stevedore
from testtools import matchers
class TestPasteDeploymentEntryPoints(base.BaseTestCase):
    """Verify each middleware factory is registered as a Paste filter."""
    def test_entry_points(self):
        expected = {
            'catch_errors': 'CatchErrors',
            'correlation_id': 'CorrelationId',
            'cors': 'CORS',
            'debug': 'Debug',
            'healthcheck': 'Healthcheck',
            'http_proxy_to_wsgi': 'HTTPProxyToWSGI',
            'request_id': 'RequestId',
            'sizelimit': 'RequestBodySizeLimiter',
            'ssl': 'SSLMiddleware',
        }
        manager = stevedore.ExtensionManager('paste.filter_factory')
        # Every expected factory must appear among the loaded entry points.
        registered = [extension.name for extension in manager]
        self.assertThat(registered, matchers.ContainsAll(expected))
| {
"content_hash": "abd7add1a40954178da4a1daa0d06a7b",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 63,
"avg_line_length": 33.26923076923077,
"alnum_prop": 0.6034682080924856,
"repo_name": "openstack/oslo.middleware",
"id": "d7368ac40895633a6217412b60040c33ab7c08fe",
"size": "1406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oslo_middleware/tests/test_entry_points.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "200339"
}
],
"symlink_target": ""
} |
# Sphinx documentation build configuration for Django Sample Data Helper.
# Generated by sphinx-quickstart; the commented-out settings document the
# Sphinx defaults and are kept for easy future customization.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django Sample Data Helper'
copyright = u'2013, Jesús Espino García'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2.0'
# The full version, including alpha/beta/rc tags.
release = '0.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoSampleDataHelperdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'DjangoSampleDataHelper.tex', u'Django Sample Data Helper Documentation',
   u'Jesús Espino García', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'djangosampledatahelper', u'Django Sample Data Helper Documentation',
     [u'Jesús Espino García'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'DjangoSampleDataHelper', u'Django Sample Data Helper Documentation',
   u'Jesús Espino García', 'DjangoSampleDataHelper', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| {
"content_hash": "cdf8fd3a01be67f5db0cd88b8c1006b9",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 88,
"avg_line_length": 32.7292576419214,
"alnum_prop": 0.7059372915276851,
"repo_name": "kaleidos/django-sampledatahelper",
"id": "6bc03a0fc2f87bbc277a2ccd33b3fde6de7013cc",
"size": "7940",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/source/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "46180"
}
],
"symlink_target": ""
} |
import argparse
import os
import sys
# Make the shared ../../utils helpers importable before the project-local
# imports below (dirutils, ruleutils, ... live there or alongside it).
script_dir = os.path.normpath(sys.path[0])
sys.path.append(os.path.abspath(os.path.join(script_dir, "../../utils/")))
import dirutils
from ruleutils import create_layer_rules, create_color_rules
from basechart import generate_basechart_config
from geotif import generate_tif_config
from elevation import generate_elevation_config
from aml import generate_aml_config
# Bundled map resources (rule templates etc.) live next to the scripts dir.
resource_dir = os.path.normpath(os.path.join(script_dir, "..", "resources"))
def parse_arguments():
    """Build the command-line parser and return the parsed arguments.

    Prints help and exits with status 2 unless at least one of the data-path
    options was supplied. --rule-set-path is enforced by argparse itself
    (required=True), so the explicit rule_set_path checks below are
    effectively redundant -- NOTE(review): kept as-is, confirm before
    simplifying.
    """
    parser = argparse.ArgumentParser(prog="generate_map_config.py",
                                     description="This program generates mapserver configuration for different kinds of geographical data. The resulting mapserver configuration will be put in the folder 'map' in the same folder as the geographical data. For the basechart option , the layer and color rules are fetched from the rule-set-path. If they are not present the script will create a default one at this location.")
    parser.add_argument("-f", "--force-overwrite", action="store_true",
                        help="Force overwrite the rule set at RULE_SET_PATH")
    # Each data kind gets its own argument group; exactly one data path is
    # expected in practice (main() dispatches on whichever is set).
    basechartgroup = parser.add_argument_group("BaseChart arguments:")
    basechartgroup.add_argument("-basechartdata", "--basechart-data-path", nargs=1,
                                help="Directory where your converted chart data is stored. The subfolder needs to be named 'shape'.")
    enhancedchartgroup = parser.add_argument_group("Enhanced Chart arguments:")
    enhancedchartgroup.add_argument("-enhancedchartdata", "--enhanced-data-path", nargs=1,
                                    help="Directory where your converted enhanced chart data is stored. The subfolder can have any name, but must contains levels (1, 2, etc.) subfolders.")
    geotifgroup = parser.add_argument_group("GeoTif arguments:")
    geotifgroup.add_argument("-geotifdata", "--geotif-data-path", nargs=1,
                             help="Directory where your Geotif files are stored. The subfolder needs to be named 'data'.")
    elevationgroup = parser.add_argument_group("Elevation arguments:")
    elevationgroup.add_argument("-elevationdata", "--elevation-data-path", nargs=1,
                                help="Directory where your Elevation data files are stored. The subfolder needs to be named 'data'.")
    amlgroup = parser.add_argument_group("AML arguments:")
    amlgroup.add_argument("-amldata", "--aml-data-path", nargs=1,
                          help="Directory where your AML data files are stored. The subfolder needs to be named 'data'.")
    parser.add_argument("-rules", "--rule-set-path", nargs=1,
                        help="Path to map configuration rule set", required=True)
    parser.add_argument("-rule-default-color", nargs=1,
                        help="Substring that uniquely identifies a color table")
    parser.add_argument("-d", "--debug", action="store_true",
                        help="Enable debug on the mapserver")
    parser.add_argument("-c", "--chartsymbols", nargs=1,
                        help="Use OpenCPN chartsymbols.xml file to generate layers")
    parser.add_argument("-y", "--displaycategory", nargs=1,
                        help="Comma separated list of OpenCPN Display Category to load. Displaybase is always loaded, default is Standard.")
    parser.add_argument("-t", "--tablename", '--point_table', nargs=1,
                        help="Which OpenCPN chartsymbols.xml table to generate for point features. Default is Simplified.")
    parser.add_argument("-a", "--area_table", default='Plain',
                        choices=['Plain', 'Symbolized'],
                        help="Which OpenCPN chartsymbols.xml table to generate for area features. Default is Plain.")
    args = parser.parse_args()
    # Require at least one data source; without one there is nothing to do.
    if not ((args.basechart_data_path and args.rule_set_path) or
            (args.enhanced_data_path and args.rule_set_path) or
            args.geotif_data_path or
            args.elevation_data_path or
            args.aml_data_path):
        parser.print_help()
        sys.exit(2)
    return args
def main():
    """Entry point: resolve paths from the CLI args and generate the
    mapserver configuration for whichever data kind was supplied.

    Exits with status 1 on missing data path, missing chartsymbols.xml,
    or when enhanced data is requested without --chartsymbols.
    """
    args = parse_arguments()
    # Set up the paths to use
    enhanced_data = False
    rule_set_path = dirutils.force_sub_dir(
        os.path.abspath(args.rule_set_path[0]), "rules")
    data_path = None
    # Exactly one of these branches runs; each forces the conventional
    # subfolder name for its data kind ("shape" or "data").
    if args.basechart_data_path:
        data_path = dirutils.force_sub_dir(os.path.abspath(
            os.path.normpath(args.basechart_data_path[0])), "shape")
    elif args.geotif_data_path:
        data_path = dirutils.force_sub_dir(os.path.abspath(
            os.path.normpath(args.geotif_data_path[0])), "data")
    elif args.elevation_data_path:
        data_path = dirutils.force_sub_dir(os.path.abspath(
            os.path.normpath(args.elevation_data_path[0])), "data")
    elif args.aml_data_path:
        data_path = dirutils.force_sub_dir(os.path.abspath(
            os.path.normpath(args.aml_data_path[0])), "data")
    elif args.enhanced_data_path:
        enhanced_data = True
        if not args.chartsymbols:
            print("Enhanced data require --chartsymbols option.")
            sys.exit(1)
        data_path = os.path.abspath(
            os.path.normpath(args.enhanced_data_path[0]))
    if not data_path:
        print("No data found")
        sys.exit(1)
    chartsymbols = None
    point_table = 'Simplified'
    area_table = 'Plain'
    displaycategory = ['Displaybase']
    if args.chartsymbols and not os.path.isfile(args.chartsymbols[0]):
        print("chartsymbols.xml not found at: " + args.chartsymbols[0])
        sys.exit(1)
    elif args.chartsymbols and enhanced_data:
        # NOTE(review): --chartsymbols/--tablename/--displaycategory only
        # take effect for enhanced data; for other data kinds they are
        # silently ignored -- confirm this is intended.
        chartsymbols = os.path.abspath(os.path.normpath(args.chartsymbols[0]))
        if args.tablename and args.tablename[0] in ['Simplified', 'Paper']:
            point_table = args.tablename[0]
        area_table = args.area_table
        if args.displaycategory:
            displaycategory.extend(args.displaycategory[0].split(','))
        else:
            displaycategory.append('Standard')
    if not os.path.exists(data_path):
        os.makedirs(data_path)
    # Generated mapserver config goes into a "map" sibling of the data dir.
    map_path = os.path.normpath(os.path.join(data_path, "..", "map"))
    # Move context to the script dir and run from there
    os.chdir(script_dir)
    # Check if color tables exist and create them if not
    color_tables_exist = dirutils.does_color_tables_exist(rule_set_path)
    if not color_tables_exist or args.force_overwrite:
        if args.rule_default_color:
            color_table = args.rule_default_color[0]
        else:
            color_table = None
        create_color_rules(resource_dir, os.path.join(
            rule_set_path, "color_tables"), color_table)
    if args.basechart_data_path or args.enhanced_data_path:
        # Check if layer definitions exist and create them if not
        layer_definitions_exist = dirutils.does_layer_rules_exist(
            rule_set_path)
        if not layer_definitions_exist or args.force_overwrite:
            create_layer_rules(resource_dir, os.path.join(
                rule_set_path, "layer_rules"))
        # Generate the BaseChart config ...
        generate_basechart_config(data_path, map_path, rule_set_path, resource_dir,
                                  args.force_overwrite, args.debug, point_table, area_table, displaycategory, chartsymbols)
    elif args.geotif_data_path:
        # ... or the TIF config
        generate_tif_config(data_path, map_path, args.debug)
    elif args.elevation_data_path:
        # ... or Elevation config
        generate_elevation_config(data_path, map_path, args.debug)
    elif args.aml_data_path:
        # ... or AML config
        layer_definitions_exist = dirutils.does_layer_rules_exist(
            rule_set_path)
        if not layer_definitions_exist or args.force_overwrite:
            create_layer_rules(resource_dir, os.path.join(
                rule_set_path, "layer_rules"))
        generate_aml_config(data_path, map_path, rule_set_path,
                            resource_dir, args.force_overwrite, args.debug)
if __name__ == "__main__":
    main()
| {
"content_hash": "0b46c343a45ce87b4317f2b1425202b2",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 423,
"avg_line_length": 51.34177215189873,
"alnum_prop": 0.6433678500986193,
"repo_name": "LarsSchy/SMAC-M",
"id": "fc5438cd8d013b79a768f331a5c5fc24b5b3b96c",
"size": "8132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chart-installation/generate_map_files/scripts/generate_map_config.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "266268"
},
{
"name": "Shell",
"bytes": "39116"
}
],
"symlink_target": ""
} |
"""Perl support for CodeIntel"""
import os
from os.path import (normpath, join, exists, splitext, basename, isdir,
normcase, dirname, islink, isabs)
import sys
import logging
import time
from glob import glob
import re
from pprint import pprint, pformat
import weakref
import process
from ciElementTree import Element, SubElement, tostring
import SilverCity
from SilverCity.Lexer import Lexer
from SilverCity import ScintillaConstants
from codeintel2.common import *
from codeintel2.citadel import (ImportHandler, CitadelBuffer,
CitadelEvaluator, CitadelLangIntel)
from codeintel2.citadel_common import ScanRequest
from codeintel2.indexer import PreloadLibRequest
from codeintel2.parseutil import urlencode_path
from codeintel2 import perlcile
from codeintel2.util import isident, isdigit, banner, indent, markup_text
from codeintel2.tree_perl import (PerlTreeEvaluator,
PerlPackageSubsTreeEvaluator,
PerlPackageMembersTreeEvaluator)
from codeintel2.langintel import (ParenStyleCalltipIntelMixin,
ProgLangTriggerIntelMixin)
if _xpcom_:
from xpcom.server import UnwrapObject
#---- globals
# Matches CRLF or a lone CR (not bare LF), for normalizing line endings.
line_end_re = re.compile("(?:\r\n|\r)")
lang = "Perl"
log = logging.getLogger("codeintel.perl")
# log.setLevel(logging.DEBUG)
#---- language support
class PerlLexer(Lexer):
    """SilverCity lexer configuration for Perl buffers."""
    lang = "Perl"
    def __init__(self):
        # The three attributes are independent; set up the keyword list
        # first, then the (empty) property set, then the Scintilla lexer.
        self._keyword_lists = [
            SilverCity.WordList(SilverCity.Keywords.perl_keywords),
        ]
        self._properties = SilverCity.PropertySet()
        self._lexer = SilverCity.find_lexer_module_by_id(
            ScintillaConstants.SCLEX_PERL)
# TODO: Merge handling of perl-complete-module-exports in with this one.
#       It only needs a boolean flag (on the trigger) indicating that
#       submodules should NOT be included.
class PerlImportsEvaluator(Evaluator):
    """Produce completions for Perl 'available-imports' triggers."""
    def __str__(self):
        return "Perl imports"
    def eval(self, mgr):
        try:
            prefix = self.trg.extra["prefix"]
            if prefix:
                self.ctlr.set_desc("subimports of '%s'" % prefix)
                prefix_tuple = tuple(prefix.split("::"))
            else:
                self.ctlr.set_desc("available imports")
                prefix_tuple = ()
            # Reminder: a codeintel "blob" corresponds to a Perl module.
            found = set()
            for lib in self.buf.libs:
                found.update(lib.get_blob_imports(prefix_tuple))
            if found:
                completions = [
                    ("directory" if is_dir_import else "module", name)
                    for name, is_dir_import in found
                ]
                completions.sort(key=lambda cpln: cpln[1].upper())
                self.ctlr.set_cplns(completions)
        finally:
            self.ctlr.done("success")
class PerlLangIntel(CitadelLangIntel,
ParenStyleCalltipIntelMixin,
ProgLangTriggerIntelMixin):
lang = "Perl"
# Add '=' to the default set for Perl. For example:
# my $foo =
# ^ ^
# | `-- terminate calltip here
# `-- calltip triggers here
# Because Perl doesn't have keywords args to functions this can work.
calltip_region_terminators = tuple(']});=')
preceding_trg_terminators = {';': None, '=': None}
# XXX This cog regen is out-of-date. Re-write to parse perl.cix?
# To regenerate this block:
# - install the cog Python tool:
# http://www.nedbatchelder.com/code/cog/index.html
# - run "cog -r lang_perl.py"
#[[[cog
# import cog
# import os, sys
# sys.path.insert(0, os.path.join(os.pardir, "codeintel"))
# import cidb
# dbpath = cidb.find_komodo_cidb_path()
# sql = """SELECT symbol.name FROM file,scan,module,symbol
# WHERE file.compare_path LIKE '%perl.cix'
# AND scan.file_id=file.id AND module.scan_id=scan.id
# AND symbol.module_id=module.id AND symbol.type=0"""
# cog.outl('_allow_trg_on_space_from_identifier = {')
# for line in cidb.query(dbpath, 3, sql, "csv"):
# cog.outl(' "%s": 1,' % line.strip())
# cog.outl('}')
#]]]
_allow_trg_on_space_from_identifier = {
"-r": 1,
"-w": 1,
"-x": 1,
"-o": 1,
"-R": 1,
"-W": 1,
"-X": 1,
"-O": 1,
"-e": 1,
"-z": 1,
"-s": 1,
"-f": 1,
"-d": 1,
"-l": 1,
"-p": 1,
"-S": 1,
"-b": 1,
"-c": 1,
"-t": 1,
"-u": 1,
"-g": 1,
"-k": 1,
"-T": 1,
"-B": 1,
"-M": 1,
"-A": 1,
"-C": 1,
"UNITCHECK": 1,
"abs": 1,
"accept": 1,
"alarm": 1,
"atan2": 1,
"bind": 1,
"binmode": 1,
"bless": 1,
"break": 1,
"caller": 1,
"chdir": 1,
"chmod": 1,
"chomp": 1,
"chop": 1,
"chown": 1,
"chr": 1,
"chroot": 1,
"close": 1,
"closedir": 1,
"connect": 1,
"continue": 1,
"cos": 1,
"crypt": 1,
"dbmclose": 1,
"dbmopen": 1,
"default": 1,
"defined": 1,
"delete": 1,
"die": 1,
"do": 1,
"dump": 1,
"each": 1,
"eof": 1,
"eval": 1,
"exec": 1,
"exists": 1,
"exit": 1,
"exp": 1,
"fcntl": 1,
"fileno": 1,
"flock": 1,
"fork": 1,
"format": 1,
"formline": 1,
"getc": 1,
"getlogin": 1,
"getpeername": 1,
"getpgrp": 1,
"getppid": 1,
"getpriority": 1,
"getpwnam": 1,
"getgrnam": 1,
"gethostbyname": 1,
"getnetbyname": 1,
"getprotobyname": 1,
"getpwuid": 1,
"getgrgid": 1,
"getservbyname": 1,
"gethostbyaddr": 1,
"getnetbyaddr": 1,
"getprotobynumber": 1,
"getservbyport": 1,
"getpwent": 1,
"getgrent": 1,
"gethostent": 1,
"getnetent": 1,
"getprotoent": 1,
"getservent": 1,
"setpwent": 1,
"setgrent": 1,
"sethostent": 1,
"setnetent": 1,
"setprotoent": 1,
"setservent": 1,
"endpwent": 1,
"endgrent": 1,
"endhostent": 1,
"endnetent": 1,
"endprotoent": 1,
"endservent": 1,
"getsockname": 1,
"getsockopt": 1,
"given": 1,
"glob": 1,
"gmtime": 1,
"goto": 1,
"grep": 1,
"hex": 1,
"import": 1,
"index": 1,
"int": 1,
"ioctl": 1,
"join": 1,
"keys": 1,
"kill": 1,
"last": 1,
"lc": 1,
"lcfirst": 1,
"length": 1,
"link": 1,
"listen": 1,
"local": 1,
"localtime": 1,
"lock": 1,
"log": 1,
"lstat": 1,
"m": 1,
"map": 1,
"mkdir": 1,
"msgctl": 1,
"msgget": 1,
"msgrcv": 1,
"msgsnd": 1,
"my": 1,
"next": 1,
"no": 1,
"oct": 1,
"open": 1,
"opendir": 1,
"ord": 1,
"our": 1,
"pack": 1,
"package": 1,
"pipe": 1,
"pop": 1,
"pos": 1,
"print": 1,
"printf": 1,
"prototype": 1,
"push": 1,
"q": 1,
"qq": 1,
"qr": 1,
"qx": 1,
"qw": 1,
"quotemeta": 1,
"rand": 1,
"read": 1,
"readdir": 1,
"readline": 1,
"readlink": 1,
"readpipe": 1,
"recv": 1,
"redo": 1,
"ref": 1,
"rename": 1,
"reset": 1,
"return": 1,
"reverse": 1,
"rewinddir": 1,
"rindex": 1,
"rmdir": 1,
"s": 1,
"say": 1,
"scalar": 1,
"seek": 1,
"seekdir": 1,
"select": 1,
"semctl": 1,
"semget": 1,
"semop": 1,
"send": 1,
"setpgrp": 1,
"setpriority": 1,
"setsockopt": 1,
"shift": 1,
"shmctl": 1,
"shmget": 1,
"shmread": 1,
"shmwrite": 1,
"shutdown": 1,
"sin": 1,
"sleep": 1,
"socket": 1,
"socketpair": 1,
"sort": 1,
"splice": 1,
"split": 1,
"sprintf": 1,
"sqrt": 1,
"srand": 1,
"stat": 1,
"state": 1,
"study": 1,
"substr": 1,
"symlink": 1,
"syscall": 1,
"sysopen": 1,
"sysread": 1,
"sysseek": 1,
"system": 1,
"syswrite": 1,
"tell": 1,
"telldir": 1,
"tie": 1,
"tied": 1,
"time": 1,
"times": 1,
"tr": 1,
"truncate": 1,
"uc": 1,
"ucfirst": 1,
"umask": 1,
"undef": 1,
"unlink": 1,
"unpack": 1,
"untie": 1,
"unshift": 1,
"utime": 1,
"values": 1,
"vec": 1,
"wait": 1,
"waitpid": 1,
"wantarray": 1,
"warn": 1,
"when": 1,
"write": 1,
"y": 1,
}
#[[[end]]]
# Match a subroutine definition. Used by trg_from_pos()
_sub_pat = re.compile(r"\bsub\s+(\w+(::|'))*\w+$")
# All Perl trigger points occur at one of these characters:
# ' ' (space) only supported for built-in functions
# '(' (open paren)
# '>' (greater than) "->" actually
# ':' (colon) "::" actually
trg_chars = tuple(' (>:')
calltip_trg_chars = tuple(' (')
    def trg_from_pos(self, buf, pos, implicit=True):
        """
        Implemented triggers
            calltip-space-call-signature
            calltip-call-signature
            complete-package-members
            complete-*-subs meaning the actual trigger is one of:
                complete-package-subs
                complete-object-subs
            complete-available-imports
        Not yet implemented:
            complete-module-exports

        Returns a Trigger instance describing the trigger point at `pos`
        in `buf`, or None if this is not a trigger point. Dispatches on
        the character immediately before `pos` (one of self.trg_chars).
        """
        DEBUG = False  # not using 'logging' system, because want to be fast
        if DEBUG:
            print(banner("trg_from_pos(pos=%r, implicit=%r)"
                         % (pos, implicit)))
        accessor = buf.accessor
        last_pos = pos - 1
        last_ch = accessor.char_at_pos(last_pos)
        if DEBUG:
            print("  last_pos: %s" % last_pos)
            print("  last_ch: %r" % last_ch)
        # All Perl trigger points occur at one of the trg_chars.
        if last_ch not in self.trg_chars:
            if DEBUG:
                print("no: %r is not in %r" % (last_ch, self.trg_chars))
            return None
        # ':' and '>' only trigger as the second char of '::' and '->'.
        elif last_ch == ':' \
                and not (last_pos > 0
                         and accessor.char_at_pos(last_pos-1) == ':'):
            if DEBUG:
                penultimate_ch = (last_pos > 0
                                  and accessor.char_at_pos(last_pos-1) or '')
                print("no: %r is not '::'" % (penultimate_ch+last_ch))
            return None
        elif last_ch == '>' \
                and not (last_pos > 0 and accessor.char_at_pos(last_pos-1) == '-'):
            if DEBUG:
                penultimate_ch = (last_pos > 0
                                  and accessor.char_at_pos(last_pos-1) or '')
                print("no: %r is not '->'" % (penultimate_ch+last_ch))
            return None
        # We should never trigger in some styles (strings, comments, etc.).
        last_style = accessor.style_at_pos(last_pos)
        if DEBUG:
            last_style_names = buf.style_names_from_style_num(last_style)
            print("  style: %s %s" % (last_style, last_style_names))
        if (implicit and last_style in buf.implicit_completion_skip_styles
                or last_style in buf.completion_skip_styles):
            if DEBUG:
                print("no: completion is suppressed "\
                      "in style at %s: %s %s"\
                      % (last_pos, last_style, last_style_names))
            return None
        WHITESPACE = tuple(' \t\n\r')
        if last_ch == ' ':
            # This can be either "calltip-space-call-signature",
            # "complete-available-imports", or None (or
            # "complete-module-exports" when that is implemented).
            #
            # calltip-call-signature:
            #   Perl syntax allows a parameter list to be passed to a
            #   function name without enclosing parens. From a quick perusal
            #   of sample Perl code (from a default ActivePerl install)
            #   calling function this way seems to be limited to a number of
            #   core Perl built-ins or some library methods. For efficiency
            #   Komodo will maintain an explicit list of such function names
            #   for which a calltip with trigger without parentheses.
            #   XXX May want to make this a user-configurable list.
            #
            # complete-available-imports:
            #   After 'use', 'require' or 'no' by itself on a line.
            #
            LIMIT = 50
            text = accessor.text_range(max(
                0, last_pos-LIMIT), last_pos)  # working text
            if DEBUG:
                print("  working text: %r" % text)
            i = len(text)-1
            if i >= 0 and not (isident(text[i]) or isdigit(text[i])):
                if DEBUG:
                    print("no: two before trigger point is not "\
                          "an ident char: '%s'" % text[i])
                return None
            while i >= 0:  # parse out the preceding identifier
                if not isident(text[i]):
                    identifier = text[i+1:]
                    # Whitespace is allowed between a Perl variable special
                    # char and the variable name, e.g.: "$ myvar", "@ mylist"
                    j = i
                    while j >= 0 and text[j] in WHITESPACE:  # parse off whitespace
                        j -= 1
                    if j >= 0:
                        preceding_ch = text[j]
                    else:
                        preceding_ch = None
                    break
                i -= 1
            else:
                preceding_ch = None
                identifier = text
            if DEBUG:
                print("  identifier: %r" % identifier)
            if not identifier:
                if DEBUG:
                    print("no: no identifier preceding trigger point")
                return None
            if DEBUG:
                print("  preceding char: %r" % preceding_ch)
            if identifier in ("use", "require", "no"):
                return Trigger("Perl", TRG_FORM_CPLN,
                               "available-imports", pos, implicit, prefix="")
            if preceding_ch and preceding_ch in "$@&%\\*":  # indicating a Perl variable
                if DEBUG:
                    print("no: triggering on space after Perl "\
                          "variables not supported")
                return None
            if identifier not in self._allow_trg_on_space_from_identifier:
                if DEBUG:
                    print ("no: identifier not in set for which "
                           "space-triggering is supported "
                           "(_allow_trg_on_space_from_identifier)")
                return None
            # Specifically disallow trigger on defining a sub matching one of
            # space-trigger names, i.e.: 'sub split <|>'. Optmization: Assume
            # that there is exacly one space between 'sub' and the subroutine
            # name. Almost all examples in the Perl lib seem to follow this.
            if i >= 3 and text[i-3:i+1] == "sub ":
                if DEBUG:
                    print("no: do not trigger in sub definition")
                return None
            if DEBUG:
                print("calltip-space-call-signature")
            return Trigger("Perl", TRG_FORM_CALLTIP,
                           "space-call-signature", pos, implicit)
        elif last_ch == '(':
            # This can be either "calltip-call-signature" or None (or
            # "complete-module-exports" when that is implemented).
            LIMIT = 100
            text = accessor.text_range(max(
                0, last_pos-LIMIT), last_pos)  # working text
            if DEBUG:
                print("  working text: %r" % text)
            i = len(text)-1
            while i >= 0 and text[i] in WHITESPACE:  # parse off whitespace
                i -= 1
            if i >= 0 and not (isident(text[i]) or isdigit(text[i])):
                if DEBUG:
                    print("no: first non-ws char before "\
                          "trigger point is not an ident char: '%s'" % text[i])
                return None
            end = i+1
            while i >= 0:  # parse out the preceding identifier
                if not isident(text[i]):
                    identifier = text[i+1:end]
                    # Whitespace is allowed between a Perl variable special
                    # char and the variable name, e.g.: "$ myvar", "@ mylist"
                    j = i
                    while j >= 0 and text[j] in WHITESPACE:  # parse off whitespace
                        j -= 1
                    if j >= 0:
                        preceding_ch = text[j]
                    else:
                        preceding_ch = None
                    break
                i -= 1
            else:
                preceding_ch = None
                identifier = text[:end]
            if DEBUG:
                print("  identifier: %r" % identifier)
            if DEBUG:
                assert ' ' not in identifier, "parse error: space in identifier: %r" % identifier
            if not identifier:
                if DEBUG:
                    print("no: no identifier preceding trigger point")
                return None
            if DEBUG:
                print("  preceding char: %r" % preceding_ch)
            if preceding_ch and preceding_ch in "$@%\\*":
                # '&foo(' *is* a trigger point, but the others -- '$foo(',
                # '&$foo(', etc. -- are not because current CodeIntel wouldn't
                # practically be able to determine the method to which $foo
                # refers.
                if DEBUG:
                    print("no: calltip trigger on Perl var not supported")
                return None
            if identifier in ("if", "else", "elsif", "while", "for",
                              "sub", "unless", "my", "our"):
                if DEBUG:
                    print("no: no trigger on anonymous sub or control structure")
                return None
            # Now we want to rule out the subroutine definition lines, e.g.:
            #    sub FOO(<|>
            #    sub FOO::BAR(<|>
            #    sub FOO'BAR(<|>
            #    sub FOO::BAR::BAZ(<|>
            # Note: Frankly 80/20 rules out the last three.
            line = text[:end].splitlines(0)[-1]
            if DEBUG:
                print("  trigger line: %r" % line)
            if "sub " in line:  # only use regex if "sub " on that line
                if DEBUG:
                    print("  *could* be a subroutine definition")
                if self._sub_pat.search(line):
                    if DEBUG:
                        print("no: no trigger on Perl sub definition")
                    return None
            if DEBUG:
                print("calltip-call-signature")
            return Trigger("Perl", TRG_FORM_CALLTIP, "call-signature",
                           pos, implicit)
        elif last_ch == '>':
            # Must be "complete-package-subs", "complete-object-subs"
            # or None. Note that we have already checked (above) that the
            # trigger string is '->'. Basically, as long as the first
            # non-whitespace char preceding the '->' is an identifier char,
            # then this is a trigger point.
            LIMIT = 50
            text = accessor.text_range(max(
                0, last_pos-1-LIMIT), last_pos-1)  # working text
            if DEBUG:
                print("  working text: %r" % text)
            i = len(text)-1
            while i >= 0 and text[i] in WHITESPACE:  # parse off whitespace
                i -= 1
            if i < 0:
                if DEBUG:
                    print("no: no non-whitespace text preceding '->'")
                return None
            elif not (isident(text[i]) or text[i].isdigit()):
                if DEBUG:
                    print("no: first non-ws char before "\
                          "trigger point is not an ident char: '%s'" % text[i])
                return None
            # At this point we know it is either "complete-package-subs"
            # or "complete-object-subs". We don't really care to take
            # the time to distinguish now -- trg_from_pos is supposed to be
            # quick -- and we don't have to.
            if DEBUG:
                print("complete-*-subs")
            return Trigger("Perl", TRG_FORM_CPLN, "*-subs", pos, implicit,
                           length=2)
        elif last_ch == ':':
            # Must be "complete-package-members" or
            # "complete-available-imports" or None. Note that we have
            # already checked (above) that the trigger string is '::'.
            # Basically, as long as the first char preceding the '::' is
            # an identifier char or one of Perl's funny variable
            # identifying characters, then this is a trigger point.
            LIMIT = 50
            text = accessor.text_range(max(
                0, last_pos-1-LIMIT), last_pos-1)  # working text
            if DEBUG:
                print("  working text: %r" % text)
            i = len(text)-1
            if i < 0:
                if DEBUG:
                    print("no: no text preceding '::'")
                return None
            ch = text[i]
            if not (isident(ch) or isdigit(ch) or ch == '$'):
                # Technically should allow '@', '%' and '&' in there, but
                # there a total of 5 of all of this in the Perl std lib.
                # 80/20 rule.
                if DEBUG:
                    print("no: first char before trigger "\
                          "point is not an ident char or '$': '%s'" % ch)
                return None
            # Check if this is in a 'use' or 'require' statement.
            while i > 0 and text[i-1] not in WHITESPACE:  # skip to whitespace
                i -= 1
            prefix = text[i:pos-2]
            while i > 0 and text[i-1] in WHITESPACE:  # skip over whitespace
                i -= 1
            start_idx = end_idx = i
            while start_idx > 0 and (isident(text[start_idx-1])
                                     or text[start_idx-1] in '$@%'):
                start_idx -= 1
            ident = text[start_idx:end_idx]
            if ident in ("use", "require", "no"):
                if DEBUG:
                    print("complete-available-imports (prefix=%r)" % prefix)
                return Trigger("Perl", TRG_FORM_CPLN, "available-imports",
                               pos, implicit, length=2, prefix=prefix)
            if DEBUG:
                print("complete-package-members (prefix=%r)" % prefix)
            return Trigger("Perl", TRG_FORM_CPLN, "package-members", pos,
                           implicit, length=2, prefix=prefix)
        return None
_perl_var_pat = re.compile(
r"((?P<prefix>[$@%\\*&]+)\s*)?(?P<scope>(::)?\b((?!\d)\w*?(::|'))*)(?P<name>(?!\d)\w+)$")
def citdl_expr_and_prefix_filter_from_trg(self, buf, trg):
    """Parse out the Perl expression at the given trigger and return
    a CITDL expression for it (and possibly a variable prefix
    filter).

    Returns a 2-tuple:
        (<CITDL-expression>, <variable-prefix-filter>)

    For all triggers except TRG_FORM_DEFN, we parse out the Perl
    expression preceding the trigger position, simplify the
    expression (by removing whitespace, etc.) and translate that to
    an appropriate CITDL (*) expression. Set to None if there is no
    appropriate such expression. For TRG_FORM_DEFN triggers we first
    move forward to the end of the current word.

    As well, a variable prefix filter may be returned, useful for
    post-processing of completions. For example:

        Perl code       CITDL expression    prefix filter
        ---------       ----------------    -------------
        Foo::Bar<|>::   Foo::Bar            None
        $Foo::Bar<|>::  Foo::Bar            $

    Optimization Notes:
    - We can throw out Perl expressions with function calls
      because CodeIntel does not currently handle return values.
    - We can throw out Perl exprs that span an EOL: 80/20 rule. (We
      currently don't throw these away, though.)
    - Abort at hash and list indexing: the types of elements in these
      objects are not tracked by CodeIntel.
    - Punt on Perl references, e.g. \$foo, \@bar. XXX I wonder if I can
      just ignore references and assume the user is doing the right
      thing. I.e. I just presume that a reference is dereferenced
      properly when required. Dunno.
    - Currently we don't really make use of the styling info because we
      abort at indexing, function call arguments, etc. where recognizing
      string/number/regex boundaries would be useful. This info might be
      useful later if this algorithm is beefed up.
    - Ignore ampersand, e.g. &foo. This is just an old way to call perl
      functions - bug 87870, we can just ignore it for codeintel.

    Examples:
        GIVEN                       LEADING EXPR            CITDL EXPR
        -----                       ------------            ----------
        split <|>                   split                   split
        chmod(<|>                   chmod                   chmod
        $Foo::bar(<|>               $Foo::bar               Foo.$bar
        &$make_coffee(<|>           &$make_coffee           $make_coffee
        Win32::OLE-><|>             Win32::OLE              Win32::OLE
        Win32::OLE->GetObject(<|>   Win32::OLE->GetObject   Win32::OLE.GetObject
        split join <|>              join                    join
        foo->bar(<|>                foo->bar                foo.bar

    Note that the trigger character is sometimes necessary to resolve
    ambiguity. Given "Foo::Bar" without the trailing trigger char, we
    cannot know if the CITDL should be "Foo.Bar" or "Foo::Bar":

        GIVEN           CITDL EXPR
        -----           ----------
        Foo::Bar::<|>   Foo::Bar
        $Foo::Bar::<|>  Foo::Bar
        Foo::Bar-><|>   Foo::Bar
        Foo::Bar(<|>    Foo.Bar
        Foo::Bar <|>    Foo.Bar
        $Foo::Bar-><|>  Foo.$Bar
        $Foo::Bar(<|>   Foo.$Bar
        $Foo::Bar <|>   Foo.$Bar

    * http://specs.tl.activestate.com/kd/kd-0100.html#citdl
    """
    DEBUG = False
    if DEBUG:
        print()
        print(banner("citdl_expr_and_prefix_filter_from_trg @ %d"
                     % trg.pos))
        print(markup_text(buf.accessor.text, trg_pos=trg.pos))
        print(banner(None, '-'))
    # Implicit triggers must not fire inside styles (strings, comments,
    # ...) that the buffer says to skip.
    if trg.implicit:
        skip_styles = buf.implicit_completion_skip_styles
    else:
        skip_styles = {}
    filter, citdl = None, []
    accessor = buf.accessor
    LIMIT = max(0, trg.pos-100)  # working text area
    if trg.form == TRG_FORM_DEFN:
        # "Go to Definition" triggers can be in the middle of an
        # expression. If so we want to move forward to the end of
        # the current *part*. E.g., given:
        #       $fo<+>o->bar()
        # move forward to:
        #       $foo<|>->bar()
        # and NOT to:
        #       $foo->bar<|>()
        #
        # Perl package names are considered one "part":
        #       $Fo<+>o::Bar->blah()    $Foo::Bar<|>->blah()
        #
        # Note: I suspect there are some problems with the
        # subsequent parsing on when/if to convert "Foo::Bar" to
        # "Foo.Bar" since codeintel2 changed Perl cpln eval.
        p = trg.pos
        length = accessor.length()
        while p < length:
            if not _is_perl_var_char(accessor.char_at_pos(p)):
                break
            p += 1
        # Gracefully handle some situations where we are positioned
        # after a trigger string. E.g. "Foo::Bar::<|> "
        if p >= 2 and accessor.text_range(p-2, p) in ("->", "::"):
            p = p - 2
        if DEBUG:
            print("'defn'-trigger: adjust position %d" % (p-trg.pos))
    else:
        p = trg.pos - trg.length
    p -= 1
    # Walk backwards through "var (-> var)*" chains, accumulating CITDL
    # parts in `citdl` (front-inserted so the list reads left-to-right).
    while p >= LIMIT:
        # Parse off a perl variable/identifier.
        if DEBUG:
            print("look for Perl var at end of %r"
                  % accessor.text_range(LIMIT, p+1))
        match = self._perl_var_pat.search(
            accessor.text_range(LIMIT, p+1))
        if not match:
            if DEBUG:
                if p-LIMIT > 20:
                    segment = '...'+accessor.text_range(p-20, p+1)
                else:
                    segment = accessor.text_range(LIMIT, p+1)
                print("could not match a Perl var off %r" % segment)
            citdl = None
            break
        prefix = match.group("prefix") or ""
        if "&" in prefix:
            # Old-style sub call (&foo): drop the ampersand (bug 87870).
            prefix = prefix.replace("&", "")
        scope = match.group("scope")
        name = match.group("name")
        trg_ch = None
        try:
            # TODO:PERF: Use the available faster accessor methods here.
            trg_ch = accessor.char_at_pos(p+1)
        except IndexError as ex:
            if trg.form != TRG_FORM_DEFN:
                log.warn("text does not include trailing trigger "
                         "char to resolve possible ambiguities in '%s'",
                         match.group(0))
        if trg_ch == ':':
            # XXX fix off-by-one here
            #   Foo::Bar<|>::       Foo::Bar
            #   $Foo::Bar<|>::      Foo::Bar
            citdl.insert(0, scope+name)  # intentionally drop prefix
            # The prefix string is relevant for filtering the list of
            # members for AutoComplete. E.g. if the prefix char is '&' then
            # only subs should be shown. If '%', then only hashes.
            filter = prefix
        elif trg_ch == '-' and not prefix:
            # XXX fix off-by-one here
            #   Foo::Bar<|>->       Foo::Bar
            citdl.insert(0, scope+name)
        else:
            # XXX fix off-by-one here
            #   Foo::Bar<|>(        Foo.Bar
            #   Foo::Bar<|>         Foo.Bar   # trigger char is a space here
            #   $Foo::Bar<|>->      Foo.$Bar
            #   $Foo::Bar<|>(       Foo.$Bar
            #   $Foo::Bar<|>        Foo.$Bar  # trigger char is a space here
            citdl.insert(0, prefix+name)
            if scope:
                scope = scope[:-2]  # drop trailing '::'
                if scope:
                    citdl.insert(0, scope)
        p -= len(match.group(0))
        if DEBUG:
            print("parse out Perl var: %r (prefix=%r, scope=%r, "
                  "name=%r): %r" % (match.group(0), prefix, scope,
                                    name, citdl))
        # Preceding characters will determine if we stop or continue.
        WHITESPACE = tuple(" \t\n\r\v\f")
        while p >= LIMIT and accessor.char_at_pos(p) in WHITESPACE:
            # if DEBUG: print "drop whitespace: %r" % text[p]
            p -= 1
        if p >= LIMIT and accessor.style_at_pos(p) in skip_styles:
            if DEBUG:
                style = accessor.style_at_pos(p)
                style_names = buf.style_names_from_style_num(style)
                print("stop at style to ignore: %r (%s %s)"
                      % (accessor.char_at_pos(p), style, style_names))
            break
        elif p >= LIMIT+1 and accessor.text_range(p-1, p+1) == '->':
            # Arrow deref: keep walking left for the invocant.
            if DEBUG:
                print("parse out '->'")
            p -= 2
            while p >= LIMIT and accessor.char_at_pos(p) in WHITESPACE:
                # if DEBUG: print "drop whitespace: %r" % text[p]
                p -= 1
            continue
        else:
            break
    if citdl:
        retval = ('.'.join(citdl), filter)
    else:
        retval = (None, filter)
    if DEBUG:
        print("returning: %r" % (retval,))
        banner("done")
    return retval
def async_eval_at_trg(self, buf, trg, ctlr):
    """Queue the appropriate evaluator for the given Perl trigger.

    buf  -- the buffer the trigger fired in
    trg  -- the Trigger to evaluate
    ctlr -- the eval controller to report start/error/done to
    """
    if _xpcom_:
        trg = UnwrapObject(trg)
        ctlr = UnwrapObject(ctlr)
    assert trg.lang == "Perl"
    ctlr.start(buf, trg)
    if trg.id == ("Perl", TRG_FORM_CPLN, "available-imports"):
        evalr = PerlImportsEvaluator(ctlr, buf, trg)
        buf.mgr.request_eval(evalr)
        return
    # Remaining triggers all use this parsed CITDL expr.
    # Extract the leading CITDL expression (and possible filter,
    # i.e. '$', '@', ...).
    try:
        citdl_expr, filter \
            = self.citdl_expr_and_prefix_filter_from_trg(buf, trg)
    except CodeIntelError as ex:
        ctlr.error(str(ex))
        ctlr.done("error")
        return
    # Perl's trg_from_pos doesn't distinguish btwn "package-subs"
    # and "object-subs" trigger type -- calling them both "*-subs".
    # Let's do so here.
    if trg.type == "*-subs":
        assert citdl_expr
        if isident(citdl_expr[0]):
            trg.type = "package-subs"
        else:
            trg.type = "object-subs"
    if trg.id == ("Perl", TRG_FORM_CPLN, "package-members"):
        # [prefix]SomePackage::<|>
        # Note: This trigger has the "prefix" extra attr which could
        # be used instead of the leading CITDL expr parse.
        line = buf.accessor.line_from_pos(trg.pos)
        evalr = PerlPackageMembersTreeEvaluator(ctlr, buf, trg, citdl_expr,
                                                line, filter)
        buf.mgr.request_eval(evalr)
    elif trg.id == ("Perl", TRG_FORM_CPLN, "package-subs"):
        # SomePackage-><|>
        assert not filter, "shouldn't be Perl filter prefix for " \
                           "'complete-package-subs': %r" % filter
        line = buf.accessor.line_from_pos(trg.pos)
        evalr = PerlPackageSubsTreeEvaluator(
            ctlr, buf, trg, citdl_expr, line)
        buf.mgr.request_eval(evalr)
    # TODO: Might want to handle TRG_FORM_DEFN differently.
    else:
        if citdl_expr is None:
            ctlr.info("no CITDL expression found for %s" % trg)
            ctlr.done("no trigger")
            return
        line = buf.accessor.line_from_pos(trg.pos)
        if trg.id[1] == TRG_FORM_DEFN and citdl_expr[0] == '$':
            # For defn on a scalar, peek past the variable: indexing
            # with '[' or '{' means the underlying @array or %hash.
            current_pos = trg.pos
            lim = buf.accessor.length()
            while buf.accessor.style_at_pos(current_pos) == ScintillaConstants.SCE_PL_SCALAR and current_pos < lim:
                current_pos += 1
            c = buf.accessor.char_at_pos(current_pos)
            if c == '[':
                citdl_expr = '@' + citdl_expr[1:]
            elif c == '{':
                citdl_expr = '%' + citdl_expr[1:]
        evalr = PerlTreeEvaluator(ctlr, buf, trg, citdl_expr,
                                  line, filter)
        buf.mgr.request_eval(evalr)
def libs_from_buf(self, buf):
    """Return the ordered list of completion libs for *buf*.

    A buffer's libs depend on its env and on the buffer itself, so the
    result is cached on the env, keyed (weakly) by the buffer.
    """
    env = buf.env
    if "perl-buf-libs" not in env.cache:
        env.cache["perl-buf-libs"] = weakref.WeakKeyDictionary()
    per_buf = env.cache["perl-buf-libs"]  # <buf-weak-ref> -> <libs>
    try:
        return per_buf[buf]
    except KeyError:
        pass
    # - curdirlib: use this buffer's own directory as a first
    #   approximation (not always right, but a good default).
    cwd = dirname(buf.path)
    if cwd == "<Unsaved>":
        libs = []
    else:
        libs = [self.mgr.db.get_lang_lib("Perl", "curdirlib", [cwd])]
    libs.extend(self._buf_indep_libs_from_env(env))
    per_buf[buf] = libs
    return libs
def perl_info_from_env(self, env):
    """Return (perl_ver, config_dirs, import_path) for env's Perl.

    The result is cached on the env; a (None, None, None) tuple is
    cached when no Perl interpreter could be found.
    """
    cache_key = self.lang + "-info"
    cached = env.cache.get(cache_key)
    if cached is not None:
        return cached
    interpreter = self._perl_from_env(env)
    if interpreter:
        cached = self._perl_info_from_perl(interpreter, env)
    else:
        log.warn("no Perl interpreter was found from which to determine the "
                 "codeintel information")
        cached = None, None, None
    env.cache[cache_key] = cached
    return cached
def _perl_from_env(self, env):
    """Find a "perl" executable on env's PATH; return its path or None."""
    import which
    search_dirs = [d.strip()
                   for d in env.get_envvar("PATH", "").split(os.pathsep)
                   if d.strip()]
    try:
        return which.which("perl", path=search_dirs)
    except which.WhichError:
        # No perl anywhere on the configured PATH.
        return None
def _perl_info_from_perl(self, perl, env):
    """Call the given Perl and return:
        (<version>, <config_dirs>, <import_path>)
    where <config_dirs> is a dict with (relevant) dirs from
    Config.pm.
    """
    import process
    # One-liner that prints the interesting Config.pm values, one per
    # line, followed by @INC (one dir per line).
    info_cmd = (r'use Config;'
                r'print "version:$Config{version}\n";'
                r'print "siteprefix:$Config{siteprefix}\n";'
                r'print "archlib:$Config{archlib}\n";'
                r'print "privlib:$Config{privlib}\n";'
                r'print "vendorarch:$Config{vendorarch}\n";'
                r'print "vendorlib:$Config{vendorlib}\n";'
                r'print join("\n", @INC);')
    argv = [perl, "-e", info_cmd]
    log.debug("run `%s -e ...'", perl)
    p = process.ProcessOpen(argv, env=env.get_all_envvars(), stdin=None)
    stdout, stderr = p.communicate()
    stdout_lines = stdout.splitlines(0)
    retval = p.returncode
    if retval:
        log.warn("failed to determine Perl info:\n"
                 "  path: %s\n"
                 "  retval: %s\n"
                 "  stdout:\n%s\n"
                 "  stderr:\n%s\n",
                 perl, retval, indent('\n'.join(stdout_lines)),
                 indent(stderr))
    # NOTE(review): even on a non-zero retval the code below parses
    # stdout; if the interpreter produced no output this raises
    # IndexError -- confirm whether callers rely on that propagating.
    perl_ver = stdout_lines[0].split(':', 1)[1]
    config_dirs = dict(
        siteprefix=stdout_lines[1].split(':', 1)[1],
        archlib=stdout_lines[2].split(':', 1)[1],
        privlib=stdout_lines[3].split(':', 1)[1],
        vendorarch=stdout_lines[4].split(':', 1)[1],
        vendorlib=stdout_lines[5].split(':', 1)[1],
    )
    import_path = stdout_lines[6:]
    return perl_ver, config_dirs, import_path
#def _extra_dirs_from_env(self, env):
# extra_dirs = set()
# for pref in env.get_all_prefs("perlExtraPaths"):
# if not pref:
# continue
# extra_dirs.update(d.strip() for d in pref.split(os.pathsep)
# if exists(d.strip()))
# return tuple(extra_dirs)
def _buf_indep_libs_from_env(self, env):
    """Create the buffer-independent list of libs."""
    cache_key = "perl-libs"
    if cache_key not in env.cache:
        # First request on this env: watch the prefs that affect the
        # result so the cache can be invalidated.
        env.add_pref_observer("perl", self._invalidate_cache)
        env.add_pref_observer("perlExtraPaths",
                              self._invalidate_cache_and_rescan_extra_dirs)
        env.add_pref_observer("codeintel_selected_catalogs",
                              self._invalidate_cache)
        db = self.mgr.db
        # Gather information about the current perl.
        perl = None
        if env.has_pref("perl"):
            perl = env.get_pref("perl").strip() or None
        if not perl or not exists(perl):
            # No (usable) configured perl: fall back to PATH lookup.
            perl = self._perl_from_env(env)
        if not perl:
            log.warn("no Perl was found from which to determine the "
                     "import path")
            perl_ver, config_dirs, import_path = None, {}, []
        else:
            perl_ver, config_dirs, import_path \
                = self._perl_info_from_perl(perl, env)
        libs = []
        # - extradirslib
        extra_dirs = self._extra_dirs_from_env(env)
        if extra_dirs:
            log.debug("Perl extra lib dirs: %r", extra_dirs)
            libs.append(db.get_lang_lib("Perl", "extradirslib",
                                        extra_dirs))
        # Figuring out where the lib and sitelib dirs are is hard --
        # or at least complex from my P.O.V.
        # - For ActivePerl (on Linux, at least):
        #   $ perl -e 'print join "\n", @INC'
        #   /home/trentm/opt/ActivePerl-5.8.8.818/site/lib
        #                               (sitearch, sitelib, siteprefix)
        #   /home/trentm/opt/ActivePerl-5.8.8.818/lib
        #                               (archlib, privlib)
        #   .                           (???, we'll handle with curdirlib)
        # - For /usr/bin/perl on skink (ubuntu 6):
        #   $ /usr/bin/perl -e 'print join "\n", @INC'
        #   /etc/perl                   (???)
        #   /usr/local/lib/perl/5.8.7   (sitearch, siteprefix)
        #   /usr/local/share/perl/5.8.7 (sitelib, siteprefix)
        #   /usr/lib/perl5              (vendorarch)
        #   /usr/share/perl5            (vendorlib)
        #   /usr/lib/perl/5.8           (archlib)
        #   /usr/share/perl/5.8         (privlib)
        #   /usr/local/lib/site_perl    (???, siteprefix)
        paths_from_libname = {"sitelib": [], "envlib": [], "stdlib": []}
        for dir in import_path:
            dir = normpath(dir)
            if dir == ".":  # -> curdirlib (handled separately)
                continue
            if islink(dir):
                # Note: this doesn't handle multiple levels of
                # links.
                link_value = os.readlink(dir)
                if isabs(link_value):
                    dir = link_value
                else:
                    dir = normpath(join(dirname(dir), link_value))
            if not isdir(dir):
                log.debug("perl @INC value '%s' is not a dir: dropping it",
                          dir)
                continue
            # Classify each @INC dir by which Config.pm dir it lives in.
            for config_dir_name in ("archlib", "privlib",
                                    "vendorarch", "vendorlib"):
                if config_dirs[config_dir_name] \
                   and dir.startswith(config_dirs[config_dir_name]):
                    paths_from_libname["stdlib"].append(dir)
                    break
            else:
                if config_dirs["siteprefix"] \
                   and dir.startswith(config_dirs["siteprefix"]):
                    paths_from_libname["sitelib"].append(dir)
                else:
                    paths_from_libname["envlib"].append(dir)
        log.debug("Perl %s paths for each lib:\n%s",
                  perl_ver, indent(pformat(paths_from_libname)))
        # - envlib, sitelib, cataloglib, stdlib
        if paths_from_libname["envlib"]:
            libs.append(db.get_lang_lib("Perl", "envlib",
                                        paths_from_libname["envlib"]))
        if paths_from_libname["sitelib"]:
            libs.append(db.get_lang_lib("Perl", "sitelib",
                                        paths_from_libname["sitelib"]))
        catalog_selections = env.get_pref("codeintel_selected_catalogs")
        libs += [
            db.get_catalog_lib("Perl", catalog_selections),
            db.get_stdlib("Perl", perl_ver)
        ]
        env.cache[cache_key] = libs
    return env.cache[cache_key]
def _invalidate_cache(self, env, pref_name):
for key in ("perl-buf-libs", "perl-libs"):
if key in env.cache:
log.debug("invalidate '%s' cache on %r", key, env)
del env.cache[key]
def _invalidate_cache_and_rescan_extra_dirs(self, env, pref_name):
self._invalidate_cache(env, pref_name)
extra_dirs = self._extra_dirs_from_env(env)
if extra_dirs:
extradirslib = self.mgr.db.get_lang_lib(
"Perl", "extradirslib", extra_dirs)
request = PreloadLibRequest(extradirslib)
self.mgr.idxr.stage_request(request, 1.0)
#---- code browser integration
cb_import_group_title = "Uses and Requires"

def cb_import_data_from_elem(self, elem):
    """Return code-browser display data for an import element.

    elem -- an element exposing .get(); expected attributes are
            "module" and (optionally) "symbol".
    Returns a dict: {"name": <display name>, "detail": <perl stmt>}.
    """
    # Note: the "alias" attribute is not used for Perl display data
    # (the unused fetch was removed).
    symbol = elem.get("symbol")
    module = elem.get("module")
    if symbol:
        if symbol == "*":
            name = module
            detail = "use %s" % module
        elif symbol == "**":
            name = module
            detail = "use %s qw(:<tag>)" % module
        else:
            name = "::".join([module, symbol])
            detail = "use %s qw(%s)" % (module, symbol)
    else:
        name = module
        # This is either "use Foo ();" or "require Foo;". A search of
        # the Perl 5.8 site lib shows that the latter is about 6 times
        # more likely -- let's use that.
        detail = "require %s" % module
    return {"name": name, "detail": detail}
class PerlBuffer(CitadelBuffer):
    """A CitadelBuffer specialized for Perl source."""
    lang = "Perl"
    sce_prefixes = ["SCE_PL_"]
    # Show this buffer in the code browser even when it has no symbols.
    cb_show_if_empty = True

    # 'cpln_fillup_chars' exclusions for Perl:
    # - cannot be '-' for "complete-*-subs" because:
    #       attributes::->import(__PACKAGE__, \$x, 'Bent');
    # - cannot be '{' for "complete-object-subs" because:
    #       my $d = $self->{'escape'};
    # - shouldn't be ')' because:
    #       $dumper->dumpValue(\*::);
    # - shouldn't be ':' (bug 65292)
    cpln_fillup_chars = "~`!@#$%^&*(=+}[]|\\;'\",.<>?/ "
    cpln_stop_chars = "-~`!@#$%^&*()=+{}[]|\\;:'\",.<>?/ "

    def __init__(self, *args, **kwargs):
        CitadelBuffer.__init__(self, *args, **kwargs)
        # Some Perl styles in addition to the usual comment and string styles
        # in which completion triggering should not happen.
        self.completion_skip_styles[ScintillaConstants.SCE_PL_REGEX] = True

    @property
    def libs(self):
        # Delegated to the langintel, which caches per buffer.
        return self.langintel.libs_from_buf(self)

    @property
    def stdlib(self):
        # The stdlib zone is always the last lib in the list.
        return self.libs[-1]
class PerlImportHandler(ImportHandler):
    """ImportHandler for Perl: knows how to find importable modules."""
    # Env var that can extend the Perl import path.
    PATH_ENV_VAR = "PERL5LIB"
    # Perl package-name separator.
    sep = "::"

    def _shellOutForPath(self, compiler):
        """Run the given perl and return its @INC as a list of dirs.

        Raises CodeIntelError if the interpreter fails. The cwd entry
        is dropped (handled separately via curdirlib).
        """
        import process
        # Unlikely-to-collide marker for joining @INC entries.
        sep = "--WomBa-woMbA--"
        argv = [compiler, "-e", "print join('%s', @INC);" % sep]
        # Scrub lib-path overrides so we see the bare interpreter path.
        env = dict(os.environ)
        if "PERL5LIB" in env:
            del env["PERL5LIB"]
        if "PERLLIB" in env:
            del env["PERLLIB"]
        p = process.ProcessOpen(argv, env=env, stdin=None)
        output, error = p.communicate()
        retval = p.returncode
        if retval:
            raise CodeIntelError("could not determine Perl import path: %s"
                                 % error)
        path = [normpath(d) for d in output.split(sep)]
        # cwd handled separately
        path = [p for p in path if p not in (os.curdir, os.getcwd())]
        return path

    def setCorePath(self, compiler=None, extra=None):
        """Set self.corePath from the given (or PATH-discovered) perl."""
        if compiler is None:
            import which
            compiler = which.which("perl")
        self.corePath = self._shellOutForPath(compiler)

    def _findScannableFiles(self, xxx_todo_changeme,
                            dirname, names):
        """Directory-walk callback collecting scannable Perl files.

        xxx_todo_changeme is the walk arg 4-tuple:
            (files, searchedDirs, skipTheseDirs, skipRareImports)
        `files` is extended in place; `names` is pruned in place to
        control the walk.
        """
        (files, searchedDirs,
         skipTheseDirs, skipRareImports) = xxx_todo_changeme
        # Paths are case-insensitive on Windows.
        if sys.platform.startswith("win"):
            cpath = dirname.lower()
        else:
            cpath = dirname
        if cpath in searchedDirs:
            # Already searched here: prune the whole subtree.
            while names:
                del names[0]
            return
        else:
            searchedDirs[cpath] = 1
        if skipRareImports:
            # Skip .pl files when scanning a Perl lib/sitelib.
            scannableExts = (".pm",)
        else:
            scannableExts = (".pl", ".pm")
        for i in range(len(names)-1, -1, -1):  # backward so can del from list
            path = join(dirname, names[i])
            if isdir(path):
                if normcase(path) in skipTheseDirs:
                    del names[i]
                elif skipRareImports and not ('A' <= names[i][0] <= 'Z'):
                    # Perl good practice dictates that all module directories
                    # begin with a capital letter. Therefore, we skip dirs
                    # that start with a lower case.
                    del names[i]
            elif splitext(names[i])[1] in scannableExts:
                # XXX The list of extensions should be settable on
                #     the ImportHandler and Komodo should set whatever is
                #     set in prefs.
                # XXX This check for files should probably include
                #     scripts, which might likely not have the
                #     extension: need to grow filetype-from-content smarts.
                files.append(path)

    def find_importables_in_dir(self, dir):
        """See citadel.py::ImportHandler.find_importables_in_dir() for
        details.

        Importables for Perl look like this:
            {"Shell": ("Shell.pm", None, False),
             "LWP": ("LWP.pm", None, True),
             "XML": (None, None, True)}

        Notes:
        - Drop the "auto" dir (it holds the binary module bits).
        - Keep non-capitalized dirs and modules (e.g. want "strict" in
          cplns for "use <|>").
        """
        from os.path import join, isdir, splitext
        if dir == "<Unsaved>":
            # TODO: stop these getting in here.
            return {}
        # TODO: log the fs-stat'ing a la codeintel.db logging.
        try:
            names = os.listdir(dir)
        except OSError as ex:
            # Unreadable/missing dir: nothing importable.
            return {}
        dirs, nondirs = set(), set()
        for name in names:
            if isdir(join(dir, name)):
                dirs.add(name)
            else:
                nondirs.add(name)
        importables = {}
        dirs.discard("auto")
        for name in nondirs:
            base, ext = splitext(name)
            if ext != ".pm":
                continue
            if base in dirs:
                # Module with a same-named subdir: it has submodules.
                importables[base] = (name, None, True)
                dirs.remove(base)
            else:
                importables[base] = (name, None, False)
        for name in dirs:
            # Bare dir: a namespace with no .pm of its own.
            importables[name] = (None, None, True)
        return importables
class PerlCILEDriver(CILEDriver):
    """CILE driver delegating Perl scanning to perlcile."""
    lang = lang

    def scan_purelang(self, buf):
        """Scan a pure-Perl buffer; return its CIX element tree."""
        log.info("scan_purelang: path: %r lang: %s", buf.path, buf.lang)
        return perlcile.scan_purelang(buf)

    def scan_multilang(self, buf, csl_cile_driver=None):
        """Scan the given multilang (UDL-based) buffer and return a CIX
        element tree, and shuffle any CSL tokens to the CSL CileDriver.
        """
        log.info("scan_multilang: path: %r lang: %s", buf.path, buf.lang)
        tree = Element("codeintel", version="2.0")
        path = buf.path
        if sys.platform == "win32":
            # CIX paths always use forward slashes.
            path = path.replace('\\', '/')
        file_node = SubElement(tree, "file", lang=buf.lang, path=path)
        # module = SubElement(file_node, "scope", ilk="blob", lang="Perl",
        #                     name=basename(path))
        csl_tokens, has_perl_code = perlcile.scan_multilang(
            buf.accessor.gen_tokens(), file_node)
        # NOTE(review): getchildren() is deprecated/removed in newer
        # ElementTree; list(file_node)[0] is the modern spelling --
        # confirm the ElementTree in use before changing.
        blob_node = file_node.getchildren()[0]
        if not has_perl_code:
            assert len(blob_node) == 0
            # The CILE clients don't want to hear there's no perl code in the
            # buffer
            file_node.remove(blob_node)
        else:
            blob_node.set('name', basename(path))
        if csl_cile_driver and csl_tokens:
            csl_cile_driver.scan_csl_tokens(file_node, basename(buf.path),
                                            csl_tokens)
        return tree
#---- internal support stuff
def _is_perl_var_char(char):
return "a" <= char <= "z" or "A" <= char <= "Z" or "0" <= char <= "9" \
or char in "_:$%@"
#---- registration
def register(mgr):
    """Register language support with the Manager."""
    # Wire up the Perl lexer, buffer, langintel, import handler and
    # CILE driver under the module-level `lang` name.
    mgr.set_lang_info(lang,
                      silvercity_lexer=PerlLexer(),
                      buf_class=PerlBuffer,
                      langintel_class=PerlLangIntel,
                      import_handler_class=PerlImportHandler,
                      cile_driver_class=PerlCILEDriver,
                      is_cpln_lang=True)
| {
"content_hash": "b332a40c2705031c8cb87c828f3a80e7",
"timestamp": "",
"source": "github",
"line_count": 1415,
"max_line_length": 119,
"avg_line_length": 38.298233215547704,
"alnum_prop": 0.4814179214644228,
"repo_name": "prisis/sublime-text-packages",
"id": "e69a2b20e5a4779364dc4ee68161e7b05b21c4e3",
"size": "55898",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Packages/SublimeCodeIntel/libs/codeintel2/lang_perl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "318"
},
{
"name": "Batchfile",
"bytes": "786"
},
{
"name": "C++",
"bytes": "56562"
},
{
"name": "CSS",
"bytes": "18339"
},
{
"name": "HTML",
"bytes": "1757"
},
{
"name": "JavaScript",
"bytes": "206342"
},
{
"name": "PHP",
"bytes": "2193174"
},
{
"name": "Pascal",
"bytes": "7460"
},
{
"name": "PowerShell",
"bytes": "397"
},
{
"name": "Python",
"bytes": "19331281"
},
{
"name": "Shell",
"bytes": "1903"
},
{
"name": "Smarty",
"bytes": "4883"
},
{
"name": "SourcePawn",
"bytes": "4479"
},
{
"name": "Tcl",
"bytes": "88877"
}
],
"symlink_target": ""
} |
import unittest
from test import test_support
from itertools import *
import weakref
from decimal import Decimal
from fractions import Fraction
import sys
import operator
import random
import copy
import pickle
from functools import reduce
maxsize = test_support.MAX_Py_ssize_t
minsize = -maxsize-1
def onearg(x):
    """Test function of one argument: return its input doubled."""
    return x * 2
def errfunc(*args):
    """Test function that raises ValueError regardless of its arguments."""
    raise ValueError()
def gen3():
    """Non-restartable source sequence yielding 0, 1, 2."""
    yield 0
    yield 1
    yield 2
def isEven(x):
    """Test predicate: true when x is evenly divisible by two."""
    return not x % 2
def isOdd(x):
    """Test predicate: true when x leaves remainder 1 modulo two."""
    remainder = x % 2
    return remainder == 1
class StopNow:
    'Class emulating an empty iterable.'
    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol (would be __next__ on Python 3):
        # raise immediately so iteration yields nothing.
        raise StopIteration
def take(n, seq):
    'Convenience function for partially consuming a long or infinite iterable'
    return list(islice(seq, n))
def prod(iterable):
    """Return the product of all items; 1 for an empty iterable."""
    result = 1
    for factor in iterable:
        result = result * factor
    return result
def fact(n):
    """Factorial: n! == 1*2*...*n, with fact(0) == 1."""
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result
class TestBasicOps(unittest.TestCase):
def test_chain(self):
    # Check chain() against a pure-Python reference implementation.
    def chain2(*iterables):
        'Pure python version in the docs'
        for it in iterables:
            for element in it:
                yield element

    for c in (chain, chain2):
        self.assertEqual(list(c('abc', 'def')), list('abcdef'))
        self.assertEqual(list(c('abc')), list('abc'))
        self.assertEqual(list(c('')), [])
        self.assertEqual(take(4, c('abc', 'def')), list('abcd'))
        # Non-iterable arguments raise TypeError on consumption.
        self.assertRaises(TypeError, list, c(2, 3))
def test_chain_from_iterable(self):
    # chain.from_iterable flattens exactly one level of nesting.
    self.assertEqual(list(chain.from_iterable(['abc', 'def'])), list('abcdef'))
    self.assertEqual(list(chain.from_iterable(['abc'])), list('abc'))
    self.assertEqual(list(chain.from_iterable([''])), [])
    self.assertEqual(take(4, chain.from_iterable(['abc', 'def'])), list('abcd'))
    self.assertRaises(TypeError, list, chain.from_iterable([2, 3]))
def test_combinations(self):
    # Argument validation.
    self.assertRaises(TypeError, combinations, 'abc')        # missing r argument
    self.assertRaises(TypeError, combinations, 'abc', 2, 1)  # too many arguments
    self.assertRaises(TypeError, combinations, None)         # pool is not iterable
    self.assertRaises(ValueError, combinations, 'abc', -2)   # r is negative
    self.assertEqual(list(combinations('abc', 32)), [])      # r > n
    self.assertEqual(list(combinations(range(4), 3)),
                     [(0,1,2), (0,1,3), (0,2,3), (1,2,3)])

    # Three independent pure-Python reference implementations.
    def combinations1(iterable, r):
        'Pure python version shown in the docs'
        pool = tuple(iterable)
        n = len(pool)
        if r > n:
            return
        indices = range(r)
        yield tuple(pool[i] for i in indices)
        while 1:
            for i in reversed(range(r)):
                if indices[i] != i + n - r:
                    break
            else:
                return
            indices[i] += 1
            for j in range(i+1, r):
                indices[j] = indices[j-1] + 1
            yield tuple(pool[i] for i in indices)

    def combinations2(iterable, r):
        'Pure python version shown in the docs'
        pool = tuple(iterable)
        n = len(pool)
        for indices in permutations(range(n), r):
            if sorted(indices) == list(indices):
                yield tuple(pool[i] for i in indices)

    def combinations3(iterable, r):
        'Pure python version from cwr()'
        pool = tuple(iterable)
        n = len(pool)
        for indices in combinations_with_replacement(range(n), r):
            if len(set(indices)) == r:
                yield tuple(pool[i] for i in indices)

    # Exhaustive invariants over small pools.
    for n in range(7):
        values = [5*x-12 for x in range(n)]
        for r in range(n+2):
            result = list(combinations(values, r))
            self.assertEqual(len(result), 0 if r>n else fact(n) // fact(r) // fact(n-r))  # right number of combs
            self.assertEqual(len(result), len(set(result)))   # no repeats
            self.assertEqual(result, sorted(result))          # lexicographic order
            for c in result:
                self.assertEqual(len(c), r)                   # r-length combinations
                self.assertEqual(len(set(c)), r)              # no duplicate elements
                self.assertEqual(list(c), sorted(c))          # keep original ordering
                self.assertTrue(all(e in values for e in c))  # elements taken from input iterable
                self.assertEqual(list(c),
                                 [e for e in values if e in c])  # comb is a subsequence of the input iterable
            self.assertEqual(result, list(combinations1(values, r)))  # matches first pure python version
            self.assertEqual(result, list(combinations2(values, r)))  # matches second pure python version
            self.assertEqual(result, list(combinations3(values, r)))  # matches third pure python version
@test_support.bigaddrspacetest
def test_combinations_overflow(self):
    # A huge r must fail cleanly, not crash.
    with self.assertRaises((OverflowError, MemoryError)):
        combinations("AA", 2**29)
@test_support.impl_detail("tuple reuse is specific to CPython")
def test_combinations_tuple_reuse(self):
    # When consumed lazily, CPython reuses one result tuple; when the
    # results are kept (list()), distinct tuples must be returned.
    self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1)
    self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1)
def test_combinations_with_replacement(self):
    cwr = combinations_with_replacement
    # Argument validation.
    self.assertRaises(TypeError, cwr, 'abc')        # missing r argument
    self.assertRaises(TypeError, cwr, 'abc', 2, 1)  # too many arguments
    self.assertRaises(TypeError, cwr, None)         # pool is not iterable
    self.assertRaises(ValueError, cwr, 'abc', -2)   # r is negative
    self.assertEqual(list(cwr('ABC', 2)),
                     [('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])

    # Two pure-Python reference implementations.
    def cwr1(iterable, r):
        'Pure python version shown in the docs'
        # number items returned:  (n+r-1)! / r! / (n-1)! when n>0
        pool = tuple(iterable)
        n = len(pool)
        if not n and r:
            return
        indices = [0] * r
        yield tuple(pool[i] for i in indices)
        while 1:
            for i in reversed(range(r)):
                if indices[i] != n - 1:
                    break
            else:
                return
            indices[i:] = [indices[i] + 1] * (r - i)
            yield tuple(pool[i] for i in indices)

    def cwr2(iterable, r):
        'Pure python version shown in the docs'
        pool = tuple(iterable)
        n = len(pool)
        for indices in product(range(n), repeat=r):
            if sorted(indices) == list(indices):
                yield tuple(pool[i] for i in indices)

    def numcombs(n, r):
        # Expected result count: (n+r-1)! / r! / (n-1)! for n>0.
        if not n:
            return 0 if r else 1
        return fact(n+r-1) // fact(r) // fact(n-1)

    # Exhaustive invariants over small pools.
    for n in range(7):
        values = [5*x-12 for x in range(n)]
        for r in range(n+2):
            result = list(cwr(values, r))

            self.assertEqual(len(result), numcombs(n, r))    # right number of combs
            self.assertEqual(len(result), len(set(result)))  # no repeats
            self.assertEqual(result, sorted(result))         # lexicographic order

            regular_combs = list(combinations(values, r))    # compare to combs without replacement
            if n == 0 or r <= 1:
                self.assertEqual(result, regular_combs)      # cases that should be identical
            else:
                self.assertTrue(set(result) >= set(regular_combs))  # rest should be supersets of regular combs

            for c in result:
                self.assertEqual(len(c), r)                  # r-length combinations
                noruns = [k for k, v in groupby(c)]          # combo without consecutive repeats
                self.assertEqual(len(noruns), len(set(noruns)))  # no repeats other than consecutive
                self.assertEqual(list(c), sorted(c))         # keep original ordering
                self.assertTrue(all(e in values for e in c))  # elements taken from input iterable
                self.assertEqual(noruns,
                                 [e for e in values if e in c])  # comb is a subsequence of the input iterable
            self.assertEqual(result, list(cwr1(values, r)))  # matches first pure python version
            self.assertEqual(result, list(cwr2(values, r)))  # matches second pure python version
@test_support.bigaddrspacetest
def test_combinations_with_replacement_overflow(self):
    # A huge r must fail cleanly, not crash.
    with self.assertRaises((OverflowError, MemoryError)):
        combinations_with_replacement("AA", 2**30)
@test_support.impl_detail("tuple reuse is specific to CPython")
def test_combinations_with_replacement_tuple_reuse(self):
    # Lazy consumption reuses one tuple; materialized results do not.
    cwr = combinations_with_replacement
    self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1)
    self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1)
def test_permutations(self):
    # Argument validation.
    self.assertRaises(TypeError, permutations)               # too few arguments
    self.assertRaises(TypeError, permutations, 'abc', 2, 1)  # too many arguments
    self.assertRaises(TypeError, permutations, None)         # pool is not iterable
    self.assertRaises(ValueError, permutations, 'abc', -2)   # r is negative
    self.assertEqual(list(permutations('abc', 32)), [])      # r > n
    self.assertRaises(TypeError, permutations, 'abc', 's')   # r is not an int or None
    self.assertEqual(list(permutations(range(3), 2)),
                     [(0,1), (0,2), (1,0), (1,2), (2,0), (2,1)])

    # Two pure-Python reference implementations.
    def permutations1(iterable, r=None):
        'Pure python version shown in the docs'
        pool = tuple(iterable)
        n = len(pool)
        r = n if r is None else r
        if r > n:
            return
        indices = range(n)
        cycles = range(n, n-r, -1)
        yield tuple(pool[i] for i in indices[:r])
        while n:
            for i in reversed(range(r)):
                cycles[i] -= 1
                if cycles[i] == 0:
                    indices[i:] = indices[i+1:] + indices[i:i+1]
                    cycles[i] = n - i
                else:
                    j = cycles[i]
                    indices[i], indices[-j] = indices[-j], indices[i]
                    yield tuple(pool[i] for i in indices[:r])
                    break
            else:
                return

    def permutations2(iterable, r=None):
        'Pure python version shown in the docs'
        pool = tuple(iterable)
        n = len(pool)
        r = n if r is None else r
        for indices in product(range(n), repeat=r):
            if len(set(indices)) == r:
                yield tuple(pool[i] for i in indices)

    # Exhaustive invariants over small pools.
    for n in range(7):
        values = [5*x-12 for x in range(n)]
        for r in range(n+2):
            result = list(permutations(values, r))
            self.assertEqual(len(result), 0 if r>n else fact(n) // fact(n-r))  # right number of perms
            self.assertEqual(len(result), len(set(result)))   # no repeats
            self.assertEqual(result, sorted(result))          # lexicographic order
            for p in result:
                self.assertEqual(len(p), r)                   # r-length permutations
                self.assertEqual(len(set(p)), r)              # no duplicate elements
                self.assertTrue(all(e in values for e in p))  # elements taken from input iterable
            self.assertEqual(result, list(permutations1(values, r)))  # matches first pure python version
            self.assertEqual(result, list(permutations2(values, r)))  # matches second pure python version
            if r == n:
                self.assertEqual(result, list(permutations(values, None)))  # test r as None
                self.assertEqual(result, list(permutations(values)))        # test default r
@test_support.bigaddrspacetest
def test_permutations_overflow(self):
    # A huge r must fail cleanly, not crash.
    with self.assertRaises((OverflowError, MemoryError)):
        permutations("A", 2**30)
@test_support.impl_detail("tuple reuse is specific to CPython")
def test_permutations_tuple_reuse(self):
    """permutations() reuses one result tuple while consumed lazily,
    but yields distinct tuples once the results are materialized."""
    self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1)
    self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1)
def test_combinatorics(self):
    """Cross-check sizes, lexicographic order, and subset relationships
    among product(), permutations(), combinations(), and
    combinations_with_replacement() over small alphabets."""
    # Test relationships between product(), permutations(),
    # combinations() and combinations_with_replacement().
    for n in range(6):
        s = 'ABCDEFG'[:n]
        for r in range(8):
            prod = list(product(s, repeat=r))
            cwr = list(combinations_with_replacement(s, r))
            perm = list(permutations(s, r))
            comb = list(combinations(s, r))
            # Check size
            self.assertEqual(len(prod), n**r)
            self.assertEqual(len(cwr), (fact(n+r-1) // fact(r) // fact(n-1)) if n else (not r))
            self.assertEqual(len(perm), 0 if r>n else fact(n) // fact(n-r))
            self.assertEqual(len(comb), 0 if r>n else fact(n) // fact(r) // fact(n-r))
            # Check lexicographic order without repeated tuples
            self.assertEqual(prod, sorted(set(prod)))
            self.assertEqual(cwr, sorted(set(cwr)))
            self.assertEqual(perm, sorted(set(perm)))
            self.assertEqual(comb, sorted(set(comb)))
            # Check interrelationships
            self.assertEqual(cwr, [t for t in prod if sorted(t)==list(t)]) # cwr: prods which are sorted
            self.assertEqual(perm, [t for t in prod if len(set(t))==r]) # perm: prods with no dups
            self.assertEqual(comb, [t for t in perm if sorted(t)==list(t)]) # comb: perms that are sorted
            self.assertEqual(comb, [t for t in cwr if len(set(t))==r]) # comb: cwrs without dups
            self.assertEqual(comb, filter(set(cwr).__contains__, perm)) # comb: perm that is a cwr
            self.assertEqual(comb, filter(set(perm).__contains__, cwr)) # comb: cwr that is a perm
            self.assertEqual(comb, sorted(set(cwr) & set(perm))) # comb: both a cwr and a perm
def test_compress(self):
    """compress(): selector semantics, length mismatches, laziness with
    infinite inputs, and argument-count errors.

    Fix: the final assertRaises previously duplicated the two-argument
    "2nd arg not iterable" call, so the "too many args" case was never
    actually exercised; it now passes three arguments.
    """
    self.assertEqual(list(compress(data='ABCDEF', selectors=[1,0,1,0,1,1])), list('ACEF'))
    self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
    self.assertEqual(list(compress('ABCDEF', [0,0,0,0,0,0])), list(''))
    self.assertEqual(list(compress('ABCDEF', [1,1,1,1,1,1])), list('ABCDEF'))
    # The shorter of the two inputs terminates the output.
    self.assertEqual(list(compress('ABCDEF', [1,0,1])), list('AC'))
    self.assertEqual(list(compress('ABC', [0,1,1,1,1,1])), list('BC'))
    n = 10000
    # Both inputs may be infinite/lazy; compress() must not materialize them.
    data = chain.from_iterable(repeat(range(6), n))
    selectors = chain.from_iterable(repeat((0, 1)))
    self.assertEqual(list(compress(data, selectors)), [1,3,5] * n)
    self.assertRaises(TypeError, compress, None, range(6)) # 1st arg not iterable
    self.assertRaises(TypeError, compress, range(6), None) # 2nd arg not iterable
    self.assertRaises(TypeError, compress, range(6)) # too few args
    self.assertRaises(TypeError, compress, range(6), range(6), range(6)) # too many args
def test_count(self):
    """count(): basic counting, negative starts, argument errors, repr()
    tracking the live iterator state, and copy/deepcopy/pickle support."""
    self.assertEqual(zip('abc',count()), [('a', 0), ('b', 1), ('c', 2)])
    self.assertEqual(zip('abc',count(3)), [('a', 3), ('b', 4), ('c', 5)])
    self.assertEqual(take(2, zip('abc',count(3))), [('a', 3), ('b', 4)])
    self.assertEqual(take(2, zip('abc',count(-1))), [('a', -1), ('b', 0)])
    self.assertEqual(take(2, zip('abc',count(-3))), [('a', -3), ('b', -2)])
    self.assertRaises(TypeError, count, 2, 3, 4)
    self.assertRaises(TypeError, count, 'a')
    # Counting must cross the int/long boundary transparently.
    self.assertEqual(list(islice(count(maxsize-5), 10)), range(maxsize-5, maxsize+5))
    self.assertEqual(list(islice(count(-maxsize-5), 10)), range(-maxsize-5, -maxsize+5))
    c = count(3)
    self.assertEqual(repr(c), 'count(3)')
    c.next()
    self.assertEqual(repr(c), 'count(4)')
    c = count(-9)
    self.assertEqual(repr(c), 'count(-9)')
    c.next()
    self.assertEqual(repr(count(10.25)), 'count(10.25)')
    # c advanced once above, so the next value is -8.
    self.assertEqual(c.next(), -8)
    for i in (-sys.maxint-5, -sys.maxint+5 ,-10, -1, 0, 10, sys.maxint-5, sys.maxint+5):
        # Test repr (ignoring the L in longs)
        r1 = repr(count(i)).replace('L', '')
        r2 = 'count(%r)'.__mod__(i).replace('L', '')
        self.assertEqual(r1, r2)
    # check copy, deepcopy, pickle
    for value in -3, 3, sys.maxint-5, sys.maxint+5:
        c = count(value)
        self.assertEqual(next(copy.copy(c)), value)
        self.assertEqual(next(copy.deepcopy(c)), value)
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            self.assertEqual(next(pickle.loads(pickle.dumps(c, proto))), value)
def test_count_with_stride(self):
    """count() with an explicit step: positive/zero/negative strides,
    non-integer steps (float, complex, Decimal, Fraction), and repr()."""
    self.assertEqual(zip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
    self.assertEqual(zip('abc',count(start=2,step=3)),
                     [('a', 2), ('b', 5), ('c', 8)])
    self.assertEqual(zip('abc',count(step=-1)),
                     [('a', 0), ('b', -1), ('c', -2)])
    self.assertEqual(zip('abc',count(2,0)), [('a', 2), ('b', 2), ('c', 2)])
    self.assertEqual(zip('abc',count(2,1)), [('a', 2), ('b', 3), ('c', 4)])
    # Strides must also cross the int/long boundary transparently.
    self.assertEqual(take(20, count(maxsize-15, 3)), take(20, range(maxsize-15, maxsize+100, 3)))
    self.assertEqual(take(20, count(-maxsize-15, 3)), take(20, range(-maxsize-15,-maxsize+100, 3)))
    self.assertEqual(take(3, count(2, 3.25-4j)), [2, 5.25-4j, 8.5-8j])
    self.assertEqual(take(3, count(Decimal('1.1'), Decimal('.1'))),
                     [Decimal('1.1'), Decimal('1.2'), Decimal('1.3')])
    self.assertEqual(take(3, count(Fraction(2,3), Fraction(1,7))),
                     [Fraction(2,3), Fraction(17,21), Fraction(20,21)])
    self.assertEqual(repr(take(3, count(10, 2.5))), repr([10, 12.5, 15.0]))
    c = count(3, 5)
    self.assertEqual(repr(c), 'count(3, 5)')
    c.next()
    self.assertEqual(repr(c), 'count(8, 5)')
    c = count(-9, 0)
    self.assertEqual(repr(c), 'count(-9, 0)')
    c.next()
    # Zero stride: repr stays the same even after advancing.
    self.assertEqual(repr(c), 'count(-9, 0)')
    c = count(-9, -3)
    self.assertEqual(repr(c), 'count(-9, -3)')
    c.next()
    self.assertEqual(repr(c), 'count(-12, -3)')
    # repr() itself must not advance the iterator.
    self.assertEqual(repr(c), 'count(-12, -3)')
    self.assertEqual(repr(count(10.5, 1.25)), 'count(10.5, 1.25)')
    self.assertEqual(repr(count(10.5, 1)), 'count(10.5)') # suppress step=1 when it's an int
    self.assertEqual(repr(count(10.5, 1.00)), 'count(10.5, 1.0)') # do show float values like 1.0
    for i in (-sys.maxint-5, -sys.maxint+5 ,-10, -1, 0, 10, sys.maxint-5, sys.maxint+5):
        for j in (-sys.maxint-5, -sys.maxint+5 ,-10, -1, 0, 1, 10, sys.maxint-5, sys.maxint+5):
            # Test repr (ignoring the L in longs)
            r1 = repr(count(i, j)).replace('L', '')
            if j == 1:
                r2 = ('count(%r)' % i).replace('L', '')
            else:
                r2 = ('count(%r, %r)' % (i, j)).replace('L', '')
            self.assertEqual(r1, r2)
def test_cycle(self):
    """cycle(): repetition, empty input, argument errors, and generators."""
    self.assertEqual(take(10, cycle('abc')), list('abcabcabca'))
    self.assertEqual(list(cycle('')), [])
    self.assertRaises(TypeError, cycle)
    self.assertRaises(TypeError, cycle, 5)
    self.assertEqual(list(islice(cycle(gen3()),10)), [0,1,2,0,1,2,0,1,2,0])
def test_groupby(self):
    """groupby(): argument checking, flat and nested grouping, unused
    inner groups, uniq-style pipelines, and exception propagation from
    the source iterator, comparisons, and the key function."""
    # Check whether it accepts arguments correctly
    self.assertEqual([], list(groupby([])))
    self.assertEqual([], list(groupby([], key=id)))
    self.assertRaises(TypeError, list, groupby('abc', []))
    self.assertRaises(TypeError, groupby, None)
    self.assertRaises(TypeError, groupby, 'abc', lambda x:x, 10)
    # Check normal input
    s = [(0, 10, 20), (0, 11,21), (0,12,21), (1,13,21), (1,14,22),
         (2,15,22), (3,16,23), (3,17,23)]
    dup = []
    for k, g in groupby(s, lambda r:r[0]):
        for elem in g:
            self.assertEqual(k, elem[0])
            dup.append(elem)
    self.assertEqual(s, dup)
    # Check nested case
    dup = []
    for k, g in groupby(s, lambda r:r[0]):
        for ik, ig in groupby(g, lambda r:r[2]):
            for elem in ig:
                self.assertEqual(k, elem[0])
                self.assertEqual(ik, elem[2])
                dup.append(elem)
    self.assertEqual(s, dup)
    # Check case where inner iterator is not used
    keys = [k for k, g in groupby(s, lambda r:r[0])]
    expectedkeys = set([r[0] for r in s])
    self.assertEqual(set(keys), expectedkeys)
    self.assertEqual(len(keys), len(expectedkeys))
    # Exercise pipes and filters style
    s = 'abracadabra'
    # sort s | uniq
    r = [k for k, g in groupby(sorted(s))]
    self.assertEqual(r, ['a', 'b', 'c', 'd', 'r'])
    # sort s | uniq -d
    r = [k for k, g in groupby(sorted(s)) if list(islice(g,1,2))]
    self.assertEqual(r, ['a', 'b', 'r'])
    # sort s | uniq -c
    r = [(len(list(g)), k) for k, g in groupby(sorted(s))]
    self.assertEqual(r, [(5, 'a'), (2, 'b'), (1, 'c'), (1, 'd'), (2, 'r')])
    # sort s | uniq -c | sort -rn | head -3
    r = sorted([(len(list(g)) , k) for k, g in groupby(sorted(s))], reverse=True)[:3]
    self.assertEqual(r, [(5, 'a'), (2, 'r'), (2, 'b')])
    # iter.next failure
    class ExpectedError(Exception):
        pass
    def delayed_raise(n=0):
        # Yield n items, then raise ExpectedError from the source.
        for i in range(n):
            yield 'yo'
        raise ExpectedError
    def gulp(iterable, keyp=None, func=list):
        # Fully consume every group, forcing any pending exception.
        return [func(g) for k, g in groupby(iterable, keyp)]
    # iter.next failure on outer object
    self.assertRaises(ExpectedError, gulp, delayed_raise(0))
    # iter.next failure on inner object
    self.assertRaises(ExpectedError, gulp, delayed_raise(1))
    # __cmp__ failure
    class DummyCmp:
        def __cmp__(self, dst):
            raise ExpectedError
    s = [DummyCmp(), DummyCmp(), None]
    # __cmp__ failure on outer object
    self.assertRaises(ExpectedError, gulp, s, func=id)
    # __cmp__ failure on inner object
    self.assertRaises(ExpectedError, gulp, s)
    # keyfunc failure
    def keyfunc(obj):
        # Succeed keyfunc.skip times, then raise.
        if keyfunc.skip > 0:
            keyfunc.skip -= 1
            return obj
        else:
            raise ExpectedError
    # keyfunc failure on outer object
    keyfunc.skip = 0
    self.assertRaises(ExpectedError, gulp, [None], keyfunc)
    keyfunc.skip = 1
    self.assertRaises(ExpectedError, gulp, [None, None], keyfunc)
def test_ifilter(self):
    """ifilter(): predicate filtering, None/bool predicates, laziness,
    and argument errors."""
    self.assertEqual(list(ifilter(isEven, range(6))), [0,2,4])
    self.assertEqual(list(ifilter(None, [0,1,0,2,0])), [1,2])
    self.assertEqual(list(ifilter(bool, [0,1,0,2,0])), [1,2])
    self.assertEqual(take(4, ifilter(isEven, count())), [0,2,4,6])
    self.assertRaises(TypeError, ifilter)
    self.assertRaises(TypeError, ifilter, lambda x:x)
    self.assertRaises(TypeError, ifilter, lambda x:x, range(6), 7)
    self.assertRaises(TypeError, ifilter, isEven, 3)
    self.assertRaises(TypeError, ifilter(range(6), range(6)).next)
def test_ifilterfalse(self):
    """ifilterfalse(): inverse filtering, None/bool predicates, laziness,
    and argument errors."""
    self.assertEqual(list(ifilterfalse(isEven, range(6))), [1,3,5])
    self.assertEqual(list(ifilterfalse(None, [0,1,0,2,0])), [0,0,0])
    self.assertEqual(list(ifilterfalse(bool, [0,1,0,2,0])), [0,0,0])
    self.assertEqual(take(4, ifilterfalse(isEven, count())), [1,3,5,7])
    self.assertRaises(TypeError, ifilterfalse)
    self.assertRaises(TypeError, ifilterfalse, lambda x:x)
    self.assertRaises(TypeError, ifilterfalse, lambda x:x, range(6), 7)
    self.assertRaises(TypeError, ifilterfalse, isEven, 3)
    self.assertRaises(TypeError, ifilterfalse(range(6), range(6)).next)
def test_izip(self):
    """izip(): agreement with zip() on short/long/empty inputs, laziness,
    and argument errors."""
    ans = [(x,y) for x, y in izip('abc',count())]
    self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
    self.assertEqual(list(izip('abc', range(6))), zip('abc', range(6)))
    self.assertEqual(list(izip('abcdef', range(3))), zip('abcdef', range(3)))
    self.assertEqual(take(3,izip('abcdef', count())), zip('abcdef', range(3)))
    self.assertEqual(list(izip('abcdef')), zip('abcdef'))
    self.assertEqual(list(izip()), zip())
    self.assertRaises(TypeError, izip, 3)
    self.assertRaises(TypeError, izip, range(3), 3)
    self.assertEqual([tuple(list(pair)) for pair in izip('abc', 'def')],
                     zip('abc', 'def'))
    self.assertEqual([pair for pair in izip('abc', 'def')],
                     zip('abc', 'def'))
@test_support.impl_detail("tuple reuse is specific to CPython")
def test_izip_tuple_reuse(self):
    """izip() reuses one result tuple when consumed lazily, but yields
    distinct tuples once the results are materialized."""
    ids = map(id, izip('abc', 'def'))
    self.assertEqual(min(ids), max(ids))
    ids = map(id, list(izip('abc', 'def')))
    self.assertEqual(len(dict.fromkeys(ids)), len(ids))
def test_iziplongest(self):
    """izip_longest(): ragged inputs padded with None or a fillvalue,
    keyword handling, infinite-input safety, and argument errors."""
    for args in [
            ['abc', range(6)],
            [range(6), 'abc'],
            [range(1000), range(2000,2100), range(3000,3050)],
            [range(1000), range(0), range(3000,3050), range(1200), range(1500)],
            [range(1000), range(0), range(3000,3050), range(1200), range(1500), range(0)],
        ]:
        # target = map(None, *args) <- this raises a py3k warning
        # this is the replacement:
        target = [tuple([arg[i] if i < len(arg) else None for arg in args])
                  for i in range(max(map(len, args)))]
        self.assertEqual(list(izip_longest(*args)), target)
        self.assertEqual(list(izip_longest(*args, **{})), target)
        target = [tuple((e is None and 'X' or e) for e in t) for t in target] # Replace None fills with 'X'
        self.assertEqual(list(izip_longest(*args, **dict(fillvalue='X'))), target)
    self.assertEqual(take(3,izip_longest('abcdef', count())), zip('abcdef', range(3))) # take 3 from infinite input
    self.assertEqual(list(izip_longest()), zip())
    self.assertEqual(list(izip_longest([])), zip([]))
    self.assertEqual(list(izip_longest('abcdef')), zip('abcdef'))
    self.assertEqual(list(izip_longest('abc', 'defg', **{})),
                     zip(list('abc') + [None], 'defg')) # empty keyword dict
    self.assertRaises(TypeError, izip_longest, 3)
    self.assertRaises(TypeError, izip_longest, range(3), 3)
    # Unknown keyword arguments must raise TypeError.
    for stmt in [
            "izip_longest('abc', fv=1)",
            "izip_longest('abc', fillvalue=1, bogus_keyword=None)",
        ]:
        try:
            eval(stmt, globals(), locals())
        except TypeError:
            pass
        else:
            self.fail('Did not raise Type in: ' + stmt)
    self.assertEqual([tuple(list(pair)) for pair in izip_longest('abc', 'def')],
                     zip('abc', 'def'))
    self.assertEqual([pair for pair in izip_longest('abc', 'def')],
                     zip('abc', 'def'))
@test_support.impl_detail("tuple reuse is specific to CPython")
def test_izip_longest_tuple_reuse(self):
    """izip_longest() reuses one result tuple when consumed lazily, but
    yields distinct tuples once the results are materialized."""
    ids = map(id, izip_longest('abc', 'def'))
    self.assertEqual(min(ids), max(ids))
    ids = map(id, list(izip_longest('abc', 'def')))
    self.assertEqual(len(dict.fromkeys(ids)), len(ids))
def test_bug_7244(self):
    """Regression test for issue #7244: izip_longest() must treat
    StopIteration from an input as normal exhaustion, but propagate any
    other exception (e.g. RuntimeError) instead of swallowing it."""
    class Repeater(object):
        # this class is similar to itertools.repeat
        def __init__(self, o, t, e):
            self.o = o
            self.t = int(t)
            self.e = e
        def __iter__(self): # its iterator is itself
            return self
        def next(self):
            # Yield o for t steps, then raise the configured exception.
            if self.t > 0:
                self.t -= 1
                return self.o
            else:
                raise self.e
    # Formerly this code in would fail in debug mode
    # with Undetected Error and Stop Iteration
    r1 = Repeater(1, 3, StopIteration)
    r2 = Repeater(2, 4, StopIteration)
    def run(r1, r2):
        result = []
        for i, j in izip_longest(r1, r2, fillvalue=0):
            with test_support.captured_output('stdout'):
                print (i, j)
            result.append((i, j))
        return result
    self.assertEqual(run(r1, r2), [(1,2), (1,2), (1,2), (0,2)])
    # Formerly, the RuntimeError would be lost
    # and StopIteration would stop as expected
    r1 = Repeater(1, 3, RuntimeError)
    r2 = Repeater(2, 4, StopIteration)
    it = izip_longest(r1, r2, fillvalue=0)
    self.assertEqual(next(it), (1, 2))
    self.assertEqual(next(it), (1, 2))
    self.assertEqual(next(it), (1, 2))
    self.assertRaises(RuntimeError, next, it)
def test_product(self):
    """product(): edge cases (zero/empty iterables), the repeat keyword,
    argument errors, and agreement with two pure-python reference
    implementations on randomized mixed-type inputs."""
    for args, result in [
            ([], [()]), # zero iterables
            (['ab'], [('a',), ('b',)]), # one iterable
            ([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]), # two iterables
            ([range(0), range(2), range(3)], []), # first iterable with zero length
            ([range(2), range(0), range(3)], []), # middle iterable with zero length
            ([range(2), range(3), range(0)], []), # last iterable with zero length
        ]:
        self.assertEqual(list(product(*args)), result)
        # repeat=r must equal passing the argument list r times.
        for r in range(4):
            self.assertEqual(list(product(*(args*r))),
                             list(product(*args, **dict(repeat=r))))
    self.assertEqual(len(list(product(*[range(7)]*6))), 7**6)
    self.assertRaises(TypeError, product, range(6), None)
    def product1(*args, **kwds):
        # Odometer-style reference implementation.
        pools = map(tuple, args) * kwds.get('repeat', 1)
        n = len(pools)
        if n == 0:
            yield ()
            return
        if any(len(pool) == 0 for pool in pools):
            return
        indices = [0] * n
        yield tuple(pool[i] for pool, i in zip(pools, indices))
        while 1:
            for i in reversed(range(n)): # right to left
                if indices[i] == len(pools[i]) - 1:
                    continue
                indices[i] += 1
                for j in range(i+1, n):
                    indices[j] = 0
                yield tuple(pool[i] for pool, i in zip(pools, indices))
                break
            else:
                return
    def product2(*args, **kwds):
        'Pure python version used in docs'
        pools = map(tuple, args) * kwds.get('repeat', 1)
        result = [[]]
        for pool in pools:
            result = [x+[y] for x in result for y in pool]
        for prod in result:
            yield tuple(prod)
    argtypes = ['', 'abc', '', xrange(0), xrange(4), dict(a=1, b=2, c=3),
                set('abcdefg'), range(11), tuple(range(13))]
    for i in range(100):
        args = [random.choice(argtypes) for j in range(random.randrange(5))]
        expected_len = prod(map(len, args))
        self.assertEqual(len(list(product(*args))), expected_len)
        self.assertEqual(list(product(*args)), list(product1(*args)))
        self.assertEqual(list(product(*args)), list(product2(*args)))
        # Also accept one-shot iterators as inputs.
        args = map(iter, args)
        self.assertEqual(len(list(product(*args))), expected_len)
@test_support.bigaddrspacetest
def test_product_overflow(self):
    """A huge repeat must raise OverflowError/MemoryError, not crash."""
    with self.assertRaises((OverflowError, MemoryError)):
        product(*(['ab']*2**5), repeat=2**25)
@test_support.impl_detail("tuple reuse is specific to CPython")
def test_product_tuple_reuse(self):
    """product() reuses one result tuple when consumed lazily, but
    yields distinct tuples once the results are materialized."""
    self.assertEqual(len(set(map(id, product('abc', 'def')))), 1)
    self.assertNotEqual(len(set(map(id, list(product('abc', 'def'))))), 1)
def test_repeat(self):
    """repeat(): finite/infinite repetition, non-positive counts,
    argument errors, and repr() tracking the remaining count."""
    self.assertEqual(list(repeat(object='a', times=3)), ['a', 'a', 'a'])
    self.assertEqual(list(repeat(object='a', times=0)), [])
    self.assertEqual(list(repeat(object='a', times=-1)), [])
    self.assertEqual(list(repeat(object='a', times=-2)), [])
    self.assertEqual(zip(xrange(3),repeat('a')),
                     [(0, 'a'), (1, 'a'), (2, 'a')])
    self.assertEqual(list(repeat('a', 3)), ['a', 'a', 'a'])
    self.assertEqual(take(3, repeat('a')), ['a', 'a', 'a'])
    self.assertEqual(list(repeat('a', 0)), [])
    self.assertEqual(list(repeat('a', -3)), [])
    self.assertRaises(TypeError, repeat)
    self.assertRaises(TypeError, repeat, None, 3, 4)
    self.assertRaises(TypeError, repeat, None, 'a')
    r = repeat(1+0j)
    self.assertEqual(repr(r), 'repeat((1+0j))')
    r = repeat(1+0j, 5)
    self.assertEqual(repr(r), 'repeat((1+0j), 5)')
    # Exhausting the iterator drives the repr count down to 0.
    list(r)
    self.assertEqual(repr(r), 'repeat((1+0j), 0)')
def test_repeat_with_negative_times(self):
    """Negative times values are clamped to 0 in repeat()'s repr,
    whether given positionally or by keyword."""
    for times in (-1, -2):
        self.assertEqual(repr(repeat('a', times)), "repeat('a', 0)")
        self.assertEqual(repr(repeat('a', times=times)), "repeat('a', 0)")
def test_imap(self):
    """imap(): function application, None function (tuple-zipping),
    laziness, and error propagation from bad arguments/functions."""
    self.assertEqual(list(imap(operator.pow, range(3), range(1,7))),
                     [0**1, 1**2, 2**3])
    self.assertEqual(list(imap(None, 'abc', range(5))),
                     [('a',0),('b',1),('c',2)])
    self.assertEqual(list(imap(None, 'abc', count())),
                     [('a',0),('b',1),('c',2)])
    self.assertEqual(take(2,imap(None, 'abc', count())),
                     [('a',0),('b',1)])
    self.assertEqual(list(imap(operator.pow, [])), [])
    self.assertRaises(TypeError, imap)
    self.assertRaises(TypeError, imap, operator.neg)
    self.assertRaises(TypeError, imap(10, range(5)).next)
    self.assertRaises(ValueError, imap(errfunc, [4], [5]).next)
    self.assertRaises(TypeError, imap(onearg, [4], [5]).next)
def test_starmap(self):
    """starmap(): unpacking argument tuples, laziness, iterables of
    arguments, and error propagation."""
    self.assertEqual(list(starmap(operator.pow, zip(range(3), range(1,7)))),
                     [0**1, 1**2, 2**3])
    self.assertEqual(take(3, starmap(operator.pow, izip(count(), count(1)))),
                     [0**1, 1**2, 2**3])
    self.assertEqual(list(starmap(operator.pow, [])), [])
    # Argument groups may be arbitrary iterables, not just tuples.
    self.assertEqual(list(starmap(operator.pow, [iter([4,5])])), [4**5])
    self.assertRaises(TypeError, list, starmap(operator.pow, [None]))
    self.assertRaises(TypeError, starmap)
    self.assertRaises(TypeError, starmap, operator.pow, [(4,5)], 'extra')
    self.assertRaises(TypeError, starmap(10, [(4,5)]).next)
    self.assertRaises(ValueError, starmap(errfunc, [(4,5)]).next)
    self.assertRaises(TypeError, starmap(onearg, [(4,5)]).next)
def test_islice(self):
    """islice(): agreement with range() semantics, None stops, consumed
    item counts, invalid arguments, and source-reference release."""
    for args in [ # islice(args) should agree with range(args)
            (10, 20, 3),
            (10, 3, 20),
            (10, 20),
            (10, 3),
            (20,)
        ]:
        self.assertEqual(list(islice(xrange(100), *args)), range(*args))
    for args, tgtargs in [ # Stop when seqn is exhausted
            ((10, 110, 3), ((10, 100, 3))),
            ((10, 110), ((10, 100))),
            ((110,), (100,))
        ]:
        self.assertEqual(list(islice(xrange(100), *args)), range(*tgtargs))
    # Test stop=None
    self.assertEqual(list(islice(xrange(10), None)), range(10))
    self.assertEqual(list(islice(xrange(10), None, None)), range(10))
    self.assertEqual(list(islice(xrange(10), None, None, None)), range(10))
    self.assertEqual(list(islice(xrange(10), 2, None)), range(2, 10))
    self.assertEqual(list(islice(xrange(10), 1, None, 2)), range(1, 10, 2))
    # Test number of items consumed SF #1171417
    it = iter(range(10))
    self.assertEqual(list(islice(it, 3)), range(3))
    self.assertEqual(list(it), range(3, 10))
    # Test invalid arguments
    self.assertRaises(TypeError, islice, xrange(10))
    self.assertRaises(TypeError, islice, xrange(10), 1, 2, 3, 4)
    self.assertRaises(ValueError, islice, xrange(10), -5, 10, 1)
    self.assertRaises(ValueError, islice, xrange(10), 1, -5, -1)
    self.assertRaises(ValueError, islice, xrange(10), 1, 10, -1)
    self.assertRaises(ValueError, islice, xrange(10), 1, 10, 0)
    self.assertRaises(ValueError, islice, xrange(10), 'a')
    self.assertRaises(ValueError, islice, xrange(10), 'a', 1)
    self.assertRaises(ValueError, islice, xrange(10), 1, 'a')
    self.assertRaises(ValueError, islice, xrange(10), 'a', 1, 1)
    self.assertRaises(ValueError, islice, xrange(10), 1, 'a', 1)
    self.assertEqual(len(list(islice(count(), 1, 10, maxsize))), 1)
    # Issue #10323: leave the source iterator in a predictable state
    # after islice() stops (here: advanced past the stop index).
    c = count()
    self.assertEqual(list(islice(c, 1, 3, 50)), [1])
    self.assertEqual(next(c), 3)
    # Issue #21321: check source iterator is not referenced
    # from islice() after the latter has been exhausted
    it = (x for x in (1, 2))
    wr = weakref.ref(it)
    it = islice(it, 1)
    self.assertIsNotNone(wr())
    list(it) # exhaust the iterator
    test_support.gc_collect()
    self.assertIsNone(wr())
def test_takewhile(self):
    """takewhile(): truncation at the first failing element, empty input,
    argument errors, and exhaustion."""
    data = [1, 3, 5, 20, 2, 4, 6, 8]
    underten = lambda x: x<10
    self.assertEqual(list(takewhile(underten, data)), [1, 3, 5])
    self.assertEqual(list(takewhile(underten, [])), [])
    self.assertRaises(TypeError, takewhile)
    self.assertRaises(TypeError, takewhile, operator.pow)
    self.assertRaises(TypeError, takewhile, operator.pow, [(4,5)], 'extra')
    self.assertRaises(TypeError, takewhile(10, [(4,5)]).next)
    self.assertRaises(ValueError, takewhile(errfunc, [(4,5)]).next)
    t = takewhile(bool, [1, 1, 1, 0, 0, 0])
    self.assertEqual(list(t), [1, 1, 1])
    # Once stopped, it must stay stopped.
    self.assertRaises(StopIteration, t.next)
def test_dropwhile(self):
    """dropwhile(): skipping the leading run of matching elements,
    empty input, and argument errors."""
    data = [1, 3, 5, 20, 2, 4, 6, 8]
    underten = lambda x: x<10
    self.assertEqual(list(dropwhile(underten, data)), [20, 2, 4, 6, 8])
    self.assertEqual(list(dropwhile(underten, [])), [])
    self.assertRaises(TypeError, dropwhile)
    self.assertRaises(TypeError, dropwhile, operator.pow)
    self.assertRaises(TypeError, dropwhile, operator.pow, [(4,5)], 'extra')
    self.assertRaises(TypeError, dropwhile(10, [(4,5)]).next)
    self.assertRaises(ValueError, dropwhile(errfunc, [(4,5)]).next)
def test_tee(self):
    """tee(): interleaved/sequential consumption, dealloc of one branch,
    n-way splits, direct instantiation, pass-through of copyable
    iterators, and weak referenceability."""
    n = 200
    def irange(n):
        # One-shot generator equivalent of range(n).
        for i in xrange(n):
            yield i
    a, b = tee([]) # test empty iterator
    self.assertEqual(list(a), [])
    self.assertEqual(list(b), [])
    a, b = tee(irange(n)) # test 100% interleaved
    self.assertEqual(zip(a,b), zip(range(n),range(n)))
    a, b = tee(irange(n)) # test 0% interleaved
    self.assertEqual(list(a), range(n))
    self.assertEqual(list(b), range(n))
    a, b = tee(irange(n)) # test dealloc of leading iterator
    for i in xrange(100):
        self.assertEqual(a.next(), i)
    del a
    self.assertEqual(list(b), range(n))
    a, b = tee(irange(n)) # test dealloc of trailing iterator
    for i in xrange(100):
        self.assertEqual(a.next(), i)
    del b
    self.assertEqual(list(a), range(100, n))
    for j in xrange(5): # test randomly interleaved
        order = [0]*n + [1]*n
        random.shuffle(order)
        lists = ([], [])
        its = tee(irange(n))
        for i in order:
            value = its[i].next()
            lists[i].append(value)
        self.assertEqual(lists[0], range(n))
        self.assertEqual(lists[1], range(n))
    # test argument format checking
    self.assertRaises(TypeError, tee)
    self.assertRaises(TypeError, tee, 3)
    self.assertRaises(TypeError, tee, [1,2], 'x')
    self.assertRaises(TypeError, tee, [1,2], 3, 'x')
    # tee object should be instantiable
    a, b = tee('abc')
    c = type(a)('def')
    self.assertEqual(list(c), list('def'))
    # test long-lagged and multi-way split
    a, b, c = tee(xrange(2000), 3)
    for i in xrange(100):
        self.assertEqual(a.next(), i)
    self.assertEqual(list(b), range(2000))
    self.assertEqual([c.next(), c.next()], range(2))
    self.assertEqual(list(a), range(100,2000))
    self.assertEqual(list(c), range(2,2000))
    # test values of n
    self.assertRaises(TypeError, tee, 'abc', 'invalid')
    self.assertRaises(ValueError, tee, [], -1)
    for n in xrange(5):
        result = tee('abc', n)
        self.assertEqual(type(result), tuple)
        self.assertEqual(len(result), n)
        self.assertEqual(map(list, result), [list('abc')]*n)
    # tee pass-through to copyable iterator
    a, b = tee('abc')
    c, d = tee(a)
    self.assertTrue(a is c)
    # test tee_new
    t1, t2 = tee('abc')
    tnew = type(t1)
    self.assertRaises(TypeError, tnew)
    self.assertRaises(TypeError, tnew, 10)
    t3 = tnew(t1)
    self.assertTrue(list(t1) == list(t2) == list(t3) == list('abc'))
    # test that tee objects are weak referencable
    a, b = tee(xrange(10))
    p = weakref.proxy(a)
    self.assertEqual(getattr(p, '__class__'), type(b))
    del a
    self.assertRaises(ReferenceError, getattr, p, '__class__')
# Issue 13454: Crash when deleting backward iterator from tee()
def test_tee_del_backward(self):
    """Deleting the lagging tee() branch after the leading one has been
    exhausted must not crash (issue #13454)."""
    forward, backward = tee(repeat(None, 20000000))
    try:
        any(forward) # exhaust the iterator
        del backward
    except:
        # On failure (e.g. MemoryError), drop both before re-raising.
        del forward, backward
        raise
def test_StopIteration(self):
    """Every itertools iterator must raise StopIteration when its input
    is empty or immediately exhausted (StopNow)."""
    self.assertRaises(StopIteration, izip().next)
    for f in (chain, cycle, izip, groupby):
        self.assertRaises(StopIteration, f([]).next)
        self.assertRaises(StopIteration, f(StopNow()).next)
    self.assertRaises(StopIteration, islice([], None).next)
    self.assertRaises(StopIteration, islice(StopNow(), None).next)
    p, q = tee([])
    self.assertRaises(StopIteration, p.next)
    self.assertRaises(StopIteration, q.next)
    p, q = tee(StopNow())
    self.assertRaises(StopIteration, p.next)
    self.assertRaises(StopIteration, q.next)
    self.assertRaises(StopIteration, repeat(None, 0).next)
    for f in (ifilter, ifilterfalse, imap, takewhile, dropwhile, starmap):
        self.assertRaises(StopIteration, f(lambda x:x, []).next)
        self.assertRaises(StopIteration, f(lambda x:x, StopNow()).next)
class TestExamples(unittest.TestCase):
def test_chain(self):
self.assertEqual(''.join(chain('ABC', 'DEF')), 'ABCDEF')
def test_chain_from_iterable(self):
self.assertEqual(''.join(chain.from_iterable(['ABC', 'DEF'])), 'ABCDEF')
def test_combinations(self):
self.assertEqual(list(combinations('ABCD', 2)),
[('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
self.assertEqual(list(combinations(range(4), 3)),
[(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
def test_combinations_with_replacement(self):
self.assertEqual(list(combinations_with_replacement('ABC', 2)),
[('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
def test_compress(self):
self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
def test_count(self):
self.assertEqual(list(islice(count(10), 5)), [10, 11, 12, 13, 14])
def test_cycle(self):
self.assertEqual(list(islice(cycle('ABCD'), 12)), list('ABCDABCDABCD'))
def test_dropwhile(self):
self.assertEqual(list(dropwhile(lambda x: x<5, [1,4,6,4,1])), [6,4,1])
def test_groupby(self):
self.assertEqual([k for k, g in groupby('AAAABBBCCDAABBB')],
list('ABCDAB'))
self.assertEqual([(list(g)) for k, g in groupby('AAAABBBCCD')],
[list('AAAA'), list('BBB'), list('CC'), list('D')])
def test_ifilter(self):
self.assertEqual(list(ifilter(lambda x: x%2, range(10))), [1,3,5,7,9])
def test_ifilterfalse(self):
self.assertEqual(list(ifilterfalse(lambda x: x%2, range(10))), [0,2,4,6,8])
def test_imap(self):
self.assertEqual(list(imap(pow, (2,3,10), (5,2,3))), [32, 9, 1000])
def test_islice(self):
self.assertEqual(list(islice('ABCDEFG', 2)), list('AB'))
self.assertEqual(list(islice('ABCDEFG', 2, 4)), list('CD'))
self.assertEqual(list(islice('ABCDEFG', 2, None)), list('CDEFG'))
self.assertEqual(list(islice('ABCDEFG', 0, None, 2)), list('ACEG'))
def test_izip(self):
self.assertEqual(list(izip('ABCD', 'xy')), [('A', 'x'), ('B', 'y')])
def test_izip_longest(self):
self.assertEqual(list(izip_longest('ABCD', 'xy', fillvalue='-')),
[('A', 'x'), ('B', 'y'), ('C', '-'), ('D', '-')])
def test_permutations(self):
self.assertEqual(list(permutations('ABCD', 2)),
map(tuple, 'AB AC AD BA BC BD CA CB CD DA DB DC'.split()))
self.assertEqual(list(permutations(range(3))),
[(0,1,2), (0,2,1), (1,0,2), (1,2,0), (2,0,1), (2,1,0)])
def test_product(self):
self.assertEqual(list(product('ABCD', 'xy')),
map(tuple, 'Ax Ay Bx By Cx Cy Dx Dy'.split()))
self.assertEqual(list(product(range(2), repeat=3)),
[(0,0,0), (0,0,1), (0,1,0), (0,1,1),
(1,0,0), (1,0,1), (1,1,0), (1,1,1)])
def test_repeat(self):
self.assertEqual(list(repeat(10, 3)), [10, 10, 10])
def test_stapmap(self):
self.assertEqual(list(starmap(pow, [(2,5), (3,2), (10,3)])),
[32, 9, 1000])
def test_takewhile(self):
self.assertEqual(list(takewhile(lambda x: x<5, [1,4,6,4,1])), [1,4])
class TestGC(unittest.TestCase):
    """Check that reference cycles routed through each itertools object
    (container -> iterator -> container) can be created without error,
    so the garbage collector can later reclaim them."""
    def makecycle(self, iterator, container):
        # Build the cycle, start the iterator, then drop both local refs.
        container.append(iterator)
        iterator.next()
        del container, iterator
    def test_chain(self):
        a = []
        self.makecycle(chain(a), a)
    def test_chain_from_iterable(self):
        a = []
        self.makecycle(chain.from_iterable([a]), a)
    def test_combinations(self):
        a = []
        self.makecycle(combinations([1,2,a,3], 3), a)
    def test_combinations_with_replacement(self):
        a = []
        self.makecycle(combinations_with_replacement([1,2,a,3], 3), a)
    def test_compress(self):
        a = []
        self.makecycle(compress('ABCDEF', [1,0,1,0,1,0]), a)
    def test_count(self):
        a = []
        # Cycle through an int subclass carried as class attribute.
        Int = type('Int', (int,), dict(x=a))
        self.makecycle(count(Int(0), Int(1)), a)
    def test_cycle(self):
        a = []
        self.makecycle(cycle([a]*2), a)
    def test_dropwhile(self):
        a = []
        self.makecycle(dropwhile(bool, [0, a, a]), a)
    def test_groupby(self):
        a = []
        self.makecycle(groupby([a]*2, lambda x:x), a)
    def test_issue2246(self):
        # Issue 2246 -- the _grouper iterator was not included in GC
        n = 10
        keyfunc = lambda x: x
        for i, j in groupby(xrange(n), key=keyfunc):
            keyfunc.__dict__.setdefault('x',[]).append(j)
    def test_ifilter(self):
        a = []
        self.makecycle(ifilter(lambda x:True, [a]*2), a)
    def test_ifilterfalse(self):
        a = []
        self.makecycle(ifilterfalse(lambda x:False, a), a)
    def test_izip(self):
        a = []
        self.makecycle(izip([a]*2, [a]*3), a)
    def test_izip_longest(self):
        a = []
        self.makecycle(izip_longest([a]*2, [a]*3), a)
        b = [a, None]
        self.makecycle(izip_longest([a]*2, [a]*3, fillvalue=b), a)
    def test_imap(self):
        a = []
        self.makecycle(imap(lambda x:x, [a]*2), a)
    def test_islice(self):
        a = []
        self.makecycle(islice([a]*2, None), a)
    def test_permutations(self):
        a = []
        self.makecycle(permutations([1,2,a,3], 3), a)
    def test_product(self):
        a = []
        self.makecycle(product([1,2,a,3], repeat=3), a)
    def test_repeat(self):
        a = []
        self.makecycle(repeat(a), a)
    def test_starmap(self):
        a = []
        self.makecycle(starmap(lambda *t: t, [(a,a)]*2), a)
    def test_takewhile(self):
        a = []
        self.makecycle(takewhile(bool, [1, 0, a, a]), a)
def R(seqn):
    """Plain generator wrapper: yield the items of *seqn* unchanged."""
    for item in seqn:
        yield item
# Fixture: supports ONLY the legacy __getitem__ iteration protocol
# (no __iter__), to verify itertools accepts such sequences.
class G:
    'Sequence using __getitem__'
    def __init__(self, seqn):
        self.seqn = seqn
    def __getitem__(self, i):
        return self.seqn[i]
# Fixture: classic iterator protocol (__iter__ returning self + next()).
class I:
    'Sequence using iterator protocol'
    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0  # next index to yield
    def __iter__(self):
        return self
    def next(self):
        if self.i >= len(self.seqn): raise StopIteration
        v = self.seqn[self.i]
        self.i += 1
        return v
# Fixture: iterable whose __iter__ is a generator (no next() method).
class Ig:
    'Sequence using iterator protocol defined with a generator'
    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0  # unused; kept to mirror the other fixtures
    def __iter__(self):
        for val in self.seqn:
            yield val
# Fixture: has next() but neither __getitem__ nor __iter__, so it is
# NOT iterable; passing it to itertools must raise TypeError.
class X:
    'Missing __getitem__ and __iter__'
    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0  # next index to yield
    def next(self):
        if self.i >= len(self.seqn): raise StopIteration
        v = self.seqn[self.i]
        self.i += 1
        return v
# Fixture: claims to be its own iterator but lacks next(); iterating
# it must raise TypeError.
class N:
    'Iterator missing next()'
    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0  # unused; kept to mirror the other fixtures
# Fixture: iterator whose next() fails; consumers must propagate the
# ZeroDivisionError rather than swallow it.
class E:
    'Test propagation of exceptions'
    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0  # unused; kept to mirror the other fixtures
    def __iter__(self):
        return self
    def next(self):
        3 // 0  # deliberately raises ZeroDivisionError
# Fixture: iterator that is empty from the start (next() always stops).
class S:
    'Test immediate stop'
    def __init__(self, seqn):
        pass
    def __iter__(self):
        return self
    def next(self):
        raise StopIteration
def L(seqn):
    'Test multiple tiers of iterators'
    # Stack several iterator wrappers (getitem-sequence -> generator ->
    # plain generator -> imap -> chain) around seqn.
    return chain(imap(lambda x:x, R(Ig(G(seqn)))))
class TestVariousIteratorArgs(unittest.TestCase):
def test_chain(self):
    """chain() accepts every well-formed iterable flavor and raises or
    propagates for the broken fixtures (X, N, E)."""
    for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
        for g in (G, I, Ig, S, L, R):
            self.assertEqual(list(chain(g(s))), list(g(s)))
            self.assertEqual(list(chain(g(s), g(s))), list(g(s))+list(g(s)))
        self.assertRaises(TypeError, list, chain(X(s)))
        self.assertRaises(TypeError, list, chain(N(s)))
        self.assertRaises(ZeroDivisionError, list, chain(E(s)))
def test_compress(self):
    """compress() accepts every well-formed iterable flavor and raises
    or propagates for the broken fixtures (X, N, E)."""
    for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
        n = len(s)
        for g in (G, I, Ig, S, L, R):
            self.assertEqual(list(compress(g(s), repeat(1))), list(g(s)))
        self.assertRaises(TypeError, compress, X(s), repeat(1))
        self.assertRaises(TypeError, list, compress(N(s), repeat(1)))
        self.assertRaises(ZeroDivisionError, list, compress(E(s), repeat(1)))
def test_product(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
self.assertRaises(TypeError, product, X(s))
self.assertRaises(TypeError, product, N(s))
self.assertRaises(ZeroDivisionError, product, E(s))
def test_cycle(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgtlen = len(s) * 3
expected = list(g(s))*3
actual = list(islice(cycle(g(s)), tgtlen))
self.assertEqual(actual, expected)
self.assertRaises(TypeError, cycle, X(s))
self.assertRaises(TypeError, list, cycle(N(s)))
self.assertRaises(ZeroDivisionError, list, cycle(E(s)))
def test_groupby(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual([k for k, sb in groupby(g(s))], list(g(s)))
self.assertRaises(TypeError, groupby, X(s))
self.assertRaises(TypeError, list, groupby(N(s)))
self.assertRaises(ZeroDivisionError, list, groupby(E(s)))
def test_ifilter(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(ifilter(isEven, g(s))), filter(isEven, g(s)))
self.assertRaises(TypeError, ifilter, isEven, X(s))
self.assertRaises(TypeError, list, ifilter(isEven, N(s)))
self.assertRaises(ZeroDivisionError, list, ifilter(isEven, E(s)))
def test_ifilterfalse(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(ifilterfalse(isEven, g(s))), filter(isOdd, g(s)))
self.assertRaises(TypeError, ifilterfalse, isEven, X(s))
self.assertRaises(TypeError, list, ifilterfalse(isEven, N(s)))
self.assertRaises(ZeroDivisionError, list, ifilterfalse(isEven, E(s)))
def test_izip(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(izip(g(s))), zip(g(s)))
self.assertEqual(list(izip(g(s), g(s))), zip(g(s), g(s)))
self.assertRaises(TypeError, izip, X(s))
self.assertRaises(TypeError, list, izip(N(s)))
self.assertRaises(ZeroDivisionError, list, izip(E(s)))
def test_iziplongest(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(izip_longest(g(s))), zip(g(s)))
self.assertEqual(list(izip_longest(g(s), g(s))), zip(g(s), g(s)))
self.assertRaises(TypeError, izip_longest, X(s))
self.assertRaises(TypeError, list, izip_longest(N(s)))
self.assertRaises(ZeroDivisionError, list, izip_longest(E(s)))
def test_imap(self):
for s in (range(10), range(0), range(100), (7,11), xrange(20,50,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(imap(onearg, g(s))), map(onearg, g(s)))
self.assertEqual(list(imap(operator.pow, g(s), g(s))), map(operator.pow, g(s), g(s)))
self.assertRaises(TypeError, imap, onearg, X(s))
self.assertRaises(TypeError, list, imap(onearg, N(s)))
self.assertRaises(ZeroDivisionError, list, imap(onearg, E(s)))
def test_islice(self):
for s in ("12345", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(islice(g(s),1,None,2)), list(g(s))[1::2])
self.assertRaises(TypeError, islice, X(s), 10)
self.assertRaises(TypeError, list, islice(N(s), 10))
self.assertRaises(ZeroDivisionError, list, islice(E(s), 10))
def test_starmap(self):
for s in (range(10), range(0), range(100), (7,11), xrange(20,50,5)):
for g in (G, I, Ig, S, L, R):
ss = zip(s, s)
self.assertEqual(list(starmap(operator.pow, g(ss))), map(operator.pow, g(s), g(s)))
self.assertRaises(TypeError, starmap, operator.pow, X(ss))
self.assertRaises(TypeError, list, starmap(operator.pow, N(ss)))
self.assertRaises(ZeroDivisionError, list, starmap(operator.pow, E(ss)))
def test_takewhile(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not isEven(elem): break
tgt.append(elem)
self.assertEqual(list(takewhile(isEven, g(s))), tgt)
self.assertRaises(TypeError, takewhile, isEven, X(s))
self.assertRaises(TypeError, list, takewhile(isEven, N(s)))
self.assertRaises(ZeroDivisionError, list, takewhile(isEven, E(s)))
def test_dropwhile(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not tgt and isOdd(elem): continue
tgt.append(elem)
self.assertEqual(list(dropwhile(isOdd, g(s))), tgt)
self.assertRaises(TypeError, dropwhile, isOdd, X(s))
self.assertRaises(TypeError, list, dropwhile(isOdd, N(s)))
self.assertRaises(ZeroDivisionError, list, dropwhile(isOdd, E(s)))
def test_tee(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
it1, it2 = tee(g(s))
self.assertEqual(list(it1), list(g(s)))
self.assertEqual(list(it2), list(g(s)))
self.assertRaises(TypeError, tee, X(s))
self.assertRaises(TypeError, list, tee(N(s))[0])
self.assertRaises(ZeroDivisionError, list, tee(E(s))[0])
class LengthTransparency(unittest.TestCase):
    """repeat() should expose its remaining count through len()."""
    def test_repeat(self):
        # len here is the test_iterlen helper, which also consults the
        # length-hint machinery, not just __len__.
        from test.test_iterlen import len
        self.assertEqual(len(repeat(None, 50)), 50)
        # An unbounded repeat has no length to report.
        self.assertRaises(TypeError, len, repeat(None))
class RegressionTests(unittest.TestCase):
    """Regressions for specific SourceForge bug reports."""
    def test_sf_793826(self):
        # Fix Armin Rigo's successful efforts to wreak havoc
        def mutatingtuple(tuple1, f, tuple2):
            # this builds a tuple t which is a copy of tuple1,
            # then calls f(t), then mutates t to be equal to tuple2
            # (needs len(tuple1) == len(tuple2)).
            def g(value, first=[1]):
                # The mutable default makes this fire exactly once:
                # on the first call it drains z mid-construction.
                if first:
                    del first[:]
                    f(z.next())
                return value
            items = list(tuple2)
            items[1:1] = list(tuple1)
            gen = imap(g, items)
            z = izip(*[gen]*len(tuple1))
            z.next()
        def f(t):
            global T
            T = t
            first[:] = list(T)
        first = []
        mutatingtuple((1,2,3), f, (4,5,6))
        second = list(T)
        # The tuple seen inside f must not have been mutated afterwards.
        self.assertEqual(first, second)
    def test_sf_950057(self):
        # Make sure that chain() and cycle() catch exceptions immediately
        # rather than when shifting between input sources
        def gen1():
            hist.append(0)
            yield 1
            hist.append(1)
            raise AssertionError
            hist.append(2)
        def gen2(x):
            hist.append(3)
            yield 2
            hist.append(4)
            if x:
                raise StopIteration
        hist = []
        self.assertRaises(AssertionError, list, chain(gen1(), gen2(False)))
        # gen2 must never have been started: hist stops at gen1's failure.
        self.assertEqual(hist, [0,1])
        hist = []
        self.assertRaises(AssertionError, list, chain(gen1(), gen2(True)))
        self.assertEqual(hist, [0,1])
        hist = []
        self.assertRaises(AssertionError, list, cycle(gen1()))
        self.assertEqual(hist, [0,1])
class SubclassWithKwargsTest(unittest.TestCase):
    """Subclasses passing keyword arguments must not crash the C types."""
    def test_keywords_in_subclass(self):
        # count is not subclassable...
        for cls in (repeat, izip, ifilter, ifilterfalse, chain, imap,
                    starmap, islice, takewhile, dropwhile, cycle, compress):
            class Subclass(cls):
                def __init__(self, newarg=None, *args):
                    cls.__init__(self, *args)
            try:
                Subclass(newarg=1)
            except TypeError, err:
                # we expect type errors because of wrong argument count,
                # never the bogus "does not take keyword arguments" message
                self.assertNotIn("does not take keyword arguments", err.args[0])
libreftest = """ Doctest for examples in the library reference: libitertools.tex
>>> amounts = [120.15, 764.05, 823.14]
>>> for checknum, amount in izip(count(1200), amounts):
... print 'Check %d is for $%.2f' % (checknum, amount)
...
Check 1200 is for $120.15
Check 1201 is for $764.05
Check 1202 is for $823.14
>>> import operator
>>> for cube in imap(operator.pow, xrange(1,4), repeat(3)):
... print cube
...
1
8
27
>>> reportlines = ['EuroPython', 'Roster', '', 'alex', '', 'laura', '', 'martin', '', 'walter', '', 'samuele']
>>> for name in islice(reportlines, 3, None, 2):
... print name.title()
...
Alex
Laura
Martin
Walter
Samuele
>>> from operator import itemgetter
>>> d = dict(a=1, b=2, c=1, d=2, e=1, f=2, g=3)
>>> di = sorted(sorted(d.iteritems()), key=itemgetter(1))
>>> for k, g in groupby(di, itemgetter(1)):
... print k, map(itemgetter(0), g)
...
1 ['a', 'c', 'e']
2 ['b', 'd', 'f']
3 ['g']
# Find runs of consecutive numbers using groupby. The key to the solution
# is differencing with a range so that consecutive numbers all appear in
# same group.
>>> data = [ 1, 4,5,6, 10, 15,16,17,18, 22, 25,26,27,28]
>>> for k, g in groupby(enumerate(data), lambda t:t[0]-t[1]):
... print map(operator.itemgetter(1), g)
...
[1]
[4, 5, 6]
[10]
[15, 16, 17, 18]
[22]
[25, 26, 27, 28]
>>> def take(n, iterable):
... "Return first n items of the iterable as a list"
... return list(islice(iterable, n))
>>> def enumerate(iterable, start=0):
... return izip(count(start), iterable)
>>> def tabulate(function, start=0):
... "Return function(0), function(1), ..."
... return imap(function, count(start))
>>> def nth(iterable, n, default=None):
... "Returns the nth item or a default value"
... return next(islice(iterable, n, None), default)
>>> def all_equal(iterable):
... "Returns True if all the elements are equal to each other"
... g = groupby(iterable)
... return next(g, True) and not next(g, False)
>>> def quantify(iterable, pred=bool):
... "Count how many times the predicate is true"
... return sum(imap(pred, iterable))
>>> def padnone(iterable):
... "Returns the sequence elements and then returns None indefinitely"
... return chain(iterable, repeat(None))
>>> def ncycles(iterable, n):
... "Returns the sequence elements n times"
... return chain(*repeat(iterable, n))
>>> def dotproduct(vec1, vec2):
... return sum(imap(operator.mul, vec1, vec2))
>>> def flatten(listOfLists):
... return list(chain.from_iterable(listOfLists))
>>> def repeatfunc(func, times=None, *args):
... "Repeat calls to func with specified arguments."
... " Example: repeatfunc(random.random)"
... if times is None:
... return starmap(func, repeat(args))
... else:
... return starmap(func, repeat(args, times))
>>> def pairwise(iterable):
... "s -> (s0,s1), (s1,s2), (s2, s3), ..."
... a, b = tee(iterable)
... for elem in b:
... break
... return izip(a, b)
>>> def grouper(n, iterable, fillvalue=None):
... "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
... args = [iter(iterable)] * n
... return izip_longest(fillvalue=fillvalue, *args)
>>> def roundrobin(*iterables):
... "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
... # Recipe credited to George Sakkis
... pending = len(iterables)
... nexts = cycle(iter(it).next for it in iterables)
... while pending:
... try:
... for next in nexts:
... yield next()
... except StopIteration:
... pending -= 1
... nexts = cycle(islice(nexts, pending))
>>> def powerset(iterable):
... "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
... s = list(iterable)
... return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
>>> def unique_everseen(iterable, key=None):
... "List unique elements, preserving order. Remember all elements ever seen."
... # unique_everseen('AAAABBBCCDAABBB') --> A B C D
... # unique_everseen('ABBCcAD', str.lower) --> A B C D
... seen = set()
... seen_add = seen.add
... if key is None:
... for element in iterable:
... if element not in seen:
... seen_add(element)
... yield element
... else:
... for element in iterable:
... k = key(element)
... if k not in seen:
... seen_add(k)
... yield element
>>> def unique_justseen(iterable, key=None):
... "List unique elements, preserving order. Remember only the element just seen."
... # unique_justseen('AAAABBBCCDAABBB') --> A B C D A B
... # unique_justseen('ABBCcAD', str.lower) --> A B C A D
... return imap(next, imap(itemgetter(1), groupby(iterable, key)))
This is not part of the examples but it tests to make sure the definitions
perform as purported.
>>> take(10, count())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> list(enumerate('abc'))
[(0, 'a'), (1, 'b'), (2, 'c')]
>>> list(islice(tabulate(lambda x: 2*x), 4))
[0, 2, 4, 6]
>>> nth('abcde', 3)
'd'
>>> nth('abcde', 9) is None
True
>>> [all_equal(s) for s in ('', 'A', 'AAAA', 'AAAB', 'AAABA')]
[True, True, True, False, False]
>>> quantify(xrange(99), lambda x: x%2==0)
50
>>> a = [[1, 2, 3], [4, 5, 6]]
>>> flatten(a)
[1, 2, 3, 4, 5, 6]
>>> list(repeatfunc(pow, 5, 2, 3))
[8, 8, 8, 8, 8]
>>> import random
>>> take(5, imap(int, repeatfunc(random.random)))
[0, 0, 0, 0, 0]
>>> list(pairwise('abcd'))
[('a', 'b'), ('b', 'c'), ('c', 'd')]
>>> list(pairwise([]))
[]
>>> list(pairwise('a'))
[]
>>> list(islice(padnone('abc'), 0, 6))
['a', 'b', 'c', None, None, None]
>>> list(ncycles('abc', 3))
['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c']
>>> dotproduct([1,2,3], [4,5,6])
32
>>> list(grouper(3, 'abcdefg', 'x'))
[('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'x', 'x')]
>>> list(roundrobin('abc', 'd', 'ef'))
['a', 'd', 'e', 'b', 'f', 'c']
>>> list(powerset([1,2,3]))
[(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
>>> all(len(list(powerset(range(n)))) == 2**n for n in range(18))
True
>>> list(powerset('abcde')) == sorted(sorted(set(powerset('abcde'))), key=len)
True
>>> list(unique_everseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D']
>>> list(unique_everseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'D']
>>> list(unique_justseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D', 'A', 'B']
>>> list(unique_justseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'A', 'D']
"""
__test__ = {'libreftest' : libreftest}
def test_main(verbose=None):
    """Run all itertools test classes; with verbose on a debug build,
    also repeat the run to watch for reference-count leaks."""
    test_classes = (TestBasicOps, TestVariousIteratorArgs, TestGC,
                    RegressionTests, LengthTransparency,
                    SubclassWithKwargsTest, TestExamples)
    test_support.run_unittest(*test_classes)
    # verify reference counting
    # (sys.gettotalrefcount only exists on --with-pydebug builds)
    if verbose and hasattr(sys, "gettotalrefcount"):
        import gc
        counts = [None] * 5
        for i in xrange(len(counts)):
            test_support.run_unittest(*test_classes)
            gc.collect()
            counts[i] = sys.gettotalrefcount()
        print counts
    # doctest the examples in the library reference
    test_support.run_doctest(sys.modules[__name__], verbose)
if __name__ == "__main__":
test_main(verbose=True)
| {
"content_hash": "d474f08a3315e440ddccd5702e5cd494",
"timestamp": "",
"source": "github",
"line_count": 1717,
"max_line_length": 119,
"avg_line_length": 40.429819452533486,
"alnum_prop": 0.5404073871330203,
"repo_name": "wang1352083/pythontool",
"id": "8b5f051f18ee099a2e6e4187973913bb02783a8a",
"size": "69418",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python-2.7.12-lib/test/test_itertools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "252"
},
{
"name": "Groff",
"bytes": "21"
},
{
"name": "HTML",
"bytes": "153685"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Python",
"bytes": "18207080"
},
{
"name": "Shell",
"bytes": "390"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
import sys
import os

# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# Get the project root dir, which is the parent dir of this docs directory.
# NOTE(review): assumes sphinx-build is invoked from within docs/ —
# project_root is derived from the *current working directory*, not from
# __file__; verify this matches how the docs are built.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)

# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)

# Imported so version/release below can read prodomo.__version__.
import prodomo
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'prodomo'
copyright = u'2014, Giuseppe Acito'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = prodomo.__version__
# The full version, including alpha/beta/rc tags.
release = prodomo.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'prodomodoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'prodomo.tex',
u'prodomo Documentation',
u'Giuseppe Acito', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'prodomo',
u'prodomo Documentation',
[u'Giuseppe Acito'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'prodomo',
u'prodomo Documentation',
u'Giuseppe Acito',
'prodomo',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "2cce308ac4e931f3f8772dd9a62136f2",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 76,
"avg_line_length": 30.588461538461537,
"alnum_prop": 0.7022507229976109,
"repo_name": "giupo/prodomo",
"id": "9fa12a949f40e2f0a5101f8acc226ea2ff47cff6",
"size": "8398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "262"
},
{
"name": "Cucumber",
"bytes": "810"
},
{
"name": "HTML",
"bytes": "182"
},
{
"name": "JavaScript",
"bytes": "5778"
},
{
"name": "Makefile",
"bytes": "1272"
},
{
"name": "Python",
"bytes": "13532"
}
],
"symlink_target": ""
} |
"""
The VMware API VM utility module to build SOAP object specs.
"""
import copy
import functools
from oslo.config import cfg
from oslo.vmware import exceptions as vexc
from nova import exception
from nova.i18n import _
from nova.network import model as network_model
from nova.openstack.common import log as logging
from nova.openstack.common import units
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import vim_util
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
ALL_SUPPORTED_NETWORK_DEVICES = ['VirtualE1000', 'VirtualE1000e',
'VirtualPCNet32', 'VirtualSriovEthernetCard',
'VirtualVmxnet']
# A cache for VM references. The key will be the VM name
# and the value is the VM reference. The VM name is unique. This
# is either the UUID of the instance or UUID-rescue in the case
# that this is a rescue VM. This is in order to prevent
# unnecessary communication with the backend.
_VM_REFS_CACHE = {}
def vm_refs_cache_reset():
    """Reset the module-level VM reference cache to empty.

    Rebinds the module global rather than clearing in place, so any
    previously handed-out references to the old dict are unaffected.
    """
    global _VM_REFS_CACHE
    _VM_REFS_CACHE = {}
def vm_ref_cache_delete(id):
    """Drop the cached VM reference for *id*, silently ignoring misses.

    Note: the parameter intentionally shadows the builtin ``id`` to keep
    the existing keyword-call interface.
    """
    _VM_REFS_CACHE.pop(id, None)
def vm_ref_cache_update(id, vm_ref):
    """Store (or overwrite) the cached VM reference for *id*."""
    _VM_REFS_CACHE[id] = vm_ref
def vm_ref_cache_get(id):
    """Return the cached VM reference for *id*, or None on a miss."""
    return _VM_REFS_CACHE.get(id)
def _vm_ref_cache(id, func, session, data):
    """Return the VM reference for *id*, computing and caching it on a miss.

    On a miss (or a falsy cached value) the reference is fetched via
    ``func(session, data)`` and stored before being returned.
    """
    cached_ref = vm_ref_cache_get(id)
    if cached_ref:
        return cached_ref
    cached_ref = func(session, data)
    vm_ref_cache_update(id, cached_ref)
    return cached_ref
def vm_ref_cache_from_instance(func):
    """Decorator: memoize func(session, instance) keyed by instance UUID."""
    @functools.wraps(func)
    def wrapper(session, instance):
        # Cache key is the instance UUID (unique per VM; see module note).
        id = instance['uuid']
        return _vm_ref_cache(id, func, session, instance)
    return wrapper
def vm_ref_cache_from_name(func):
    """Decorator: memoize func(session, name) keyed by the VM name."""
    @functools.wraps(func)
    def wrapper(session, name):
        # The VM name itself is the cache key.
        id = name
        return _vm_ref_cache(id, func, session, name)
    return wrapper
# the config key which stores the VNC port
VNC_CONFIG_KEY = 'config.extraConfig["RemoteDisplay.vnc.port"]'
def _iface_id_option_value(client_factory, iface_id, port_index):
opt = client_factory.create('ns0:OptionValue')
opt.key = "nvp.iface-id.%d" % port_index
opt.value = iface_id
return opt
def get_vm_create_spec(client_factory, instance, name, data_store_name,
                       vif_infos, os_type=constants.DEFAULT_OS_TYPE,
                       allocations=None):
    """Builds the VM Create spec.

    :param client_factory: factory producing SOAP client objects
    :param instance: mapping; 'vcpus', 'memory_mb' and 'uuid' are read
    :param name: VM name, also used as the instanceUuid
    :param data_store_name: datastore that will hold the VM files
    :param vif_infos: vif dicts used to build the network device specs
    :param os_type: guest OS identifier assigned to guestId
    :param allocations: optional dict with cpu_limit / cpu_reservation /
        cpu_shares_level / cpu_shares_share entries
    """
    config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    config_spec.name = name
    config_spec.guestId = os_type
    # The name is the unique identifier for the VM. This will either be the
    # instance UUID or the instance UUID with suffix '-rescue' for VM's that
    # are in rescue mode
    config_spec.instanceUuid = name
    # Allow nested ESX instances to host 64 bit VMs.
    if os_type == "vmkernel5Guest":
        config_spec.nestedHVEnabled = "True"
    vm_file_info = client_factory.create('ns0:VirtualMachineFileInfo')
    vm_file_info.vmPathName = "[" + data_store_name + "]"
    config_spec.files = vm_file_info
    # VMware-tools event hooks: run tools scripts on power transitions.
    tools_info = client_factory.create('ns0:ToolsConfigInfo')
    tools_info.afterPowerOn = True
    tools_info.afterResume = True
    tools_info.beforeGuestStandby = True
    tools_info.beforeGuestShutdown = True
    tools_info.beforeGuestReboot = True
    config_spec.tools = tools_info
    config_spec.numCPUs = int(instance['vcpus'])
    config_spec.memoryMB = int(instance['memory_mb'])
    # Configure cpu information
    if (allocations is not None and
        ('cpu_limit' in allocations or
         'cpu_reservation' in allocations or
         'cpu_shares_level' in allocations)):
        allocation = client_factory.create('ns0:ResourceAllocationInfo')
        if 'cpu_limit' in allocations:
            allocation.limit = allocations['cpu_limit']
        if 'cpu_reservation' in allocations:
            allocation.reservation = allocations['cpu_reservation']
        if 'cpu_shares_level' in allocations:
            shares = client_factory.create('ns0:SharesInfo')
            shares.level = allocations['cpu_shares_level']
            if (shares.level == 'custom' and
                'cpu_shares_share' in allocations):
                shares.shares = allocations['cpu_shares_share']
            else:
                # Non-custom levels carry no explicit share count.
                shares.shares = 0
            allocation.shares = shares
        config_spec.cpuAllocation = allocation
    vif_spec_list = []
    for vif_info in vif_infos:
        vif_spec = _create_vif_spec(client_factory, vif_info)
        vif_spec_list.append(vif_spec)
    device_config_spec = vif_spec_list
    config_spec.deviceChange = device_config_spec
    # add vm-uuid and iface-id.x values for Neutron
    extra_config = []
    opt = client_factory.create('ns0:OptionValue')
    opt.key = "nvp.vm-uuid"
    opt.value = instance['uuid']
    extra_config.append(opt)
    port_index = 0
    for vif_info in vif_infos:
        if vif_info['iface_id']:
            extra_config.append(_iface_id_option_value(client_factory,
                                                       vif_info['iface_id'],
                                                       port_index))
            # Only vifs that carry an iface_id consume a port index.
            port_index += 1
    config_spec.extraConfig = extra_config
    return config_spec
def get_vm_resize_spec(client_factory, instance):
    """Build a reconfigure spec carrying the instance's new CPU/memory sizing.

    Reads 'vcpus' and 'memory_mb' from the instance mapping and returns a
    VirtualMachineConfigSpec holding only those two fields.
    """
    spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    spec.memoryMB = int(instance['memory_mb'])
    spec.numCPUs = int(instance['vcpus'])
    return spec
def create_controller_spec(client_factory, key,
                           adapter_type=constants.DEFAULT_ADAPTER_TYPE):
    """Builds a Config Spec for the LSI or Bus Logic Controller's addition
    which acts as the controller for the virtual hard disk to be attached
    to the VM.

    :param key: device key assigned to the new controller
    :param adapter_type: one of the constants.ADAPTER_TYPE_* values;
        anything unrecognized falls back to the LSI Logic controller
    """
    # Create a controller for the Virtual Hard Disk
    virtual_device_config = client_factory.create(
        'ns0:VirtualDeviceConfigSpec')
    virtual_device_config.operation = "add"
    if adapter_type == constants.ADAPTER_TYPE_BUSLOGIC:
        virtual_controller = client_factory.create(
            'ns0:VirtualBusLogicController')
    elif adapter_type == constants.ADAPTER_TYPE_LSILOGICSAS:
        virtual_controller = client_factory.create(
            'ns0:VirtualLsiLogicSASController')
    elif adapter_type == constants.ADAPTER_TYPE_PARAVIRTUAL:
        virtual_controller = client_factory.create(
            'ns0:ParaVirtualSCSIController')
    else:
        # Default: plain LSI Logic controller.
        virtual_controller = client_factory.create(
            'ns0:VirtualLsiLogicController')
    virtual_controller.key = key
    virtual_controller.busNumber = 0
    virtual_controller.sharedBus = "noSharing"
    virtual_device_config.device = virtual_controller
    return virtual_device_config
def convert_vif_model(name):
    """Converts standard VIF_MODEL types to the internal VMware ones.

    Unknown/unsupported device names raise exception.Invalid.
    """
    translations = {
        network_model.VIF_MODEL_E1000: 'VirtualE1000',
        network_model.VIF_MODEL_E1000E: 'VirtualE1000e',
    }
    name = translations.get(name, name)
    if name not in ALL_SUPPORTED_NETWORK_DEVICES:
        msg = _('%s is not supported.') % name
        raise exception.Invalid(msg)
    return name
def _create_vif_spec(client_factory, vif_info):
    """Builds a config spec for the addition of a new network
    adapter to the VM.

    vif_info is a mapping providing 'vif_model', 'network_ref',
    'network_name' and 'mac_address'. Note that 'vif_model' is
    normalized in place via convert_vif_model().
    """
    network_spec = client_factory.create('ns0:VirtualDeviceConfigSpec')
    network_spec.operation = "add"
    # Keep compatible with other Hyper vif model parameter.
    vif_info['vif_model'] = convert_vif_model(vif_info['vif_model'])
    vif = 'ns0:' + vif_info['vif_model']
    net_device = client_factory.create(vif)
    # NOTE(asomya): Only works on ESXi if the portgroup binding is set to
    # ephemeral. Invalid configuration if set to static and the NIC does
    # not come up on boot if set to dynamic.
    network_ref = vif_info['network_ref']
    network_name = vif_info['network_name']
    mac_address = vif_info['mac_address']
    backing = None
    # Choose the backing object by network type: opaque (NSX),
    # distributed virtual portgroup, or plain named network.
    if network_ref and network_ref['type'] == 'OpaqueNetwork':
        backing_name = ''.join(['ns0:VirtualEthernetCard',
                                'OpaqueNetworkBackingInfo'])
        backing = client_factory.create(backing_name)
        backing.opaqueNetworkId = network_ref['network-id']
        backing.opaqueNetworkType = network_ref['network-type']
    elif (network_ref and
            network_ref['type'] == "DistributedVirtualPortgroup"):
        backing_name = ''.join(['ns0:VirtualEthernetCardDistributed',
                                'VirtualPortBackingInfo'])
        backing = client_factory.create(backing_name)
        portgroup = client_factory.create(
            'ns0:DistributedVirtualSwitchPortConnection')
        portgroup.switchUuid = network_ref['dvsw']
        portgroup.portgroupKey = network_ref['dvpg']
        backing.port = portgroup
    else:
        backing = client_factory.create(
            'ns0:VirtualEthernetCardNetworkBackingInfo')
        backing.deviceName = network_name
    connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
    connectable_spec.startConnected = True
    connectable_spec.allowGuestControl = True
    connectable_spec.connected = True
    net_device.connectable = connectable_spec
    net_device.backing = backing
    # The Server assigns a Key to the device. Here we pass a -ve temporary key.
    # -ve because actual keys are +ve numbers and we don't
    # want a clash with the key that server might associate with the device
    net_device.key = -47
    net_device.addressType = "manual"
    net_device.macAddress = mac_address
    net_device.wakeOnLanEnabled = True
    network_spec.device = net_device
    return network_spec
def get_network_attach_config_spec(client_factory, vif_info, index):
    """Builds the vif attach config spec.

    Adds a single network device built from vif_info; when the vif has an
    iface_id, the Neutron iface-id/port-index pairing is recorded in
    extraConfig as well.
    """
    config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    vif_spec = _create_vif_spec(client_factory, vif_info)
    config_spec.deviceChange = [vif_spec]
    if vif_info['iface_id'] is not None:
        config_spec.extraConfig = [_iface_id_option_value(client_factory,
                                                          vif_info['iface_id'],
                                                          index)]
    return config_spec
def get_network_detach_config_spec(client_factory, device, port_index):
    """Builds the vif detach config spec."""
    config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    device_change = client_factory.create('ns0:VirtualDeviceConfigSpec')
    device_change.operation = "remove"
    device_change.device = device
    config_spec.deviceChange = [device_change]
    # An extraConfig key that is already present cannot be deleted, only
    # updated, so mark this port slot as 'free' for reuse by a later
    # attachment. The keys themselves must be preserved: the ESX side
    # wires up networking according to these values and changing them
    # would break traffic to and from the interface.
    config_spec.extraConfig = [_iface_id_option_value(client_factory,
                                                      'free',
                                                      port_index)]
    return config_spec
def get_vmdk_attach_config_spec(client_factory,
                                disk_type=constants.DEFAULT_DISK_TYPE,
                                file_path=None,
                                disk_size=None,
                                linked_clone=False,
                                controller_key=None,
                                unit_number=None,
                                device_name=None):
    """Builds the vmdk attach config spec."""
    config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    disk_spec = create_virtual_disk_spec(client_factory, controller_key,
                                         disk_type, file_path, disk_size,
                                         linked_clone, unit_number,
                                         device_name)
    config_spec.deviceChange = [disk_spec]
    return config_spec
def get_cdrom_attach_config_spec(client_factory,
                                 datastore,
                                 file_path,
                                 controller_key,
                                 cdrom_unit_number):
    """Builds and returns the cdrom attach config spec."""
    config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    cdrom_spec = create_virtual_cdrom_spec(client_factory,
                                           datastore,
                                           controller_key,
                                           file_path,
                                           cdrom_unit_number)
    config_spec.deviceChange = [cdrom_spec]
    return config_spec
def get_vmdk_detach_config_spec(client_factory, device,
                                destroy_disk=False):
    """Builds the vmdk detach config spec."""
    config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    detach_spec = detach_virtual_disk_spec(client_factory,
                                           device,
                                           destroy_disk)
    config_spec.deviceChange = [detach_spec]
    return config_spec
def get_vm_extra_config_spec(client_factory, extra_opts):
    """Builds extra spec fields from a dictionary.

    :param client_factory: factory used to build vSphere API objects
    :param extra_opts: dict of extraConfig key/value pairs to set
    :return: a VirtualMachineConfigSpec with extraConfig populated
    """
    config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    # Translate each key/value pair into an OptionValue object.
    # NOTE: dict.items() is used instead of the Python-2-only iteritems()
    # so the code also runs under Python 3; items() exists on both.
    extra_config = []
    for key, value in extra_opts.items():
        opt = client_factory.create('ns0:OptionValue')
        opt.key = key
        opt.value = value
        extra_config.append(opt)
    config_spec.extraConfig = extra_config
    return config_spec
def get_vmdk_path(session, vm_ref, instance):
    """Gets the vmdk file path for specified instance."""
    hardware_devices = session._call_method(
        vim_util, "get_dynamic_property", vm_ref,
        "VirtualMachine", "config.hardware.device")
    # Only the path component of the (path, adapter, disk_type) tuple is
    # needed here.
    vmdk_path, _adapter_type, _disk_type = get_vmdk_path_and_adapter_type(
        hardware_devices, uuid=instance['uuid'])
    return vmdk_path
def get_vmdk_path_and_adapter_type(hardware_devices, uuid=None):
    """Gets the vmdk file path and the storage adapter type.

    Walks the VM's hardware devices and returns a tuple
    (vmdk_file_path, adapter_type, disk_type). When ``uuid`` is given,
    only a flat-file backing whose fileName contains the uuid updates the
    returned path; otherwise the last flat-backed disk seen wins.
    """
    if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
        # Unwrap the suds array wrapper to get the plain device list.
        hardware_devices = hardware_devices.VirtualDevice
    vmdk_file_path = None
    vmdk_controller_key = None
    disk_type = None
    adapter_type_dict = {}
    for device in hardware_devices:
        if device.__class__.__name__ == "VirtualDisk":
            if device.backing.__class__.__name__ == \
                    "VirtualDiskFlatVer2BackingInfo":
                if uuid:
                    if uuid in device.backing.fileName:
                        vmdk_file_path = device.backing.fileName
                else:
                    vmdk_file_path = device.backing.fileName
                # NOTE(review): the controller key and disk type are
                # refreshed for every flat-backed disk, even when the uuid
                # filter did not match this device — confirm intended.
                vmdk_controller_key = device.controllerKey
                if getattr(device.backing, 'thinProvisioned', False):
                    disk_type = "thin"
                else:
                    if getattr(device.backing, 'eagerlyScrub', False):
                        disk_type = "eagerZeroedThick"
                    else:
                        disk_type = constants.DEFAULT_DISK_TYPE
        # Map each controller's key to its adapter type so the disk's
        # controllerKey can be resolved to an adapter type below.
        elif device.__class__.__name__ == "VirtualLsiLogicController":
            adapter_type_dict[device.key] = constants.DEFAULT_ADAPTER_TYPE
        elif device.__class__.__name__ == "VirtualBusLogicController":
            adapter_type_dict[device.key] = constants.ADAPTER_TYPE_BUSLOGIC
        elif device.__class__.__name__ == "VirtualIDEController":
            adapter_type_dict[device.key] = constants.ADAPTER_TYPE_IDE
        elif device.__class__.__name__ == "VirtualLsiLogicSASController":
            adapter_type_dict[device.key] = constants.ADAPTER_TYPE_LSILOGICSAS
        elif device.__class__.__name__ == "ParaVirtualSCSIController":
            adapter_type_dict[device.key] = constants.ADAPTER_TYPE_PARAVIRTUAL
    # Empty string when the disk's controller was not seen (or no disk).
    adapter_type = adapter_type_dict.get(vmdk_controller_key, "")
    return (vmdk_file_path, adapter_type, disk_type)
def _find_controller_slot(controller_keys, taken, max_unit_number):
for controller_key in controller_keys:
for unit_number in range(max_unit_number):
if unit_number not in taken.get(controller_key, []):
return controller_key, unit_number
def _is_ide_controller(device):
return device.__class__.__name__ == 'VirtualIDEController'
def _is_scsi_controller(device):
return device.__class__.__name__ in ['VirtualLsiLogicController',
'VirtualLsiLogicSASController',
'VirtualBusLogicController',
'ParaVirtualSCSIController']
def _find_allocated_slots(devices):
    """Return dictionary which maps controller_key to list of allocated unit
    numbers for that controller_key.
    """
    taken = {}
    for dev in devices:
        if hasattr(dev, 'controllerKey') and hasattr(dev, 'unitNumber'):
            taken.setdefault(dev.controllerKey, []).append(dev.unitNumber)
        if _is_scsi_controller(dev):
            # A SCSI controller also occupies a unit on its own bus.
            taken.setdefault(dev.key, []).append(dev.scsiCtlrUnitNumber)
    return taken
def allocate_controller_key_and_unit_number(client_factory, devices,
                                            adapter_type):
    """This function inspects the current set of hardware devices and returns
    controller_key and unit_number that can be used for attaching a new virtual
    disk to adapter with the given adapter_type.

    :param client_factory: factory used to build vSphere API objects
    :param devices: the VM's current devices (or ArrayOfVirtualDevice)
    :param adapter_type: adapter type string for the new disk
    :return: (controller_key, unit_number, controller_spec);
        controller_spec is None when an existing controller had a free
        slot, otherwise a spec for a new controller (temporary negative
        key) that must be added to the VM alongside the disk.
    """
    if devices.__class__.__name__ == "ArrayOfVirtualDevice":
        devices = devices.VirtualDevice
    taken = _find_allocated_slots(devices)
    ret = None
    if adapter_type == constants.ADAPTER_TYPE_IDE:
        # IDE controllers have at most 2 unit slots.
        ide_keys = [dev.key for dev in devices if _is_ide_controller(dev)]
        ret = _find_controller_slot(ide_keys, taken, 2)
    elif adapter_type in [constants.DEFAULT_ADAPTER_TYPE,
                          constants.ADAPTER_TYPE_LSILOGICSAS,
                          constants.ADAPTER_TYPE_BUSLOGIC,
                          constants.ADAPTER_TYPE_PARAVIRTUAL]:
        # SCSI buses expose 16 units; the controller's own slot is already
        # marked taken by _find_allocated_slots.
        scsi_keys = [dev.key for dev in devices if _is_scsi_controller(dev)]
        ret = _find_controller_slot(scsi_keys, taken, 16)
    if ret:
        return ret[0], ret[1], None
    # No free slot found: create new controller with the specified type and
    # return its spec. The negative key is temporary; the server assigns the
    # real (positive) key.
    controller_key = -101
    controller_spec = create_controller_spec(client_factory, controller_key,
                                             adapter_type)
    return controller_key, 0, controller_spec
def get_rdm_disk(hardware_devices, uuid):
    """Gets the RDM disk key."""
    if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
        hardware_devices = hardware_devices.VirtualDevice
    for device in hardware_devices:
        if device.__class__.__name__ != "VirtualDisk":
            continue
        backing = device.backing
        if (backing.__class__.__name__ ==
                "VirtualDiskRawDiskMappingVer1BackingInfo" and
                backing.lunUuid == uuid):
            return device
def get_vmdk_create_spec(client_factory, size_in_kb,
                         adapter_type=constants.DEFAULT_ADAPTER_TYPE,
                         disk_type=constants.DEFAULT_DISK_TYPE):
    """Builds the virtual disk create spec."""
    spec = client_factory.create('ns0:FileBackedVirtualDiskSpec')
    # Translate the adapter type to one the Virtual Disk Manager accepts.
    spec.adapterType = get_vmdk_adapter_type(adapter_type)
    spec.diskType = disk_type
    spec.capacityKb = size_in_kb
    return spec
def create_virtual_cdrom_spec(client_factory,
                              datastore,
                              controller_key,
                              file_path,
                              cdrom_unit_number):
    """Builds spec for the creation of a new Virtual CDROM to the VM."""
    config_spec = client_factory.create('ns0:VirtualDeviceConfigSpec')
    config_spec.operation = "add"

    backing = client_factory.create('ns0:VirtualCdromIsoBackingInfo')
    backing.datastore = datastore
    backing.fileName = file_path

    connectable = client_factory.create('ns0:VirtualDeviceConnectInfo')
    connectable.startConnected = True
    connectable.allowGuestControl = False
    connectable.connected = True

    cdrom = client_factory.create('ns0:VirtualCdrom')
    cdrom.backing = backing
    cdrom.controllerKey = controller_key
    cdrom.unitNumber = cdrom_unit_number
    # Temporary negative key; the server assigns the real (positive) key.
    cdrom.key = -1
    cdrom.connectable = connectable

    config_spec.device = cdrom
    return config_spec
def create_virtual_disk_spec(client_factory, controller_key,
                             disk_type=constants.DEFAULT_DISK_TYPE,
                             file_path=None,
                             disk_size=None,
                             linked_clone=False,
                             unit_number=None,
                             device_name=None):
    """Builds spec for the creation of a new/ attaching of an already existing
    Virtual Disk to the VM.

    :param client_factory: factory used to build vSphere API objects
    :param controller_key: key of the controller the disk attaches to
    :param disk_type: provisioning type; "rdm"/"rdmp" select raw device
        mapping backings
    :param file_path: existing vmdk path, or None to create a new backing
    :param disk_size: capacity in KB (defaults to 0)
    :param linked_clone: back the disk by a child of the original backing
    :param unit_number: unit slot on the controller (defaults to 0)
    :param device_name: device name, used for raw device mapping backings
    """
    virtual_device_config = client_factory.create(
        'ns0:VirtualDeviceConfigSpec')
    virtual_device_config.operation = "add"
    if (file_path is None) or linked_clone:
        # A new backing file must be created on the datastore.
        virtual_device_config.fileOperation = "create"
    virtual_disk = client_factory.create('ns0:VirtualDisk')
    if disk_type == "rdm" or disk_type == "rdmp":
        # Raw device mapping: "rdm" is virtual compatibility mode,
        # "rdmp" is physical compatibility mode.
        disk_file_backing = client_factory.create(
            'ns0:VirtualDiskRawDiskMappingVer1BackingInfo')
        disk_file_backing.compatibilityMode = "virtualMode" \
            if disk_type == "rdm" else "physicalMode"
        disk_file_backing.diskMode = "independent_persistent"
        disk_file_backing.deviceName = device_name or ""
    else:
        disk_file_backing = client_factory.create(
            'ns0:VirtualDiskFlatVer2BackingInfo')
        disk_file_backing.diskMode = "persistent"
        if disk_type == "thin":
            disk_file_backing.thinProvisioned = True
        else:
            if disk_type == "eagerZeroedThick":
                disk_file_backing.eagerlyScrub = True
        disk_file_backing.fileName = file_path or ""
    connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
    connectable_spec.startConnected = True
    connectable_spec.allowGuestControl = False
    connectable_spec.connected = True
    if not linked_clone:
        virtual_disk.backing = disk_file_backing
    else:
        # Linked clone: the new (empty) backing's parent is the original
        # backing, giving copy-on-write semantics against the parent disk.
        virtual_disk.backing = copy.copy(disk_file_backing)
        virtual_disk.backing.fileName = ""
        virtual_disk.backing.parent = disk_file_backing
    virtual_disk.connectable = connectable_spec
    # The Server assigns a Key to the device. Here we pass a -ve random key.
    # -ve because actual keys are +ve numbers and we don't
    # want a clash with the key that server might associate with the device
    virtual_disk.key = -100
    virtual_disk.controllerKey = controller_key
    virtual_disk.unitNumber = unit_number or 0
    virtual_disk.capacityInKB = disk_size or 0
    virtual_device_config.device = virtual_disk
    return virtual_device_config
def detach_virtual_disk_spec(client_factory, device, destroy_disk=False):
    """Builds spec for the detach of an already existing Virtual Disk from VM.
    """
    spec = client_factory.create('ns0:VirtualDeviceConfigSpec')
    spec.operation = "remove"
    if destroy_disk:
        # Also delete the backing files from the datastore.
        spec.fileOperation = "destroy"
    spec.device = device
    return spec
def clone_vm_spec(client_factory, location,
                  power_on=False, snapshot=None, template=False, config=None):
    """Builds the VM clone spec."""
    clone_spec = client_factory.create('ns0:VirtualMachineCloneSpec')
    clone_spec.location = location
    clone_spec.powerOn = power_on
    clone_spec.template = template
    # Optional fields are only set when supplied by the caller.
    if snapshot:
        clone_spec.snapshot = snapshot
    if config is not None:
        clone_spec.config = config
    return clone_spec
def relocate_vm_spec(client_factory, datastore=None, host=None,
                     disk_move_type="moveAllDiskBackingsAndAllowSharing"):
    """Builds the VM relocation spec."""
    spec = client_factory.create('ns0:VirtualMachineRelocateSpec')
    spec.diskMoveType = disk_move_type
    spec.datastore = datastore
    # The host is optional; omit the field entirely when not supplied.
    if host:
        spec.host = host
    return spec
def get_machine_id_change_spec(client_factory, machine_id_str):
    """Builds the machine id change config spec."""
    opt = client_factory.create('ns0:OptionValue')
    opt.key = "machine.id"
    opt.value = machine_id_str
    spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    spec.extraConfig = [opt]
    return spec
def get_add_vswitch_port_group_spec(client_factory, vswitch_name,
                                    port_group_name, vlan_id):
    """Builds the virtual switch port group add spec."""
    spec = client_factory.create('ns0:HostPortGroupSpec')
    spec.name = port_group_name
    spec.vswitchName = vswitch_name
    # VLAN ID of 0 means that VLAN tagging is not to be done for the network.
    spec.vlanId = int(vlan_id)
    nicteaming = client_factory.create('ns0:HostNicTeamingPolicy')
    nicteaming.notifySwitches = True
    policy = client_factory.create('ns0:HostNetworkPolicy')
    policy.nicTeaming = nicteaming
    spec.policy = policy
    return spec
def get_vnc_config_spec(client_factory, port):
    """Builds the vnc config spec."""
    opt_enabled = client_factory.create('ns0:OptionValue')
    opt_enabled.key = "RemoteDisplay.vnc.enabled"
    opt_enabled.value = "true"
    opt_port = client_factory.create('ns0:OptionValue')
    opt_port.key = "RemoteDisplay.vnc.port"
    opt_port.value = port
    spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    spec.extraConfig = [opt_enabled, opt_port]
    return spec
def get_vnc_port(session):
    """Return VNC port for an VM or None if there is no available port."""
    min_port = CONF.vmware.vnc_port
    max_port = min_port + CONF.vmware.vnc_port_total
    allocated_ports = _get_allocated_vnc_ports(session)
    # Pick the first port in the configured range not already in use.
    for port in range(min_port, max_port):
        if port not in allocated_ports:
            return port
    raise exception.ConsolePortRangeExhausted(min_port=min_port,
                                              max_port=max_port)
def _get_allocated_vnc_ports(session):
    """Return an integer set of all allocated VNC ports."""
    # TODO(rgerganov): bug #1256944
    # The VNC port should be unique per host, not per vCenter
    vnc_ports = set()
    # Query every VM for its VNC_CONFIG_KEY extraConfig option.
    result = session._call_method(vim_util, "get_objects",
                                  "VirtualMachine", [VNC_CONFIG_KEY])
    while result:
        for obj in result.objects:
            if not hasattr(obj, 'propSet'):
                # VM without the VNC option configured; nothing to record.
                continue
            dynamic_prop = obj.propSet[0]
            option_value = dynamic_prop.val
            vnc_port = option_value.value
            vnc_ports.add(int(vnc_port))
        # Results are paginated; keep fetching while a token is returned.
        token = _get_token(result)
        if token:
            result = session._call_method(vim_util,
                                          "continue_to_get_objects",
                                          token)
        else:
            break
    return vnc_ports
# NOTE(mdbooth): this convenience function is temporarily duplicated in
# ds_util. The correct fix is to handle paginated results as they are returned
# from the relevant vim_util function. However, vim_util is currently
# effectively deprecated as we migrate to oslo.vmware. This duplication will be
# removed when we fix it properly in oslo.vmware.
def _get_token(results):
"""Get the token from the property results."""
return getattr(results, 'token', None)
def _get_reference_for_value(results, value):
for object in results.objects:
if object.obj.value == value:
return object
def _get_object_for_value(results, value):
for object in results.objects:
if object.propSet[0].val == value:
return object.obj
def _get_object_for_optionvalue(results, value):
for object in results.objects:
if hasattr(object, "propSet") and object.propSet:
if object.propSet[0].val.value == value:
return object.obj
def _get_object_from_results(session, results, value, func):
    """Walk paginated results, applying func until it yields a match."""
    while results:
        token = _get_token(results)
        found = func(results, value)
        if found:
            if token:
                # A match was found with pages still pending; tell the
                # server to drop the rest of the retrieval.
                session._call_method(vim_util, "cancel_retrieve", token)
            return found
        if not token:
            return None
        results = session._call_method(vim_util,
                                       "continue_to_get_objects",
                                       token)
def _cancel_retrieve_if_necessary(session, results):
    """Cancel an in-progress paginated retrieval, if one is pending."""
    token = _get_token(results)
    if token:
        session._call_method(vim_util, "cancel_retrieve", token)
def _get_vm_ref_from_name(session, vm_name):
    """Get reference to the VM with the name specified."""
    name_props = session._call_method(
        vim_util, "get_objects", "VirtualMachine", ["name"])
    return _get_object_from_results(
        session, name_props, vm_name, _get_object_for_value)
@vm_ref_cache_from_name
def get_vm_ref_from_name(session, vm_name):
    """Look up a VM ref by name, trying the uuid-based search first."""
    vm_ref = _get_vm_ref_from_vm_uuid(session, vm_name)
    if vm_ref:
        return vm_ref
    return _get_vm_ref_from_name(session, vm_name)
def _get_vm_ref_from_uuid(session, instance_uuid):
    """Get reference to the VM with the uuid specified.

    This method reads all of the names of the VM's that are running
    on the backend, then it filters locally the matching
    instance_uuid. It is far more optimal to use
    _get_vm_ref_from_vm_uuid.
    """
    vm_objs = session._call_method(vim_util, "get_objects",
                                   "VirtualMachine", ["name"])
    return _get_object_from_results(session, vm_objs, instance_uuid,
                                    _get_object_for_value)
def _get_vm_ref_from_vm_uuid(session, instance_uuid):
    """Get reference to the VM.

    The method will make use of FindAllByUuid to get the VM reference.
    This method finds all VM's on the backend that match the
    instance_uuid, more specifically all VM's on the backend that have
    'config_spec.instanceUuid' set to 'instance_uuid'.
    """
    vim = session._get_vim()
    vm_refs = session._call_method(
        vim,
        "FindAllByUuid",
        vim.service_content.searchIndex,
        uuid=instance_uuid,
        vmSearch=True,
        instanceUuid=True)
    return vm_refs[0] if vm_refs else None
def _get_vm_ref_from_extraconfig(session, instance_uuid):
    """Get reference to the VM with the uuid specified."""
    vm_objs = session._call_method(
        vim_util, "get_objects", "VirtualMachine",
        ['config.extraConfig["nvp.vm-uuid"]'])
    return _get_object_from_results(session, vm_objs, instance_uuid,
                                    _get_object_for_optionvalue)
@vm_ref_cache_from_instance
def get_vm_ref(session, instance):
    """Get reference to the VM through uuid or vm name."""
    uuid = instance['uuid']
    vm_ref = search_vm_ref_by_identifier(session, uuid)
    if not vm_ref:
        # Fall back to looking the VM up by its display name.
        vm_ref = _get_vm_ref_from_name(session, instance['name'])
    if vm_ref is None:
        raise exception.InstanceNotFound(instance_id=uuid)
    return vm_ref
def search_vm_ref_by_identifier(session, identifier):
    """Searches VM reference using the identifier.

    This method is primarily meant to separate out part of the logic for
    vm_ref search that could be use directly in the special case of
    migrating the instance. For querying VM linked to an instance always
    use get_vm_ref instead.
    """
    # Try the cheapest lookup first, falling back to slower searches.
    for finder in (_get_vm_ref_from_vm_uuid,
                   _get_vm_ref_from_extraconfig,
                   _get_vm_ref_from_uuid):
        vm_ref = finder(session, identifier)
        if vm_ref:
            return vm_ref
    return None
def get_host_ref_from_id(session, host_id, property_list=None):
    """Get a host reference object for a host_id string."""
    props = ['name'] if property_list is None else property_list
    host_refs = session._call_method(vim_util, "get_objects",
                                     "HostSystem", props)
    return _get_object_from_results(session, host_refs, host_id,
                                    _get_reference_for_value)
def get_host_id_from_vm_ref(session, vm_ref):
    """Find the managed object ID of the host running a VM.

    Since vMotion can change the value, you should not presume that this
    is a value that you can cache for very long and should be prepared
    to allow for it to change.

    :param session: a vSphere API connection
    :param vm_ref: a reference object to the running VM
    :return: the host_id running the virtual machine
    """
    # 'runtime.host' holds the host managed object reference.
    property_name = 'runtime.host'
    # A property collector in the VMware vSphere Management API is a set
    # of local representations of remote values; property_set is the
    # local representation of the properties queried for.
    property_set = session._call_method(
        vim_util, "get_object_properties",
        None, vm_ref, vm_ref._type, [property_name])
    prop = property_from_property_set(
        property_name, property_set)
    if prop is None:
        # Reaching here represents an impossible state.
        raise RuntimeError(
            "Virtual Machine %s exists without a runtime.host!"
            % (vm_ref))
    return prop.val.value
def property_from_property_set(property_name, property_set):
    '''Use this method to filter property collector results.

    Because network traffic is expensive, multiple
    VMwareAPI calls will sometimes pile-up properties
    to be collected. That means results may contain
    many different values for multiple purposes.

    This helper will filter a list for a single result
    and filter the properties of that result to find
    the single value of whatever type resides in that
    result. This could be a ManagedObjectReference ID
    or a complex value.

    :param property_name: name of property you want
    :param property_set: all results from query
    :return: the value of the property.
    '''
    for obj_content in property_set.objects:
        match = _property_from_propSet(obj_content.propSet, property_name)
        if match is not None:
            return match
def _property_from_propSet(propSet, name='name'):
for p in propSet:
if p.name == name:
return p
def get_host_ref_for_vm(session, instance, props):
    """Get the ESXi host running a VM by its name."""
    vm_ref = get_vm_ref(session, instance)
    host_id = get_host_id_from_vm_ref(session, vm_ref)
    host_ref = get_host_ref_from_id(session, host_id, props)
    return host_ref
def get_host_name_for_vm(session, instance):
    """Get the ESXi host running a VM by its name."""
    return get_host_name_from_host_ref(
        get_host_ref_for_vm(session, instance, ['name']))
def get_host_name_from_host_ref(host_ref):
    """Return the 'name' property value from a host ref, or None."""
    name_prop = _property_from_propSet(host_ref.propSet)
    if name_prop is not None:
        return name_prop.val
def get_vm_state_from_name(session, vm_name):
    """Return the runtime power state of the VM with the given name."""
    vm_ref = get_vm_ref_from_name(session, vm_name)
    return session._call_method(vim_util, "get_dynamic_property",
                                vm_ref, "VirtualMachine",
                                "runtime.powerState")
def get_stats_from_cluster(session, cluster):
    """Get the aggregate resource stats of a cluster.

    Returns {'cpu': cpu_info, 'mem': mem_info} where cpu_info counts only
    hosts that are connected and not in maintenance mode, and mem_info is
    derived from the cluster root resource pool's runtime memory summary
    (values in MB).
    """
    cpu_info = {'vcpus': 0, 'cores': 0, 'vendor': [], 'model': []}
    mem_info = {'total': 0, 'free': 0}
    # Get the Host and Resource Pool Managed Object Refs
    prop_dict = session._call_method(vim_util, "get_dynamic_properties",
                                     cluster, "ClusterComputeResource",
                                     ["host", "resourcePool"])
    if prop_dict:
        host_ret = prop_dict.get('host')
        if host_ret:
            host_mors = host_ret.ManagedObjectReference
            # Fetch hardware and runtime summaries for every host at once.
            result = session._call_method(vim_util,
                         "get_properties_for_a_collection_of_objects",
                         "HostSystem", host_mors,
                         ["summary.hardware", "summary.runtime"])
            for obj in result.objects:
                hardware_summary = obj.propSet[0].val
                runtime_summary = obj.propSet[1].val
                if (runtime_summary.inMaintenanceMode is False and
                    runtime_summary.connectionState == "connected"):
                    # Total vcpus is the sum of all pCPUs of individual hosts
                    # The overcommitment ratio is factored in by the scheduler
                    cpu_info['vcpus'] += hardware_summary.numCpuThreads
                    cpu_info['cores'] += hardware_summary.numCpuCores
                    cpu_info['vendor'].append(hardware_summary.vendor)
                    cpu_info['model'].append(hardware_summary.cpuModel)

        res_mor = prop_dict.get('resourcePool')
        if res_mor:
            res_usage = session._call_method(vim_util, "get_dynamic_property",
                            res_mor, "ResourcePool", "summary.runtime.memory")
            if res_usage:
                # maxUsage is the memory limit of the cluster available to VM's
                mem_info['total'] = int(res_usage.maxUsage / units.Mi)
                # overallUsage is the hypervisor's view of memory usage by VM's
                consumed = int(res_usage.overallUsage / units.Mi)
                mem_info['free'] = mem_info['total'] - consumed
    stats = {'cpu': cpu_info, 'mem': mem_info}
    return stats
def get_host_ref(session, cluster=None):
    """Get reference to a host within the cluster specified."""
    if cluster is None:
        # No cluster named: pick the first host in the whole inventory.
        results = session._call_method(vim_util, "get_objects",
                                       "HostSystem")
        _cancel_retrieve_if_necessary(session, results)
        return results.objects[0].obj
    host_ret = session._call_method(vim_util, "get_dynamic_property",
                                    cluster, "ClusterComputeResource",
                                    "host")
    if not host_ret or not host_ret.ManagedObjectReference:
        msg = _('No host available on cluster')
        raise exception.NoValidHost(reason=msg)
    return host_ret.ManagedObjectReference[0]
def propset_dict(propset):
    """Turn a propset list into a dictionary

    PropSet is an optional attribute on ObjectContent objects
    that are returned by the VMware API.

    You can read more about these at:
    | http://pubs.vmware.com/vsphere-51/index.jsp
    | #com.vmware.wssdk.apiref.doc/
    | vmodl.query.PropertyCollector.ObjectContent.html

    :param propset: a property "set" from ObjectContent
    :return: dictionary representing property set
    """
    if propset is None:
        return {}
    # TODO(hartsocks): once support for Python 2.6 is dropped
    # change to a dict comprehension
    return dict((prop.name, prop.val) for prop in propset)
def get_vmdk_backed_disk_uuid(hardware_devices, volume_uuid):
    """Return the backing uuid of the flat-backed VirtualDisk whose file
    name contains volume_uuid, or None when no such disk exists.
    """
    if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
        hardware_devices = hardware_devices.VirtualDevice
    for device in hardware_devices:
        if device.__class__.__name__ != "VirtualDisk":
            continue
        backing = device.backing
        if (backing.__class__.__name__ == "VirtualDiskFlatVer2BackingInfo"
                and volume_uuid in backing.fileName):
            return backing.uuid
def get_vmdk_backed_disk_device(hardware_devices, uuid):
    """Return the flat-backed VirtualDisk whose backing uuid matches."""
    if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
        hardware_devices = hardware_devices.VirtualDevice
    for device in hardware_devices:
        if device.__class__.__name__ != "VirtualDisk":
            continue
        backing = device.backing
        if (backing.__class__.__name__ == "VirtualDiskFlatVer2BackingInfo"
                and backing.uuid == uuid):
            return device
def get_vmdk_volume_disk(hardware_devices, path=None):
    """Return the first VirtualDisk, optionally matching a backing path."""
    if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
        hardware_devices = hardware_devices.VirtualDevice
    for device in hardware_devices:
        if device.__class__.__name__ != "VirtualDisk":
            continue
        if not path or path == device.backing.fileName:
            return device
def get_res_pool_ref(session, cluster, node_mo_id):
    """Get the resource pool.

    :param session: vSphere API session
    :param cluster: cluster managed object reference, or None
    :param node_mo_id: managed object id of the targeted node
    :return: the resource pool reference, or None when a cluster is given
             whose value does not match node_mo_id
    """
    # NOTE: initialize explicitly — previously this function raised
    # UnboundLocalError when a cluster was given but its value did not
    # match node_mo_id, since res_pool_ref was never assigned on that path.
    res_pool_ref = None
    if cluster is None:
        # With no cluster named, use the root resource pool.
        results = session._call_method(vim_util, "get_objects",
                                       "ResourcePool")
        _cancel_retrieve_if_necessary(session, results)
        # The 0th resource pool is always the root resource pool on both ESX
        # and vCenter.
        res_pool_ref = results.objects[0].obj
    elif cluster.value == node_mo_id:
        # Get the root resource pool of the cluster
        res_pool_ref = session._call_method(vim_util,
                                            "get_dynamic_property",
                                            cluster,
                                            "ClusterComputeResource",
                                            "resourcePool")
    return res_pool_ref
def get_all_cluster_mors(session):
    """Get all the clusters in the vCenter."""
    try:
        results = session._call_method(vim_util, "get_objects",
                                       "ClusterComputeResource", ["name"])
        _cancel_retrieve_if_necessary(session, results)
        return results.objects
    except Exception as exc:
        # Best effort: log and return None so callers can bail out.
        LOG.warn(_("Failed to get cluster references %s") % exc)
def get_all_res_pool_mors(session):
    """Get all the resource pools in the vCenter."""
    try:
        results = session._call_method(vim_util, "get_objects",
                                       "ResourcePool")
        _cancel_retrieve_if_necessary(session, results)
        return results.objects
    except Exception as exc:
        # Best effort: log and return None so callers can bail out.
        LOG.warn(_("Failed to get resource pool references %s") % exc)
def get_dynamic_property_mor(session, mor_ref, attribute):
    """Get the value of an attribute for a given managed object."""
    value = session._call_method(vim_util, "get_dynamic_property",
                                 mor_ref, mor_ref._type, attribute)
    return value
def find_entity_mor(entity_list, entity_name):
    """Returns managed object ref for given cluster or resource pool name."""
    matches = []
    for mor in entity_list:
        if hasattr(mor, 'propSet') and mor.propSet[0].val == entity_name:
            matches.append(mor)
    return matches
def get_all_cluster_refs_by_name(session, path_list):
    """Get reference to the Cluster, ResourcePool with the path specified.

    The path is the display name. This can be the full path as well.
    The input will have the list of clusters and resource pool names
    """
    cls = get_all_cluster_mors(session)
    if not cls:
        return
    res = get_all_res_pool_mors(session)
    if not res:
        return
    list_obj = []
    for entity_path in (path.strip() for path in path_list):
        # entity_path could be unique cluster and/or resource-pool name
        matched = find_entity_mor(cls, entity_path)
        matched.extend(find_entity_mor(res, entity_path))
        for mor in matched:
            list_obj.append((mor.obj, mor.propSet[0].val))
    return get_dict_mor(session, list_obj)
def get_dict_mor(session, list_obj):
    """The input is a list of objects in the form
    (manage_object,display_name)
    The managed object will be in the form
    { value = "domain-1002", _type = "ClusterComputeResource" }
    Output data format:
    | dict_mors = {
    | 'respool-1001': { 'cluster_mor': clusterMor,
    | 'res_pool_mor': resourcePoolMor,
    | 'name': display_name },
    | 'domain-1002': { 'cluster_mor': clusterMor,
    | 'res_pool_mor': resourcePoolMor,
    | 'name': display_name },
    | }

    :param session: vSphere API session
    :param list_obj: list of (managed_object, display_name) tuples
    :return: dict keyed by managed object value as described above
    """
    dict_mors = {}
    for obj_ref, path in list_obj:
        if obj_ref._type == "ResourcePool":
            # Get owner cluster-ref mor
            cluster_ref = get_dynamic_property_mor(session, obj_ref, "owner")
            dict_mors[obj_ref.value] = {'cluster_mor': cluster_ref,
                                        'res_pool_mor': obj_ref,
                                        'name': path,
                                        }
        else:
            # Get default resource pool of the cluster
            res_pool_ref = get_dynamic_property_mor(session,
                                                    obj_ref, "resourcePool")
            dict_mors[obj_ref.value] = {'cluster_mor': obj_ref,
                                        'res_pool_mor': res_pool_ref,
                                        'name': path,
                                        }
    return dict_mors
def get_mo_id_from_instance(instance):
    """Return the managed object ID from the instance.

    The instance['node'] will have the hypervisor_hostname field of the
    compute node on which the instance exists or will be provisioned.
    This will be of the form
    'respool-1001(MyResPoolName)'
    'domain-1001(MyClusterName)'
    """
    node = instance['node']
    mo_id, _sep, _display_name = node.partition('(')
    return mo_id
def get_vmdk_adapter_type(adapter_type):
    """Return the adapter type to be used in vmdk descriptor.

    Adapter type in vmdk descriptor is same for LSI-SAS, LSILogic &
    ParaVirtual because Virtual Disk Manager API does not recognize the
    newer controller types.
    """
    if adapter_type in (constants.ADAPTER_TYPE_LSILOGICSAS,
                        constants.ADAPTER_TYPE_PARAVIRTUAL):
        return constants.DEFAULT_ADAPTER_TYPE
    return adapter_type
def create_vm(session, instance, vm_folder, config_spec, res_pool_ref):
    """Create VM on ESX host."""
    LOG.debug("Creating VM on the ESX host", instance=instance)
    task = session._call_method(session._get_vim(), "CreateVM_Task",
                                vm_folder, config=config_spec,
                                pool=res_pool_ref)
    task_info = session._wait_for_task(task)
    LOG.debug("Created VM on the ESX host", instance=instance)
    # The task result is the new VM's managed object reference.
    return task_info.result
def create_virtual_disk(session, dc_ref, adapter_type, disk_type,
                        virtual_disk_path, size_in_kb):
    """Create a virtual disk via the Virtual Disk Manager.

    :param session: vSphere API session
    :param dc_ref: datacenter reference in which to create the disk
    :param adapter_type: storage adapter type (translated for the vmdk
        descriptor by get_vmdk_create_spec)
    :param disk_type: provisioning type for the new disk
    :param virtual_disk_path: datastore path for the new disk
    :param size_in_kb: capacity in KB
    """
    # Create a Virtual Disk of the size of the flat vmdk file. This is
    # done just to generate the meta-data file whose specifics
    # depend on the size of the disk, thin/thick provisioning and the
    # storage adapter type.
    LOG.debug("Creating Virtual Disk of size "
              "%(vmdk_file_size_in_kb)s KB and adapter type "
              "%(adapter_type)s on the data store",
              {"vmdk_file_size_in_kb": size_in_kb,
               "adapter_type": adapter_type})
    vmdk_create_spec = get_vmdk_create_spec(
        session._get_vim().client.factory,
        size_in_kb,
        adapter_type,
        disk_type)
    vmdk_create_task = session._call_method(
        session._get_vim(),
        "CreateVirtualDisk_Task",
        session._get_vim().service_content.virtualDiskManager,
        name=virtual_disk_path,
        datacenter=dc_ref,
        spec=vmdk_create_spec)
    # Block until the server-side task completes.
    session._wait_for_task(vmdk_create_task)
    LOG.debug("Created Virtual Disk of size %(vmdk_file_size_in_kb)s"
              " KB and type %(disk_type)s",
              {"vmdk_file_size_in_kb": size_in_kb,
               "disk_type": disk_type})
def copy_virtual_disk(session, dc_ref, source, dest):
    """Copy a sparse virtual disk to a thin virtual disk. This is also
    done to generate the meta-data file whose specifics
    depend on the size of the disk, thin/thick provisioning and the
    storage adapter type.

    :param session: - session for connection
    :param dc_ref: - data center reference object
    :param source: - source datastore path
    :param dest: - destination datastore path
    """
    LOG.debug("Copying Virtual Disk %(source)s to %(dest)s",
              {'source': source, 'dest': dest})
    vim = session._get_vim()
    copy_task = session._call_method(
        vim,
        "CopyVirtualDisk_Task",
        vim.service_content.virtualDiskManager,
        sourceName=source,
        sourceDatacenter=dc_ref,
        destName=dest)
    # Block until the server-side copy completes.
    session._wait_for_task(copy_task)
    LOG.debug("Copied Virtual Disk %(source)s to %(dest)s",
              {'source': source, 'dest': dest})
def reconfigure_vm(session, vm_ref, config_spec):
    """Reconfigure a VM according to the config spec and wait for it."""
    vim = session._get_vim()
    reconfigure_task = session._call_method(vim, "ReconfigVM_Task",
                                            vm_ref, spec=config_spec)
    session._wait_for_task(reconfigure_task)
def clone_vmref_for_instance(session, instance, vm_ref, host_ref, ds_ref,
                             vmfolder_ref):
    """Clone VM and link the cloned VM to the instance.

    Clones the passed vm_ref into a new VM and links the cloned vm to
    the passed instance: the clone is named after the instance uuid,
    tagged with it via the nvp.vm-uuid extraConfig option, and given it
    as instanceUuid.

    :param session: session for connection
    :param instance: instance whose uuid names and tags the clone
    :param vm_ref: reference of the VM to clone (must not be None)
    :param host_ref: target host for the relocated clone
    :param ds_ref: target datastore for the relocated clone
    :param vmfolder_ref: VM folder in which the clone is created
    :raises vexc.MissingParameter: if vm_ref is None
    """
    if vm_ref is None:
        # LOG.warn is a deprecated alias of LOG.warning; use the
        # canonical method name.
        LOG.warning(_("vmwareapi:vm_util:clone_vmref_for_instance, called "
                      "with vm_ref=None"))
        raise vexc.MissingParameter(param="vm_ref")
    # Get the clone vm spec
    client_factory = session._get_vim().client.factory
    rel_spec = relocate_vm_spec(client_factory, ds_ref, host_ref,
                    disk_move_type='moveAllDiskBackingsAndDisallowSharing')
    # Tag the clone with the instance uuid so it can be found later.
    extra_opts = {'nvp.vm-uuid': instance['uuid']}
    config_spec = get_vm_extra_config_spec(client_factory, extra_opts)
    config_spec.instanceUuid = instance['uuid']
    clone_spec = clone_vm_spec(client_factory, rel_spec, config=config_spec)

    # Clone VM on ESX host
    LOG.debug("Cloning VM for instance %s", instance['uuid'],
              instance=instance)
    vm_clone_task = session._call_method(session._get_vim(), "CloneVM_Task",
                                         vm_ref, folder=vmfolder_ref,
                                         name=instance['uuid'],
                                         spec=clone_spec)
    session._wait_for_task(vm_clone_task)
    LOG.debug("Cloned VM for instance %s", instance['uuid'],
              instance=instance)
    # Invalidate the cache, so that it is refetched the next time
    vm_ref_cache_delete(instance['uuid'])
def disassociate_vmref_from_instance(session, instance, vm_ref=None,
                                     suffix='-orig'):
    """Disassociates the VM linked to the instance.

    Disassociates the VM linked to the instance by performing the following
    1. Update the extraConfig property for nvp.vm-uuid to be replaced with
    instance[uuid]+suffix
    2. Rename the VM to be instance[uuid]+suffix instead
    3. Reset the instanceUUID of the VM to a new generated value
    """
    if vm_ref is None:
        vm_ref = get_vm_ref(session, instance)
    renamed_uuid = instance['uuid'] + suffix
    client_factory = session._get_vim().client.factory
    reconfig_spec = get_vm_extra_config_spec(client_factory,
                                             {'nvp.vm-uuid': renamed_uuid})
    reconfig_spec.name = renamed_uuid
    # An empty instanceUuid makes vCenter generate a fresh one.
    reconfig_spec.instanceUuid = ''
    LOG.debug("Disassociating VM from instance %s", instance['uuid'],
              instance=instance)
    reconfigure_vm(session, vm_ref, reconfig_spec)
    LOG.debug("Disassociated VM from instance %s", instance['uuid'],
              instance=instance)
    # Invalidate the cache, so that it is refetched the next time
    vm_ref_cache_delete(instance['uuid'])
def associate_vmref_for_instance(session, instance, vm_ref=None,
                                 suffix='-orig'):
    """Associates the VM to the instance.

    Associates the VM to the instance by performing the following
    1. Update the extraConfig property for nvp.vm-uuid to be replaced with
    instance[uuid]
    2. Rename the VM to be instance[uuid]
    3. Reset the instanceUUID of the VM to be instance[uuid]
    """
    uuid = instance['uuid']
    if vm_ref is None:
        # The VM was previously renamed with the suffix; look it up by
        # that identifier.
        vm_ref = search_vm_ref_by_identifier(session, uuid + suffix)
        if vm_ref is None:
            raise exception.InstanceNotFound(instance_id=uuid + suffix)
    client_factory = session._get_vim().client.factory
    reconfig_spec = get_vm_extra_config_spec(client_factory,
                                             {'nvp.vm-uuid': uuid})
    reconfig_spec.name = uuid
    reconfig_spec.instanceUuid = uuid
    LOG.debug("Associating VM to instance %s", uuid, instance=instance)
    reconfigure_vm(session, vm_ref, reconfig_spec)
    LOG.debug("Associated VM to instance %s", uuid, instance=instance)
    # Invalidate the cache, so that it is refetched the next time
    vm_ref_cache_delete(uuid)
def power_on_instance(session, instance, vm_ref=None):
    """Power on the specified instance."""
    if vm_ref is None:
        vm_ref = get_vm_ref(session, instance)
    LOG.debug("Powering on the VM", instance=instance)
    try:
        power_on_task = session._call_method(session._get_vim(),
                                             "PowerOnVM_Task", vm_ref)
        session._wait_for_task(power_on_task)
        LOG.debug("Powered on the VM", instance=instance)
    except vexc.InvalidPowerStateException:
        # Already in the desired state; treat as success.
        LOG.debug("VM already powered on", instance=instance)
def get_values_from_object_properties(session, props):
    """Get the specific values from a object list.

    Follows continuation tokens until the result set is exhausted.
    The object values will be returned as a dictionary.
    """
    values = {}
    while props:
        for obj in props.objects:
            values.update(propset_dict(obj.propSet))
        token = _get_token(props)
        if not token:
            break
        props = session._call_method(vim_util,
                                     "continue_to_get_objects",
                                     token)
    return values
def _get_vm_port_indices(session, vm_ref):
    """Return the port indices currently allocated on the VM.

    An index is taken from each 'nvp.iface-id.<n>' extraConfig option
    whose value is not 'free'.
    """
    extra_config = session._call_method(vim_util,
                                        'get_dynamic_property',
                                        vm_ref, 'VirtualMachine',
                                        'config.extraConfig')
    if extra_config is None:
        return []
    return [int(option.key.split('.')[2])
            for option in extra_config.OptionValue
            if (option.key.startswith('nvp.iface-id.') and
                option.value != 'free')]
def get_attach_port_index(session, vm_ref):
    """Get the first free port index."""
    used_ports = sorted(_get_vm_port_indices(session, vm_ref))
    # Walk the sorted indices; the first position whose value differs
    # from its rank is a gap we can reuse. With no gap (including the
    # empty case) the next index past the end is free.
    for candidate, used in enumerate(used_ports):
        if candidate != used:
            return candidate
    return len(used_ports)
def get_vm_detach_port_index(session, vm_ref, iface_id):
    """Return the port index bound to iface_id, or None if not found."""
    extra_config = session._call_method(vim_util,
                                        'get_dynamic_property',
                                        vm_ref, 'VirtualMachine',
                                        'config.extraConfig')
    if extra_config is None:
        return None
    for option in extra_config.OptionValue:
        if (option.key.startswith('nvp.iface-id.') and
                option.value == iface_id):
            return int(option.key.split('.')[2])
    return None
def power_off_instance(session, instance, vm_ref=None):
    """Power off the specified instance."""
    if vm_ref is None:
        vm_ref = get_vm_ref(session, instance)
    LOG.debug("Powering off the VM", instance=instance)
    try:
        power_off_task = session._call_method(session._get_vim(),
                                              "PowerOffVM_Task", vm_ref)
        session._wait_for_task(power_off_task)
        LOG.debug("Powered off the VM", instance=instance)
    except vexc.InvalidPowerStateException:
        # Already in the desired state; treat as success.
        LOG.debug("VM already powered off", instance=instance)
| {
"content_hash": "e07e9a1b07287ae0729bede97a1565bc",
"timestamp": "",
"source": "github",
"line_count": 1510,
"max_line_length": 79,
"avg_line_length": 38.94304635761589,
"alnum_prop": 0.6101625739745595,
"repo_name": "berrange/nova",
"id": "1f44e170984701e31f71b84e404d464633e76605",
"size": "59555",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nova/virt/vmwareapi/vm_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15269775"
},
{
"name": "Shell",
"bytes": "18352"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.