text stringlengths 4 1.02M | meta dict |
|---|---|
import pickle
import textwrap
from collections import OrderedDict
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from astropy.nddata.nddata import NDData
from astropy.nddata.nduncertainty import StdDevUncertainty
from astropy import units as u
from astropy.utils import NumpyRNGContext
from astropy.wcs import WCS
from astropy.wcs.wcsapi import HighLevelWCSWrapper, SlicedLowLevelWCS, \
BaseHighLevelWCS
from .test_nduncertainty import FakeUncertainty
from astropy.nddata import _testing as nd_testing
class FakeNumpyArray:
    """
    Stand-in that mimics just enough of the numpy array interface.

    NDData only checks for the presence of these attributes, so the
    stubs need to exist but not actually work.
    """

    def __init__(self):
        super().__init__()

    @property
    def dtype(self):
        # A real array reports a numpy dtype; any placeholder value works
        # here because only attribute presence is checked.
        return 'fake'

    def shape(self):
        pass

    def __getitem__(self):
        pass

    def __array__(self):
        pass
class MinimalUncertainty:
    """
    Define the minimum attributes acceptable as an uncertainty object.
    """

    def __init__(self, value):
        # Stored only so instances carry some state; nothing reads it back
        # through the public interface.
        self._uncertainty = value

    @property
    def uncertainty_type(self):
        # The literal is deliberately silly: tests only require that the
        # attribute exists.
        return "totally and completely fake"
class BadNDDataSubclass(NDData):
    """NDData subclass that skips the parent initializer entirely.

    Assigning the private attributes directly means no validation or
    conversion ever happens, producing a potentially inconsistent
    instance on purpose.
    """

    def __init__(self, data, uncertainty=None, mask=None, wcs=None,
                 meta=None, unit=None):
        # Deliberately no super().__init__() call.
        self._data = data
        self._uncertainty = uncertainty
        self._mask = mask
        self._wcs = wcs
        self._meta = meta
        self._unit = unit
# Setter tests
def test_uncertainty_setter():
    """Setting ``uncertainty`` after init accepts any object exposing an
    ``uncertainty_type`` attribute and links NDUncertainty parents."""
    nd = NDData([1, 2, 3])
    good_uncertainty = MinimalUncertainty(5)
    nd.uncertainty = good_uncertainty
    assert nd.uncertainty is good_uncertainty
    # Check the fake uncertainty (minimal does not work since it has no
    # parent_nddata attribute from NDUncertainty)
    nd.uncertainty = FakeUncertainty(5)
    assert nd.uncertainty.parent_nddata is nd
    # Check that it works if the uncertainty was set during init
    nd = NDData(nd)
    assert isinstance(nd.uncertainty, FakeUncertainty)
    # Assigning a plain value replaces the FakeUncertainty with something
    # exposing an ``array`` attribute holding that value.
    nd.uncertainty = 10
    assert not isinstance(nd.uncertainty, FakeUncertainty)
    assert nd.uncertainty.array == 10
def test_mask_setter():
    """The mask setter stores whatever it is given, unconditionally."""
    ndd = NDData([1, 2, 3])
    ndd.mask = True
    assert ndd.mask
    ndd.mask = False
    assert not ndd.mask
    # A mask provided at init time can be overwritten the same way.
    ndd = NDData(ndd, mask=True)
    assert ndd.mask
    ndd.mask = False
    assert not ndd.mask
# Init tests
def test_nddata_empty():
    """NDData has no default for ``data`` and must reject an empty call."""
    with pytest.raises(TypeError):
        NDData()  # empty initializer should fail
def test_nddata_init_data_nonarray():
    """A plain list is converted to an equivalent numpy array."""
    values = [1, 2, 3]
    ndd = NDData(values)
    assert (np.array(values) == ndd.data).all()
def test_nddata_init_data_ndarray():
    """ndarray input keeps shape/dtype and is held by reference unless
    ``copy=True`` is requested."""
    # random floats
    with NumpyRNGContext(123):
        nd = NDData(np.random.random((10, 10)))
    assert nd.data.shape == (10, 10)
    assert nd.data.size == 100
    assert nd.data.dtype == np.dtype(float)
    # specific integers
    nd = NDData(np.array([[1, 2, 3], [4, 5, 6]]))
    assert nd.data.size == 6
    assert nd.data.dtype == np.dtype(int)
    # Tests to ensure that creating a new NDData object copies by *reference*.
    a = np.ones((10, 10))
    nd_ref = NDData(a)
    a[0, 0] = 0
    assert nd_ref.data[0, 0] == 0
    # Except we choose copy=True
    a = np.ones((10, 10))
    nd_ref = NDData(a, copy=True)
    a[0, 0] = 0
    assert nd_ref.data[0, 0] != 0
def test_nddata_init_data_maskedarray():
    """Masked-array input propagates both data and mask, by reference
    unless ``copy=True`` is requested."""
    with NumpyRNGContext(456):
        NDData(np.random.random((10, 10)),
               mask=np.random.random((10, 10)) > 0.5)
    # Another test (just copied here)
    with NumpyRNGContext(12345):
        a = np.random.randn(100)
        marr = np.ma.masked_where(a > 0, a)
    nd = NDData(marr)
    # check that masks and data match
    assert_array_equal(nd.mask, marr.mask)
    assert_array_equal(nd.data, marr.data)
    # check that they are both by reference
    marr.mask[10] = ~marr.mask[10]
    marr.data[11] = 123456789
    assert_array_equal(nd.mask, marr.mask)
    assert_array_equal(nd.data, marr.data)
    # or not if we choose copy=True
    nd = NDData(marr, copy=True)
    marr.mask[10] = ~marr.mask[10]
    marr.data[11] = 0
    assert nd.mask[10] != marr.mask[10]
    assert nd.data[11] != marr.data[11]
@pytest.mark.parametrize('data', [np.array([1, 2, 3]), 5])
def test_nddata_init_data_quantity(data):
    """Quantity input: unit is extracted, value stored by reference
    unless ``copy=True``."""
    # Test an array and a scalar because a scalar Quantity does not always
    # behave the same way as an array.
    quantity = data * u.adu
    ndd = NDData(quantity)
    assert ndd.unit == quantity.unit
    assert_array_equal(ndd.data, np.array(quantity.value))
    if ndd.data.size > 1:
        # check that if it is an array it is not copied
        quantity.value[1] = 100
        assert ndd.data[1] == quantity.value[1]
        # or is copied if we choose copy=True
        ndd = NDData(quantity, copy=True)
        quantity.value[1] = 5
        assert ndd.data[1] != quantity.value[1]
def test_nddata_init_data_masked_quantity():
    """A masked Quantity is unpacked into plain data, mask, and unit."""
    a = np.array([2, 3])
    q = a * u.m
    m = False
    mq = np.ma.array(q, mask=m)
    nd = NDData(mq)
    assert_array_equal(nd.data, a)
    # This test failed before the change in nddata init because the masked
    # array's data (which in fact was a quantity) was directly saved
    assert nd.unit == u.m
    assert not isinstance(nd.data, u.Quantity)
    np.testing.assert_array_equal(nd.mask, np.array(m))
def test_nddata_init_data_nddata():
    """NDData input: attributes are inherited by reference, copied with
    ``copy=True``, and overridden by explicit keyword arguments."""
    nd1 = NDData(np.array([1]))
    nd2 = NDData(nd1)
    assert nd2.wcs == nd1.wcs
    assert nd2.uncertainty == nd1.uncertainty
    assert nd2.mask == nd1.mask
    assert nd2.unit == nd1.unit
    assert nd2.meta == nd1.meta
    # Check that it is copied by reference
    nd1 = NDData(np.ones((5, 5)))
    nd2 = NDData(nd1)
    assert nd1.data is nd2.data
    # Check that it is really copied if copy=True
    nd2 = NDData(nd1, copy=True)
    nd1.data[2, 3] = 10
    assert nd1.data[2, 3] != nd2.data[2, 3]
    # Now let's see what happens if we have all explicitly set
    nd1 = NDData(np.array([1]), mask=False, uncertainty=StdDevUncertainty(10), unit=u.s,
                 meta={'dest': 'mordor'}, wcs=WCS(naxis=1))
    nd2 = NDData(nd1)
    assert nd2.data is nd1.data
    assert nd2.wcs is nd1.wcs
    assert nd2.uncertainty.array == nd1.uncertainty.array
    assert nd2.mask == nd1.mask
    assert nd2.unit == nd1.unit
    assert nd2.meta == nd1.meta
    # now what happens if we overwrite them all too
    nd3 = NDData(nd1, mask=True, uncertainty=StdDevUncertainty(200), unit=u.km,
                 meta={'observer': 'ME'}, wcs=WCS(naxis=1))
    assert nd3.data is nd1.data
    assert nd3.wcs is not nd1.wcs
    assert nd3.uncertainty.array != nd1.uncertainty.array
    assert nd3.mask != nd1.mask
    assert nd3.unit != nd1.unit
    assert nd3.meta != nd1.meta
def test_nddata_init_data_nddata_subclass():
    """Incompatible NDData subclasses are rejected; compatible ones have
    their attributes taken over."""
    uncert = StdDevUncertainty(3)
    # There might be some incompatible subclasses of NDData around.
    bnd = BadNDDataSubclass(False, True, 3, 2, 'gollum', 100)
    # Before changing the NDData init this would not have raised an error but
    # would have lead to a compromised nddata instance
    with pytest.raises(TypeError):
        NDData(bnd)
    # but if it has no actual incompatible attributes it passes
    bnd_good = BadNDDataSubclass(np.array([1, 2]), uncert, 3, HighLevelWCSWrapper(WCS(naxis=1)),
                                 {'enemy': 'black knight'}, u.km)
    nd = NDData(bnd_good)
    assert nd.unit == bnd_good.unit
    assert nd.meta == bnd_good.meta
    assert nd.uncertainty == bnd_good.uncertainty
    assert nd.mask == bnd_good.mask
    assert nd.wcs is bnd_good.wcs
    assert nd.data is bnd_good.data
def test_nddata_init_data_fail():
    """Data must look array-like: both a shape and slicing are required."""
    # First one is sliceable but has no shape, so should fail.
    with pytest.raises(TypeError):
        NDData({'a': 'dict'})

    # This has a shape but is not sliceable
    class Shape:
        def __init__(self):
            self.shape = 5

        def __repr__(self):
            return '7'

    with pytest.raises(TypeError):
        NDData(Shape())
def test_nddata_init_data_fakes():
    """Array-like fakes must survive init without numpy conversion."""
    fake = FakeNumpyArray()
    ndd1 = NDData(fake)
    # NDData has to keep the fake as-is rather than coercing to ndarray.
    assert isinstance(ndd1.data, FakeNumpyArray)
    # The same holds when the data arrives wrapped in another NDData.
    ndd2 = NDData(ndd1)
    assert isinstance(ndd2.data, FakeNumpyArray)
# Specific parameters
def test_param_uncertainty():
    """An uncertainty given at init is linked to its parent, and an
    explicit uncertainty wins over the one carried by an NDData input."""
    # Named ``uncert`` (not ``u``) so we do not shadow the module-level
    # ``astropy.units`` alias imported at the top of this file.
    uncert = StdDevUncertainty(array=np.ones((5, 5)))
    d = NDData(np.ones((5, 5)), uncertainty=uncert)
    # Test that the parent_nddata is set.
    assert d.uncertainty.parent_nddata is d
    # Test conflicting uncertainties (other NDData)
    uncert2 = StdDevUncertainty(array=np.ones((5, 5)) * 2)
    d2 = NDData(d, uncertainty=uncert2)
    assert d2.uncertainty is uncert2
    assert d2.uncertainty.parent_nddata is d2
def test_param_wcs():
    """An explicitly supplied wcs always wins over an inherited one."""
    ndd = NDData([1], wcs=WCS(naxis=1))
    assert ndd.wcs is not None
    # A wcs keyword must replace the one carried by an NDData input.
    ndd2 = NDData(ndd, wcs=WCS(naxis=1))
    assert ndd2.wcs is not None and ndd2.wcs is not ndd.wcs
def test_param_meta():
    """meta accepts anything dict-like, defaults to an empty OrderedDict,
    and an explicit meta overrides an inherited one."""
    # everything dict-like is allowed
    with pytest.raises(TypeError):
        NDData([1], meta=3)
    nd = NDData([1, 2, 3], meta={})
    assert len(nd.meta) == 0
    nd = NDData([1, 2, 3])
    assert isinstance(nd.meta, OrderedDict)
    assert len(nd.meta) == 0
    # Test conflicting meta (other NDData)
    nd2 = NDData(nd, meta={'image': 'sun'})
    assert len(nd2.meta) == 1
    nd3 = NDData(nd2, meta={'image': 'moon'})
    assert len(nd3.meta) == 1
    assert nd3.meta['image'] == 'moon'
def test_param_mask():
    """An explicit mask overrides masks inherited from NDData, masked
    array, or masked Quantity input."""
    # Since everything is allowed we only need to test something
    nd = NDData([1], mask=False)
    assert not nd.mask
    # Test conflicting mask (other NDData)
    nd2 = NDData(nd, mask=True)
    assert nd2.mask
    # (masked array)
    nd3 = NDData(np.ma.array([1], mask=False), mask=True)
    assert nd3.mask
    # (masked quantity)
    mq = np.ma.array(np.array([2, 3])*u.m, mask=False)
    nd4 = NDData(mq, mask=True)
    assert nd4.mask
def test_param_unit():
    """Units are validated, and an explicit unit overrides units carried
    by Quantity, masked Quantity, or NDData input."""
    with pytest.raises(ValueError):
        NDData(np.ones((5, 5)), unit="NotAValidUnit")
    NDData([1, 2, 3], unit='meter')
    # Test conflicting units (quantity as data)
    q = np.array([1, 2, 3]) * u.m
    nd = NDData(q, unit='cm')
    assert nd.unit != q.unit
    assert nd.unit == u.cm
    # (masked quantity)
    mq = np.ma.array(np.array([2, 3])*u.m, mask=False)
    nd2 = NDData(mq, unit=u.s)
    assert nd2.unit == u.s
    # (another NDData as data)
    nd3 = NDData(nd, unit='km')
    assert nd3.unit == u.km
def test_pickle_nddata_with_uncertainty():
    """Pickling must preserve the uncertainty type, unit, and re-link
    the parent_nddata weakref on the restored object."""
    # NOTE(review): the uncertainty array has length 5 while data has
    # length 3 -- plain NDData does not validate shapes; confirm this
    # mismatch is intentional.
    ndd = NDData(np.ones(3),
                 uncertainty=StdDevUncertainty(np.ones(5), unit=u.m),
                 unit=u.m)
    ndd_dumped = pickle.dumps(ndd)
    ndd_restored = pickle.loads(ndd_dumped)
    assert type(ndd_restored.uncertainty) is StdDevUncertainty
    assert ndd_restored.uncertainty.parent_nddata is ndd_restored
    assert ndd_restored.uncertainty.unit == u.m
def test_pickle_uncertainty_only():
    """Pickling an uncertainty alone keeps array and unit but drops the
    parent weakref once the parent is garbage collected."""
    ndd = NDData(np.ones(3),
                 uncertainty=StdDevUncertainty(np.ones(5), unit=u.m),
                 unit=u.m)
    uncertainty_dumped = pickle.dumps(ndd.uncertainty)
    uncertainty_restored = pickle.loads(uncertainty_dumped)
    np.testing.assert_array_equal(ndd.uncertainty.array,
                                  uncertainty_restored.array)
    assert ndd.uncertainty.unit == uncertainty_restored.unit
    # Even though it has a parent there is no one that references the parent
    # after unpickling so the weakref "dies" immediately after unpickling
    # finishes.
    assert uncertainty_restored.parent_nddata is None
def test_pickle_nddata_without_uncertainty():
    """A plain NDData must round-trip through pickle with data intact."""
    original = NDData(np.ones(3), unit=u.m)
    restored = pickle.loads(pickle.dumps(original))
    np.testing.assert_array_equal(original.data, restored.data)
# Check that the meta descriptor is working as expected. The MetaBaseTest class
# takes care of defining all the tests, and we simply have to define the class
# and any minimal set of args to pass.
from astropy.utils.tests.test_metadata import MetaBaseTest
class TestMetaNDData(MetaBaseTest):
    # MetaBaseTest supplies the actual test methods; we only provide the
    # class under test and a minimal data argument for its constructor.
    test_class = NDData
    args = np.array([[1.]])
# Representation tests
def test_nddata_str():
    """str(NDData) matches the str() of the underlying array."""
    arr1d = NDData(np.array([1, 2, 3]))
    assert str(arr1d) == '[1 2 3]'
    arr2d = NDData(np.array([[1, 2], [3, 4]]))
    assert str(arr2d) == textwrap.dedent("""
        [[1 2]
         [3 4]]"""[1:])
    arr3d = NDData(np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]))
    assert str(arr3d) == textwrap.dedent("""
        [[[1 2]
          [3 4]]

         [[5 6]
          [7 8]]]"""[1:])
def test_nddata_repr():
    """repr(NDData) wraps the array repr in the class name."""
    arr1d = NDData(np.array([1, 2, 3]))
    assert repr(arr1d) == 'NDData([1, 2, 3])'
    arr2d = NDData(np.array([[1, 2], [3, 4]]))
    assert repr(arr2d) == textwrap.dedent("""
        NDData([[1, 2],
                [3, 4]])"""[1:])
    arr3d = NDData(np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]))
    assert repr(arr3d) == textwrap.dedent("""
        NDData([[[1, 2],
                 [3, 4]],

                [[5, 6],
                 [7, 8]]])"""[1:])
# Not supported features
def test_slicing_not_supported():
    """Indexing a plain NDData must raise TypeError."""
    ndd = NDData(np.ones((5, 5)))
    with pytest.raises(TypeError):
        ndd[0]
def test_arithmetic_not_supported():
    """Arithmetic on plain NDData must raise TypeError."""
    ndd = NDData(np.ones((5, 5)))
    with pytest.raises(TypeError):
        ndd + ndd
def test_nddata_wcs_setter_error_cases():
    """The wcs setter rejects non-WCS values and refuses to overwrite an
    already-set wcs."""
    ndd = NDData(np.ones((5, 5)))
    # Setting with a non-WCS should raise an error
    with pytest.raises(TypeError):
        ndd.wcs = "I am not a WCS"
    naxis = 2
    # This should succeed since the WCS is currently None
    ndd.wcs = nd_testing._create_wcs_simple(naxis=naxis,
                                            ctype=['deg'] * naxis,
                                            crpix=[0] * naxis,
                                            crval=[10] * naxis,
                                            cdelt=[1] * naxis)
    with pytest.raises(ValueError):
        # This should fail since the WCS is not None
        ndd.wcs = nd_testing._create_wcs_simple(naxis=naxis,
                                                ctype=['deg'] * naxis,
                                                crpix=[0] * naxis,
                                                crval=[10] * naxis,
                                                cdelt=[1] * naxis)
def test_nddata_wcs_setter_with_low_level_wcs():
    """A low-level WCS assigned to ``wcs`` is wrapped to high level."""
    ndd = NDData(np.ones((5, 5)))
    wcs = WCS()
    # If the wcs property is set with a low level WCS it should get
    # wrapped to high level.
    low_level = SlicedLowLevelWCS(wcs, 5)
    assert not isinstance(low_level, BaseHighLevelWCS)
    ndd.wcs = low_level
    assert isinstance(ndd.wcs, BaseHighLevelWCS)
| {
"content_hash": "23eb0de164e8ca64ea02d4659abc0d52",
"timestamp": "",
"source": "github",
"line_count": 487,
"max_line_length": 96,
"avg_line_length": 31.11088295687885,
"alnum_prop": 0.6169229753811629,
"repo_name": "MSeifert04/astropy",
"id": "954317e211f7779d299052ac48c683b0ae7c815b",
"size": "15217",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "astropy/nddata/tests/test_nddata.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "444651"
},
{
"name": "C++",
"bytes": "1057"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Objective-C",
"bytes": "615"
},
{
"name": "Python",
"bytes": "9891588"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
import re
from utils import *
from entities.record import Record
def __extractTag(text):
    """
    Extracts tag from record.
    @tag
    @param {string} text.
    @return {string} tag.
    """
    # Raw string: '\w' in a plain literal is an invalid escape sequence
    # (DeprecationWarning since Python 3.6, a SyntaxError in the future).
    return re.match(r'@\w+', text).group(0)
def __extractType(text, tag):
    """
    Extracts type expression from record.
    {type}
    @param {string} text.
    @param {string} tag. Unused here; kept for extractor-signature symmetry.
    @return {string} Type expression.
    """
    # NOTE(review): extractTextBetweenTokens comes from the wildcard
    # ``utils`` import; only the opening '{' token is passed, so
    # presumably the helper derives the matching '}' itself -- confirm.
    typeExpression = extractTextBetweenTokens(text, '{')
    return typeExpression
def __extractName(text, tag):
    """
    Extracts name of variable from record.
    @param {string} text.
    @param {string} tag.
    @return {string} Name.
    """
    # These tags never carry a variable name.
    if tag in {'@return', '@inheritDoc'}:
        return None
    return text.split(' ')[0]
def __extractDescription(text, tag):
    """
    Extracts description of variable from record without newlines.
    @param {string} text.
    @param {string} tag. Unused; kept for extractor-signature symmetry.
    @return {string} Description.
    """
    # Collapse the multi-line description onto a single line.
    return ' '.join(text.split('\n'))
def extractRecord(text):
    """
    Extracts from code a record object, which contains such information as
    tag, type, name of variable and its description.
    @param {string} text.
    @return {jsCodeParser.record.Record} Record
    """
    tag = __extractTag(text)
    # Drop everything up to and including the tag; the extractors below
    # consume the remaining text piece by piece.
    position = text.find(tag) + len(tag)
    text = text[position:]
    # Ordered pipeline: each part is cut off the front of ``text`` once
    # extracted, so type must come before name, name before description.
    recordMap = {
        'type': {
            'extractor': __extractType,
            'value': ''
        },
        'name': {
            'extractor': __extractName,
            'value': ''
        },
        'description': {
            'extractor': __extractDescription,
            'value': ''
        }
    }
    while text:
        for key in ['type', 'name', 'description']:
            extractor = recordMap[key]['extractor']
            value = extractor(text, tag)
            if value:
                recordMap[key]['value'] = value
                # Advance past the extracted fragment plus any trailing
                # '. ' separator before the next extractor runs.
                position = text.find(value) + len(value)
                text = text[position:]
                text = text.strip('. ')
    typeExpression = recordMap['type']['value']
    name = recordMap['name']['value']
    description = recordMap['description']['value']
    return Record(tag, typeExpression, name, description)
| {
"content_hash": "b6c7954b0c7a77bfb49e3b6fb7f23e8a",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 77,
"avg_line_length": 22.844660194174757,
"alnum_prop": 0.5410114747131322,
"repo_name": "LiveTex/Livetex-Tools",
"id": "8367ac06bf7a8d1d5f2effe922aa8b24777618aa",
"size": "2353",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/externs-extractor/extractors/recordsExtractor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "21385"
},
{
"name": "Makefile",
"bytes": "9662"
},
{
"name": "Shell",
"bytes": "731"
}
],
"symlink_target": ""
} |
"""
:mod:`nexenta.iscsi` -- Driver to store volumes on Nexenta Appliance
=====================================================================
.. automodule:: nexenta.volume
.. moduleauthor:: Victor Rodionov <victor.rodionov@nexenta.com>
.. moduleauthor:: Mikhail Khodos <mikhail.khodos@nexenta.com>
.. moduleauthor:: Yuriy Taraday <yorik.sar@gmail.com>
"""
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder.volume.drivers import nexenta
from cinder.volume.drivers.nexenta import jsonrpc
from cinder.volume.drivers.nexenta import options
from cinder.volume.drivers.nexenta import utils
VERSION = '1.2.1'
LOG = logging.getLogger(__name__)
class NexentaISCSIDriver(driver.ISCSIDriver): # pylint: disable=R0921
"""Executes volume driver commands on Nexenta Appliance.
Version history:
1.0.0 - Initial driver version.
1.0.1 - Fixed bug #1236626: catch "does not exist" exception of
lu_exists.
1.1.0 - Changed class name to NexentaISCSIDriver.
1.1.1 - Ignore "does not exist" exception of nms.snapshot.destroy.
1.1.2 - Optimized create_cloned_volume, replaced zfs send recv with zfs
clone.
1.1.3 - Extended volume stats provided by _update_volume_stats method.
1.2.0 - Added volume migration with storage assist method.
1.2.1 - Fixed bug #1263258: now migrate_volume update provider_location
of migrated volume; after migrating volume migrate_volume
destroy snapshot on migration destination.
"""
VERSION = VERSION
    def __init__(self, *args, **kwargs):
        """Register Nexenta config options and cache them as attributes.

        The NMS JSON-RPC proxy itself is created later, in
        :meth:`do_setup`.
        """
        super(NexentaISCSIDriver, self).__init__(*args, **kwargs)
        self.nms = None
        if self.configuration:
            self.configuration.append_config_values(
                options.NEXENTA_CONNECTION_OPTIONS)
            self.configuration.append_config_values(
                options.NEXENTA_ISCSI_OPTIONS)
            self.configuration.append_config_values(
                options.NEXENTA_VOLUME_OPTIONS)
            self.configuration.append_config_values(
                options.NEXENTA_RRMGR_OPTIONS)
        # NMS connection parameters.
        self.nms_protocol = self.configuration.nexenta_rest_protocol
        self.nms_host = self.configuration.nexenta_host
        self.nms_port = self.configuration.nexenta_rest_port
        self.nms_user = self.configuration.nexenta_user
        self.nms_password = self.configuration.nexenta_password
        # Name of the NexentaStor volume that holds all of our zvols.
        self.volume = self.configuration.nexenta_volume
        # rrmgr (zfs send/recv) tuning, used by storage-assisted migration.
        self.rrmgr_compression = self.configuration.nexenta_rrmgr_compression
        self.rrmgr_tcp_buf_size = self.configuration.nexenta_rrmgr_tcp_buf_size
        self.rrmgr_connections = self.configuration.nexenta_rrmgr_connections
        self.iscsi_target_portal_port = \
            self.configuration.nexenta_iscsi_target_portal_port
@property
def backend_name(self):
backend_name = None
if self.configuration:
backend_name = self.configuration.safe_get('volume_backend_name')
if not backend_name:
backend_name = self.__class__.__name__
return backend_name
def do_setup(self, context):
if self.nms_protocol == 'auto':
protocol, auto = 'http', True
else:
protocol, auto = self.nms_protocol, False
self.nms = jsonrpc.NexentaJSONProxy(
protocol, self.nms_host, self.nms_port, '/rest/nms', self.nms_user,
self.nms_password, auto=auto)
def check_for_setup_error(self):
"""Verify that the volume for our zvols exists.
:raise: :py:exc:`LookupError`
"""
if not self.nms.volume.object_exists(self.volume):
raise LookupError(_("Volume %s does not exist in Nexenta SA"),
self.volume)
def _get_zvol_name(self, volume_name):
"""Return zvol name that corresponds given volume name."""
return '%s/%s' % (self.volume, volume_name)
def _get_target_name(self, volume_name):
"""Return iSCSI target name to access volume."""
return '%s%s' % (self.configuration.nexenta_target_prefix, volume_name)
def _get_target_group_name(self, volume_name):
"""Return Nexenta iSCSI target group name for volume."""
return '%s%s' % (self.configuration.nexenta_target_group_prefix,
volume_name)
@staticmethod
def _get_clone_snapshot_name(volume):
"""Return name for snapshot that will be used to clone the volume."""
return 'cinder-clone-snapshot-%(id)s' % volume
@staticmethod
def _is_clone_snapshot_name(snapshot):
"""Check if snapshot is created for cloning."""
name = snapshot.split('@')[-1]
return name.startswith('cinder-clone-snapshot-')
    def create_volume(self, volume):
        """Create a zvol on appliance.

        :param volume: volume reference
        :return: model update dict for volume reference
        """
        self.nms.zvol.create(
            self._get_zvol_name(volume['name']),
            '%sG' % (volume['size'],),
            self.configuration.nexenta_blocksize,
            self.configuration.nexenta_sparse)
        # Exporting right away also produces the provider_location update.
        return self.create_export(None, volume)
    def extend_volume(self, volume, new_size):
        """Extend an existing volume.

        :param volume: volume reference
        :param new_size: volume new size in GB
        """
        LOG.info(_('Extending volume: %(id)s New size: %(size)s GB'),
                 {'id': volume['id'], 'size': new_size})
        # Growing a zvol is just a 'volsize' property change on the appliance.
        self.nms.zvol.set_child_prop(self._get_zvol_name(volume['name']),
                                     'volsize', '%sG' % new_size)
def delete_volume(self, volume):
"""Destroy a zvol on appliance.
:param volume: volume reference
"""
volume_name = self._get_zvol_name(volume['name'])
props = self.nms.zvol.get_child_props(volume_name, 'origin') or {}
try:
self.nms.zvol.destroy(volume_name, '')
except nexenta.NexentaException as exc:
if 'does not exist' in exc.args[0]:
LOG.info(_('Volume %s does not exist, it seems it was already '
'deleted.'), volume_name)
return
if 'zvol has children' in exc.args[0]:
raise exception.VolumeIsBusy(volume_name=volume_name)
raise
origin = props.get('origin')
if origin and self._is_clone_snapshot_name(origin):
volume, snapshot = origin.split('@')
volume = volume.lstrip('%s/' % self.configuration.nexenta_volume)
try:
self.delete_snapshot({'volume_name': volume, 'name': snapshot})
except nexenta.NexentaException as exc:
LOG.warning(_('Cannot delete snapshot %(origin)s: %(exc)s'),
{'origin': origin, 'exc': exc})
    def create_cloned_volume(self, volume, src_vref):
        """Creates a clone of the specified volume.

        :param volume: new volume reference
        :param src_vref: source volume reference
        """
        snapshot = {'volume_name': src_vref['name'],
                    'name': self._get_clone_snapshot_name(volume)}
        LOG.debug(_('Creating temp snapshot of the original volume: '
                    '%(volume_name)s@%(name)s'), snapshot)
        # We don't delete this snapshot, because this snapshot will be origin
        # of new volume. This snapshot will be automatically promoted by NMS
        # when user will delete origin volume. But when cloned volume deleted
        # we check its origin property and delete source snapshot if needed.
        self.create_snapshot(snapshot)
        try:
            self.create_volume_from_snapshot(volume, snapshot)
        except nexenta.NexentaException:
            # Clone failed: clean up the now-useless temp snapshot before
            # propagating the error.
            LOG.error(_('Volume creation failed, deleting created snapshot '
                        '%(volume_name)s@%(name)s'), snapshot)
            try:
                self.delete_snapshot(snapshot)
            except (nexenta.NexentaException, exception.SnapshotIsBusy):
                LOG.warning(_('Failed to delete zfs snapshot '
                              '%(volume_name)s@%(name)s'), snapshot)
            raise
def _get_zfs_send_recv_cmd(self, src, dst):
"""Returns rrmgr command for source and destination."""
return utils.get_rrmgr_cmd(src, dst,
compression=self.rrmgr_compression,
tcp_buf_size=self.rrmgr_tcp_buf_size,
connections=self.rrmgr_connections)
@staticmethod
def get_nms_for_url(url):
"""Returns initialized nms object for url."""
auto, scheme, user, password, host, port, path =\
utils.parse_nms_url(url)
return jsonrpc.NexentaJSONProxy(scheme, host, port, path, user,
password, auto=auto)
def migrate_volume(self, ctxt, volume, host):
"""Migrate if volume and host are managed by Nexenta appliance.
:param ctxt: context
:param volume: a dictionary describing the volume to migrate
:param host: a dictionary describing the host to migrate to
"""
LOG.debug(_('Enter: migrate_volume: id=%(id)s, host=%(host)s') %
{'id': volume['id'], 'host': host})
false_ret = (False, None)
if volume['status'] != 'available':
return false_ret
if 'capabilities' not in host:
return false_ret
capabilities = host['capabilities']
if 'location_info' not in capabilities or \
'iscsi_target_portal_port' not in capabilities or \
'nms_url' not in capabilities:
return false_ret
iscsi_target_portal_port = capabilities['iscsi_target_portal_port']
nms_url = capabilities['nms_url']
dst_parts = capabilities['location_info'].split(':')
if capabilities.get('vendor_name') != 'Nexenta' or \
dst_parts[0] != self.__class__.__name__ or \
capabilities['free_capacity_gb'] < volume['size']:
return false_ret
dst_host, dst_volume = dst_parts[1:]
ssh_bound = False
ssh_bindings = self.nms.appliance.ssh_list_bindings()
for bind in ssh_bindings:
if bind.index(dst_host) != -1:
ssh_bound = True
break
if not ssh_bound:
LOG.warning(_("Remote NexentaStor appliance at %s should be "
"SSH-bound."), dst_host)
# Create temporary snapshot of volume on NexentaStor Appliance.
snapshot = {
'volume_name': volume['name'],
'name': utils.get_migrate_snapshot_name(volume)
}
self.create_snapshot(snapshot)
src = '%(volume)s/%(zvol)s@%(snapshot)s' % {
'volume': self.volume,
'zvol': volume['name'],
'snapshot': snapshot['name']
}
dst = ':'.join([dst_host, dst_volume])
try:
self.nms.appliance.execute(self._get_zfs_send_recv_cmd(src, dst))
except nexenta.NexentaException as exc:
LOG.warning(_("Cannot send source snapshot %(src)s to "
"destination %(dst)s. Reason: %(exc)s"),
{'src': src, 'dst': dst, 'exc': exc})
return false_ret
finally:
try:
self.delete_snapshot(snapshot)
except nexenta.NexentaException as exc:
LOG.warning(_("Cannot delete temporary source snapshot "
"%(src)s on NexentaStor Appliance: %(exc)s"),
{'src': src, 'exc': exc})
try:
self.delete_volume(volume)
except nexenta.NexentaException as exc:
LOG.warning(_("Cannot delete source volume %(volume)s on "
"NexentaStor Appliance: %(exc)s"),
{'volume': volume['name'], 'exc': exc})
dst_nms = self.get_nms_for_url(nms_url)
dst_snapshot = '%s/%s@%s' % (dst_volume, volume['name'],
snapshot['name'])
try:
dst_nms.snapshot.destroy(dst_snapshot, '')
except nexenta.NexentaException as exc:
LOG.warning(_("Cannot delete temporary destination snapshot "
"%(dst)s on NexentaStor Appliance: %(exc)s"),
{'dst': dst_snapshot, 'exc': exc})
provider_location = '%(host)s:%(port)s,1 %(name)s 0' % {
'host': dst_host,
'port': iscsi_target_portal_port,
'name': self._get_target_name(volume['name'])
}
return True, {'provider_location': provider_location}
def create_snapshot(self, snapshot):
"""Create snapshot of existing zvol on appliance.
:param snapshot: snapshot reference
"""
self.nms.zvol.create_snapshot(
self._get_zvol_name(snapshot['volume_name']),
snapshot['name'], '')
def create_volume_from_snapshot(self, volume, snapshot):
"""Create new volume from other's snapshot on appliance.
:param volume: reference of volume to be created
:param snapshot: reference of source snapshot
"""
self.nms.zvol.clone(
'%s@%s' % (self._get_zvol_name(snapshot['volume_name']),
snapshot['name']),
self._get_zvol_name(volume['name']))
    def delete_snapshot(self, snapshot):
        """Delete volume's snapshot on appliance.

        Already-deleted snapshots are ignored; a snapshot with dependent
        clones raises :class:`exception.SnapshotIsBusy`.

        :param snapshot: snapshot reference
        """
        volume_name = self._get_zvol_name(snapshot['volume_name'])
        snapshot_name = '%s@%s' % (volume_name, snapshot['name'])
        try:
            self.nms.snapshot.destroy(snapshot_name, '')
        except nexenta.NexentaException as exc:
            if "does not exist" in exc.args[0]:
                LOG.info(_('Snapshot %s does not exist, it seems it was '
                           'already deleted.'), snapshot_name)
                return
            if "snapshot has dependent clones" in exc.args[0]:
                raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
            raise
def local_path(self, volume):
"""Return local path to existing local volume.
We never have local volumes, so it raises NotImplementedError.
:raise: :py:exc:`NotImplementedError`
"""
raise NotImplementedError
def _target_exists(self, target):
"""Check if iSCSI target exist.
:param target: target name
:return: True if target exist, else False
"""
targets = self.nms.stmf.list_targets()
if not targets:
return False
return target in self.nms.stmf.list_targets()
def _target_group_exists(self, target_group):
"""Check if target group exist.
:param target_group: target group
:return: True if target group exist, else False
"""
groups = self.nms.stmf.list_targetgroups()
if not groups:
return False
return target_group in groups
def _target_member_in_target_group(self, target_group, target_member):
"""Check if target member in target group.
:param target_group: target group
:param target_member: target member
:return: True if target member in target group, else False
:raises: NexentaException if target group doesn't exist
"""
members = self.nms.stmf.list_targetgroup_members(target_group)
if not members:
return False
return target_member in members
def _lu_exists(self, zvol_name):
"""Check if LU exists on appliance.
:param zvol_name: Zvol name
:raises: NexentaException if zvol not exists
:return: True if LU exists, else False
"""
try:
return bool(self.nms.scsidisk.lu_exists(zvol_name))
except nexenta.NexentaException as exc:
if 'does not exist' not in exc.args[0]:
raise
return False
    def _is_lu_shared(self, zvol_name):
        """Check if LU exists on appliance and shared.

        :param zvol_name: Zvol name
        :raises: NexentaException if Zvol not exist
        :return: True if LU exists and shared, else False
        """
        try:
            shared = self.nms.scsidisk.lu_shared(zvol_name) > 0
        except nexenta.NexentaException as exc:
            # Only the "LU does not exist for zvol" error is treated as
            # "not shared"; any other error (e.g. missing zvol) propagates.
            if 'does not exist for zvol' not in exc.args[0]:
                raise  # Zvol does not exists
            shared = False  # LU does not exist
        return shared
def _is_volume_exported(self, volume):
"""Check if volume exported.
:param volume: volume object
:return: True if volume exported, else False
"""
zvol_name = self._get_zvol_name(volume['name'])
target_name = self._get_target_name(volume['name'])
target_group_name = self._get_target_group_name(volume['name'])
return (self._target_exists(target_name) and
self._target_group_exists(target_group_name) and
self._target_member_in_target_group(target_group_name,
target_name) and
self._lu_exists(zvol_name) and
self._is_lu_shared(zvol_name))
def _get_provider_location(self, volume):
"""Returns volume iscsiadm-formatted provider location string."""
return '%(host)s:%(port)s,1 %(name)s 0' % {
'host': self.nms_host,
'port': self.configuration.nexenta_iscsi_target_portal_port,
'name': self._get_target_name(volume['name'])
}
    def _do_export(self, _ctx, volume, ensure=False):
        """Do all steps to get zvol exported as LUN 0 at separate target.

        Creates, in order: the iSCSI target, the target group, the group
        membership, the LU, and the LUN mapping — skipping each step that
        is already in place. With ``ensure=True`` the "already exists"
        class of errors is logged and ignored instead of raised.

        :param volume: reference of volume to be exported
        :param ensure: if True, ignore errors caused by already existing
                       resources
        """
        zvol_name = self._get_zvol_name(volume['name'])
        target_name = self._get_target_name(volume['name'])
        target_group_name = self._get_target_group_name(volume['name'])

        if not self._target_exists(target_name):
            try:
                self.nms.iscsitarget.create_target({
                    'target_name': target_name})
            except nexenta.NexentaException as exc:
                if ensure and 'already configured' in exc.args[0]:
                    LOG.info(_('Ignored target creation error "%s" while '
                               'ensuring export'), exc)
                else:
                    raise
        if not self._target_group_exists(target_group_name):
            try:
                self.nms.stmf.create_targetgroup(target_group_name)
            except nexenta.NexentaException as exc:
                if ((ensure and 'already exists' in exc.args[0]) or
                        'target must be offline' in exc.args[0]):
                    LOG.info(_('Ignored target group creation error "%s" '
                               'while ensuring export'), exc)
                else:
                    raise
        if not self._target_member_in_target_group(target_group_name,
                                                   target_name):
            try:
                self.nms.stmf.add_targetgroup_member(target_group_name,
                                                     target_name)
            except nexenta.NexentaException as exc:
                if ((ensure and 'already exists' in exc.args[0]) or
                        'target must be offline' in exc.args[0]):
                    LOG.info(_('Ignored target group member addition error '
                               '"%s" while ensuring export'), exc)
                else:
                    raise
        if not self._lu_exists(zvol_name):
            try:
                self.nms.scsidisk.create_lu(zvol_name, {})
            except nexenta.NexentaException as exc:
                if not ensure or 'in use' not in exc.args[0]:
                    raise
                LOG.info(_('Ignored LU creation error "%s" while ensuring '
                           'export'), exc)
        if not self._is_lu_shared(zvol_name):
            try:
                # Expose the LU as LUN 0 through the volume's target group.
                self.nms.scsidisk.add_lun_mapping_entry(zvol_name, {
                    'target_group': target_group_name,
                    'lun': '0'})
            except nexenta.NexentaException as exc:
                if not ensure or 'view entry exists' not in exc.args[0]:
                    raise
                LOG.info(_('Ignored LUN mapping entry addition error "%s" '
                           'while ensuring export'), exc)
    def create_export(self, _ctx, volume):
        """Create new export for zvol.

        :param volume: reference of volume to be exported
        :return: iscsiadm-formatted provider location string
        """
        # ensure=False: any "already exists" error is treated as a real
        # failure, since nothing should exist yet for a fresh export.
        self._do_export(_ctx, volume, ensure=False)
        return {'provider_location': self._get_provider_location(volume)}
    def ensure_export(self, _ctx, volume):
        """Recreate parts of export if necessary.

        :param volume: reference of volume to be exported
        """
        # ensure=True: "already exists" style errors are logged and ignored
        # so a partially created export can be completed idempotently.
        self._do_export(_ctx, volume, ensure=True)
def remove_export(self, _ctx, volume):
"""Destroy all resources created to export zvol.
:param volume: reference of volume to be unexported
"""
zvol_name = self._get_zvol_name(volume['name'])
target_name = self._get_target_name(volume['name'])
target_group_name = self._get_target_group_name(volume['name'])
self.nms.scsidisk.delete_lu(zvol_name)
try:
self.nms.stmf.destroy_targetgroup(target_group_name)
except nexenta.NexentaException as exc:
# We assume that target group is already gone
LOG.warn(_('Got error trying to destroy target group'
' %(target_group)s, assuming it is '
'already gone: %(exc)s'),
{'target_group': target_group_name, 'exc': exc})
try:
self.nms.iscsitarget.delete_target(target_name)
except nexenta.NexentaException as exc:
# We assume that target is gone as well
LOG.warn(_('Got error trying to delete target %(target)s,'
' assuming it is already gone: %(exc)s'),
{'target': target_name, 'exc': exc})
    def get_volume_stats(self, refresh=False):
        """Get volume stats.

        If 'refresh' is True, run update the stats first.

        :returns: the cached stats dict (``self._stats``).
        """
        if refresh:
            self._update_volume_stats()
        # Otherwise serve whatever was cached by the last refresh.
        return self._stats
def _update_volume_stats(self):
"""Retrieve stats info for NexentaStor appliance."""
LOG.debug(_('Updating volume stats'))
stats = self.nms.volume.get_child_props(
self.configuration.nexenta_volume, 'health|size|used|available')
total_amount = utils.str2gib_size(stats['size'])
free_amount = utils.str2gib_size(stats['available'])
location_info = '%(driver)s:%(host)s:%(volume)s' % {
'driver': self.__class__.__name__,
'host': self.nms_host,
'volume': self.volume
}
self._stats = {
'vendor_name': 'Nexenta',
'driver_version': self.VERSION,
'storage_protocol': 'iSCSI',
'total_capacity_gb': total_amount,
'free_capacity_gb': free_amount,
'reserved_percentage': 0,
'QoS_support': False,
'volume_backend_name': self.backend_name,
'location_info': location_info,
'iscsi_target_portal_port': self.iscsi_target_portal_port,
'nms_url': self.nms.url
}
| {
"content_hash": "5c5cf83d579bf361d095bc281b8636b4",
"timestamp": "",
"source": "github",
"line_count": 587,
"max_line_length": 79,
"avg_line_length": 41.0221465076661,
"alnum_prop": 0.5683554817275748,
"repo_name": "NeCTAR-RC/cinder",
"id": "7635ea606eb69991ffe392b498c199d9706533b0",
"size": "24716",
"binary": false,
"copies": "3",
"ref": "refs/heads/nectar/icehouse",
"path": "cinder/volume/drivers/nexenta/iscsi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PLpgSQL",
"bytes": "9824"
},
{
"name": "Python",
"bytes": "6176241"
},
{
"name": "Shell",
"bytes": "15237"
}
],
"symlink_target": ""
} |
import subprocess
import os
def burn_subtitle(video, subtitle, ffmpeg="/home/zousiyu1/bin/ffmpeg/ffmpeg"):
    """Hard-burn an ASS subtitle file into a video with ffmpeg.

    The output is written next to the input as ``<name>-batch.mp4``.

    :param video: path of the input video file
    :param subtitle: path of the .ass subtitle file
    :param ffmpeg: ffmpeg executable to invoke (generalized from the
                   previously hard-coded path; default is unchanged)
    """
    # os.path.splitext is robust for any extension length, unlike the
    # old video[0:-4] slice which assumed a 3-character extension.
    base, _ext = os.path.splitext(video)
    output = base + "-batch" + ".mp4"
    exec_list = [ffmpeg, "-i", video, "-vf", "ass=" + subtitle, output]
    print("\n", exec_list, "\n")
    # run (list form: no shell involved, paths with spaces are safe)
    subprocess.call(exec_list)
# main: pair every non-output .mp4 in the cwd with an .ass subtitle by
# sorted order and burn them one at a time.
videos = sorted(f for f in os.listdir(".")
                if f.endswith(".mp4") and not f.endswith("batch.mp4"))
subtitles = sorted(f for f in os.listdir(".") if f.endswith(".ass"))
print(videos)
print(subtitles)
# Fail fast on a count mismatch instead of crashing (IndexError) midway
# through a long batch after hours of encoding.
if len(videos) != len(subtitles):
    raise SystemExit("video/subtitle count mismatch: %d videos vs %d "
                     "subtitles" % (len(videos), len(subtitles)))
for video, subtitle in zip(videos, subtitles):
    burn_subtitle(video, subtitle)
| {
"content_hash": "ab184ed662dc446032f8fe537dff658a",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 53,
"avg_line_length": 19.90909090909091,
"alnum_prop": 0.6210045662100456,
"repo_name": "Zousiyu/code_snippet",
"id": "7d73ff9acc1a2e110e618d7743d72f5b7ef189bd",
"size": "876",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/python/batch_burn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "4465"
},
{
"name": "MATLAB",
"bytes": "1342"
},
{
"name": "Python",
"bytes": "5651"
},
{
"name": "TeX",
"bytes": "52671"
}
],
"symlink_target": ""
} |
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """Compare a workbook produced by XlsxWriter against an Excel original."""

    def setUp(self):
        # Show full diffs when a comparison fails.
        self.maxDiff = None

        base_dir = 'xlsxwriter/test/comparison/'
        workbook_name = 'types06.xlsx'
        self.got_filename = base_dir + '_test_' + workbook_name
        self.exp_filename = base_dir + 'xlsx_files/' + workbook_name

        # Files whose contents legitimately differ between writers.
        self.ignore_files = [
            'xl/calcChain.xml',
            '[Content_Types].xml',
            'xl/_rels/workbook.xml.rels',
        ]
        self.ignore_elements = {}

    def test_write_formula_default(self):
        """Test writing formulas with strings_to_formulas on."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        worksheet.write(0, 0, '="0"&".0"', None, '0.0')

        workbook.close()
        self.assertExcelEqual()
| {
"content_hash": "4db8b02dd0fa5dfadc16a48bf83a6f83",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 68,
"avg_line_length": 28.457142857142856,
"alnum_prop": 0.5953815261044176,
"repo_name": "jkyeung/XlsxWriter",
"id": "302a826d377619d089a6c5763b36c58f0116e6b9",
"size": "1169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xlsxwriter/test/comparison/test_types06.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7819"
},
{
"name": "Perl",
"bytes": "3504"
},
{
"name": "Python",
"bytes": "2430294"
},
{
"name": "Shell",
"bytes": "6064"
}
],
"symlink_target": ""
} |
import collections
import json
import math
import re
import six
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import units
from cinder import context
from cinder import coordination
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder.objects import fields
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume.drivers.huawei import constants
from cinder.volume.drivers.huawei import fc_zone_helper
from cinder.volume.drivers.huawei import huawei_conf
from cinder.volume.drivers.huawei import huawei_utils
from cinder.volume.drivers.huawei import hypermetro
from cinder.volume.drivers.huawei import replication
from cinder.volume.drivers.huawei import rest_client
from cinder.volume.drivers.huawei import smartx
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)

# Driver-specific configuration options.  The metro_* options describe the
# remote array used for HyperMetro; all of them default to unset.
huawei_opts = [
    cfg.StrOpt('cinder_huawei_conf_file',
               default='/etc/cinder/cinder_huawei_conf.xml',
               help='The configuration file for the Cinder Huawei driver.'),
    cfg.StrOpt('hypermetro_devices',
               default=None,
               help='The remote device hypermetro will use.'),
    cfg.StrOpt('metro_san_user',
               default=None,
               help='The remote metro device san user.'),
    cfg.StrOpt('metro_san_password',
               default=None,
               secret=True,
               help='The remote metro device san password.'),
    cfg.StrOpt('metro_domain_name',
               default=None,
               help='The remote metro device domain name.'),
    cfg.StrOpt('metro_san_address',
               default=None,
               help='The remote metro device request url.'),
    cfg.StrOpt('metro_storage_pools',
               default=None,
               help='The remote metro device pool names.'),
]

CONF = cfg.CONF
CONF.register_opts(huawei_opts, group=configuration.SHARED_CONF_GROUP)

# Lightweight stand-ins for snapshot/volume objects, used when the driver
# needs to synthesize one (e.g. the temporary snapshot in a volume clone).
snap_attrs = ('id', 'volume_id', 'volume', 'provider_location')
Snapshot = collections.namedtuple('Snapshot', snap_attrs)

vol_attrs = ('id', 'lun_type', 'provider_location', 'metadata')
Volume = collections.namedtuple('Volume', vol_attrs)
class HuaweiBaseDriver(driver.VolumeDriver):
    """Shared base class for the Huawei storage volume drivers."""

    # ThirdPartySystems wiki page
    CI_WIKI_NAME = "Huawei_volume_CI"
    def __init__(self, *args, **kwargs):
        """Read driver configuration and prepare (but not open) clients.

        :raises exception.InvalidInput: if no configuration was supplied.
        """
        super(HuaweiBaseDriver, self).__init__(*args, **kwargs)

        if not self.configuration:
            msg = _('Configuration is not found.')
            raise exception.InvalidInput(reason=msg)

        # Set after a replication failover; flips which device
        # get_local_and_remote_client_conf() treats as active.
        self.active_backend_id = kwargs.get('active_backend_id')

        self.configuration.append_config_values(huawei_opts)
        self.huawei_conf = huawei_conf.HuaweiConf(self.configuration)
        # Filled in lazily by update_support_capability().
        self.support_func = None
        self.metro_flag = False
        self.replica = None
def check_func_support(self, obj_name):
try:
self.client._get_object_count(obj_name)
return True
except Exception:
return False
def get_local_and_remote_dev_conf(self):
self.loc_dev_conf = self.huawei_conf.get_local_device()
# Now just support one replication device.
replica_devs = self.huawei_conf.get_replication_devices()
self.replica_dev_conf = replica_devs[0] if replica_devs else {}
def get_local_and_remote_client_conf(self):
if self.active_backend_id:
return self.replica_dev_conf, self.loc_dev_conf
else:
return self.loc_dev_conf, self.replica_dev_conf
    def do_setup(self, context):
        """Instantiate common class and login storage system.

        Logs into the active device, optionally the HyperMetro remote
        device, and optionally the replication device.

        :raises exception.VolumeBackendAPIException: if no active client
            configuration is available.
        """
        # Set huawei private configuration into Configuration object.
        self.huawei_conf.update_config_value()
        self.get_local_and_remote_dev_conf()
        client_conf, replica_client_conf = (
            self.get_local_and_remote_client_conf())

        # init local client
        if not client_conf:
            msg = _('Get active client failed.')
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        self.client = rest_client.RestClient(self.configuration,
                                             **client_conf)
        self.client.login()

        # init remote client.  HyperMetro is only enabled when all three
        # remote credentials are configured; the address option may carry
        # several ';'-separated URLs.
        metro_san_address = self.configuration.safe_get("metro_san_address")
        metro_san_user = self.configuration.safe_get("metro_san_user")
        metro_san_password = self.configuration.safe_get("metro_san_password")
        if metro_san_address and metro_san_user and metro_san_password:
            metro_san_address = metro_san_address.split(";")
            self.rmt_client = rest_client.RestClient(self.configuration,
                                                     metro_san_address,
                                                     metro_san_user,
                                                     metro_san_password)

            self.rmt_client.login()
            self.metro_flag = True
        else:
            self.metro_flag = False
            LOG.warning("Remote device not configured in cinder.conf")

        # init replication manager (best-effort login: try_login).
        if replica_client_conf:
            self.replica_client = rest_client.RestClient(self.configuration,
                                                         **replica_client_conf)
            self.replica_client.try_login()
            self.replica = replication.ReplicaPairManager(self.client,
                                                          self.replica_client,
                                                          self.configuration)
    def check_for_setup_error(self):
        """No extra validation; do_setup() already raises on bad config."""
        pass
    def get_volume_stats(self, refresh=False):
        """Get volume status and reload huawei config file.

        Note: *refresh* is ignored -- config and array stats are re-read
        on every call.
        """
        self.huawei_conf.update_config_value()

        stats = self.client.update_volume_stats()
        stats = self.update_support_capability(stats)

        if self.replica:
            stats = self.replica.update_replica_capability(stats)
            # NOTE(review): assumes the replication device config always
            # contains 'backend_id' -- would raise KeyError otherwise;
            # confirm against huawei_conf.get_replication_devices().
            targets = [self.replica_dev_conf['backend_id']]
            stats['replication_targets'] = targets
            stats['replication_enabled'] = True

        return stats
    def update_support_capability(self, stats):
        """Annotate each pool in *stats* with array feature capabilities.

        :param stats: stats dict from the REST client; mutated in place.
        :returns: the same stats dict.
        """
        for pool in stats['pools']:
            pool['smartpartition'] = (
                self.check_func_support("SMARTCACHEPARTITION"))
            pool['smartcache'] = self.check_func_support("smartcachepool")
            pool['QoS_support'] = self.check_func_support("ioclass")
            pool['splitmirror'] = self.check_func_support("splitmirror")
            pool['luncopy'] = self.check_func_support("luncopy")
            pool['thick_provisioning_support'] = True
            pool['thin_provisioning_support'] = True
            pool['smarttier'] = True
            pool['consistent_group_snapshot_enabled'] = True

            # Dorado product line: no tiering and thin-only provisioning.
            if self.configuration.san_product == "Dorado":
                pool['smarttier'] = False
                pool['thick_provisioning_support'] = False

            if self.metro_flag:
                pool['hypermetro'] = self.check_func_support("HyperMetroPair")

            # assign the support function to global parameter.
            # NOTE(review): after the loop this holds the LAST pool's
            # capabilities; all pools are implicitly assumed identical.
            self.support_func = pool

        return stats
def _get_volume_type(self, volume):
volume_type = None
type_id = volume.volume_type_id
if type_id:
ctxt = context.get_admin_context()
volume_type = volume_types.get_volume_type(ctxt, type_id)
return volume_type
def _get_volume_params(self, volume_type):
"""Return the parameters for creating the volume."""
specs = {}
if volume_type:
specs = dict(volume_type).get('extra_specs')
opts = self._get_volume_params_from_specs(specs)
return opts
def _get_group_type(self, group):
opts = []
vol_types = group.volume_types
for vol_type in vol_types:
specs = vol_type.extra_specs
opts.append(self._get_volume_params_from_specs(specs))
return opts
def _check_volume_type_support(self, opts, vol_type):
if not opts:
return False
support = True
for opt in opts:
if opt.get(vol_type) != 'true':
support = False
break
return support
    def _get_volume_params_from_specs(self, specs):
        """Return the volume parameters from extra specs.

        :param specs: raw extra-specs dict of a volume type.
        :returns: normalized opts dict after smartx/replication parsing.
        """
        # Boolean feature switches recognized under 'capabilities:'.
        opts_capability = {
            'smarttier': False,
            'smartcache': False,
            'smartpartition': False,
            'thin_provisioning_support': False,
            'thick_provisioning_support': False,
            'hypermetro': False,
            'replication_enabled': False,
            'replication_type': 'async',
        }

        # Value-carrying options, each tied to its feature switch below.
        opts_value = {
            'policy': None,
            'partitionname': None,
            'cachename': None,
        }

        # Maps a feature switch (the spec scope) to its value option.
        opts_associate = {
            'smarttier': 'policy',
            'smartcache': 'cachename',
            'smartpartition': 'partitionname',
        }

        opts = self._get_opts_from_specs(opts_capability,
                                         opts_value,
                                         opts_associate,
                                         specs)
        opts = smartx.SmartX().get_smartx_specs_opts(opts)
        opts = replication.get_replication_opts(opts)
        LOG.debug('volume opts %(opts)s.', {'opts': opts})
        return opts
    def _get_opts_from_specs(self, opts_capability, opts_value,
                             opts_associate, specs):
        """Get the well defined extra specs.

        Recognized key shapes:
          * ``<key>`` or ``capabilities:<key>`` with a ``'<is> x'`` /
            ``'<in> x'`` value -- feature switches (opts_capability).
          * ``<feature>:<value_key>`` -- a value option, accepted only when
            opts_associate maps that feature to that value key.
        """
        opts = {}
        opts.update(opts_capability)
        opts.update(opts_value)

        for key, value in specs.items():
            # Get the scope, if it is using scope format.
            scope = None
            key_split = key.split(':')
            if len(key_split) > 2 and key_split[0] != "capabilities":
                continue

            if len(key_split) == 1:
                key = key_split[0].lower()
            else:
                scope = key_split[0].lower()
                key = key_split[1].lower()

            if ((not scope or scope == 'capabilities')
                    and key in opts_capability):
                words = value.split()
                # Only the '<is> x' / '<in> x' two-word form is accepted;
                # the lowercased second word becomes the option value.
                if words and len(words) == 2 and words[0] in ('<is>', '<in>'):
                    opts[key] = words[1].lower()
                elif key == 'replication_type':
                    LOG.error("Extra specs must be specified as "
                              "replication_type='<in> sync' or "
                              "'<in> async'.")
                else:
                    LOG.error("Extra specs must be specified as "
                              "capabilities:%s='<is> True'.", key)

            # Value options (e.g. 'smarttier:policy') are taken verbatim.
            if ((scope in opts_capability)
                    and (key in opts_value)
                    and (scope in opts_associate)
                    and (opts_associate[scope] == key)):
                opts[key] = value

        return opts
    def _get_lun_params(self, volume, opts):
        """Build the REST payload for creating the LUN backing *volume*.

        Field values come from the volume, the parsed opts and driver
        configuration defaults.  'TYPE': '11' and 'PARENTTYPE': '216' are
        the array's codes for a LUN owned by a storage pool.
        """
        pool_name = volume_utils.extract_host(volume.host, level='pool')
        params = {
            'TYPE': '11',
            'NAME': huawei_utils.encode_name(volume.id),
            'PARENTTYPE': '216',
            'PARENTID': self.client.get_pool_id(pool_name),
            'DESCRIPTION': volume.name,
            'ALLOCTYPE': opts.get('LUNType', self.configuration.lun_type),
            'CAPACITY': huawei_utils.get_volume_size(volume),
            'WRITEPOLICY': self.configuration.lun_write_type,
            'PREFETCHPOLICY': self.configuration.lun_prefetch_type,
            'PREFETCHVALUE': self.configuration.lun_prefetch_value,
            'DATATRANSFERPOLICY':
                opts.get('policy', self.configuration.lun_policy),
            'READCACHEPOLICY': self.configuration.lun_read_cache_policy,
            'WRITECACHEPOLICY': self.configuration.lun_write_cache_policy, }

        LOG.info('volume: %(volume)s, lun params: %(params)s.',
                 {'volume': volume.id, 'params': params})
        return params
def _create_volume(self, lun_params):
# Create LUN on the array.
lun_info = self.client.create_lun(lun_params)
metadata = {'huawei_lun_id': lun_info['ID'],
'huawei_lun_wwn': lun_info['WWN']}
model_update = {'metadata': metadata}
return lun_info, model_update
    def _create_base_type_volume(self, opts, volume, volume_type):
        """Create volume and add some base type.

        Base type is the service type which doesn't conflict with the other.

        On any failure applying QoS/partition/cache, the freshly created
        LUN is rolled back (deleted) before re-raising.

        :returns: (lun_params, lun_info, model_update)
        :raises exception.VolumeBackendAPIException: on QoS/smartx failure.
        """
        lun_params = self._get_lun_params(volume, opts)
        lun_info, model_update = self._create_volume(lun_params)
        lun_id = lun_info['ID']

        try:
            qos = smartx.SmartQos.get_qos_by_volume_type(volume_type)
            if qos:
                if not self.support_func.get('QoS_support'):
                    msg = (_("Can't support qos on the array"))
                    LOG.error(msg)
                    raise exception.VolumeBackendAPIException(data=msg)
                else:
                    smart_qos = smartx.SmartQos(self.client)
                    smart_qos.add(qos, lun_id)

            smartpartition = smartx.SmartPartition(self.client)
            smartpartition.add(opts, lun_id)

            smartcache = smartx.SmartCache(self.client)
            smartcache.add(opts, lun_id)
        except Exception as err:
            # Roll back the LUN so a half-configured volume is not left
            # behind on the array.
            self._delete_lun_with_check(lun_id)
            msg = _('Create volume error. Because %s.') % six.text_type(err)
            raise exception.VolumeBackendAPIException(data=msg)

        return lun_params, lun_info, model_update
    def _add_extend_type_to_volume(self, opts, lun_params, lun_info,
                                   model_update):
        """Add the extend type.

        Extend type is the service type which may conflict with the other.
        So add it after those services.

        On failure the LUN is rolled back (deleted) and the original
        exception re-raised.

        :returns: the (possibly updated) model_update dict.
        """
        lun_id = lun_info['ID']
        if opts.get('hypermetro') == 'true':
            metro = hypermetro.HuaweiHyperMetro(self.client,
                                                self.rmt_client,
                                                self.configuration)
            try:
                metro_info = metro.create_hypermetro(lun_id, lun_params)
                model_update['metadata'].update(metro_info)
            except exception.VolumeBackendAPIException as err:
                LOG.error('Create hypermetro error: %s.', err)
                self._delete_lun_with_check(lun_id)
                raise

        if opts.get('replication_enabled') == 'true':
            replica_model = opts.get('replication_type')
            try:
                replica_info = self.replica.create_replica(lun_info,
                                                           replica_model)
                model_update.update(replica_info)
            except Exception as err:
                LOG.exception('Create replication volume error.')
                self._delete_lun_with_check(lun_id)
                raise

        return model_update
    def create_volume(self, volume):
        """Create a volume.

        :returns: model update with provider_location encoding the LUN
                  id/WWN metadata.
        :raises exception.VolumeBackendAPIException: if the volume type
            requests hypermetro and replication together (unsupported).
        """
        volume_type = self._get_volume_type(volume)
        opts = self._get_volume_params(volume_type)
        if (opts.get('hypermetro') == 'true'
                and opts.get('replication_enabled') == 'true'):
            err_msg = _("Hypermetro and Replication can not be "
                        "used in the same volume_type.")
            LOG.error(err_msg)
            raise exception.VolumeBackendAPIException(data=err_msg)

        lun_params, lun_info, model_update = (
            self._create_base_type_volume(opts, volume, volume_type))

        # Hypermetro/replication are layered on after the base services.
        model_update = self._add_extend_type_to_volume(opts, lun_params,
                                                       lun_info, model_update)

        model_update['provider_location'] = huawei_utils.to_string(
            **model_update.pop('metadata'))

        return model_update
def _delete_volume(self, volume):
lun_id, lun_wwn = huawei_utils.get_volume_lun_id(self.client, volume)
if not lun_id:
return
lun_group_ids = self.client.get_lungroupids_by_lunid(lun_id)
if lun_group_ids and len(lun_group_ids) == 1:
self.client.remove_lun_from_lungroup(lun_group_ids[0], lun_id)
self.client.delete_lun(lun_id)
    def delete_volume(self, volume):
        """Delete a volume.

        Three steps:
        Firstly, remove associate from lungroup.
        Secondly, remove associate from QoS policy.
        Thirdly, remove the lun.

        Missing volumes are logged and ignored; hypermetro/replication
        relationships are torn down first.
        """
        lun_id = self._check_volume_exist_on_array(
            volume, constants.VOLUME_NOT_EXISTS_WARN)
        if not lun_id:
            return

        if self.support_func.get('QoS_support'):
            qos_id = self.client.get_qosid_by_lunid(lun_id)
            if qos_id:
                smart_qos = smartx.SmartQos(self.client)
                smart_qos.remove(qos_id, lun_id)

        metadata = huawei_utils.get_lun_metadata(volume)
        if metadata.get('hypermetro_id'):
            metro = hypermetro.HuaweiHyperMetro(self.client,
                                                self.rmt_client,
                                                self.configuration)
            try:
                metro.delete_hypermetro(volume)
            except exception.VolumeBackendAPIException as err:
                LOG.error('Delete hypermetro error: %s.', err)
                # We have checked the LUN WWN above,
                # no need to check again here.
                self._delete_volume(volume)
                raise

        # Delete a replication volume
        replica_data = volume.replication_driver_data
        if replica_data:
            try:
                self.replica.delete_replica(volume)
            except exception.VolumeBackendAPIException as err:
                # Re-raise after best-effort LUN removal.
                with excutils.save_and_reraise_exception():
                    LOG.exception("Delete replication error.")
                    self._delete_volume(volume)

        self._delete_volume(volume)
def _delete_lun_with_check(self, lun_id, lun_wwn=None):
if not lun_id:
return
if self.client.check_lun_exist(lun_id, lun_wwn):
if self.support_func.get('QoS_support'):
qos_id = self.client.get_qosid_by_lunid(lun_id)
if qos_id:
smart_qos = smartx.SmartQos(self.client)
smart_qos.remove(qos_id, lun_id)
self.client.delete_lun(lun_id)
    def _is_lun_migration_complete(self, src_id, dst_id):
        """Poll helper: is the src->dst migration task finished?

        :returns: True when the task reports MIGRATION_COMPLETE, False
                  while still running (or when no task data is returned).
        :raises exception.VolumeBackendAPIException: when the task reports
            MIGRATION_FAULT, or no matching task can be found at all.
        """
        result = self.client.get_lun_migration_task()
        found_migration_task = False
        if 'data' not in result:
            return False

        for item in result['data']:
            if (src_id == item['PARENTID'] and dst_id == item['TARGETLUNID']):
                found_migration_task = True
                if constants.MIGRATION_COMPLETE == item['RUNNINGSTATUS']:
                    return True
                if constants.MIGRATION_FAULT == item['RUNNINGSTATUS']:
                    msg = _("Lun migration error.")
                    LOG.error(msg)
                    raise exception.VolumeBackendAPIException(data=msg)

        if not found_migration_task:
            err_msg = _("Cannot find migration task.")
            LOG.error(err_msg)
            raise exception.VolumeBackendAPIException(data=err_msg)

        return False
def _is_lun_migration_exist(self, src_id, dst_id):
try:
result = self.client.get_lun_migration_task()
except Exception:
LOG.error("Get LUN migration error.")
return False
if 'data' in result:
for item in result['data']:
if (src_id == item['PARENTID']
and dst_id == item['TARGETLUNID']):
return True
return False
    def _migrate_lun(self, src_id, dst_id):
        """Run an array-side LUN migration and wait for completion.

        Whether the migration succeeds or fails, any still-present task is
        removed in the ``finally`` block; on failure the destination LUN
        is deleted as well.

        :returns: True on success.
        :raises exception.VolumeBackendAPIException: on any failure.
        """
        try:
            self.client.create_lun_migration(src_id, dst_id)

            def _is_lun_migration_complete():
                return self._is_lun_migration_complete(src_id, dst_id)

            wait_interval = constants.MIGRATION_WAIT_INTERVAL
            huawei_utils.wait_for_condition(_is_lun_migration_complete,
                                            wait_interval,
                                            self.configuration.lun_timeout)
        # Clean up if migration failed.
        except Exception as ex:
            raise exception.VolumeBackendAPIException(data=ex)
        finally:
            # A leftover task means the migration did not finish cleanly:
            # drop the task and the destination LUN.
            if self._is_lun_migration_exist(src_id, dst_id):
                self.client.delete_lun_migration(src_id, dst_id)
                self._delete_lun_with_check(dst_id)

        LOG.debug("Migrate lun %s successfully.", src_id)
        return True
    def _wait_volume_ready(self, lun_id):
        """Block until the LUN reports healthy + ready status.

        Polls every ``lun_ready_wait_interval`` seconds, with a total
        timeout of ten intervals (enforced by wait_for_condition).
        """
        wait_interval = self.configuration.lun_ready_wait_interval

        def _volume_ready():
            result = self.client.get_lun_info(lun_id)
            if (result['HEALTHSTATUS'] == constants.STATUS_HEALTH
               and result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY):
                return True
            return False

        huawei_utils.wait_for_condition(_volume_ready,
                                        wait_interval,
                                        wait_interval * 10)
def _get_original_status(self, volume):
return 'in-use' if volume.volume_attachment else 'available'
    def update_migrated_volume(self, ctxt, volume, new_volume,
                               original_volume_status=None):
        """Rename the migrated LUN back to the original volume's name.

        If the rename fails, keep the new volume's name_id so Cinder can
        still find the LUN under its migration-time name.

        :returns: model update with provider_location and _name_id.
        """
        orig_lun_name = huawei_utils.encode_name(volume.id)
        new_lun_id, lun_wwn = huawei_utils.get_volume_lun_id(
            self.client, new_volume)
        new_metadata = huawei_utils.get_lun_metadata(new_volume)
        model_update = {
            'provider_location': huawei_utils.to_string(**new_metadata),
        }

        try:
            self.client.rename_lun(new_lun_id, orig_lun_name)
        except exception.VolumeBackendAPIException:
            LOG.error('Unable to rename lun %s on array.', new_lun_id)
            model_update['_name_id'] = new_volume.name_id
        else:
            LOG.debug("Renamed lun %(id)s to %(name)s successfully.",
                      {'id': new_lun_id,
                       'name': orig_lun_name})
            model_update['_name_id'] = None

        return model_update
    def migrate_volume(self, ctxt, volume, host, new_type=None):
        """Migrate a volume within the same array.

        :returns: (moved, model_update) tuple; (False, None) when the
                  volume cannot be migrated.
        """
        self._check_volume_exist_on_array(volume,
                                          constants.VOLUME_NOT_EXISTS_RAISE)

        # NOTE(jlc): Replication volume can't migrate. But retype
        # can remove replication relationship first then do migrate.
        # So don't add this judgement into _check_migration_valid().
        volume_type = self._get_volume_type(volume)
        opts = self._get_volume_params(volume_type)
        if opts.get('replication_enabled') == 'true':
            return (False, None)

        return self._migrate_volume(volume, host, new_type)
def _check_migration_valid(self, host, volume):
if 'pool_name' not in host['capabilities']:
return False
target_device = host['capabilities']['location_info']
# Source and destination should be on same array.
if target_device != self.client.device_id:
return False
# Same protocol should be used if volume is in-use.
protocol = self.configuration.san_protocol
if (host['capabilities']['storage_protocol'] != protocol
and self._get_original_status(volume) == 'in-use'):
return False
pool_name = host['capabilities']['pool_name']
if len(pool_name) == 0:
return False
return True
def _migrate_volume(self, volume, host, new_type=None):
if not self._check_migration_valid(host, volume):
return (False, None)
type_id = volume.volume_type_id
volume_type = None
if type_id:
volume_type = volume_types.get_volume_type(None, type_id)
pool_name = host['capabilities']['pool_name']
pools = self.client.get_all_pools()
pool_info = self.client.get_pool_info(pool_name, pools)
dst_volume_name = six.text_type(uuid.uuid4())
src_id, lun_wwn = huawei_utils.get_volume_lun_id(self.client, volume)
opts = None
qos = None
if new_type:
# If new type exists, use new type.
new_specs = new_type['extra_specs']
opts = self._get_volume_params_from_specs(new_specs)
if 'LUNType' not in opts:
opts['LUNType'] = self.configuration.lun_type
qos = smartx.SmartQos.get_qos_by_volume_type(new_type)
elif volume_type:
qos = smartx.SmartQos.get_qos_by_volume_type(volume_type)
if not opts:
opts = self._get_volume_params(volume_type)
lun_info = self.client.get_lun_info(src_id)
if opts['policy']:
policy = opts['policy']
else:
policy = lun_info.get('DATATRANSFERPOLICY',
self.configuration.lun_policy)
lun_params = {
'NAME': dst_volume_name,
'PARENTID': pool_info['ID'],
'DESCRIPTION': lun_info['DESCRIPTION'],
'ALLOCTYPE': opts.get('LUNType', lun_info['ALLOCTYPE']),
'CAPACITY': lun_info['CAPACITY'],
'WRITEPOLICY': lun_info['WRITEPOLICY'],
'PREFETCHPOLICY': lun_info['PREFETCHPOLICY'],
'PREFETCHVALUE': lun_info['PREFETCHVALUE'],
'DATATRANSFERPOLICY': policy,
'READCACHEPOLICY': lun_info.get(
'READCACHEPOLICY',
self.configuration.lun_read_cache_policy),
'WRITECACHEPOLICY': lun_info.get(
'WRITECACHEPOLICY',
self.configuration.lun_write_cache_policy),
'OWNINGCONTROLLER': lun_info['OWNINGCONTROLLER'], }
for item in lun_params.keys():
if lun_params.get(item) == '--':
del lun_params[item]
lun_info = self.client.create_lun(lun_params)
lun_id = lun_info['ID']
if qos:
LOG.info('QoS: %s.', qos)
SmartQos = smartx.SmartQos(self.client)
SmartQos.add(qos, lun_id)
if opts:
smartpartition = smartx.SmartPartition(self.client)
smartpartition.add(opts, lun_id)
smartcache = smartx.SmartCache(self.client)
smartcache.add(opts, lun_id)
dst_id = lun_info['ID']
self._wait_volume_ready(dst_id)
moved = self._migrate_lun(src_id, dst_id)
return moved, {}
def create_volume_from_snapshot(self, volume, snapshot):
"""Create a volume from a snapshot.
We use LUNcopy to copy a new volume from snapshot.
The time needed increases as volume size does.
"""
volume_type = self._get_volume_type(volume)
opts = self._get_volume_params(volume_type)
if (opts.get('hypermetro') == 'true'
and opts.get('replication_enabled') == 'true'):
msg = _("Hypermetro and Replication can not be "
"used in the same volume_type.")
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
snapshot_id = huawei_utils.get_snapshot_id(self.client, snapshot)
if snapshot_id is None:
msg = _('create_volume_from_snapshot: Snapshot %(name)s '
'does not exist.') % {'name': snapshot.id}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
lun_params, lun_info, model_update = (
self._create_base_type_volume(opts, volume, volume_type))
tgt_lun_id = lun_info['ID']
luncopy_name = huawei_utils.encode_name(volume.id)
LOG.info(
'create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, '
'tgt_lun_id: %(tgt_lun_id)s, copy_name: %(copy_name)s.',
{'src_lun_id': snapshot_id,
'tgt_lun_id': tgt_lun_id,
'copy_name': luncopy_name})
wait_interval = self.configuration.lun_ready_wait_interval
def _volume_ready():
result = self.client.get_lun_info(tgt_lun_id)
if (result['HEALTHSTATUS'] == constants.STATUS_HEALTH
and result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY):
return True
return False
huawei_utils.wait_for_condition(_volume_ready,
wait_interval,
wait_interval * 10)
self._copy_volume(volume, luncopy_name,
snapshot_id, tgt_lun_id)
# NOTE(jlc): Actually, we just only support replication here right
# now, not hypermetro.
model_update = self._add_extend_type_to_volume(opts, lun_params,
lun_info, model_update)
model_update['provider_location'] = huawei_utils.to_string(
**model_update.pop('metadata'))
return model_update
    def create_cloned_volume(self, volume, src_vref):
        """Clone a new volume from an existing volume.

        Implemented as: take a temporary snapshot of the source, create
        the new volume from it, then delete the snapshot (best-effort).
        """
        self._check_volume_exist_on_array(src_vref,
                                          constants.VOLUME_NOT_EXISTS_RAISE)

        # Form the snapshot structure.
        snapshot = Snapshot(id=uuid.uuid4().__str__(),
                            volume_id=src_vref.id,
                            volume=src_vref,
                            provider_location=None)

        # Create snapshot.
        self.create_snapshot(snapshot)

        try:
            # Create volume from snapshot.
            model_update = self.create_volume_from_snapshot(volume, snapshot)
        finally:
            try:
                # Delete snapshot.
                self.delete_snapshot(snapshot)
            except exception.VolumeBackendAPIException:
                # Leaving a stray snapshot is preferable to failing the
                # clone at this point.
                LOG.warning(
                    'Failure deleting the snapshot %(snapshot_id)s '
                    'of volume %(volume_id)s.',
                    {'snapshot_id': snapshot.id,
                     'volume_id': src_vref.id},)

        return model_update
    def _check_volume_exist_on_array(self, volume, action):
        """Check whether the volume exists on the array.

        If the volume exists on the array, return the LUN ID.
        If not exists, raise or log warning.

        :param action: constants.VOLUME_NOT_EXISTS_WARN to warn and return
            None, or constants.VOLUME_NOT_EXISTS_RAISE to raise.
        """
        lun_id, lun_wwn = huawei_utils.get_volume_lun_id(self.client, volume)
        if not lun_id:
            msg = _("Volume %s does not exist on the array.") % volume.id
            if action == constants.VOLUME_NOT_EXISTS_WARN:
                LOG.warning(msg)
            if action == constants.VOLUME_NOT_EXISTS_RAISE:
                raise exception.VolumeBackendAPIException(data=msg)
            return

        if not lun_wwn:
            LOG.debug("No LUN WWN recorded for volume %s.", volume.id)

        # The id alone may be stale; verify the (id, wwn) pair still
        # matches a LUN on the array.
        if not self.client.check_lun_exist(lun_id, lun_wwn):
            msg = (_("Volume %s does not exist on the array.")
                   % volume.id)
            if action == constants.VOLUME_NOT_EXISTS_WARN:
                LOG.warning(msg)
            if action == constants.VOLUME_NOT_EXISTS_RAISE:
                raise exception.VolumeBackendAPIException(data=msg)
            return
        return lun_id
def extend_volume(self, volume, new_size):
"""Extend a volume."""
lun_id = self._check_volume_exist_on_array(
volume, constants.VOLUME_NOT_EXISTS_RAISE)
volume_type = self._get_volume_type(volume)
opts = self._get_volume_params(volume_type)
if opts.get('replication_enabled') == 'true':
msg = (_("Can't extend replication volume, volume: %(id)s") %
{"id": volume.id})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
lun_info = self.client.get_lun_info(lun_id)
old_size = int(lun_info.get('CAPACITY'))
new_size = int(new_size) * units.Gi / 512
if new_size == old_size:
LOG.info("New size is equal to the real size from backend"
" storage, no need to extend."
" realsize: %(oldsize)s, newsize: %(newsize)s.",
{'oldsize': old_size,
'newsize': new_size})
return
if new_size < old_size:
msg = (_("New size should be bigger than the real size from "
"backend storage."
" realsize: %(oldsize)s, newsize: %(newsize)s."),
{'oldsize': old_size,
'newsize': new_size})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info('Extend volume: %(id)s, oldsize: %(oldsize)s, '
'newsize: %(newsize)s.',
{'id': volume.id,
'oldsize': old_size,
'newsize': new_size})
self.client.extend_lun(lun_id, new_size)
    def create_snapshot(self, snapshot):
        """Create and activate an array snapshot of the snapshot's volume.

        :returns: dict with provider_location (encoding the array snapshot
                  id) and the raw snapshot info.
        :raises exception.VolumeBackendAPIException: if the snapshot does
            not reference a volume.
        """
        volume = snapshot.volume
        if not volume:
            msg = _("Can't get volume id from snapshot, snapshot: %(id)s"
                    ) % {'id': snapshot.id}
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        lun_id, lun_wwn = huawei_utils.get_volume_lun_id(self.client, volume)
        snapshot_name = huawei_utils.encode_name(snapshot.id)
        snapshot_description = snapshot.id
        snapshot_info = self.client.create_snapshot(lun_id,
                                                    snapshot_name,
                                                    snapshot_description)
        snapshot_id = snapshot_info['ID']
        # A snapshot must be activated before it can be used as a LUNcopy
        # source.
        self.client.activate_snapshot(snapshot_id)

        location = huawei_utils.to_string(huawei_snapshot_id=snapshot_id)
        return {'provider_location': location,
                'lun_info': snapshot_info}
def delete_snapshot(self, snapshot):
LOG.info('Delete snapshot %s.', snapshot.id)
snapshot_id = huawei_utils.get_snapshot_id(self.client, snapshot)
if snapshot_id and self.client.check_snapshot_exist(snapshot_id):
self.client.stop_snapshot(snapshot_id)
self.client.delete_snapshot(snapshot_id)
else:
LOG.warning("Can't find snapshot on the array.")
    def retype(self, ctxt, volume, new_type, diff, host):
        """Convert the volume to be of the new type.

        Returns False on any failure, or (True, model_update) on success.
        The flow is: drop an existing replica, migrate or modify the LUN,
        then create a replica if the new type enables replication.
        """
        LOG.debug("Enter retype: id=%(id)s, new_type=%(new_type)s, "
                  "diff=%(diff)s, host=%(host)s.", {'id': volume.id,
                                                    'new_type': new_type,
                                                    'diff': diff,
                                                    'host': host})
        self._check_volume_exist_on_array(
            volume, constants.VOLUME_NOT_EXISTS_RAISE)
        # Check what changes are needed
        migration, change_opts, lun_id = self.determine_changes_when_retype(
            volume, new_type, host)
        model_update = {}
        replica_enabled_change = change_opts.get('replication_enabled')
        replica_type_change = change_opts.get('replication_type')
        # Step 1: tear down the existing replica before touching the LUN.
        if replica_enabled_change and replica_enabled_change[0] == 'true':
            try:
                self.replica.delete_replica(volume)
                model_update.update({'replication_status': 'disabled',
                                     'replication_driver_data': None})
            except exception.VolumeBackendAPIException:
                LOG.exception('Retype volume error. '
                              'Delete replication failed.')
                return False
        # Step 2: migrate when host or LUNType changed; otherwise modify
        # the LUN's smart-feature settings in place.
        try:
            if migration:
                LOG.debug("Begin to migrate LUN(id: %(lun_id)s) with "
                          "change %(change_opts)s.",
                          {"lun_id": lun_id, "change_opts": change_opts})
                if not self._migrate_volume(volume, host, new_type):
                    LOG.warning("Storage-assisted migration failed during "
                                "retype.")
                    return False
            else:
                # Modify lun to change policy
                self.modify_lun(lun_id, change_opts)
        except exception.VolumeBackendAPIException:
            LOG.exception('Retype volume error.')
            return False
        # Step 3: create the replica required by the new type.
        if replica_enabled_change and replica_enabled_change[1] == 'true':
            try:
                # If replica_enabled_change is not None, the
                # replica_type_change won't be None. See function
                # determine_changes_when_retype.
                lun_info = self.client.get_lun_info(lun_id)
                replica_info = self.replica.create_replica(
                    lun_info, replica_type_change[1])
                model_update.update(replica_info)
            except exception.VolumeBackendAPIException:
                LOG.exception('Retype volume error. '
                              'Create replication failed.')
                return False
        return (True, model_update)
    def modify_lun(self, lun_id, change_opts):
        """Apply smart-feature changes (partition/cache/tier/QoS) to a LUN.

        Each entry of change_opts, when set, is an (old, new) transition
        pair as produced by _check_needed_changes.
        """
        # Smartpartition: old/new are [id, name] pairs.
        if change_opts.get('partitionid'):
            old, new = change_opts['partitionid']
            old_id = old[0]
            old_name = old[1]
            new_id = new[0]
            new_name = new[1]
            if old_id:
                self.client.remove_lun_from_partition(lun_id, old_id)
            if new_id:
                self.client.add_lun_to_partition(lun_id, new_id)
            LOG.info("Retype LUN(id: %(lun_id)s) smartpartition from "
                     "(name: %(old_name)s, id: %(old_id)s) to "
                     "(name: %(new_name)s, id: %(new_id)s) success.",
                     {"lun_id": lun_id,
                      "old_id": old_id, "old_name": old_name,
                      "new_id": new_id, "new_name": new_name})
        # Smartcache: old/new are [id, name] pairs.
        if change_opts.get('cacheid'):
            old, new = change_opts['cacheid']
            old_id = old[0]
            old_name = old[1]
            new_id = new[0]
            new_name = new[1]
            if old_id:
                self.client.remove_lun_from_cache(lun_id, old_id)
            if new_id:
                self.client.add_lun_to_cache(lun_id, new_id)
            LOG.info("Retype LUN(id: %(lun_id)s) smartcache from "
                     "(name: %(old_name)s, id: %(old_id)s) to "
                     "(name: %(new_name)s, id: %(new_id)s) successfully.",
                     {'lun_id': lun_id,
                      'old_id': old_id, "old_name": old_name,
                      'new_id': new_id, "new_name": new_name})
        # Smarttier: switch the data-transfer policy.
        if change_opts.get('policy'):
            old_policy, new_policy = change_opts['policy']
            self.client.change_lun_smarttier(lun_id, new_policy)
            LOG.info("Retype LUN(id: %(lun_id)s) smarttier policy from "
                     "%(old_policy)s to %(new_policy)s success.",
                     {'lun_id': lun_id,
                      'old_policy': old_policy,
                      'new_policy': new_policy})
        # SmartQoS: old is [qos_id, qos_spec]; new is the new spec dict.
        if change_opts.get('qos'):
            old_qos, new_qos = change_opts['qos']
            old_qos_id = old_qos[0]
            old_qos_value = old_qos[1]
            if old_qos_id:
                smart_qos = smartx.SmartQos(self.client)
                smart_qos.remove(old_qos_id, lun_id)
            if new_qos:
                smart_qos = smartx.SmartQos(self.client)
                smart_qos.add(new_qos, lun_id)
            LOG.info("Retype LUN(id: %(lun_id)s) smartqos from "
                     "%(old_qos_value)s to %(new_qos)s success.",
                     {'lun_id': lun_id,
                      'old_qos_value': old_qos_value,
                      'new_qos': new_qos})
def get_lun_specs(self, lun_id):
lun_opts = {
'policy': None,
'partitionid': None,
'cacheid': None,
'LUNType': None,
}
lun_info = self.client.get_lun_info(lun_id)
lun_opts['LUNType'] = int(lun_info['ALLOCTYPE'])
if lun_info.get('DATATRANSFERPOLICY'):
lun_opts['policy'] = lun_info['DATATRANSFERPOLICY']
if lun_info.get('SMARTCACHEPARTITIONID'):
lun_opts['cacheid'] = lun_info['SMARTCACHEPARTITIONID']
if lun_info.get('CACHEPARTITIONID'):
lun_opts['partitionid'] = lun_info['CACHEPARTITIONID']
return lun_opts
def _check_capability_support(self, new_opts, new_type):
new_cache_name = new_opts['cachename']
if new_cache_name:
if not self.support_func.get('smartcache'):
msg = (_(
"Can't support cache on the array, cache name is: "
"%(name)s.") % {'name': new_cache_name})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
new_partition_name = new_opts['partitionname']
if new_partition_name:
if not self.support_func.get('smartpartition'):
msg = (_(
"Can't support partition on the array, partition name is: "
"%(name)s.") % {'name': new_partition_name})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if new_opts['policy']:
if (not self.support_func.get('smarttier')
and new_opts['policy'] != '0'):
msg = (_("Can't support tier on the array."))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
new_qos = smartx.SmartQos.get_qos_by_volume_type(new_type)
if not self.support_func.get('QoS_support'):
if new_qos:
msg = (_("Can't support qos on the array."))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
    def _check_needed_changes(self, lun_id, old_opts, new_opts,
                              change_opts, new_type):
        """Fill change_opts with the (old, new) transitions a retype needs.

        Compares the LUN's current settings (old_opts) with the settings
        requested by the new type (new_opts) and records only the ones
        that differ. Raises VolumeBackendAPIException when a requested
        cache/partition name is not found on the array, or QoS is
        requested but unsupported.
        """
        new_cache_id = None
        new_cache_name = new_opts['cachename']
        if new_cache_name:
            if self.support_func.get('smartcache'):
                new_cache_id = self.client.get_cache_id_by_name(
                    new_cache_name)
            if new_cache_id is None:
                msg = (_(
                    "Can't find cache name on the array, cache name is: "
                    "%(name)s.") % {'name': new_cache_name})
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)
        new_partition_id = None
        new_partition_name = new_opts['partitionname']
        if new_partition_name:
            if self.support_func.get('smartpartition'):
                new_partition_id = self.client.get_partition_id_by_name(
                    new_partition_name)
            if new_partition_id is None:
                msg = (_(
                    "Can't find partition name on the array, partition name "
                    "is: %(name)s.") % {'name': new_partition_name})
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)
        # smarttier
        if old_opts['policy'] != new_opts['policy']:
            if not (old_opts['policy'] == '--'
                    and new_opts['policy'] is None):
                change_opts['policy'] = (old_opts['policy'],
                                         new_opts['policy'])
        # smartcache
        # The array's '--' placeholder is normalized to None so it
        # compares equal to "no cache requested".
        old_cache_id = old_opts['cacheid']
        if old_cache_id == '--':
            old_cache_id = None
        if old_cache_id != new_cache_id:
            old_cache_name = None
            if self.support_func.get('smartcache'):
                if old_cache_id:
                    cache_info = self.client.get_cache_info_by_id(
                        old_cache_id)
                    old_cache_name = cache_info['NAME']
            change_opts['cacheid'] = ([old_cache_id, old_cache_name],
                                      [new_cache_id, new_cache_name])
        # smartpartition
        old_partition_id = old_opts['partitionid']
        if old_partition_id == '--':
            old_partition_id = None
        if old_partition_id != new_partition_id:
            old_partition_name = None
            if self.support_func.get('smartpartition'):
                if old_partition_id:
                    partition_info = self.client.get_partition_info_by_id(
                        old_partition_id)
                    old_partition_name = partition_info['NAME']
            change_opts['partitionid'] = ([old_partition_id,
                                           old_partition_name],
                                          [new_partition_id,
                                           new_partition_name])
        # smartqos
        new_qos = smartx.SmartQos.get_qos_by_volume_type(new_type)
        if not self.support_func.get('QoS_support'):
            if new_qos:
                msg = (_("Can't support qos on the array."))
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)
        else:
            old_qos_id = self.client.get_qosid_by_lunid(lun_id)
            old_qos = self._get_qos_specs_from_array(old_qos_id)
            if old_qos != new_qos:
                change_opts['qos'] = ([old_qos_id, old_qos], new_qos)
        return change_opts
    def determine_changes_when_retype(self, volume, new_type, host):
        """Work out what retyping ``volume`` to ``new_type`` requires.

        Returns (migration, change_opts, lun_id): ``migration`` is True
        when a storage-assisted migration is needed (host or LUNType
        changes); ``change_opts`` maps each setting to its (old, new)
        pair, or None when unchanged.
        """
        migration = False
        change_opts = {
            'policy': None,
            'partitionid': None,
            'cacheid': None,
            'qos': None,
            'host': None,
            'LUNType': None,
            'replication_enabled': None,
            'replication_type': None,
        }
        lun_id, lun_wwn = huawei_utils.get_volume_lun_id(self.client, volume)
        old_opts = self.get_lun_specs(lun_id)
        new_specs = new_type['extra_specs']
        new_opts = self._get_volume_params_from_specs(new_specs)
        if 'LUNType' not in new_opts:
            # Fall back to the backend's configured default LUN type.
            new_opts['LUNType'] = self.configuration.lun_type
        if volume.host != host['host']:
            migration = True
            change_opts['host'] = (volume.host, host['host'])
        if old_opts['LUNType'] != new_opts['LUNType']:
            migration = True
            change_opts['LUNType'] = (old_opts['LUNType'], new_opts['LUNType'])
        volume_type = self._get_volume_type(volume)
        volume_opts = self._get_volume_params(volume_type)
        if (volume_opts['replication_enabled'] == 'true'
                or new_opts['replication_enabled'] == 'true'):
            # If replication_enabled changes,
            # then replication_type in change_opts will be set.
            change_opts['replication_enabled'] = (
                volume_opts['replication_enabled'],
                new_opts['replication_enabled'])
            change_opts['replication_type'] = (volume_opts['replication_type'],
                                               new_opts['replication_type'])
        change_opts = self._check_needed_changes(lun_id, old_opts, new_opts,
                                                 change_opts, new_type)
        LOG.debug("Determine changes when retype. Migration: "
                  "%(migration)s, change_opts: %(change_opts)s.",
                  {'migration': migration, 'change_opts': change_opts})
        return migration, change_opts, lun_id
def _get_qos_specs_from_array(self, qos_id):
qos = {}
qos_info = {}
if qos_id:
qos_info = self.client.get_qos_info(qos_id)
for key, value in qos_info.items():
key = key.upper()
if key in constants.QOS_KEYS:
if key == 'LATENCY' and value == '0':
continue
else:
qos[key] = value
return qos
def create_export(self, context, volume, connector):
"""Export a volume."""
pass
def ensure_export(self, context, volume):
"""Synchronously recreate an export for a volume."""
pass
def remove_export(self, context, volume):
"""Remove an export for a volume."""
pass
def create_export_snapshot(self, context, snapshot, connector):
"""Export a snapshot."""
pass
def remove_export_snapshot(self, context, snapshot):
"""Remove an export for a snapshot."""
pass
def _copy_volume(self, volume, copy_name, src_lun, tgt_lun):
metadata = huawei_utils.get_volume_metadata(volume)
copyspeed = metadata.get('copyspeed')
luncopy_id = self.client.create_luncopy(copy_name,
src_lun,
tgt_lun,
copyspeed)
wait_interval = self.configuration.lun_copy_wait_interval
try:
self.client.start_luncopy(luncopy_id)
def _luncopy_complete():
luncopy_info = self.client.get_luncopy_info(luncopy_id)
if luncopy_info['status'] == constants.STATUS_LUNCOPY_READY:
# luncopy_info['status'] means for the running status of
# the luncopy. If luncopy_info['status'] is equal to '40',
# this luncopy is completely ready.
return True
elif luncopy_info['state'] != constants.STATUS_HEALTH:
# luncopy_info['state'] means for the healthy status of the
# luncopy. If luncopy_info['state'] is not equal to '1',
# this means that an error occurred during the LUNcopy
# operation and we should abort it.
err_msg = (_(
'An error occurred during the LUNcopy operation. '
'LUNcopy name: %(luncopyname)s. '
'LUNcopy status: %(luncopystatus)s. '
'LUNcopy state: %(luncopystate)s.')
% {'luncopyname': luncopy_id,
'luncopystatus': luncopy_info['status'],
'luncopystate': luncopy_info['state']},)
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
huawei_utils.wait_for_condition(_luncopy_complete,
wait_interval,
self.configuration.lun_timeout)
except Exception:
with excutils.save_and_reraise_exception():
self.client.delete_luncopy(luncopy_id)
self.delete_volume(volume)
self.client.delete_luncopy(luncopy_id)
    def _check_lun_valid_for_manage(self, lun_info, external_ref):
        """Reject a LUN that cannot safely be imported into Cinder.

        Raises ManageExistingInvalidReference when the LUN is mapped,
        unhealthy, or participates in an array-side feature (HyperMetro,
        SplitMirror, migration, LUNcopy, remote replication, LUN mirror).
        """
        lun_id = lun_info.get('ID')
        lun_name = lun_info.get('NAME')
        # Check whether the LUN is already in LUN group.
        if lun_info.get('ISADD2LUNGROUP') == 'true':
            msg = (_("Can't import LUN %s to Cinder. Already exists in a LUN "
                     "group.") % lun_id)
            raise exception.ManageExistingInvalidReference(
                existing_ref=external_ref, reason=msg)
        # Check whether the LUN is Normal.
        if lun_info.get('HEALTHSTATUS') != constants.STATUS_HEALTH:
            msg = _("Can't import LUN %s to Cinder. LUN status is not "
                    "normal.") % lun_id
            raise exception.ManageExistingInvalidReference(
                existing_ref=external_ref, reason=msg)
        # Check whether the LUN exists in a HyperMetroPair.
        if self.support_func.get('hypermetro'):
            try:
                hypermetro_pairs = self.client.get_hypermetro_pairs()
            except exception.VolumeBackendAPIException:
                # Best effort: an unreadable pair list skips this check.
                hypermetro_pairs = []
                LOG.debug("Can't get hypermetro info, pass the check.")
            for pair in hypermetro_pairs:
                if pair.get('LOCALOBJID') == lun_id:
                    msg = (_("Can't import LUN %s to Cinder. Already exists "
                             "in a HyperMetroPair.") % lun_id)
                    raise exception.ManageExistingInvalidReference(
                        existing_ref=external_ref, reason=msg)
        # Check whether the LUN exists in a SplitMirror.
        if self.support_func.get('splitmirror'):
            try:
                split_mirrors = self.client.get_split_mirrors()
            except exception.VolumeBackendAPIException as ex:
                if re.search('License is unavailable', ex.msg):
                    # Can't check whether the LUN has SplitMirror with it,
                    # just pass the check and log it.
                    split_mirrors = []
                    LOG.warning('No license for SplitMirror.')
                else:
                    msg = _("Failed to get SplitMirror.")
                    raise exception.VolumeBackendAPIException(data=msg)
            for mirror in split_mirrors:
                try:
                    target_luns = self.client.get_target_luns(mirror.get('ID'))
                except exception.VolumeBackendAPIException:
                    msg = _("Failed to get target LUN of SplitMirror.")
                    raise exception.VolumeBackendAPIException(data=msg)
                if ((mirror.get('PRILUNID') == lun_id)
                        or (lun_id in target_luns)):
                    msg = (_("Can't import LUN %s to Cinder. Already exists "
                             "in a SplitMirror.") % lun_id)
                    raise exception.ManageExistingInvalidReference(
                        existing_ref=external_ref, reason=msg)
        # Check whether the LUN exists in a migration task.
        try:
            migration_tasks = self.client.get_migration_task()
        except exception.VolumeBackendAPIException as ex:
            if re.search('License is unavailable', ex.msg):
                # Can't check whether the LUN has migration task with it,
                # just pass the check and log it.
                migration_tasks = []
                LOG.warning('No license for migration.')
            else:
                msg = _("Failed to get migration task.")
                raise exception.VolumeBackendAPIException(data=msg)
        for migration in migration_tasks:
            if lun_id in (migration.get('PARENTID'),
                          migration.get('TARGETLUNID')):
                msg = (_("Can't import LUN %s to Cinder. Already exists in a "
                         "migration task.") % lun_id)
                raise exception.ManageExistingInvalidReference(
                    existing_ref=external_ref, reason=msg)
        # Check whether the LUN exists in a LUN copy task.
        # NOTE(review): the [1:-1] slice presumably strips surrounding
        # brackets from a list-like string field — confirm the REST format.
        if self.support_func.get('luncopy'):
            lun_copy = lun_info.get('LUNCOPYIDS')
            if lun_copy and lun_copy[1:-1]:
                msg = (_("Can't import LUN %s to Cinder. Already exists in "
                         "a LUN copy task.") % lun_id)
                raise exception.ManageExistingInvalidReference(
                    existing_ref=external_ref, reason=msg)
        # Check whether the LUN exists in a remote replication task.
        rmt_replication = lun_info.get('REMOTEREPLICATIONIDS')
        if rmt_replication and rmt_replication[1:-1]:
            msg = (_("Can't import LUN %s to Cinder. Already exists in "
                     "a remote replication task.") % lun_id)
            raise exception.ManageExistingInvalidReference(
                existing_ref=external_ref, reason=msg)
        # Check whether the LUN exists in a LUN mirror.
        if self.client.is_lun_in_mirror(lun_name):
            msg = (_("Can't import LUN %s to Cinder. Already exists in "
                     "a LUN mirror.") % lun_name)
            raise exception.ManageExistingInvalidReference(
                existing_ref=external_ref, reason=msg)
def manage_existing(self, volume, external_ref):
"""Manage an existing volume on the backend storage."""
# Check whether the LUN is belonged to the specified pool.
pool = volume_utils.extract_host(volume.host, 'pool')
LOG.debug("Pool specified is: %s.", pool)
lun_info = self._get_lun_info_by_ref(external_ref)
lun_id = lun_info.get('ID')
description = lun_info.get('DESCRIPTION', '')
if len(description) <= (
constants.MAX_VOL_DESCRIPTION - len(volume.name) - 1):
description = volume.name + ' ' + description
lun_pool = lun_info.get('PARENTNAME')
LOG.debug("Storage pool of existing LUN %(lun)s is %(pool)s.",
{"lun": lun_id, "pool": lun_pool})
if pool != lun_pool:
msg = (_("The specified LUN does not belong to the given "
"pool: %s.") % pool)
raise exception.ManageExistingInvalidReference(
existing_ref=external_ref, reason=msg)
# Check other stuffs to determine whether this LUN can be imported.
self._check_lun_valid_for_manage(lun_info, external_ref)
type_id = volume.volume_type_id
new_opts = None
if type_id:
# Handle volume type if specified.
old_opts = self.get_lun_specs(lun_id)
volume_type = volume_types.get_volume_type(None, type_id)
new_specs = volume_type.get('extra_specs')
new_opts = self._get_volume_params_from_specs(new_specs)
if ('LUNType' in new_opts and
old_opts['LUNType'] != new_opts['LUNType']):
msg = (_("Can't import LUN %(lun_id)s to Cinder. "
"LUN type mismatched.") % lun_id)
raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
if volume_type:
self._check_capability_support(new_opts, volume_type)
change_opts = {'policy': None, 'partitionid': None,
'cacheid': None, 'qos': None}
change_opts = self._check_needed_changes(lun_id, old_opts,
new_opts, change_opts,
volume_type)
self.modify_lun(lun_id, change_opts)
# Rename the LUN to make it manageable for Cinder.
new_name = huawei_utils.encode_name(volume.id)
LOG.debug("Rename LUN %(old_name)s to %(new_name)s.",
{'old_name': lun_info.get('NAME'),
'new_name': new_name})
self.client.rename_lun(lun_id, new_name, description)
location = huawei_utils.to_string(huawei_lun_id=lun_id,
huawei_lun_wwn=lun_info['WWN'])
model_update = {'provider_location': location}
if new_opts and new_opts.get('replication_enabled'):
LOG.debug("Manage volume need to create replication.")
try:
lun_info = self.client.get_lun_info(lun_id)
replica_info = self.replica.create_replica(
lun_info, new_opts.get('replication_type'))
model_update.update(replica_info)
except exception.VolumeBackendAPIException:
with excutils.save_and_reraise_exception():
LOG.exception("Manage exist volume failed.")
return model_update
def _get_lun_info_by_ref(self, external_ref):
LOG.debug("Get external_ref: %s", external_ref)
name = external_ref.get('source-name')
id = external_ref.get('source-id')
if not (name or id):
msg = _('Must specify source-name or source-id.')
raise exception.ManageExistingInvalidReference(
existing_ref=external_ref, reason=msg)
lun_id = id or self.client.get_lun_id_by_name(name)
if not lun_id:
msg = _("Can't find LUN on the array, please check the "
"source-name or source-id.")
raise exception.ManageExistingInvalidReference(
existing_ref=external_ref, reason=msg)
lun_info = self.client.get_lun_info(lun_id)
return lun_info
def unmanage(self, volume):
"""Export Huawei volume from Cinder."""
LOG.debug("Unmanage volume: %s.", volume.id)
def manage_existing_get_size(self, volume, external_ref):
"""Get the size of the existing volume."""
lun_info = self._get_lun_info_by_ref(external_ref)
size = int(math.ceil(lun_info.get('CAPACITY') /
constants.CAPACITY_UNIT))
return size
def _check_snapshot_valid_for_manage(self, snapshot_info, external_ref):
snapshot_id = snapshot_info.get('ID')
# Check whether the snapshot is normal.
if snapshot_info.get('HEALTHSTATUS') != constants.STATUS_HEALTH:
msg = _("Can't import snapshot %s to Cinder. "
"Snapshot status is not normal"
" or running status is not online.") % snapshot_id
raise exception.ManageExistingInvalidReference(
existing_ref=external_ref, reason=msg)
if snapshot_info.get('EXPOSEDTOINITIATOR') != 'false':
msg = _("Can't import snapshot %s to Cinder. "
"Snapshot is exposed to initiator.") % snapshot_id
raise exception.ManageExistingInvalidReference(
existing_ref=external_ref, reason=msg)
def _get_snapshot_info_by_ref(self, external_ref):
LOG.debug("Get snapshot external_ref: %s.", external_ref)
name = external_ref.get('source-name')
id = external_ref.get('source-id')
if not (name or id):
msg = _('Must specify snapshot source-name or source-id.')
raise exception.ManageExistingInvalidReference(
existing_ref=external_ref, reason=msg)
snapshot_id = id or self.client.get_snapshot_id_by_name(name)
if not snapshot_id:
msg = _("Can't find snapshot on array, please check the "
"source-name or source-id.")
raise exception.ManageExistingInvalidReference(
existing_ref=external_ref, reason=msg)
snapshot_info = self.client.get_snapshot_info(snapshot_id)
return snapshot_info
def manage_existing_snapshot(self, snapshot, existing_ref):
snapshot_info = self._get_snapshot_info_by_ref(existing_ref)
snapshot_id = snapshot_info.get('ID')
parent_lun_id, lun_wwn = huawei_utils.get_volume_lun_id(
self.client, snapshot.volume)
if parent_lun_id != snapshot_info.get('PARENTID'):
msg = (_("Can't import snapshot %s to Cinder. "
"Snapshot doesn't belong to volume."), snapshot_id)
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=msg)
# Check whether this snapshot can be imported.
self._check_snapshot_valid_for_manage(snapshot_info, existing_ref)
# Rename the snapshot to make it manageable for Cinder.
description = snapshot.id
snapshot_name = huawei_utils.encode_name(snapshot.id)
self.client.rename_snapshot(snapshot_id, snapshot_name, description)
if snapshot_info.get('RUNNINGSTATUS') != constants.STATUS_ACTIVE:
self.client.activate_snapshot(snapshot_id)
LOG.debug("Rename snapshot %(old_name)s to %(new_name)s.",
{'old_name': snapshot_info.get('NAME'),
'new_name': snapshot_name})
location = huawei_utils.to_string(huawei_snapshot_id=snapshot_id)
return {'provider_location': location}
def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
"""Get the size of the existing snapshot."""
snapshot_info = self._get_snapshot_info_by_ref(existing_ref)
size = int(math.ceil(snapshot_info.get('USERCAPACITY') /
constants.CAPACITY_UNIT))
return size
def unmanage_snapshot(self, snapshot):
"""Unmanage the specified snapshot from Cinder management."""
LOG.debug("Unmanage snapshot: %s.", snapshot.id)
def remove_host_with_check(self, host_id):
wwns_in_host = (
self.client.get_host_fc_initiators(host_id))
iqns_in_host = (
self.client.get_host_iscsi_initiators(host_id))
if not (wwns_in_host or iqns_in_host or
self.client.is_host_associated_to_hostgroup(host_id)):
self.client.remove_host(host_id)
@huawei_utils.check_whether_operate_consistency_group
def create_group(self, context, group):
"""Creates a group."""
model_update = {'status': fields.GroupStatus.AVAILABLE}
opts = self._get_group_type(group)
if self._check_volume_type_support(opts, 'hypermetro'):
metro = hypermetro.HuaweiHyperMetro(self.client,
self.rmt_client,
self.configuration)
metro.create_consistencygroup(group)
return model_update
# Array will create group at create_group_snapshot time. Cinder will
# maintain the group and volumes relationship in the db.
return model_update
@huawei_utils.check_whether_operate_consistency_group
def delete_group(self, context, group, volumes):
opts = self._get_group_type(group)
if self._check_volume_type_support(opts, 'hypermetro'):
metro = hypermetro.HuaweiHyperMetro(self.client,
self.rmt_client,
self.configuration)
return metro.delete_consistencygroup(context, group, volumes)
model_update = {}
volumes_model_update = []
model_update.update({'status': fields.GroupStatus.DELETED})
for volume_ref in volumes:
try:
self.delete_volume(volume_ref)
volumes_model_update.append(
{'id': volume_ref.id, 'status': 'deleted'})
except Exception:
volumes_model_update.append(
{'id': volume_ref.id, 'status': 'error_deleting'})
return model_update, volumes_model_update
@huawei_utils.check_whether_operate_consistency_group
def update_group(self, context, group,
add_volumes=None, remove_volumes=None):
model_update = {'status': fields.GroupStatus.AVAILABLE}
opts = self._get_group_type(group)
if self._check_volume_type_support(opts, 'hypermetro'):
metro = hypermetro.HuaweiHyperMetro(self.client,
self.rmt_client,
self.configuration)
metro.update_consistencygroup(context, group,
add_volumes,
remove_volumes)
return model_update, None, None
# Array will create group at create_group_snapshot time. Cinder will
# maintain the group and volumes relationship in the db.
return model_update, None, None
@huawei_utils.check_whether_operate_consistency_group
def create_group_from_src(self, context, group, volumes,
group_snapshot=None, snapshots=None,
source_group=None, source_vols=None):
err_msg = _("Huawei Storage doesn't support create_group_from_src.")
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
    @huawei_utils.check_whether_operate_consistency_group
    def create_group_snapshot(self, context, group_snapshot, snapshots):
        """Create group snapshot.

        Creates one array snapshot per member, then activates them all in
        a single batch call. Any failure re-raises after logging.
        """
        LOG.info('Create group snapshot for group'
                 ': %(group_id)s', {'group_id': group_snapshot.group_id})
        model_update = {}
        snapshots_model_update = []
        added_snapshots_info = []
        try:
            for snapshot in snapshots:
                volume = snapshot.volume
                if not volume:
                    msg = _("Can't get volume id from snapshot, "
                            "snapshot: %(id)s") % {'id': snapshot.id}
                    LOG.error(msg)
                    raise exception.VolumeBackendAPIException(data=msg)
                lun_id, lun_wwn = huawei_utils.get_volume_lun_id(
                    self.client, volume)
                snapshot_name = huawei_utils.encode_name(snapshot.id)
                snapshot_description = snapshot.id
                info = self.client.create_snapshot(lun_id,
                                                   snapshot_name,
                                                   snapshot_description)
                location = huawei_utils.to_string(
                    huawei_snapshot_id=info['ID'])
                snap_model_update = {'id': snapshot.id,
                                     'status': fields.SnapshotStatus.AVAILABLE,
                                     'provider_location': location}
                snapshots_model_update.append(snap_model_update)
                added_snapshots_info.append(info)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error("Create group snapshots failed. "
                          "Group snapshot id: %s.", group_snapshot.id)
        snapshot_ids = [added_snapshot['ID']
                        for added_snapshot in added_snapshots_info]
        # Activate all member snapshots together so they are consistent.
        try:
            self.client.activate_snapshot(snapshot_ids)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error("Active group snapshots failed. "
                          "Group snapshot id: %s.", group_snapshot.id)
        model_update['status'] = fields.GroupSnapshotStatus.AVAILABLE
        return model_update, snapshots_model_update
    @huawei_utils.check_whether_operate_consistency_group
    def delete_group_snapshot(self, context, group_snapshot, snapshots):
        """Delete group snapshot.

        Deletes each member snapshot in turn; the first failure is logged
        and re-raised, aborting the remaining deletions.
        """
        LOG.info('Delete group snapshot %(snap_id)s for group: '
                 '%(group_id)s',
                 {'snap_id': group_snapshot.id,
                  'group_id': group_snapshot.group_id})
        model_update = {}
        snapshots_model_update = []
        model_update['status'] = fields.GroupSnapshotStatus.DELETED
        for snapshot in snapshots:
            try:
                self.delete_snapshot(snapshot)
                snapshot_model = {'id': snapshot.id,
                                  'status': fields.SnapshotStatus.DELETED}
                snapshots_model_update.append(snapshot_model)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error("Delete group snapshot failed. "
                              "Group snapshot id: %s", group_snapshot.id)
        return model_update, snapshots_model_update
def _classify_volume(self, volumes):
normal_volumes = []
replica_volumes = []
for v in volumes:
volume_type = self._get_volume_type(v)
opts = self._get_volume_params(volume_type)
if opts.get('replication_enabled') == 'true':
replica_volumes.append(v)
else:
normal_volumes.append(v)
return normal_volumes, replica_volumes
def _failback_normal_volumes(self, volumes):
volumes_update = []
for v in volumes:
v_update = {}
v_update['volume_id'] = v.id
metadata = huawei_utils.get_volume_metadata(v)
old_status = 'available'
if 'old_status' in metadata:
old_status = metadata.pop('old_status')
v_update['updates'] = {'status': old_status,
'metadata': metadata}
volumes_update.append(v_update)
return volumes_update
    def _failback(self, volumes):
        """Fail volumes back to the primary array.

        Returns ('default', volumes_update). No-op when already on the
        primary (active_backend_id empty/None).
        """
        if self.active_backend_id in ('', None):
            return 'default', []
        normal_volumes, replica_volumes = self._classify_volume(volumes)
        volumes_update = []
        replica_volumes_update = self.replica.failback(replica_volumes)
        volumes_update.extend(replica_volumes_update)
        normal_volumes_update = self._failback_normal_volumes(normal_volumes)
        volumes_update.extend(normal_volumes_update)
        self.active_backend_id = ""
        secondary_id = 'default'
        # Switch array connection.
        self.client, self.replica_client = self.replica_client, self.client
        self.replica = replication.ReplicaPairManager(self.client,
                                                      self.replica_client,
                                                      self.configuration)
        return secondary_id, volumes_update
def _failover_normal_volumes(self, volumes):
volumes_update = []
for v in volumes:
v_update = {}
v_update['volume_id'] = v.id
metadata = huawei_utils.get_volume_metadata(v)
metadata.update({'old_status': v.status})
v_update['updates'] = {'status': 'error',
'metadata': metadata}
volumes_update.append(v_update)
return volumes_update
    def _failover(self, volumes):
        """Fail volumes over to the configured secondary array.

        Returns (secondary_id, volumes_update). No-op when already failed
        over (active_backend_id set).
        """
        if self.active_backend_id not in ('', None):
            return self.replica_dev_conf['backend_id'], []
        normal_volumes, replica_volumes = self._classify_volume(volumes)
        volumes_update = []
        replica_volumes_update = self.replica.failover(replica_volumes)
        volumes_update.extend(replica_volumes_update)
        normal_volumes_update = self._failover_normal_volumes(normal_volumes)
        volumes_update.extend(normal_volumes_update)
        self.active_backend_id = self.replica_dev_conf['backend_id']
        secondary_id = self.active_backend_id
        # Switch array connection.
        self.client, self.replica_client = self.replica_client, self.client
        self.replica = replication.ReplicaPairManager(self.client,
                                                      self.replica_client,
                                                      self.configuration)
        return secondary_id, volumes_update
def failover_host(self, context, volumes, secondary_id=None, groups=None):
"""Failover all volumes to secondary."""
if secondary_id == 'default':
secondary_id, volumes_update = self._failback(volumes)
elif (secondary_id == self.replica_dev_conf['backend_id']
or secondary_id is None):
secondary_id, volumes_update = self._failover(volumes)
else:
msg = _("Invalid secondary id %s.") % secondary_id
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
return secondary_id, volumes_update, []
def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
"""Map a snapshot to a host and return target iSCSI information."""
# From the volume structure.
volume = Volume(id=snapshot.id,
provider_location=snapshot.provider_location,
lun_type=constants.SNAPSHOT_TYPE,
metadata=None)
return self.initialize_connection(volume, connector)
def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
"""Delete map between a snapshot and a host."""
# From the volume structure.
volume = Volume(id=snapshot.id,
provider_location=snapshot.provider_location,
lun_type=constants.SNAPSHOT_TYPE,
metadata=None)
return self.terminate_connection(volume, connector)
def get_lun_id_and_type(self, volume):
if hasattr(volume, 'lun_type'):
metadata = huawei_utils.get_snapshot_metadata(volume)
lun_id = metadata['huawei_snapshot_id']
lun_type = constants.SNAPSHOT_TYPE
else:
lun_id = self._check_volume_exist_on_array(
volume, constants.VOLUME_NOT_EXISTS_RAISE)
lun_type = constants.LUN_TYPE
return lun_id, lun_type
@interface.volumedriver
class HuaweiISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver):
"""ISCSI driver for Huawei storage arrays.
Version history:
.. code-block:: none
1.0.0 - Initial driver
1.1.0 - Provide Huawei OceanStor storage 18000 driver
1.1.1 - Code refactor
CHAP support
Multiple pools support
ISCSI multipath support
SmartX support
Volume migration support
Volume retype support
2.0.0 - Rename to HuaweiISCSIDriver
2.0.1 - Manage/unmanage volume support
2.0.2 - Refactor HuaweiISCSIDriver
2.0.3 - Manage/unmanage snapshot support
2.0.5 - Replication V2 support
2.0.6 - Support iSCSI configuration in Replication
2.0.7 - Hypermetro support
Hypermetro consistency group support
Consistency group support
Cgsnapshot support
2.0.8 - Backup snapshot optimal path support
2.0.9 - Support reporting disk type of pool
"""
VERSION = "2.0.9"
    def __init__(self, *args, **kwargs):
        """Initialize the driver; all setup happens in HuaweiBaseDriver."""
        super(HuaweiISCSIDriver, self).__init__(*args, **kwargs)
def get_volume_stats(self, refresh=False):
"""Get volume status."""
data = HuaweiBaseDriver.get_volume_stats(self, refresh=False)
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or self.__class__.__name__
data['storage_protocol'] = 'iSCSI'
data['driver_version'] = self.VERSION
data['vendor_name'] = 'Huawei'
return data
@coordination.synchronized('huawei-mapping-{connector[host]}')
def initialize_connection(self, volume, connector):
"""Map a volume to a host and return target iSCSI information."""
lun_id, lun_type = self.get_lun_id_and_type(volume)
initiator_name = connector['initiator']
LOG.info(
'initiator name: %(initiator_name)s, '
'LUN ID: %(lun_id)s.',
{'initiator_name': initiator_name,
'lun_id': lun_id})
(iscsi_iqns,
target_ips,
portgroup_id) = self.client.get_iscsi_params(connector)
LOG.info('initialize_connection, iscsi_iqn: %(iscsi_iqn)s, '
'target_ip: %(target_ip)s, '
'portgroup_id: %(portgroup_id)s.',
{'iscsi_iqn': iscsi_iqns,
'target_ip': target_ips,
'portgroup_id': portgroup_id},)
# Create hostgroup if not exist.
host_id = self.client.add_host_with_check(connector['host'])
# Add initiator to the host.
self.client.ensure_initiator_added(initiator_name,
host_id)
hostgroup_id = self.client.add_host_to_hostgroup(host_id)
# Mapping lungroup and hostgroup to view.
self.client.do_mapping(lun_id, hostgroup_id,
host_id, portgroup_id,
lun_type)
hostlun_id = self.client.get_host_lun_id(host_id, lun_id,
lun_type)
LOG.info("initialize_connection, host lun id is: %s.",
hostlun_id)
chapinfo = self.client.find_chap_info(self.client.iscsi_info,
initiator_name)
# Return iSCSI properties.
properties = {}
properties['target_discovered'] = False
properties['volume_id'] = volume.id
multipath = connector.get('multipath', False)
hostlun_id = int(hostlun_id)
if not multipath:
properties['target_portal'] = ('%s:3260' % target_ips[0])
properties['target_iqn'] = iscsi_iqns[0]
properties['target_lun'] = hostlun_id
else:
properties['target_iqns'] = [iqn for iqn in iscsi_iqns]
properties['target_portals'] = [
'%s:3260' % ip for ip in target_ips]
properties['target_luns'] = [hostlun_id] * len(target_ips)
# If use CHAP, return CHAP info.
if chapinfo:
chap_username, chap_password = chapinfo.split(';')
properties['auth_method'] = 'CHAP'
properties['auth_username'] = chap_username
properties['auth_password'] = chap_password
LOG.info("initialize_connection success. Return data: %s.",
strutils.mask_password(properties))
return {'driver_volume_type': 'iscsi', 'data': properties}
@coordination.synchronized('huawei-mapping-{connector[host]}')
def terminate_connection(self, volume, connector, **kwargs):
"""Delete map between a volume and a host."""
lun_id, lun_type = self.get_lun_id_and_type(volume)
initiator_name = connector['initiator']
host_name = connector['host']
lungroup_id = None
LOG.info(
'terminate_connection: initiator name: %(ini)s, '
'LUN ID: %(lunid)s.',
{'ini': initiator_name,
'lunid': lun_id},)
portgroup = None
portgroup_id = None
view_id = None
left_lunnum = -1
for ini in self.client.iscsi_info:
if ini['Name'] == initiator_name:
for key in ini:
if key == 'TargetPortGroup':
portgroup = ini['TargetPortGroup']
break
if portgroup:
portgroup_id = self.client.get_tgt_port_group(portgroup)
host_id = huawei_utils.get_host_id(self.client, host_name)
if host_id:
mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id
view_id = self.client.find_mapping_view(mapping_view_name)
if view_id:
lungroup_id = self.client.find_lungroup_from_map(view_id)
# Remove lun from lungroup.
if lun_id and lungroup_id:
lungroup_ids = self.client.get_lungroupids_by_lunid(
lun_id, lun_type)
if lungroup_id in lungroup_ids:
self.client.remove_lun_from_lungroup(lungroup_id,
lun_id,
lun_type)
else:
LOG.warning("LUN is not in lungroup. "
"LUN ID: %(lun_id)s. "
"Lungroup id: %(lungroup_id)s.",
{"lun_id": lun_id,
"lungroup_id": lungroup_id})
# Remove portgroup from mapping view if no lun left in lungroup.
if lungroup_id:
left_lunnum = self.client.get_obj_count_from_lungroup(lungroup_id)
if portgroup_id and view_id and (int(left_lunnum) <= 0):
if self.client.is_portgroup_associated_to_view(view_id,
portgroup_id):
self.client.delete_portgroup_mapping_view(view_id,
portgroup_id)
if view_id and (int(left_lunnum) <= 0):
self.client.remove_chap(initiator_name)
if self.client.lungroup_associated(view_id, lungroup_id):
self.client.delete_lungroup_mapping_view(view_id,
lungroup_id)
self.client.delete_lungroup(lungroup_id)
if self.client.is_initiator_associated_to_host(initiator_name,
host_id):
self.client.remove_iscsi_from_host(initiator_name)
hostgroup_name = constants.HOSTGROUP_PREFIX + host_id
hostgroup_id = self.client.find_hostgroup(hostgroup_name)
if hostgroup_id:
if self.client.hostgroup_associated(view_id, hostgroup_id):
self.client.delete_hostgoup_mapping_view(view_id,
hostgroup_id)
self.client.remove_host_from_hostgroup(hostgroup_id,
host_id)
self.client.delete_hostgroup(hostgroup_id)
self.client.remove_host(host_id)
self.client.delete_mapping_view(view_id)
@interface.volumedriver
class HuaweiFCDriver(HuaweiBaseDriver, driver.FibreChannelDriver):
    """FC driver for Huawei OceanStor storage arrays.
    Version history:
    .. code-block:: none
    1.0.0 - Initial driver
    1.1.0 - Provide Huawei OceanStor 18000 storage volume driver
    1.1.1 - Code refactor
            Multiple pools support
            SmartX support
            Volume migration support
            Volume retype support
            FC zone enhancement
            Volume hypermetro support
    2.0.0 - Rename to HuaweiFCDriver
    2.0.1 - Manage/unmanage volume support
    2.0.2 - Refactor HuaweiFCDriver
    2.0.3 - Manage/unmanage snapshot support
    2.0.4 - Balanced FC port selection
    2.0.5 - Replication V2 support
    2.0.7 - Hypermetro support
            Hypermetro consistency group support
            Consistency group support
            Cgsnapshot support
    2.0.8 - Backup snapshot optimal path support
    2.0.9 - Support reporting disk type of pool
    """
    VERSION = "2.0.9"
    def __init__(self, *args, **kwargs):
        super(HuaweiFCDriver, self).__init__(*args, **kwargs)
        # FC SAN lookup service; created lazily on first attach/detach.
        self.fcsan = None
    def get_volume_stats(self, refresh=False):
        """Get volume status."""
        # Delegate to the base driver, then override the backend identity
        # fields that are specific to this protocol/driver.
        data = HuaweiBaseDriver.get_volume_stats(self, refresh=False)
        backend_name = self.configuration.safe_get('volume_backend_name')
        data['volume_backend_name'] = backend_name or self.__class__.__name__
        data['storage_protocol'] = 'FC'
        data['driver_version'] = self.VERSION
        data['vendor_name'] = 'Huawei'
        return data
    # Serialized per connector host: attach/detach for the same host must
    # not interleave, since they mutate the same hostgroup/mapping view.
    @fczm_utils.add_fc_zone
    @coordination.synchronized('huawei-mapping-{connector[host]}')
    def initialize_connection(self, volume, connector):
        """Map a volume to a host and return FC connection properties."""
        lun_id, lun_type = self.get_lun_id_and_type(volume)
        wwns = connector['wwpns']
        LOG.info(
            'initialize_connection, initiator: %(wwpns)s,'
            ' LUN ID: %(lun_id)s.',
            {'wwpns': wwns,
             'lun_id': lun_id},)
        portg_id = None
        host_id = self.client.add_host_with_check(connector['host'])
        if not self.fcsan:
            self.fcsan = fczm_utils.create_lookup_service()
        if self.fcsan:
            # Use FC switch: let the zone helper pick ports and build the
            # initiator->target map.
            zone_helper = fc_zone_helper.FCZoneHelper(self.fcsan, self.client)
            try:
                (tgt_port_wwns, portg_id, init_targ_map) = (
                    zone_helper.build_ini_targ_map(wwns, host_id, lun_id,
                                                   lun_type))
            except Exception as err:
                # Roll back the host we may have just created.
                self.remove_host_with_check(host_id)
                msg = _('build_ini_targ_map fails. %s') % err
                raise exception.VolumeBackendAPIException(data=msg)
            for ini in init_targ_map:
                self.client.ensure_fc_initiator_added(ini, host_id)
        else:
            # Not use FC switch (direct-attach): register the host's free
            # initiators on the array ourselves.
            online_wwns_in_host = (
                self.client.get_host_online_fc_initiators(host_id))
            online_free_wwns = self.client.get_online_free_wwns()
            fc_initiators_on_array = self.client.get_fc_initiator_on_array()
            wwns = [i for i in wwns if i in fc_initiators_on_array]
            for wwn in wwns:
                if (wwn not in online_wwns_in_host
                        and wwn not in online_free_wwns):
                    wwns_in_host = (
                        self.client.get_host_fc_initiators(host_id))
                    iqns_in_host = (
                        self.client.get_host_iscsi_initiators(host_id))
                    # Only delete the host if it holds no other initiators
                    # and is not already in a hostgroup.
                    if not (wwns_in_host or iqns_in_host or
                            self.client.is_host_associated_to_hostgroup(host_id)):
                        self.client.remove_host(host_id)
                    msg = _('No FC initiator can be added to host.')
                    LOG.error(msg)
                    raise exception.VolumeBackendAPIException(data=msg)
            for wwn in wwns:
                if wwn in online_free_wwns:
                    self.client.add_fc_port_to_host(host_id, wwn)
            (tgt_port_wwns, init_targ_map) = (
                self.client.get_init_targ_map(wwns))
        # Add host into hostgroup.
        hostgroup_id = self.client.add_host_to_hostgroup(host_id)
        metadata = huawei_utils.get_lun_metadata(volume)
        LOG.info("initialize_connection, metadata is: %s.", metadata)
        hypermetro_lun = metadata.get('hypermetro_id') is not None
        map_info = self.client.do_mapping(lun_id, hostgroup_id,
                                          host_id, portg_id,
                                          lun_type, hypermetro_lun)
        host_lun_id = self.client.get_host_lun_id(host_id, lun_id,
                                                  lun_type)
        # Return FC properties.
        fc_info = {'driver_volume_type': 'fibre_channel',
                   'data': {'target_lun': int(host_lun_id),
                            'target_discovered': True,
                            'target_wwn': tgt_port_wwns,
                            'volume_id': volume.id,
                            'initiator_target_map': init_targ_map,
                            'map_info': map_info}, }
        # Deal with hypermetro connection: also map the remote copy and
        # merge both arrays' targets into one connection-info dict.
        if hypermetro_lun:
            loc_tgt_wwn = fc_info['data']['target_wwn']
            local_ini_tgt_map = fc_info['data']['initiator_target_map']
            hyperm = hypermetro.HuaweiHyperMetro(self.client,
                                                 self.rmt_client,
                                                 self.configuration)
            rmt_fc_info = hyperm.connect_volume_fc(volume, connector)
            rmt_tgt_wwn = rmt_fc_info['data']['target_wwn']
            rmt_ini_tgt_map = rmt_fc_info['data']['initiator_target_map']
            fc_info['data']['target_wwn'] = (loc_tgt_wwn + rmt_tgt_wwn)
            wwns = connector['wwpns']
            for wwn in wwns:
                if (wwn in local_ini_tgt_map
                        and wwn in rmt_ini_tgt_map):
                    fc_info['data']['initiator_target_map'][wwn].extend(
                        rmt_ini_tgt_map[wwn])
                elif (wwn not in local_ini_tgt_map
                        and wwn in rmt_ini_tgt_map):
                    fc_info['data']['initiator_target_map'][wwn] = (
                        rmt_ini_tgt_map[wwn])
                # else, do nothing
            loc_map_info = fc_info['data']['map_info']
            rmt_map_info = rmt_fc_info['data']['map_info']
            # Both arrays must expose the LUN under the same host LUN id.
            same_host_id = self._get_same_hostid(loc_map_info,
                                                 rmt_map_info)
            self.client.change_hostlun_id(loc_map_info, same_host_id)
            hyperm.rmt_client.change_hostlun_id(rmt_map_info, same_host_id)
            fc_info['data']['target_lun'] = same_host_id
            hyperm.rmt_client.logout()
        LOG.info("Return FC info is: %s.", fc_info)
        return fc_info
    def _get_same_hostid(self, loc_fc_info, rmt_fc_info):
        """Pick a host LUN id free on both the local and remote array."""
        loc_aval_luns = loc_fc_info['aval_luns']
        loc_aval_luns = json.loads(loc_aval_luns)
        rmt_aval_luns = rmt_fc_info['aval_luns']
        rmt_aval_luns = json.loads(rmt_aval_luns)
        same_host_id = None
        for i in range(1, 512):
            if i in rmt_aval_luns and i in loc_aval_luns:
                same_host_id = i
                break
        LOG.info("The same hostid is: %s.", same_host_id)
        if not same_host_id:
            msg = _("Can't find the same host id from arrays.")
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        return same_host_id
    @fczm_utils.remove_fc_zone
    @coordination.synchronized('huawei-mapping-{connector[host]}')
    def terminate_connection(self, volume, connector, **kwargs):
        """Delete map between a volume and a host."""
        lun_id, lun_type = self.get_lun_id_and_type(volume)
        wwns = connector['wwpns']
        host_name = connector['host']
        left_lunnum = -1
        lungroup_id = None
        view_id = None
        # BUGFIX: portg_id must be initialized here. It was previously only
        # assigned in the "no LUNs left" branch below, so the later
        # ``if portg_id:`` check raised UnboundLocalError whenever other
        # LUNs remained mapped to this host.
        portg_id = None
        LOG.info('terminate_connection: wwpns: %(wwns)s, '
                 'LUN ID: %(lun_id)s.',
                 {'wwns': wwns, 'lun_id': lun_id})
        host_id = huawei_utils.get_host_id(self.client, host_name)
        if host_id:
            mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id
            view_id = self.client.find_mapping_view(mapping_view_name)
            if view_id:
                lungroup_id = self.client.find_lungroup_from_map(view_id)
        # Remove the LUN from its lungroup, if it is actually there.
        if lun_id and lungroup_id:
            lungroup_ids = self.client.get_lungroupids_by_lunid(lun_id,
                                                                lun_type)
            if lungroup_id in lungroup_ids:
                self.client.remove_lun_from_lungroup(lungroup_id,
                                                     lun_id,
                                                     lun_type)
            else:
                LOG.warning("LUN is not in lungroup. "
                            "LUN ID: %(lun_id)s. "
                            "Lungroup id: %(lungroup_id)s.",
                            {"lun_id": lun_id,
                             "lungroup_id": lungroup_id})
        else:
            LOG.warning("Can't find lun on the array.")
        if lungroup_id:
            left_lunnum = self.client.get_obj_count_from_lungroup(lungroup_id)
        if int(left_lunnum) > 0:
            # Other LUNs still mapped to this host: keep the zone and
            # mapping view; return empty data so the zone manager does not
            # remove the zone.
            fc_info = {'driver_volume_type': 'fibre_channel',
                       'data': {}}
        else:
            # Last LUN for this host: unzone and tear everything down.
            fc_info, portg_id = self._delete_zone_and_remove_fc_initiators(
                wwns, host_id)
            if lungroup_id:
                if view_id and self.client.lungroup_associated(
                        view_id, lungroup_id):
                    self.client.delete_lungroup_mapping_view(view_id,
                                                             lungroup_id)
                self.client.delete_lungroup(lungroup_id)
            if portg_id:
                if view_id and self.client.is_portgroup_associated_to_view(
                        view_id, portg_id):
                    self.client.delete_portgroup_mapping_view(view_id,
                                                              portg_id)
                    self.client.delete_portgroup(portg_id)
            if host_id:
                hostgroup_name = constants.HOSTGROUP_PREFIX + host_id
                hostgroup_id = self.client.find_hostgroup(hostgroup_name)
                if hostgroup_id:
                    if view_id and self.client.hostgroup_associated(
                            view_id, hostgroup_id):
                        self.client.delete_hostgoup_mapping_view(
                            view_id, hostgroup_id)
                    self.client.remove_host_from_hostgroup(
                        hostgroup_id, host_id)
                    self.client.delete_hostgroup(hostgroup_id)
                if not self.client.check_fc_initiators_exist_in_host(
                        host_id):
                    self.client.remove_host(host_id)
            if view_id:
                self.client.delete_mapping_view(view_id)
        # Deal with hypermetro connection.
        metadata = huawei_utils.get_lun_metadata(volume)
        LOG.info("Detach Volume, metadata is: %s.", metadata)
        if metadata.get('hypermetro_id'):
            hyperm = hypermetro.HuaweiHyperMetro(self.client,
                                                 self.rmt_client,
                                                 self.configuration)
            hyperm.disconnect_volume_fc(volume, connector)
        LOG.info("terminate_connection, return data is: %s.",
                 fc_info)
        return fc_info
    def _delete_zone_and_remove_fc_initiators(self, wwns, host_id):
        """Compute unzone info and remove this host's FC initiators."""
        # Get tgt_port_wwns and init_targ_map to remove zone.
        portg_id = None
        if not self.fcsan:
            self.fcsan = fczm_utils.create_lookup_service()
        if self.fcsan:
            zone_helper = fc_zone_helper.FCZoneHelper(self.fcsan,
                                                      self.client)
            (tgt_port_wwns, portg_id, init_targ_map) = (
                zone_helper.get_init_targ_map(wwns, host_id))
        else:
            (tgt_port_wwns, init_targ_map) = (
                self.client.get_init_targ_map(wwns))
        # Remove the initiators from host if need.
        if host_id:
            fc_initiators = self.client.get_host_fc_initiators(host_id)
            for wwn in wwns:
                if wwn in fc_initiators:
                    self.client.remove_fc_from_host(wwn)
        info = {'driver_volume_type': 'fibre_channel',
                'data': {'target_wwn': tgt_port_wwns,
                         'initiator_target_map': init_targ_map}}
        return info, portg_id
| {
"content_hash": "d28999c57ba0e12e41c801d3ec38162b",
"timestamp": "",
"source": "github",
"line_count": 2360,
"max_line_length": 79,
"avg_line_length": 41.970338983050844,
"alnum_prop": 0.5421201413427562,
"repo_name": "j-griffith/cinder",
"id": "1638fe460bfdd7a886d6021542b8649c865e4888",
"size": "99699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/huawei/huawei_driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "621"
},
{
"name": "Python",
"bytes": "20155959"
},
{
"name": "Shell",
"bytes": "16354"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta
import mraa
import pickle
import pywapi
import subprocess
import threading
import time
import unirest
import calendar_api
# Initialize buttons: GPIO pin 4 stops the alarm, pin 3 snoozes it.
stop_button = mraa.Gpio(4)
stop_button.dir(mraa.DIR_IN)
snooze_button = mraa.Gpio(3)
snooze_button.dir(mraa.DIR_IN)
# initialize global variables
# NOTE(review): snooze() assigns a *local* snooze_time (no 'global'
# statement), so this module-level value is never updated -- confirm intent.
snooze_time = 0
def generate_mp3(text, filename):
    # Request text-to-speech audio for `text` from the VoiceRSS API
    # (Brazilian Portuguese, 8 kHz 8-bit mono MP3) via the Mashape proxy,
    # then persist the response under `filename`.
    response = unirest.post(
        "https://voicerss-text-to-speech.p.mashape.com/?key=bf473308587d4902b44e871e2ff6ce48",
        headers={
            "X-Mashape-Key": "qBldwVHX3RmshYE6QarGAsRwoMBop1uOPOHjsncd1fF8YKh3b6",
            "Content-Type": "application/x-www-form-urlencoded"
        },
        params={
            "c": "mp3",
            "f": "8khz_8bit_mono",
            "hl": "pt-br",
            "r": 0,
            "src": text
        }
    )
    # NOTE(review): this pickles the whole unirest response object instead of
    # writing the raw MP3 payload (e.g. response.raw_body); the gst-launch
    # player that later reads this file may not accept it -- confirm.
    with open(filename, 'wb') as output:
        pickle.dump(response, output, pickle.HIGHEST_PROTOCOL)
def build_weather_report(weather_com):
    """Build the Portuguese weather sentence from a pywapi weather dict.

    `weather_com` must contain 'current_conditions' (with 'text' and
    'temperature') and a non-empty 'forecasts' list whose first entry has
    'high', 'low' and 'day'['chance_precip'].
    """
    conditions = weather_com['current_conditions']
    forecast = weather_com['forecasts'][0]
    weather = u' O tempo hoje está %s e faz %s graus lá fora!' % (conditions['text'],
                                                                  conditions['temperature'])
    weather += u' A máxima para hoje é de %s graus e a mínima é de %s!' % (forecast['high'],
                                                                           forecast['low'])
    # BUGFIX: a literal percent sign in %-formatting must be written '%%';
    # the previous "%s %!" raised ValueError ("unsupported format character").
    weather += u' A chance de chuva é de %s %%!' % forecast['day']['chance_precip']
    return weather
def good_morning():
    """Speak the morning report: current time, weather and today's events."""
    greeting = u'Bom dia, agora são %s!' % time.strftime("%H:%M", time.localtime())
    weather = build_weather_report(
        pywapi.get_weather_from_weather_com('BRXX0222', 'metric'))
    calendar = u'Seus eventos para hoje são: %s' % str(calendar_api.get_events())
    # saving and reproducing
    report = greeting + weather + calendar
    generate_mp3(report, "report.mp3")
    subprocess.Popen(['gst-launch-1.0', 'filesrc', 'location=/home/root/clock/report.mp3',
                      '!', 'mad', '!', 'pulsesink'])
def wake_up():
    # Ring the alarm until the stop button is pressed.  Pressing snooze
    # defers via snooze(); when stop finally ends it, speak the report.
    stop_button_pressed = False
    snooze_button_pressed = False
    # play the alarm ringtone while the stop button is not pressed
    while stop_button_pressed == False:
        # Fire-and-forget playback; the loop keeps polling buttons while
        # the ringtone plays.
        subprocess.Popen(['gst-launch-1.0', 'filesrc', 'location=/home/root/clock/wake.mp3',
                          '!', 'mad', '!', 'pulsesink'])
        print("wake up playing")
        stop_button_pressed = stop_button.read()
        snooze_button_pressed = snooze_button.read()
        if snooze_button_pressed == True:
            print("snooze button pressed")
            # snooze() returns True when the stop button ended the snooze,
            # which also ends this loop.
            stop_button_pressed = snooze()
    if stop_button_pressed == True:
        print("stop button pressed")
        good_morning()
def snooze():
    # Busy-wait for one minute or until the stop button is pressed.
    # Returns True when stop ended the snooze, False when the minute
    # elapsed (caller resumes ringing).
    stop_button_pressed = stop_button.read()
    # set 1 minute snooze time
    # NOTE(review): this is a local variable shadowing the module-level
    # snooze_time (no 'global' statement) -- confirm that is intended.
    snooze_time = datetime.utcnow() + timedelta(minutes = 1)
    print("snooze time: " + str(snooze_time))
    # checks if you want to end the snooze and play the report
    while stop_button_pressed == False:
        # Tight polling loop; matches the target h:m:s exactly, which works
        # because the loop iterates many times per second.
        if (datetime.utcnow().hour == snooze_time.hour and
            datetime.utcnow().minute == snooze_time.minute and
            datetime.utcnow().second == snooze_time.second):
            return False
        stop_button_pressed = stop_button.read()
    return True
if __name__ == "__main__":
while True:
# get today events and set the alarm time
calendar_api.get_events()
alarm_time = calendar_api.alarm_time
# print alarm_time and now time to debug
print("alarm time: " + str(alarm_time))
print("now: " + str(datetime.utcnow()))
# checks if it's time to wake up
if (datetime.utcnow().hour == alarm_time.hour and
datetime.utcnow().minute == alarm_time.minute):
# checks if it's 10 secs before or after, since it takes some time to execute the code
# so it can miss the exactly second
if (datetime.utcnow().second >= alarm_time.second - 10 and
datetime.utcnow().second <= alarm_time.second + 10):
wake_up()
| {
"content_hash": "0cd6b9f69153d996ef32d925204534d3",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 93,
"avg_line_length": 30.51260504201681,
"alnum_prop": 0.6700633434315616,
"repo_name": "julianaklulo/clockwise",
"id": "4590efb956e57a88d9b41c6d499c485042fbb7b9",
"size": "3657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "intelligent_clock.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6310"
}
],
"symlink_target": ""
} |
from typing import Iterable
class Index:
    """Resolve possibly-negative indices and slices against a fixed length.

    The reference length is taken from ``len(item)`` when *item* is sized,
    otherwise from ``int(item)``.
    """
    def __init__(self, item):
        try:
            length = len(item)
        except TypeError:
            length = int(item)
        self.length = length
    def index(self, item: int, error=None) -> int:
        """Return *item* as a non-negative index.

        Negative values count from the end.  When *error* is given, it is
        raised for out-of-range results; otherwise the value is returned
        unchecked.
        """
        result = self.length + int(item) if item < 0 else int(item)
        if error and not 0 <= result < self.length:
            raise error('index out of range')
        return result
    def slicing(self, *args) -> Iterable[int]:
        """Return the range of indices selected by a slice (or slice args)."""
        first = args[0]
        selector = first if isinstance(first, slice) else slice(*args)
        return range(*selector.indices(self.length))
| {
"content_hash": "5269c9bd9b93b41c98cfe5eb0ab1ded2",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 53,
"avg_line_length": 26.76,
"alnum_prop": 0.5171898355754858,
"repo_name": "mozman/ezdxf",
"id": "76d36f0243c00a8a8c6b849f36175aafd4f1982a",
"size": "751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ezdxf/tools/indexing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "5745"
},
{
"name": "CSS",
"bytes": "3565"
},
{
"name": "Common Lisp",
"bytes": "727"
},
{
"name": "Cython",
"bytes": "111923"
},
{
"name": "HTML",
"bytes": "1417"
},
{
"name": "JavaScript",
"bytes": "11132"
},
{
"name": "Python",
"bytes": "6336553"
}
],
"symlink_target": ""
} |
# Use the function form of print: identical output under Python 2 for a
# single argument, and also valid under Python 3.
print('Hello, World!')
| {
"content_hash": "139a7c5835bd83f7c2bf4bbad5161ee3",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 21,
"avg_line_length": 22,
"alnum_prop": 0.6818181818181818,
"repo_name": "dheerajgoudb/Hackerrank",
"id": "36fa3f056aa9a4d0ae33fcd856c0d5d9a43ee33c",
"size": "22",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "Python/Introduction/Hello,World.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10414"
}
],
"symlink_target": ""
} |
import os
import sys
import csv
import shutil
import tempfile
import traceback
import contextlib
import multiprocessing
import cPickle as pickle
import simplejson as json
from google.protobuf.descriptor import FieldDescriptor
from distributions.io.stream import open_compressed
from distributions.io.stream import json_load
from distributions.io.stream import protobuf_stream_load
import loom.schema_pb2
import parsable
# Rebind the module name to a Parsable instance used to register CLI
# commands (see @parsable.command below).
parsable = parsable.Parsable()
# Worker-pool size for parallel_map; defaults to the machine's core count.
THREADS = int(os.environ.get('LOOM_THREADS', multiprocessing.cpu_count()))
# When zero, LOG() is silent.
VERBOSITY = int(os.environ.get('LOOM_VERBOSITY', 1))
class LoomError(Exception):
    """Base class for errors raised by loom."""
    pass
class KnownBug(LoomError):
    """Raised for known, not-yet-fixed defects; see fixme()."""
    pass
def fixme(name, message):
    """Return an exception marking a known bug tagged with *name*.

    When running under nose, a SkipTest is returned so the test is skipped;
    otherwise a KnownBug error is returned for the caller to raise.
    """
    tagged = 'FIXME({}) {}'.format(name, message)
    if 'nose' not in sys.modules:
        return KnownBug(tagged)
    import nose
    return nose.SkipTest(tagged)
def LOG(message):
    """Write *message* to stdout immediately, unless verbosity is off."""
    if not VERBOSITY:
        return
    sys.stdout.write('{}\n'.format(message))
    sys.stdout.flush()
@contextlib.contextmanager
def chdir(wd):
    """Context manager: run the body with *wd* as the working directory."""
    previous = os.getcwd()
    try:
        os.chdir(wd)
        yield wd
    finally:
        # Always restore the caller's working directory.
        os.chdir(previous)
@contextlib.contextmanager
def tempdir(cleanup_on_error=True):
    """Run the body inside a fresh temporary working directory.

    On normal exit the directory is always removed; after an exception it
    is removed only when *cleanup_on_error* is true.
    """
    previous = os.getcwd()
    scratch = tempfile.mkdtemp()
    try:
        os.chdir(scratch)
        yield scratch
        # Reaching this line means the body did not raise, so the scratch
        # directory is unconditionally cleaned up below.
        cleanup_on_error = True
    finally:
        os.chdir(previous)
        if cleanup_on_error:
            shutil.rmtree(scratch)
@contextlib.contextmanager
def temp_copy(infile):
    """Yield a sibling 'temp.' path; move it over *infile* on success.

    The temporary path is always cleaned up afterwards (the rename on
    success makes that removal a no-op).
    """
    target = os.path.abspath(infile)
    head, tail = os.path.split(target)
    scratch = os.path.join(head, 'temp.{}'.format(tail))
    try:
        yield scratch
        os.rename(scratch, target)
    finally:
        rm_rf(scratch)
def mkdir_p(dirname):
    """Create *dirname* and any missing parents, like ``mkdir -p``."""
    if os.path.exists(dirname):
        return
    try:
        os.makedirs(dirname)
    except OSError as e:
        # A concurrent creator may have won the race; only re-raise when
        # the directory still does not exist.
        if not os.path.exists(dirname):
            raise e
def rm_rf(path):
    """Remove a file or a whole directory tree if present (``rm -rf``)."""
    if not os.path.exists(path):
        return
    if os.path.isdir(path):
        shutil.rmtree(path)
    else:
        os.remove(path)
def cp_ns(source, destin):
    """Symlink *destin* to *source* unless it already exists (``cp -ns``)."""
    if os.path.exists(destin):
        return
    assert os.path.exists(source), source
    parent = os.path.dirname(destin)
    if parent:
        mkdir_p(parent)
    try:
        os.symlink(source, destin)
    except OSError as e:
        # Tolerate a concurrent creator; anything else is a real failure.
        if not os.path.exists(destin):
            raise e
def print_trace(fun_arg):
    """Apply ``fun`` to ``arg`` (packed as one tuple), echoing failures.

    Exceptions are printed with their traceback and re-raised so the
    caller (e.g. a multiprocessing pool) still observes the error.
    """
    fun, arg = fun_arg
    try:
        return fun(arg)
    except Exception as e:
        print(e)
        traceback.print_exc()
        raise
def parallel_map(fun, args):
    # Map fun over args, fanning out to a multiprocessing pool of THREADS
    # workers when that is both enabled and safe.
    if not isinstance(args, list):
        args = list(args)
    # Daemonic processes cannot spawn children, so fall back to serial.
    is_daemon = multiprocessing.current_process().daemon
    if THREADS == 1 or len(args) < 2 or is_daemon:
        print 'Running {} in this thread'.format(fun.__name__)
        return map(fun, args)
    else:
        print 'Running {} in {:d} threads'.format(fun.__name__, THREADS)
        pool = multiprocessing.Pool(THREADS)
        # print_trace surfaces worker tracebacks the pool would otherwise
        # swallow; chunksize=1 keeps load balanced for uneven work items.
        fun_args = [(fun, arg) for arg in args]
        return pool.map(print_trace, fun_args, chunksize=1)
@contextlib.contextmanager
def csv_reader(filename):
    """Yield a csv.reader over a possibly-compressed file."""
    with open_compressed(filename, 'rb') as handle:
        yield csv.reader(handle)
@contextlib.contextmanager
def csv_writer(filename):
    """Yield a csv.writer over a possibly-compressed file."""
    with open_compressed(filename, 'wb') as handle:
        yield csv.writer(handle)
def pickle_dump(data, filename):
    """Pickle *data* into a possibly-compressed file."""
    with open_compressed(filename, 'wb') as handle:
        pickle.dump(data, handle)
def pickle_load(filename):
    """Load a pickled object from a possibly-compressed file."""
    with open_compressed(filename, 'rb') as handle:
        return pickle.load(handle)
def protobuf_to_dict(message):
    """Convert an initialized protobuf message to a plain dict.

    Empty repeated fields and uninitialized sub-messages are omitted;
    scalar fields are copied as-is.
    """
    assert message.IsInitialized()
    out = {}
    for field in message.DESCRIPTOR.fields:
        value = getattr(message, field.name)
        if field.label == FieldDescriptor.LABEL_REPEATED:
            if field.type == FieldDescriptor.TYPE_MESSAGE:
                value = [protobuf_to_dict(item) for item in value]
            else:
                value = list(value)
            if not value:
                value = None
        elif field.type == FieldDescriptor.TYPE_MESSAGE:
            value = protobuf_to_dict(value) if value.IsInitialized() else None
        if value is not None:
            out[field.name] = value
    return out
def dict_to_protobuf(raw, message):
    """Fill protobuf *message* in place from a nested dict."""
    assert isinstance(raw, dict)
    for key in raw:
        value = raw[key]
        if isinstance(value, dict):
            dict_to_protobuf(value, getattr(message, key))
        elif isinstance(value, list):
            list_to_protobuf(value, getattr(message, key))
        else:
            setattr(message, key, value)
def list_to_protobuf(raw, message):
    """Fill a repeated protobuf field *message* in place from a list.

    The first element decides how the whole list is treated: dicts and
    lists recurse into added sub-messages; scalars are assigned wholesale.
    """
    assert isinstance(raw, list)
    if not raw:
        return
    head = raw[0]
    if isinstance(head, dict):
        for value in raw:
            dict_to_protobuf(value, message.add())
    elif isinstance(head, list):
        for value in raw:
            list_to_protobuf(value, message.add())
    else:
        message[:] = raw
# Maps a filename prefix (e.g. 'rows' from 'rows.pbs.gz') to the
# loom.schema_pb2 message type stored in that file; used by get_message()
# when message_type='guess'.
GUESS_MESSAGE_TYPE = {
    'rows': 'Row',
    'diffs': 'Row',
    'shuffled': 'Row',
    'tares': 'ProductValue',
    'schema': 'ProductValue',
    'assign': 'Assignment',
    'model': 'CrossCat',
    'init': 'CrossCat',
    'mixture': 'ProductModel.Group',
    'config': 'Config',
    'checkpoint': 'Checkpoint',
    'log': 'LogMessage',
    'infer_log': 'LogMessage',
    'query_log': 'LogMessage',
    'requests': 'Query.Request',
    'responses': 'Query.Response',
}
def get_message(filename, message_type='guess'):
    """Instantiate the protobuf message type stored in *filename*.

    With message_type='guess', the type name is inferred from the filename
    prefix via GUESS_MESSAGE_TYPE; a LoomError is raised when no guess is
    possible.  Dotted names (e.g. 'ProductModel.Group') are resolved
    attribute by attribute inside loom.schema_pb2.
    """
    if message_type == 'guess':
        prefix = os.path.basename(filename).split('.')[0]
        if prefix not in GUESS_MESSAGE_TYPE:
            raise LoomError(
                'Cannot guess message type for {}'.format(filename))
        message_type = GUESS_MESSAGE_TYPE[prefix]
    Message = loom.schema_pb2
    for attr in message_type.split('.'):
        Message = getattr(Message, attr)
    return Message()
@parsable.command
def pretty_print(filename, message_type='guess'):
    '''
    Print text/json/protobuf messages from a raw/gz/bz2 file.
    '''
    # The serialization protocol is the last filename extension after any
    # compression suffix is stripped.
    parts = os.path.basename(filename).split('.')
    if parts[-1] in ['gz', 'bz2']:
        parts.pop()
    protocol = parts[-1]
    if protocol == 'json':
        data = json_load(filename)
        print json.dumps(data, sort_keys=True, indent=4)
    elif protocol == 'pb':
        # Single protobuf message per file.
        message = get_message(filename, message_type)
        with open_compressed(filename) as f:
            message.ParseFromString(f.read())
        print message
    elif protocol == 'pbs':
        # Stream of protobuf messages; the same message object is reused.
        message = get_message(filename, message_type)
        for string in protobuf_stream_load(filename):
            message.ParseFromString(string)
            print message
    elif protocol == 'pickle':
        data = pickle_load(filename)
        print repr(data)
    else:
        # Unknown extension: dump as plain text.
        with open_compressed(filename) as f:
            for line in f:
                print line,
@parsable.command
def cat(*filenames):
    '''
    Print text/json/protobuf messages from multiple raw/gz/bz2 files.
    '''
    for name in filenames:
        pretty_print(name)
if __name__ == '__main__':
    # Expose every @parsable.command function as a CLI sub-command.
    parsable.dispatch()
| {
"content_hash": "a0b800c0afa949f220e4f0612c8893c4",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 74,
"avg_line_length": 26.18661971830986,
"alnum_prop": 0.5993007933306441,
"repo_name": "fritzo/loom",
"id": "4414f4b8d4a7b84771d577fdb93e690e33996c56",
"size": "9014",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "loom/util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "400979"
},
{
"name": "CMake",
"bytes": "3958"
},
{
"name": "Makefile",
"bytes": "1851"
},
{
"name": "Protocol Buffer",
"bytes": "11819"
},
{
"name": "Python",
"bytes": "323689"
},
{
"name": "Shell",
"bytes": "395"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
# Distribution metadata for the id_token_verify package (sources under src/).
setup(
    name='id_token_verify',
    version='0.1',
    packages=find_packages('src'),
    package_dir={'': 'src'},
    url='https://github.com/its-dirg/id_token_verify',
    license='Apache 2.0',
    author='Rebecka Gulliksson',
    author_email='rebecka.gulliksson@umu.se',
    description='Utility/service for verifying signed OpenID Connect ID Tokens.',
    install_requires=['oic', 'requests']
)
| {
"content_hash": "4e69a9163852c5e468e9c1d1bac85cf7",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 81,
"avg_line_length": 32,
"alnum_prop": 0.6741071428571429,
"repo_name": "its-dirg/id_token_verify",
"id": "ccebf0241a55d7545cfa59e995ef01bf37b75687",
"size": "448",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9242"
}
],
"symlink_target": ""
} |
from typing import Callable, Tuple, Optional, List, Dict, Any, TYPE_CHECKING
from ray.rllib.env.external_env import ExternalEnv
from ray.rllib.env.external_multi_agent_env import ExternalMultiAgentEnv
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.rllib.env.vector_env import VectorEnv
from ray.rllib.utils.annotations import override, PublicAPI
from ray.rllib.utils.typing import EnvType, MultiEnvDict, EnvID, \
AgentID, MultiAgentDict
if TYPE_CHECKING:
from ray.rllib.models.preprocessors import Preprocessor
ASYNC_RESET_RETURN = "async_reset_return"
@PublicAPI
class BaseEnv:
"""The lowest-level env interface used by RLlib for sampling.
BaseEnv models multiple agents executing asynchronously in multiple
environments. A call to poll() returns observations from ready agents
keyed by their environment and agent ids, and actions for those agents
can be sent back via send_actions().
All other env types can be adapted to BaseEnv. RLlib handles these
conversions internally in RolloutWorker, for example:
gym.Env => rllib.VectorEnv => rllib.BaseEnv
rllib.MultiAgentEnv => rllib.BaseEnv
rllib.ExternalEnv => rllib.BaseEnv
Attributes:
action_space (gym.Space): Action space. This must be defined for
single-agent envs. Multi-agent envs can set this to None.
observation_space (gym.Space): Observation space. This must be defined
for single-agent envs. Multi-agent envs can set this to None.
Examples:
>>> env = MyBaseEnv()
>>> obs, rewards, dones, infos, off_policy_actions = env.poll()
>>> print(obs)
{
"env_0": {
"car_0": [2.4, 1.6],
"car_1": [3.4, -3.2],
},
"env_1": {
"car_0": [8.0, 4.1],
},
"env_2": {
"car_0": [2.3, 3.3],
"car_1": [1.4, -0.2],
"car_3": [1.2, 0.1],
},
}
>>> env.send_actions(
actions={
"env_0": {
"car_0": 0,
"car_1": 1,
}, ...
})
>>> obs, rewards, dones, infos, off_policy_actions = env.poll()
>>> print(obs)
{
"env_0": {
"car_0": [4.1, 1.7],
"car_1": [3.2, -4.2],
}, ...
}
>>> print(dones)
{
"env_0": {
"__all__": False,
"car_0": False,
"car_1": True,
}, ...
}
"""
@staticmethod
def to_base_env(env: EnvType,
make_env: Callable[[int], EnvType] = None,
num_envs: int = 1,
remote_envs: bool = False,
remote_env_batch_wait_ms: bool = 0) -> "BaseEnv":
"""Wraps any env type as needed to expose the async interface."""
from ray.rllib.env.remote_vector_env import RemoteVectorEnv
if remote_envs and num_envs == 1:
raise ValueError(
"Remote envs only make sense to use if num_envs > 1 "
"(i.e. vectorization is enabled).")
if not isinstance(env, BaseEnv):
if isinstance(env, MultiAgentEnv):
if remote_envs:
env = RemoteVectorEnv(
make_env,
num_envs,
multiagent=True,
remote_env_batch_wait_ms=remote_env_batch_wait_ms)
else:
env = _MultiAgentEnvToBaseEnv(
make_env=make_env,
existing_envs=[env],
num_envs=num_envs)
elif isinstance(env, ExternalEnv):
if num_envs != 1:
raise ValueError(
"External(MultiAgent)Env does not currently support "
"num_envs > 1. One way of solving this would be to "
"treat your Env as a MultiAgentEnv hosting only one "
"type of agent but with several copies.")
env = _ExternalEnvToBaseEnv(env)
elif isinstance(env, VectorEnv):
env = _VectorEnvToBaseEnv(env)
else:
if remote_envs:
env = RemoteVectorEnv(
make_env,
num_envs,
multiagent=False,
remote_env_batch_wait_ms=remote_env_batch_wait_ms)
else:
env = VectorEnv.wrap(
make_env=make_env,
existing_envs=[env],
num_envs=num_envs,
action_space=env.action_space,
observation_space=env.observation_space)
env = _VectorEnvToBaseEnv(env)
assert isinstance(env, BaseEnv), env
return env
@PublicAPI
def poll(self) -> Tuple[MultiEnvDict, MultiEnvDict, MultiEnvDict,
MultiEnvDict, MultiEnvDict]:
"""Returns observations from ready agents.
The returns are two-level dicts mapping from env_id to a dict of
agent_id to values. The number of agents and envs can vary over time.
Returns
-------
obs (dict): New observations for each ready agent.
rewards (dict): Reward values for each ready agent. If the
episode is just started, the value will be None.
dones (dict): Done values for each ready agent. The special key
"__all__" is used to indicate env termination.
infos (dict): Info values for each ready agent.
off_policy_actions (dict): Agents may take off-policy actions. When
that happens, there will be an entry in this dict that contains
the taken action. There is no need to send_actions() for agents
that have already chosen off-policy actions.
"""
raise NotImplementedError
@PublicAPI
def send_actions(self, action_dict: MultiEnvDict) -> None:
    """Called to send actions back to running agents in this env.

    Actions should be sent for each ready agent that returned observations
    in the previous poll() call.

    Args:
        action_dict (dict): Actions values keyed by env_id and agent_id.
    """
    # Abstract; concrete adapters implement this.
    raise NotImplementedError
@PublicAPI
def try_reset(self,
              env_id: Optional[EnvID] = None) -> Optional[MultiAgentDict]:
    """Attempt to reset the sub-env with the given id or all sub-envs.

    If the environment does not support synchronous reset, None can be
    returned here.

    Args:
        env_id (Optional[int]): The sub-env ID if applicable. If None,
            reset the entire Env (i.e. all sub-envs).

    Returns:
        obs (dict|None): Resetted observation or None if not supported.
    """
    # Default implementation: synchronous reset is not supported.
    return None
@PublicAPI
def get_unwrapped(self) -> List[EnvType]:
    """Return a reference to the underlying gym envs, if any.

    Returns:
        envs (list): Underlying gym envs or [].
    """
    # Default implementation: no underlying envs are exposed.
    return []
@PublicAPI
def stop(self) -> None:
    """Releases all resources used.

    Calls ``close()`` on every underlying env returned by
    get_unwrapped() that exposes such a method.
    """
    for sub_env in self.get_unwrapped():
        if hasattr(sub_env, "close"):
            sub_env.close()
# Fixed agent identifier when there is only the single agent in the env
_DUMMY_AGENT_ID = "agent0"


def _with_dummy_agent_id(env_id_to_values: Dict[EnvID, Any],
                         dummy_id: "AgentID" = _DUMMY_AGENT_ID
                         ) -> MultiEnvDict:
    """Wraps each per-env value into a single-agent dict keyed by dummy_id."""
    return {
        env_id: {dummy_id: value}
        for env_id, value in env_id_to_values.items()
    }
class _ExternalEnvToBaseEnv(BaseEnv):
    """Internal adapter of ExternalEnv to BaseEnv."""

    def __init__(self,
                 external_env: ExternalEnv,
                 preprocessor: "Preprocessor" = None):
        """Wraps and starts the given ExternalEnv.

        Args:
            external_env (ExternalEnv): Env to adapt. Its serving thread
                is started by this constructor (it calls .start()).
            preprocessor (Preprocessor): Optional preprocessor applied to
                raw observations in _poll(); when given, it also defines
                the observation space.
        """
        self.external_env = external_env
        self.prep = preprocessor
        # Multi-agent iff the env subclasses ExternalMultiAgentEnv.
        self.multiagent = issubclass(type(external_env), ExternalMultiAgentEnv)
        self.action_space = external_env.action_space
        if preprocessor:
            self.observation_space = preprocessor.observation_space
        else:
            self.observation_space = external_env.observation_space
        external_env.start()

    @override(BaseEnv)
    def poll(self) -> Tuple[MultiEnvDict, MultiEnvDict, MultiEnvDict,
                            MultiEnvDict, MultiEnvDict]:
        """Blocks until at least one episode has data, then returns it."""
        with self.external_env._results_avail_condition:
            results = self._poll()
            while len(results[0]) == 0:
                self.external_env._results_avail_condition.wait()
                results = self._poll()
                # BUGFIX: Thread.isAlive() was removed in Python 3.9;
                # is_alive() is the long-standing equivalent (assumes the
                # ExternalEnv runs as a thread — it was start()ed above).
                if not self.external_env.is_alive():
                    raise Exception("Serving thread has stopped.")
        limit = self.external_env._max_concurrent_episodes
        assert len(results[0]) < limit, \
            ("Too many concurrent episodes, were some leaked? This "
             "ExternalEnv was created with max_concurrent={}".format(limit))
        return results

    @override(BaseEnv)
    def send_actions(self, action_dict: MultiEnvDict) -> None:
        """Forwards actions to the per-episode action queues."""
        if self.multiagent:
            for env_id, actions in action_dict.items():
                self.external_env._episodes[env_id].action_queue.put(actions)
        else:
            # Single-agent: unwrap the dummy agent id before queueing.
            for env_id, action in action_dict.items():
                self.external_env._episodes[env_id].action_queue.put(
                    action[_DUMMY_AGENT_ID])

    def _poll(self) -> Tuple[MultiEnvDict, MultiEnvDict, MultiEnvDict,
                             MultiEnvDict, MultiEnvDict]:
        """Non-blocking collection of pending data from all episodes."""
        all_obs, all_rewards, all_dones, all_infos = {}, {}, {}, {}
        off_policy_actions = {}
        # Iterate over a copy: finished episodes are deleted while iterating.
        for eid, episode in self.external_env._episodes.copy().items():
            data = episode.get_data()
            cur_done = episode.cur_done_dict[
                "__all__"] if self.multiagent else episode.cur_done
            if cur_done:
                del self.external_env._episodes[eid]
            if data:
                if self.prep:
                    all_obs[eid] = self.prep.transform(data["obs"])
                else:
                    all_obs[eid] = data["obs"]
                all_rewards[eid] = data["reward"]
                all_dones[eid] = data["done"]
                all_infos[eid] = data["info"]
                if "off_policy_action" in data:
                    off_policy_actions[eid] = data["off_policy_action"]
        if self.multiagent:
            # Ensure a consistent set of keys
            # rely on all_obs having all possible keys for now.
            for eid, eid_dict in all_obs.items():
                for agent_id in eid_dict.keys():

                    def fix(d, zero_val):
                        # Fill missing per-agent entries with a zero value.
                        if agent_id not in d[eid]:
                            d[eid][agent_id] = zero_val

                    fix(all_rewards, 0.0)
                    fix(all_dones, False)
                    fix(all_infos, {})
            return (all_obs, all_rewards, all_dones, all_infos,
                    off_policy_actions)
        else:
            return _with_dummy_agent_id(all_obs), \
                _with_dummy_agent_id(all_rewards), \
                _with_dummy_agent_id(all_dones, "__all__"), \
                _with_dummy_agent_id(all_infos), \
                _with_dummy_agent_id(off_policy_actions)
class _VectorEnvToBaseEnv(BaseEnv):
    """Internal adapter of VectorEnv to BaseEnv.

    We assume the caller will always send the full vector of actions in
    each call to send_actions(), and that they call reset_at() on all
    completed environments before calling send_actions().
    """

    def __init__(self, vector_env: VectorEnv):
        self.vector_env = vector_env
        self.action_space = vector_env.action_space
        self.observation_space = vector_env.observation_space
        self.num_envs = vector_env.num_envs
        # Observations are lazily initialized on the first poll() call.
        self.new_obs = None
        self.cur_rewards = [None] * self.num_envs
        self.cur_dones = [False] * self.num_envs
        self.cur_infos = [None] * self.num_envs

    @override(BaseEnv)
    def poll(self) -> Tuple[MultiEnvDict, MultiEnvDict, MultiEnvDict,
                            MultiEnvDict, MultiEnvDict]:
        """Returns the buffered step (or initial reset) results."""
        if self.new_obs is None:
            self.new_obs = self.vector_env.vector_reset()
        obs_dict = dict(enumerate(self.new_obs))
        reward_dict = dict(enumerate(self.cur_rewards))
        done_dict = dict(enumerate(self.cur_dones))
        info_dict = dict(enumerate(self.cur_infos))
        # Consume the buffers; they are refilled by send_actions().
        self.new_obs = []
        self.cur_rewards = []
        self.cur_dones = []
        self.cur_infos = []
        return (_with_dummy_agent_id(obs_dict),
                _with_dummy_agent_id(reward_dict),
                _with_dummy_agent_id(done_dict, "__all__"),
                _with_dummy_agent_id(info_dict), {})

    @override(BaseEnv)
    def send_actions(self, action_dict: MultiEnvDict) -> None:
        """Steps the vector env with the full vector of actions."""
        action_vector = [
            action_dict[env_index][_DUMMY_AGENT_ID]
            for env_index in range(self.num_envs)
        ]
        self.new_obs, self.cur_rewards, self.cur_dones, self.cur_infos = \
            self.vector_env.vector_step(action_vector)

    @override(BaseEnv)
    def try_reset(self,
                  env_id: Optional[EnvID] = None) -> Optional[MultiAgentDict]:
        """Resets the given sub-env and wraps the obs for the dummy agent."""
        return {_DUMMY_AGENT_ID: self.vector_env.reset_at(env_id)}

    @override(BaseEnv)
    def get_unwrapped(self) -> List[EnvType]:
        """Returns the underlying gym envs of the wrapped VectorEnv."""
        return self.vector_env.get_unwrapped()
class _MultiAgentEnvToBaseEnv(BaseEnv):
    """Internal adapter of MultiAgentEnv to BaseEnv.

    This also supports vectorization if num_envs > 1.
    """

    def __init__(self, make_env: Callable[[int], EnvType],
                 existing_envs: List[MultiAgentEnv], num_envs: int):
        """Wrap existing multi-agent envs.

        Args:
            make_env (func|None): Factory that produces a new multiagent env.
                Must be defined if the number of existing envs is less than
                num_envs.
            existing_envs (list): List of existing multiagent envs.
            num_envs (int): Desired num multiagent envs to keep total.
        """
        self.make_env = make_env
        self.envs = existing_envs
        self.num_envs = num_envs
        # IDs of sub-envs that reported "__all__" done and await a reset.
        self.dones = set()
        while len(self.envs) < self.num_envs:
            self.envs.append(self.make_env(len(self.envs)))
        for env in self.envs:
            assert isinstance(env, MultiAgentEnv)
        self.env_states = [_MultiAgentEnvState(env) for env in self.envs]

    @override(BaseEnv)
    def poll(self) -> Tuple[MultiEnvDict, MultiEnvDict, MultiEnvDict,
                            MultiEnvDict, MultiEnvDict]:
        """Collects the buffered results of all sub-envs."""
        obs, rewards, dones, infos = {}, {}, {}, {}
        for i, env_state in enumerate(self.env_states):
            obs[i], rewards[i], dones[i], infos[i] = env_state.poll()
        return obs, rewards, dones, infos, {}

    @override(BaseEnv)
    def send_actions(self, action_dict: MultiEnvDict) -> None:
        """Steps each referenced sub-env and validates its return values."""
        for env_id, agent_dict in action_dict.items():
            if env_id in self.dones:
                raise ValueError("Env {} is already done".format(env_id))
            env = self.envs[env_id]
            obs, rewards, dones, infos = env.step(agent_dict)
            assert isinstance(obs, dict), "Not a multi-agent obs"
            assert isinstance(rewards, dict), "Not a multi-agent reward"
            assert isinstance(dones, dict), "Not a multi-agent return"
            assert isinstance(infos, dict), "Not a multi-agent info"
            if set(obs.keys()) != set(rewards.keys()):
                raise ValueError(
                    "Key set for obs and rewards must be the same: "
                    "{} vs {}".format(obs.keys(), rewards.keys()))
            if set(infos).difference(set(obs)):
                raise ValueError("Key set for infos must be a subset of obs: "
                                 "{} vs {}".format(infos.keys(), obs.keys()))
            if "__all__" not in dones:
                raise ValueError(
                    "In multi-agent environments, '__all__': True|False must "
                    "be included in the 'done' dict: got {}.".format(dones))
            if dones["__all__"]:
                self.dones.add(env_id)
            self.env_states[env_id].observe(obs, rewards, dones, infos)

    @override(BaseEnv)
    def try_reset(self,
                  env_id: Optional[EnvID] = None) -> Optional[MultiAgentDict]:
        """Resets the given sub-env.

        Returns:
            obs (dict|None): Resetted observation, or None if the sub-env
                does not support synchronous reset.
        """
        obs = self.env_states[env_id].reset()
        # BUGFIX: only validate and clear the done flag for an actual
        # observation. The original asserted `isinstance(obs, dict)`
        # unconditionally, which made the documented "return None if
        # reset is unsupported" path raise an AssertionError instead.
        if obs is not None:
            assert isinstance(obs, dict), "Not a multi-agent obs"
            if env_id in self.dones:
                self.dones.remove(env_id)
        return obs

    @override(BaseEnv)
    def get_unwrapped(self) -> List[EnvType]:
        """Returns the wrapped MultiAgentEnv instances."""
        return [state.env for state in self.env_states]
class _MultiAgentEnvState:
    """Buffers the most recent step results of a single MultiAgentEnv."""

    def __init__(self, env: MultiAgentEnv):
        assert isinstance(env, MultiAgentEnv)
        self.env = env
        # reset() is deferred until the first poll() call.
        self.initialized = False

    def poll(self) -> Tuple[MultiAgentDict, MultiAgentDict, MultiAgentDict,
                            MultiAgentDict, MultiAgentDict]:
        """Returns and clears the buffered (obs, rewards, dones, infos)."""
        if not self.initialized:
            self.reset()
            self.initialized = True
        buffered = (self.last_obs, self.last_rewards, self.last_dones,
                    self.last_infos)
        # Clear the buffers until the next observe()/reset().
        self.last_obs = {}
        self.last_rewards = {}
        self.last_dones = {"__all__": False}
        self.last_infos = {}
        return buffered

    def observe(self, obs: MultiAgentDict, rewards: MultiAgentDict,
                dones: MultiAgentDict, infos: MultiAgentDict):
        """Stores the results of the latest env.step() call."""
        self.last_obs = obs
        self.last_rewards = rewards
        self.last_dones = dones
        self.last_infos = infos

    def reset(self) -> MultiAgentDict:
        """Resets the env and re-seeds the buffers for its agents."""
        self.last_obs = self.env.reset()
        agent_ids = self.last_obs.keys()
        self.last_rewards = {agent_id: None for agent_id in agent_ids}
        self.last_dones = {agent_id: False for agent_id in agent_ids}
        self.last_infos = {agent_id: {} for agent_id in agent_ids}
        self.last_dones["__all__"] = False
        return self.last_obs
| {
"content_hash": "956e293fa57bd972f066f23e41759eeb",
"timestamp": "",
"source": "github",
"line_count": 470,
"max_line_length": 79,
"avg_line_length": 39.59148936170213,
"alnum_prop": 0.5512145313843508,
"repo_name": "robertnishihara/ray",
"id": "6c9544833eef726b0eafc4ffe640a1e9481e1026",
"size": "18608",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/env/base_env.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "82909"
},
{
"name": "C++",
"bytes": "3971373"
},
{
"name": "CSS",
"bytes": "8025"
},
{
"name": "Cython",
"bytes": "179979"
},
{
"name": "Dockerfile",
"bytes": "6468"
},
{
"name": "Go",
"bytes": "23139"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1248954"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "2205"
},
{
"name": "Python",
"bytes": "6567694"
},
{
"name": "Shell",
"bytes": "102477"
},
{
"name": "Starlark",
"bytes": "231513"
},
{
"name": "TypeScript",
"bytes": "147793"
}
],
"symlink_target": ""
} |
import os
import re
from datetime import datetime, timedelta
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.db import transaction
from django.utils.encoding import force_text
from django_statsd.clients import statsd
from multidb import get_replica
import olympia.core.logger
from olympia import amo
from olympia.addons.models import Addon
from olympia.amo.celery import task
from olympia.amo.decorators import use_primary_db
from olympia.amo.storage_utils import rm_stored_dir
from olympia.constants.blocklist import (
MLBF_TIME_CONFIG_KEY,
MLBF_BASE_ID_CONFIG_KEY,
REMOTE_SETTINGS_COLLECTION_MLBF)
from olympia.files.models import File
from olympia.lib.remote_settings import RemoteSettings
from olympia.users.utils import get_task_user
from olympia.zadmin.models import set_config
from .mlbf import MLBF
from .models import Block, BlocklistSubmission, LegacyImport
from .utils import (
block_activity_log_delete,
block_activity_log_save,
datetime_to_ts,
split_regex_to_list)
log = olympia.core.logger.getLogger('z.amo.blocklist')

# Match unescaped "{" / "}" — used to escape legacy guid regexes before
# they are handed to MySQL.
bracket_open_regex = re.compile(r'(?<!\\){')
bracket_close_regex = re.compile(r'(?<!\\)}')

# Remote Settings attachment type for the base bloomfilter record.
BLOCKLIST_RECORD_MLBF_BASE = 'bloomfilter-base'
@task
@use_primary_db
def process_blocklistsubmission(multi_block_submit_id, **kw):
    """Applies a BlocklistSubmission: creates/updates or deletes Blocks."""
    submission = BlocklistSubmission.objects.get(pk=multi_block_submit_id)
    if submission.action == BlocklistSubmission.ACTION_ADDCHANGE:
        # create the blocks from the guids in the multi_block
        submission.save_to_block_objects()
    elif submission.action == BlocklistSubmission.ACTION_DELETE:
        # delete the blocks
        submission.delete_block_objects()
@task
@use_primary_db
@transaction.atomic
def import_block_from_blocklist(record):
    """Creates/updates Block objects from one legacy remote-settings record.

    A LegacyImport row is kept per legacy record id so repeated imports
    can be reconciled: blocks added, updated, and (on re-import) deleted
    when no longer matched. Regex-style guids (records whose guid starts
    with "/") are expanded to matching Addon guids where possible.

    Args:
        record (dict): the raw remote-settings blocklist record.
    """
    legacy_id = record.get('id')
    using_db = get_replica()
    log.info('Processing block id: [%s]', legacy_id)
    legacy_import, import_created = LegacyImport.objects.update_or_create(
        legacy_id=legacy_id,
        defaults={'record': record, 'timestamp': record.get('last_modified')})
    if not import_created:
        log.info(
            'LegacyRS %s: updating existing LegacyImport object', legacy_id)
        # Remember blocks currently linked to this record (regex-derived
        # blocks are stored with a "*" prefix) so stale ones can be
        # deleted at the end.
        existing_block_ids = list(
            Block.objects.filter(legacy_id__in=(legacy_id, f'*{legacy_id}'))
            .values_list('id', flat=True))
    guid = record.get('guid')
    if not guid:
        legacy_import.outcome = LegacyImport.OUTCOME_MISSINGGUID
        legacy_import.save()
        log.error('LegacyRS %s: GUID is falsey, skipping.', legacy_id)
        return
    version_range = (record.get('versionRange') or [{}])[0]
    target_application = version_range.get('targetApplication') or [{}]
    target_GUID = target_application[0].get('guid')
    if target_GUID and target_GUID != amo.FIREFOX.guid:
        legacy_import.outcome = LegacyImport.OUTCOME_NOTFIREFOX
        legacy_import.save()
        log.error(
            'LegacyRS %s: targetApplication (%s) is not Firefox, skipping.',
            legacy_id, target_GUID)
        return
    block_kw = {
        'min_version': version_range.get('minVersion', '0'),
        'max_version': version_range.get('maxVersion', '*'),
        'url': record.get('details', {}).get('bug') or '',
        'reason': record.get('details', {}).get('why') or '',
        'legacy_id': legacy_id,
        'updated_by': get_task_user(),
    }
    # last_modified is in milliseconds since the epoch.
    modified_date = datetime.fromtimestamp(
        record.get('last_modified', datetime_to_ts()) / 1000)
    if guid.startswith('/'):
        # need to escape the {} brackets or mysql chokes.
        guid_regexp = bracket_open_regex.sub(r'\{', guid[1:-1])
        guid_regexp = bracket_close_regex.sub(r'\}', guid_regexp)
        # we're going to try to split the regex into a list for efficiency.
        guids_list = split_regex_to_list(guid_regexp)
        if guids_list:
            log.info(
                'LegacyRS %s: Broke down regex into list; '
                'attempting to create Blocks for guids in %s',
                legacy_id, guids_list)
            statsd.incr(
                'blocklist.tasks.import_blocklist.record_guid',
                count=len(guids_list))
            addons_guids_qs = Addon.unfiltered.using(using_db).filter(
                guid__in=guids_list).values_list('guid', flat=True)
        else:
            log.info(
                'LegacyRS %s: Unable to break down regex into list; '
                'attempting to create Blocks for guids matching [%s]',
                legacy_id, guid_regexp)
            # mysql doesn't support \d - only [:digit:]
            guid_regexp = guid_regexp.replace(r'\d', '[[:digit:]]')
            addons_guids_qs = Addon.unfiltered.using(using_db).filter(
                guid__regex=guid_regexp).values_list('guid', flat=True)
        # We need to mark this id in a way so we know its from a
        # regex guid - otherwise we might accidentally overwrite it.
        block_kw['legacy_id'] = '*' + block_kw['legacy_id']
        regex = True
    else:
        log.info(
            'LegacyRS %s: Attempting to create a Block for guid [%s]',
            legacy_id, guid)
        statsd.incr('blocklist.tasks.import_blocklist.record_guid')
        addons_guids_qs = Addon.unfiltered.using(using_db).filter(
            guid=guid).values_list('guid', flat=True)
        regex = False
    new_blocks = []
    for guid in addons_guids_qs:
        # Only block addons that actually ship webextension files.
        valid_files_qs = File.objects.filter(
            version__addon__guid=guid, is_webextension=True)
        if not valid_files_qs.exists():
            log.info(
                'LegacyRS %s: Skipped Block for [%s] because it has no '
                'webextension files', legacy_id, guid)
            statsd.incr('blocklist.tasks.import_blocklist.block_skipped')
            continue
        (block, created) = Block.objects.update_or_create(
            guid=guid, defaults=dict(guid=guid, **block_kw))
        block_activity_log_save(block, change=not created)
        if created:
            log.info('LegacyRS %s: Added Block for [%s]', legacy_id, guid)
            statsd.incr('blocklist.tasks.import_blocklist.block_added')
            # Preserve the legacy record's modification time on creation.
            block.update(modified=modified_date)
        else:
            log.info('LegacyRS %s: Updated Block for [%s]', legacy_id, guid)
            statsd.incr('blocklist.tasks.import_blocklist.block_updated')
        new_blocks.append(block)
    if new_blocks:
        legacy_import.outcome = (
            LegacyImport.OUTCOME_REGEXBLOCKS if regex else
            LegacyImport.OUTCOME_BLOCK
        )
    else:
        legacy_import.outcome = LegacyImport.OUTCOME_NOMATCH
        log.info('LegacyRS %s: No addon found', legacy_id)
    if not import_created:
        # now reconcile the blocks that were connected to the import last time
        # but weren't changed this time - i.e. blocks we need to delete
        delete_qs = (
            Block.objects.filter(id__in=existing_block_ids)
            .exclude(id__in=(block.id for block in new_blocks)))
        for block in delete_qs:
            block_activity_log_delete(
                block, delete_user=block_kw['updated_by'])
            block.delete()
            statsd.incr('blocklist.tasks.import_blocklist.block_deleted')
    legacy_import.save()
    if import_created:
        statsd.incr(
            'blocklist.tasks.import_blocklist.new_record_processed')
    else:
        statsd.incr(
            'blocklist.tasks.import_blocklist.modified_record_processed')
@task
@use_primary_db
@transaction.atomic
def delete_imported_block_from_blocklist(legacy_id):
    """Deletes the Blocks and the LegacyImport tied to a legacy record id."""
    task_user = get_task_user()
    # Regex-derived blocks are stored with a "*" prefix on the legacy id.
    matching_blocks = Block.objects.filter(
        legacy_id__in=(legacy_id, f'*{legacy_id}'))
    for block in matching_blocks:
        block_activity_log_delete(
            block, delete_user=task_user)
        block.delete()
        statsd.incr('blocklist.tasks.import_blocklist.block_deleted')
    LegacyImport.objects.get(legacy_id=legacy_id).delete()
    statsd.incr('blocklist.tasks.import_blocklist.deleted_record_processed')
@task
def upload_filter(generation_time, is_base=True):
    """Uploads the MLBF identified by `generation_time` to Remote Settings.

    Args:
        generation_time: timestamp identifying the filter on storage.
        is_base (bool): when True, the collection is cleared and the
            bloomfilter is published as the new base attachment;
            otherwise a stash record is published on top of the base.
    """
    bucket = settings.REMOTE_SETTINGS_WRITER_BUCKET
    server = RemoteSettings(
        bucket, REMOTE_SETTINGS_COLLECTION_MLBF, sign_off_needed=False)
    mlbf = MLBF.load_from_storage(generation_time)
    if is_base:
        # clear the collection for the base - we want to be the only filter
        server.delete_all_records()
        statsd.incr('blocklist.tasks.upload_filter.reset_collection')
        # Then the bloomfilter
        data = {
            'key_format': MLBF.KEY_FORMAT,
            'generation_time': generation_time,
            'attachment_type': BLOCKLIST_RECORD_MLBF_BASE,
        }
        with storage.open(mlbf.filter_path, 'rb') as filter_file:
            attachment = (
                'filter.bin', filter_file, 'application/octet-stream')
            server.publish_attachment(data, attachment)
            statsd.incr('blocklist.tasks.upload_filter.upload_mlbf')
        statsd.incr('blocklist.tasks.upload_filter.upload_mlbf.base')
    else:
        # If we have a stash, write that
        stash_data = {
            'key_format': MLBF.KEY_FORMAT,
            'stash_time': generation_time,
            'stash': mlbf.stash_json,
        }
        server.publish_record(stash_data)
        statsd.incr('blocklist.tasks.upload_filter.upload_stash')
    server.complete_session()
    # Record the active filter/stash time; also the base id when is_base.
    set_config(MLBF_TIME_CONFIG_KEY, generation_time, json_value=True)
    if is_base:
        set_config(MLBF_BASE_ID_CONFIG_KEY, generation_time, json_value=True)
@task
def cleanup_old_files(*, base_filter_id):
    """Deletes MLBF storage folders that are both >6 months old and older
    than the current base filter.

    Args:
        base_filter_id: timestamp (in milliseconds) of the current base
            bloomfilter; folders at or after it are always kept.
    """
    log.info('Starting clean up of old MLBF folders...')
    six_months_ago = datetime_to_ts(
        datetime.now() - timedelta(weeks=26))
    base_filter_ts = int(base_filter_id)
    # IDIOM FIX: the loop variable was named `dir`, shadowing the builtin.
    for folder in storage.listdir(settings.MLBF_STORAGE_PATH)[0]:
        folder = force_text(folder)
        # skip non-numeric folder names (folders are named by ms timestamp)
        if not folder.isdigit():
            log.info('Skipping %s because not a timestamp', folder)
            continue
        folder_ts = int(folder)
        folder_as_date = datetime.fromtimestamp(folder_ts / 1000)
        # delete if >6 months old and <base_filter_id
        if folder_ts > six_months_ago:
            log.info(
                'Skipping %s because < 6 months old (%s)', folder,
                folder_as_date)
        elif folder_ts > base_filter_ts:
            log.info(
                'Skipping %s because more recent (%s) than base mlbf (%s)',
                folder, folder_as_date,
                datetime.fromtimestamp(base_filter_ts / 1000)
            )
        else:
            log.info(
                'Deleting %s because > 6 months old (%s)', folder,
                folder_as_date)
            rm_stored_dir(
                os.path.join(settings.MLBF_STORAGE_PATH, folder), storage)
| {
"content_hash": "aedca5730fa2351276ae21295ca890b2",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 79,
"avg_line_length": 40.11851851851852,
"alnum_prop": 0.6256462333825702,
"repo_name": "eviljeff/olympia",
"id": "0bebf5a00a268c9299915a228b7c947552c34795",
"size": "10832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/blocklist/tasks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "251925"
},
{
"name": "Dockerfile",
"bytes": "4063"
},
{
"name": "HTML",
"bytes": "314372"
},
{
"name": "JavaScript",
"bytes": "865804"
},
{
"name": "Less",
"bytes": "307222"
},
{
"name": "Makefile",
"bytes": "564"
},
{
"name": "Python",
"bytes": "6146705"
},
{
"name": "Shell",
"bytes": "8000"
},
{
"name": "Smarty",
"bytes": "1413"
}
],
"symlink_target": ""
} |
from opbeat.instrumentation.packages.base import AbstractInstrumentedModule
class PythonMemcachedInstrumentation(AbstractInstrumentedModule):
    """Instruments python-memcached ``memcache.Client`` calls as
    "cache.memcached" traces."""

    name = 'python_memcached'

    # Took out 'set_servers', 'reset_cas', 'debuglog', 'check_key' and
    # 'forget_dead_hosts' because they involve no communication.
    method_list = [
        'add',
        'append',
        'cas',
        'decr',
        'delete',
        'delete_multi',
        'disconnect_all',
        'flush_all',
        'get',
        'get_multi',
        'get_slabs',
        'get_stats',
        'gets',
        'incr',
        'prepend',
        'replace',
        'set',
        'set_multi',
        'touch'
    ]

    def get_instrument_list(self):
        """Returns the (module, method) pairs that should be wrapped."""
        return [
            ("memcache", "Client." + method_name)
            for method_name in self.method_list
        ]

    def call(self, module, method, wrapped, instance, args, kwargs):
        """Records a trace around the wrapped memcache client call."""
        trace_name = self.get_wrapped_name(wrapped, instance, method)
        with self.client.capture_trace(trace_name, "cache.memcached"):
            return wrapped(*args, **kwargs)
| {
"content_hash": "230e6cf3db9bc1862e735858a61c41d6",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 80,
"avg_line_length": 27.657894736842106,
"alnum_prop": 0.5737392959086585,
"repo_name": "daikeren/opbeat_python",
"id": "0528946dac75126e7e85782a74340126056c63d2",
"size": "1051",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "opbeat/instrumentation/packages/python_memcached.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "81877"
},
{
"name": "HTML",
"bytes": "284"
},
{
"name": "Makefile",
"bytes": "135"
},
{
"name": "Python",
"bytes": "407176"
}
],
"symlink_target": ""
} |
"""Tools to work with checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver
from tensorflow.python.training import training as train
def _get_checkpoint_filename(filepattern):
  """Resolves `filepattern` to a concrete checkpoint filename.

  A directory is resolved to its latest checkpoint; anything else is
  assumed to already name a checkpoint and is returned unchanged.
  """
  is_directory = gfile.IsDirectory(filepattern)
  return saver.latest_checkpoint(filepattern) if is_directory else filepattern
def load_checkpoint(filepattern):
  """Returns a `CheckpointReader` for the latest checkpoint.

  Args:
    filepattern: Directory with checkpoints file or path to checkpoint.

  Returns:
    `CheckpointReader` object.

  Raises:
    ValueError: if checkpoint_dir doesn't have 'checkpoint' file or
      checkpoints.
  """
  checkpoint_file = _get_checkpoint_filename(filepattern)
  if checkpoint_file is None:
    raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
                     "given directory %s" % filepattern)
  return train.NewCheckpointReader(checkpoint_file)
def load_variable(checkpoint_dir, name):
  """Returns the value of the variable `name` from the checkpoint.

  Args:
    checkpoint_dir: Directory with checkpoints file or path to checkpoint.
    name: Name of the tensor to return.

  Returns:
    `Tensor` object.
  """
  return load_checkpoint(checkpoint_dir).get_tensor(name)
def list_variables(checkpoint_dir):
  """Returns list of all variables in the latest checkpoint.

  Args:
    checkpoint_dir: Directory with checkpoints file or path to checkpoint.

  Returns:
    List of tuples `(name, shape)`, sorted by variable name.
  """
  reader = load_checkpoint(checkpoint_dir)
  variable_map = reader.get_variable_to_shape_map()
  # A comprehension replaces the original manual append loop.
  return [(name, variable_map[name]) for name in sorted(variable_map.keys())]
# pylint: disable=protected-access
# Currently variable_scope doesn't provide very good APIs to access
# all variables under scope and retrieve and check existing scopes.
# TODO(ipolosukhin): Refactor variable_scope module to provide nicer APIs.
def _set_checkpoint_initializer(variable, file_pattern, tensor_name, slice_spec,
                                name="checkpoint_initializer"):
  """Sets variable initializer to assign op from value in checkpoint's tensor.

  Args:
    variable: `Variable` object.
    file_pattern: string, where to load checkpoints from.
    tensor_name: Name of the `Tensor` to load from checkpoint reader.
    slice_spec: Slice specification for loading partitioned variables.
    name: Name of the operation.
  """
  base_type = variable.dtype.base_dtype
  # Raw restore op reading (a slice of) the named tensor from the file.
  restore_op = gen_io_ops._restore_slice(
      file_pattern,
      tensor_name,
      slice_spec,
      base_type,
      preferred_shard=-1,
      name=name)
  # Replace the variable's initializer with an assign from the restore.
  variable._initializer_op = state_ops.assign(variable, restore_op)
def _set_variable_or_list_initializer(variable_or_list, file_pattern,
                                      tensor_name):
  """Sets checkpoint initializer(s) for a variable or a list of slices.

  Args:
    variable_or_list: A `Variable`, or a list/tuple of `Variable` slices
      that all belong to the same partitioned tensor.
    file_pattern: string, where to load checkpoints from.
    tensor_name: Name of the `Tensor` to load from checkpoint reader.

  Raises:
    ValueError: If the listed slices come from different tensors.
  """
  if isinstance(variable_or_list, (list, tuple)):
    # A set of slices.
    slice_name = None
    for v in variable_or_list:
      if slice_name is None:
        slice_name = v._save_slice_info.full_name
      elif slice_name != v._save_slice_info.full_name:
        raise ValueError("Slices must all be from the same tensor: %s != %s" %
                         (slice_name, v._save_slice_info.full_name))
      _set_checkpoint_initializer(v, file_pattern, tensor_name,
                                  v._save_slice_info.spec)
  else:
    # Single (non-partitioned) variable: empty slice spec.
    _set_checkpoint_initializer(variable_or_list, file_pattern, tensor_name, "")
def init_from_checkpoint(checkpoint_dir, assignment_map):
  """Using assignment map initializes current variables with loaded tensors.

  Note: This overrides default initialization ops of specified variables and
  redefines dtype.

  Assignment map supports next syntax:
    `'scope_name/': 'checkpoint_scope_name/'` - will load all variables in
      current `scope_name` from `checkpoint_scope_name` with matching variable
      names.
    `'scope_name/variable_name': 'checkpoint_scope_name/some_other_variable'` -
      will initialize `scope_name/variable_name` variable
      from `checkpoint_scope_name/some_other_variable`.
    `variable: 'scope_variable_name'` - will initialize given variable with
      variable from the checkpoint.
    `'scope_name/': '/'` - will load all variables in current `scope_name` from
      checkpoint's root (e.g. no scope).

  Supports loading into partitioned variables, which are represented as
  '<variable>/part_<part #>'.

  Example:
  ```python
    # Create variables.
    with tf.variable_scope('test'):
      m = tf.get_variable('my_var')
    with tf.variable_scope('test2'):
      var2 = tf.get_variable('my_var')
    ...
    # Specify which variables to initialize from checkpoint.
    init_from_checkpoint(checkpoint_dir, {
      'test/my_var': 'some_var',
      'test2/': 'some_scope/'})
    ...
    # Or use `Variable` objects to identify what to initialize.
    init_from_checkpoint(checkpoint_dir, {
      var2: 'some_scope/var2',
    })
    ...
    # Initialize variables as usual.
    session.run(tf.get_all_variables())
  ```

  Args:
    checkpoint_dir: Directory with checkpoints file or path to checkpoint.
    assignment_map: Dict, where keys are names of current variables
      (in default graph) and values are names of the variables
      in the checkpoint.

  Raises:
    tf.errors.OpError: If missing checkpoints or tensors in checkpoints.
    ValueError: If missing variables in current graph.
  """
  filepattern = _get_checkpoint_filename(checkpoint_dir)
  reader = load_checkpoint(checkpoint_dir)
  variable_map = reader.get_variable_to_shape_map()
  for current_name, tensor_name in six.iteritems(assignment_map):
    scopes = ""
    var = None
    # Check if this is Variable object.
    if isinstance(current_name, variables.Variable):
      var = current_name
    else:
      var_scope = vs._get_default_variable_store()
      # Check if this is variable in var_store.
      var = var_scope._vars.get(current_name, None)
      # Also check if variable is partitioned as list.
      if var is None:
        if current_name + "/part_0" in var_scope._vars:
          # Collect the partition slices in part-number order.
          var = []
          i = 0
          while current_name + "/part_%d" % i in var_scope._vars:
            var.append(var_scope._vars[current_name + "/part_%d" % i])
            i += 1
    if var is not None:
      # If 1 to 1 mapping was provided, find variable in the scope.
      if tensor_name not in variable_map:
        raise ValueError("Tensor %s is not found in %s checkpoint" % (
            tensor_name, checkpoint_dir
        ))
      if isinstance(var, variables.Variable):
        # Additional at-call-time checks.
        if not var.get_shape().is_compatible_with(variable_map[tensor_name]):
          raise ValueError(
              "Shape of variable %s (%s) doesn't match with shape of "
              "tensor %s (%s) from checkpoint reader." % (
                  var.name, str(var.get_shape()),
                  tensor_name, str(variable_map[tensor_name])
              ))
      _set_variable_or_list_initializer(var, filepattern, tensor_name)
      logging.info("Initialize variable %s from checkpoint %s with %s" % (
          current_name, checkpoint_dir, tensor_name
      ))
    else:
      # Scope-to-scope mapping: strip the trailing variable-name segment.
      # NOTE(review): when current_name contains no "/", `scopes` stays ""
      # and the `len(scopes) + 1` slicing below drops the first character
      # of every var_name — verify this edge case is intended.
      if "/" in current_name:
        scopes = current_name[:current_name.rindex("/")]
        current_name = current_name[current_name.rindex("/") + 1:]
      if not tensor_name.endswith("/"):
        raise ValueError(
            "Assignment map with scope only name (%s) "
            "should map to scope only (%s). "
            "Should be 'scope/': 'other_scope/'." % (
                scopes, tensor_name
            ))
      # If scope to scope mapping was provided, find all variables in the scope.
      for var_name in var_scope._vars:
        if var_name.startswith(scopes):
          # Lookup name with specified prefix and suffix from current variable.
          # If tensor_name given is '/' (root), don't use it for full name.
          if tensor_name != "/":
            full_tensor_name = tensor_name + var_name[len(scopes) + 1:]
          else:
            full_tensor_name = var_name[len(scopes) + 1:]
          if full_tensor_name not in variable_map:
            raise ValueError(
                "Tensor %s (%s in %s) is not found in %s checkpoint" % (
                    full_tensor_name, var_name[len(scopes) + 1:], tensor_name,
                    checkpoint_dir
                ))
          var = var_scope._vars[var_name]
          _set_variable_or_list_initializer(var, filepattern, full_tensor_name)
          logging.info("Initialize variable %s from checkpoint %s with %s" % (
              var_name, checkpoint_dir, tensor_name
          ))
# pylint: enable=protected-access
| {
"content_hash": "a229975832f488bd8be119d7c2d7e9ef",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 80,
"avg_line_length": 37.50813008130081,
"alnum_prop": 0.6539503630649182,
"repo_name": "sachinpro/sachinpro.github.io",
"id": "da42175570cddb2395bf3aaf10f7dbd568508498",
"size": "9953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/learn/python/learn/utils/checkpoints.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "156005"
},
{
"name": "C++",
"bytes": "9215678"
},
{
"name": "CMake",
"bytes": "29372"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "783708"
},
{
"name": "Java",
"bytes": "39181"
},
{
"name": "JavaScript",
"bytes": "10779"
},
{
"name": "Jupyter Notebook",
"bytes": "1773496"
},
{
"name": "Protocol Buffer",
"bytes": "112087"
},
{
"name": "Python",
"bytes": "6754480"
},
{
"name": "Shell",
"bytes": "185704"
},
{
"name": "TypeScript",
"bytes": "410434"
}
],
"symlink_target": ""
} |
'''
Created by auto_sdk on 2014.06.04
'''
from top.api.base import RestApi
class FuwuSaleLinkGenRequest(RestApi):
    """Auto-generated TOP request for the `taobao.fuwu.sale.link.gen` API."""

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # Request fields, presumably set by the caller before invoking the
        # API — TODO confirm against RestApi's request serialization.
        self.nick = None
        self.param_str = None

    def getapiname(self):
        # API method name used by the RestApi base when dispatching.
        return 'taobao.fuwu.sale.link.gen'
| {
"content_hash": "5a6a0c872697efd28cfbeaf14a998a28",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 55,
"avg_line_length": 27,
"alnum_prop": 0.6820987654320988,
"repo_name": "colaftc/webtool",
"id": "ad7beca886c5bcaae45a42718405a344aa4527f6",
"size": "324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "top/api/rest/FuwuSaleLinkGenRequest.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12208"
},
{
"name": "HTML",
"bytes": "16773"
},
{
"name": "JavaScript",
"bytes": "2571"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "258023"
},
{
"name": "Ruby",
"bytes": "861"
},
{
"name": "VimL",
"bytes": "401921"
}
],
"symlink_target": ""
} |
import sys
import unittest
import mykaze
_mysql = mykaze
from mykaze.constants import FIELD_TYPE
from mykaze.tests import base
class TestDBAPISet(unittest.TestCase):
    """Sanity checks for DB-API type-set comparison semantics.

    assertEqual/assertNotEqual use the same ``==``/``!=`` operators the old
    ``assertTrue(a == b)`` form used, but report both operands on failure.
    """

    def test_set_equality(self):
        self.assertEqual(mykaze.STRING, mykaze.STRING)

    def test_set_inequality(self):
        self.assertNotEqual(mykaze.STRING, mykaze.NUMBER)

    def test_set_equality_membership(self):
        # An individual field type code compares equal to its containing set.
        self.assertEqual(FIELD_TYPE.VAR_STRING, mykaze.STRING)

    def test_set_inequality_membership(self):
        self.assertNotEqual(FIELD_TYPE.DATE, mykaze.STRING)
class CoreModule(unittest.TestCase):
    """Core _mysql module features."""

    def test_NULL(self):
        """Should have a NULL constant."""
        self.assertEqual(_mysql.NULL, 'NULL')

    def test_version(self):
        """Version information sanity."""
        # assertIsInstance reports the actual type on failure, unlike
        # the weaker assertTrue(isinstance(...)) form.
        self.assertIsInstance(_mysql.__version__, str)
        self.assertIsInstance(_mysql.version_info, tuple)
        self.assertEqual(len(_mysql.version_info), 5)

    def test_client_info(self):
        self.assertIsInstance(_mysql.get_client_info(), str)

    def test_thread_safe(self):
        self.assertIsInstance(_mysql.thread_safe(), int)
class CoreAPI(unittest.TestCase):
    """Test _mysql interaction internals."""

    def setUp(self):
        # Connect using the first configured test database; kwargs are
        # copied so the shared config is not mutated.
        kwargs = base.PyMySQLTestCase.databases[0].copy()
        kwargs["read_default_file"] = "~/.my.cnf"
        self.conn = _mysql.connect(**kwargs)

    def tearDown(self):
        self.conn.close()

    def test_thread_id(self):
        tid = self.conn.thread_id()
        self.assertIsInstance(tid, int,
                              "thread_id didn't return an int.")
        # NOTE(review): both extra positionals are forwarded to thread_id()
        # itself; the call is expected to raise TypeError because of them.
        self.assertRaises(TypeError, self.conn.thread_id, ('evil',),
                          "thread_id shouldn't accept arguments.")

    def test_affected_rows(self):
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(self.conn.affected_rows(), 0,
                         "Should return 0 before we do anything.")

    #def test_debug(self):
        ## FIXME Only actually tests if you lack SUPER
        #self.assertRaises(mykaze.OperationalError,
                          #self.conn.dump_debug_info)

    def test_charset_name(self):
        self.assertIsInstance(self.conn.character_set_name(), str,
                              "Should return a string.")

    def test_host_info(self):
        self.assertIsInstance(self.conn.get_host_info(), str,
                              "Should return a string.")

    def test_proto_info(self):
        self.assertIsInstance(self.conn.get_proto_info(), int,
                              "Should return an int.")

    def test_server_info(self):
        if sys.version_info[0] == 2:
            # basestring only exists on Python 2.
            self.assertIsInstance(self.conn.get_server_info(), basestring,
                                  "Should return an str.")
        else:
            self.assertIsInstance(self.conn.get_server_info(), str,
                                  "Should return an str.")
if __name__ == "__main__":
    # Allow running this test module directly with `python <file>`.
    unittest.main()
| {
"content_hash": "b9e0e31627eb383b5e7744ea180ebfca",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 80,
"avg_line_length": 31.789473684210527,
"alnum_prop": 0.6089403973509934,
"repo_name": "methane/MyKaze",
"id": "30ee1a4e561bbfeb35708b411473a5f28246baa9",
"size": "3020",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mykaze/tests/thirdparty/test_MySQLdb/test_MySQLdb_nonstandard.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "186000"
}
],
"symlink_target": ""
} |
"""Resolvers for resource parameters."""
from googlecloudsdk.core import exceptions
class Error(exceptions.Error):
    """Base class for exceptions raised by the resolvers module."""
class InconsistentArgumentError(Error):
    """Raised when conflicting values are supplied for a single parameter."""

    def __init__(self, param, values):
        joined_values = ', '.join(values)
        message = 'got multiple values for [{param}]: {values}'.format(
            param=param, values=joined_values)
        super(InconsistentArgumentError, self).__init__(message)
class UnsetArgumentError(Error):
    """Raised when a required resource parameter was never specified."""

    def __init__(self, visible_name):
        message = 'resource is ambiguous, try specifying [{name}]'.format(
            name=visible_name)
        super(UnsetArgumentError, self).__init__(message)
def FromProperty(prop):
    """Get a default value from a property.

    Args:
      prop: properties._Property, The property to fetch.

    Returns:
      A niladic function that fetches the property.
    """
    def _Fetch():
        return prop.Get(required=True)
    return _Fetch
def FromArgument(visible_name, value):
    """Infer a parameter from a flag, or explain what's wrong.

    Args:
      visible_name: str, The flag as it would be typed by the user. eg, '--zone'.
      value: The value of that flag taken from the command-line arguments.

    Returns:
      A niladic function that returns the value.
    """
    def _Default():
        # Only an explicit None counts as unset; falsy values pass through.
        if value is not None:
            return value
        raise UnsetArgumentError(visible_name)
    return _Default
| {
"content_hash": "edc14b6c48a7f153cee6046835780cbb",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 79,
"avg_line_length": 24.654545454545456,
"alnum_prop": 0.6762536873156342,
"repo_name": "Sorsly/subtle",
"id": "6771a80ccafb9ef9cf4ea4336fbae54ce0e67021",
"size": "1952",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/lib/googlecloudsdk/core/resolvers.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1581"
},
{
"name": "CSS",
"bytes": "226"
},
{
"name": "HTML",
"bytes": "4637"
},
{
"name": "JavaScript",
"bytes": "3037"
},
{
"name": "PHP",
"bytes": "4543"
},
{
"name": "Pascal",
"bytes": "31"
},
{
"name": "Python",
"bytes": "13243860"
},
{
"name": "Roff",
"bytes": "1050600"
},
{
"name": "Shell",
"bytes": "16136"
},
{
"name": "Smarty",
"bytes": "2484"
},
{
"name": "SourcePawn",
"bytes": "308"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django import forms
from django.forms.models import ModelForm
from .models import AceEditorPluginModel
from django.utils.translation import ugettext_lazy as _
class AceEditorPluginAdminForm(ModelForm):
    """Admin form for the Ace editor plugin model.

    The previous ``__init__`` override only called ``super().__init__``
    with unchanged arguments, so it has been removed as a no-op.
    """

    class Meta:
        model = AceEditorPluginModel
        # Empty exclude -> expose every model field on the form.
        exclude = ()
| {
"content_hash": "18ac83af83322285b53bdd299a6ae173",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 71,
"avg_line_length": 24.5,
"alnum_prop": 0.7188208616780045,
"repo_name": "TigerND/djangocms-ace",
"id": "8e32339c6c6f196533a726ccbe4f212a3be5b7c3",
"size": "465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangocms_ace/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1284"
},
{
"name": "JavaScript",
"bytes": "876"
},
{
"name": "Python",
"bytes": "13488"
}
],
"symlink_target": ""
} |
"""File downloading functions."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import os
import shutil
import time
from .progressbar import ProgressBar
from .numerics import hashfunc
from .misc import sizeof_fmt
from ._logging import logger, verbose
# Adapted from nilearn
def _get_http(url, temp_file_name, initial_size, timeout, verbose_bool):
    """Safely (resume a) download to a file from http(s).

    Parameters
    ----------
    url : str
        Source URL (http or https).
    temp_file_name : str
        Destination file; appended to when resuming.
    initial_size : int
        Number of bytes already present locally (0 disables resuming).
    timeout : float
        Timeout passed to ``urlopen``.
    verbose_bool : bool
        NOTE(review): currently unused by this function; kept for the
        caller's signature — confirm before removing.
    """
    from urllib import request
    from urllib.error import HTTPError, URLError
    # Actually do the reading
    response = None
    extra = ''
    if initial_size > 0:
        # Ask the server for the remaining byte range only.
        logger.debug('  Resuming at %s' % (initial_size,))
        req = request.Request(
            url, headers={'Range': 'bytes=%s-' % (initial_size,)})
        try:
            response = request.urlopen(req, timeout=timeout)
            content_range = response.info().get('Content-Range', None)
            if (content_range is None or not content_range.startswith(
                    'bytes %s-' % (initial_size,))):
                raise IOError('Server does not support resuming')
        except (KeyError, HTTPError, URLError, IOError):
            # Fall back to a full download if range requests fail.
            initial_size = 0
            response = None
        else:
            extra = ', resuming at %s' % (sizeof_fmt(initial_size),)
    if response is None:
        response = request.urlopen(request.Request(url), timeout=timeout)
    # Content-Length covers only the remaining bytes when resuming, so add
    # what we already have to report the full size.
    file_size = int(response.headers.get('Content-Length', '0').strip())
    file_size += initial_size
    url = response.geturl()
    logger.info('Downloading %s (%s%s)' % (url, sizeof_fmt(file_size), extra))
    del url
    # Append when resuming, otherwise truncate.
    mode = 'ab' if initial_size > 0 else 'wb'
    progress = ProgressBar(file_size, initial_size, unit='B',
                           mesg='Downloading', unit_scale=True,
                           unit_divisor=1024)
    del file_size
    chunk_size = 8192  # 2 ** 13
    with open(temp_file_name, mode) as local_file:
        while True:
            t0 = time.time()
            chunk = response.read(chunk_size)
            dt = time.time() - t0
            # Adapt the chunk size to the observed read latency: grow on
            # fast reads, shrink (never below 8192) on slow ones.
            if dt < 0.01:
                chunk_size *= 2
            elif dt > 0.1 and chunk_size > 8192:
                chunk_size = chunk_size // 2
            if not chunk:
                break
            local_file.write(chunk)
            progress.update_with_increment_value(len(chunk))
@verbose
def _fetch_file(url, file_name, print_destination=True, resume=True,
                hash_=None, timeout=30., hash_type='md5', verbose=None):
    """Load requested file, downloading it if needed or requested.

    Parameters
    ----------
    url: string
        The url of file to be downloaded.
    file_name: string
        Name, along with the path, of where downloaded file will be saved.
    print_destination: bool, optional
        If true, destination of where file was saved will be printed after
        download finishes.
    resume: bool, optional
        If true, try to resume partially downloaded files.
    hash_ : str | None
        The hash of the file to check. If None, no checking is
        performed.
    timeout : float
        The URL open timeout.
    hash_type : str
        The type of hashing to use such as "md5" or "sha1"
    %(verbose)s
    """
    # Adapted from NISL:
    # https://github.com/nisl/tutorial/blob/master/nisl/datasets.py
    from urllib import parse
    # MD5 digests are exactly 32 hex characters; reject anything else early.
    if hash_ is not None and (not isinstance(hash_, str) or
                              len(hash_) != 32) and hash_type == 'md5':
        raise ValueError('Bad hash value given, should be a 32-character '
                         'string:\n%s' % (hash_,))
    # Download into a ".part" file and rename only after verification.
    temp_file_name = file_name + ".part"
    verbose_bool = (logger.level <= 20)  # 20 is info
    scheme = parse.urlparse(url).scheme
    if scheme not in ('http', 'https'):
        raise NotImplementedError('Cannot use scheme %r' % (scheme,))
    try:
        # Triage resume
        if not os.path.exists(temp_file_name):
            resume = False
        if resume:
            # Seek to the end of the partial file to find how much we have.
            with open(temp_file_name, 'rb', buffering=0) as local_file:
                local_file.seek(0, 2)
                initial_size = local_file.tell()
            del local_file
        else:
            initial_size = 0
        _get_http(url, temp_file_name, initial_size, timeout, verbose_bool)
        # check hash sum eg md5sum
        if hash_ is not None:
            logger.info('Verifying hash %s.' % (hash_,))
            hashsum = hashfunc(temp_file_name, hash_type=hash_type)
            if hash_ != hashsum:
                raise RuntimeError('Hash mismatch for downloaded file %s, '
                                   'expected %s but got %s'
                                   % (temp_file_name, hash_, hashsum))
        # Atomically promote the verified ".part" file to its final name.
        shutil.move(temp_file_name, file_name)
        if print_destination is True:
            logger.info('File saved as %s.\n' % file_name)
    except Exception:
        # Log the failure but re-raise so callers can handle/abort.
        logger.error('Error while fetching file %s.'
                     ' Dataset fetching aborted.' % url)
        raise
def _url_to_local_path(url, path):
"""Mirror a url path in a local destination (keeping folder structure)."""
from urllib import parse, request
destination = parse.urlparse(url).path
# First char should be '/', and it needs to be discarded
if len(destination) < 2 or destination[0] != '/':
raise ValueError('Invalid URL')
destination = os.path.join(path, request.url2pathname(destination)[1:])
return destination
| {
"content_hash": "0d47489d3df89c8472b88410ba28e0e3",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 78,
"avg_line_length": 38.19444444444444,
"alnum_prop": 0.586,
"repo_name": "kambysese/mne-python",
"id": "e1f7b9b86b1935558132a40bdfc1303b7b1f4a5e",
"size": "5524",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mne/utils/fetching.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "69806"
},
{
"name": "Makefile",
"bytes": "3912"
},
{
"name": "Python",
"bytes": "5978369"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
from feed import Feed
from live import LiveFeedWidget
| {
"content_hash": "a1db1d94aac1f28e2545837b75756a7a",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 31,
"avg_line_length": 27,
"alnum_prop": 0.8518518518518519,
"repo_name": "mokshaproject/moksha",
"id": "4b550c42804c1b0602c80924713b41d1003a2b11",
"size": "672",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "moksha.feeds/moksha/feeds/widgets/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21145"
},
{
"name": "HTML",
"bytes": "11953"
},
{
"name": "JavaScript",
"bytes": "590544"
},
{
"name": "Makefile",
"bytes": "18249"
},
{
"name": "Python",
"bytes": "257694"
},
{
"name": "Shell",
"bytes": "4088"
}
],
"symlink_target": ""
} |
import factory
from django.contrib.auth.hashers import make_password
from resrc.userprofile.models import Profile
class UserF(factory.django.DjangoModelFactory):
    # Factory producing Profile instances with unique sequential fields.
    # NOTE(review): FACTORY_FOR is the legacy (pre-2.0 factory_boy)
    # declaration; newer releases use ``class Meta: model`` — confirm the
    # pinned factory_boy version before modernizing.
    FACTORY_FOR = Profile
    first_name = factory.Sequence(lambda n: "First%s" % n)
    last_name = factory.Sequence(lambda n: "Last%s" % n)
    email = factory.Sequence(lambda n: "email%s@example.com" % n)
    # NOTE(review): username intentionally(?) mirrors the email sequence —
    # verify the auth backend expects email-style usernames.
    username = factory.Sequence(lambda n: "email%s@example.com" % n)
    # Hash is computed once at class-definition time and shared by all
    # created users; the plaintext is always "password".
    password = make_password("password")
    show_email = False
    is_staff = False
| {
"content_hash": "c50f8a5ff7b77a32193d027aebf9d086",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 66,
"avg_line_length": 36.42857142857143,
"alnum_prop": 0.7352941176470589,
"repo_name": "sergiolimajr/resrc",
"id": "ed95abb0ccde7c1ffea19d82f0c1c1ad91e4fae2",
"size": "510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resrc/userprofile/tests/factories.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""Module that provides the default converters and converter registry.
Converters will be auto registered in the ConverterRegistry like Django Model.
If you haven't specify the name attribute in Meta class, registry will auto use the class name.
Example:
class ConverterExample(BaseConverter):
@staticmethod
def convert(key, string):
pass
class Meta:
name = ('example', )
But if you can't auto import the converter class, you can register the converter manually.
Example:
ConverterExample.register()
ConverterExample.register('example')
"""
import six
from .validators import IntegerValidator, NumericValidator
class ConverterRegistry(object):
    """
    Registry for all converters.
    """
    _registry = {}

    @classmethod
    def register(cls, name, _class):
        """Register Converter in ConverterRegistry.

        Args:
            name (str, iterable): Register key or name tuple.
            _class (BaseConverter): Converter class.
        """
        # Normalize a single key into a one-element tuple so both cases
        # share one registration loop.
        names = name if isinstance(name, (tuple, set, list)) else (name,)
        for single_name in names:
            cls._registry[single_name] = _class

    @classmethod
    def get(cls, name):
        # Unknown names fall back to the pass-through string converter.
        return cls._registry.get(name, StringConverter)
class ConverterMetaClass(type):
    """
    Metaclass for all Converters.
    """

    def __new__(cls, name, bases, attributes):
        new_class = super(ConverterMetaClass, cls).__new__(
            cls, name, bases, attributes)
        meta = attributes.pop('Meta', None)
        # Concrete converter classes self-register on creation; classes
        # whose Meta declares abstract=True are skipped.
        if not getattr(meta, 'abstract', False):
            new_class.register()
        return new_class
class BaseConverter(six.with_metaclass(ConverterMetaClass)):
    """
    Abstract super class for all converters.
    """

    @staticmethod
    def convert(key, string):
        # Subclasses must implement the actual conversion.
        raise NotImplementedError

    @classmethod
    def register(cls, name=None):
        """Register this converter to registry.

        Attributes:
            name (Optional[str, iterable]): Name that used to register in registry.
                Defaults to the name in Meta class.
        """
        if name is None:
            meta = getattr(cls, 'Meta', None)
            name = getattr(meta, 'name', cls.__name__)
        ConverterRegistry.register(name, cls)

    class Meta:
        """Meta class of Converter

        Attributes:
            abstract (bool): Class will not auto register if this attribute is True.
            name (Optional[str, iterable]): Name that used to auto register in registry.
        """
        abstract = True
class StringConverter(BaseConverter):
    """
    Pass-through converter: the raw value is returned unchanged.
    """

    @staticmethod
    def convert(key, string):
        return string

    class Meta:
        name = ('string', 'str')
class IntegerConverter(BaseConverter):
    """
    Convert the value to an integer value.
    """
    integer_validator = IntegerValidator()

    @staticmethod
    def convert(key, string):
        # None passes through; anything else is validated (so errors carry
        # the parameter key) and then converted.
        if string is not None:
            IntegerConverter.integer_validator(key, {key: string})
            return int(string)
        return None

    class Meta:
        name = ('integer', 'int')
class FloatConverter(BaseConverter):
    """
    Convert the value to a float value.
    """
    numeric_validator = NumericValidator()

    @staticmethod
    def convert(key, string):
        # None passes through; anything else is validated and converted.
        if string is not None:
            FloatConverter.numeric_validator(key, {key: string})
            return float(string)
        return None

    class Meta:
        name = 'float'
class BooleanConverter(BaseConverter):
    """
    Convert the value to a boolean value.
    """
    # Membership set of explicitly falsy inputs. Note False == 0 inside a
    # set, so those two collapse into a single element; the string forms
    # '0', 'false' and 'False' are listed separately.
    false_values = {None, False, 'false', 'False', 0, '0'}

    @staticmethod
    def convert(key, string):
        # Anything outside the falsy set counts as True.
        return string not in BooleanConverter.false_values

    class Meta:
        name = ('boolean', 'bool')
class FileConverter(BaseConverter):
    """
    Pass the uploaded file object through unchanged.
    """

    @staticmethod
    def convert(key, value):
        return value

    class Meta:
        name = ('file',)
| {
"content_hash": "71a4f2f920407561ab385d2d5de94345",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 95,
"avg_line_length": 24.523255813953487,
"alnum_prop": 0.6154575628259839,
"repo_name": "romain-li/django-validator",
"id": "21784d83c35312880aa1953d707556f26df7f040",
"size": "4218",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_validator/converters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44364"
},
{
"name": "Shell",
"bytes": "198"
}
],
"symlink_target": ""
} |
import os
import sys
import time
import traceback
from _pydev_bundle import pydev_imports
from _pydevd_bundle.pydevd_utils import save_main_module
from socket import AF_INET
from socket import SOCK_STREAM
from socket import socket
from _prof_imports import ProfilerResponse
from prof_io import ProfWriter, ProfReader
from prof_util import generate_snapshot_filepath, statsToResponse
# Snapshot destination directory supplied by the IDE environment.
base_snapshot_path = os.getenv('PYCHARM_SNAPSHOT_PATH')
# Any non-empty value means the profiler runs on a remote interpreter.
remote_run = bool(os.getenv('PYCHARM_REMOTE_RUN', ''))
def StartClient(host, port):
    """Connect to host/port, retrying for ~20s; exit the process on failure.

    Returns the connected socket. On exhaustion of MAX_TRIES the error is
    written to stderr and the process exits with status 1.
    """
    s = socket(AF_INET, SOCK_STREAM)

    MAX_TRIES = 100
    i = 0
    while i < MAX_TRIES:
        try:
            s.connect((host, port))
        # Was a bare ``except:``, which also swallowed KeyboardInterrupt /
        # SystemExit. socket.error is an IOError subclass on Python 2 and
        # an OSError alias on Python 3, so this covers both.
        except (IOError, OSError):
            i += 1
            time.sleep(0.2)
            continue
        return s

    sys.stderr.write("Could not connect to %s: %s\n" % (host, port))
    sys.stderr.flush()
    traceback.print_exc()
    sys.exit(1)  # TODO: is it safe?
class Profiler(object):
    """Remote-controlled profiler: runs a script under yappi or cProfile
    and exchanges snapshots with the IDE over a socket."""

    def __init__(self):
        # Prefer yappi when available; fall back to stdlib cProfile.
        try:
            import yappi_profiler
            self.profiling_backend = yappi_profiler.YappiProfile()
            print('Starting yappi profiler\n')
        except ImportError:
            import cProfile
            self.profiling_backend = cProfile.Profile()
            print('Starting cProfile profiler\n')

    def connect(self, host, port):
        # Open the control connection to the IDE and start reader/writer.
        s = StartClient(host, port)
        self.initializeNetwork(s)

    def initializeNetwork(self, sock):
        try:
            sock.settimeout(None)  # infinite, no timeouts from now on - jython does not have it
        except:
            pass
        self.writer = ProfWriter(sock)
        self.reader = ProfReader(sock, self)
        self.reader.start()
        time.sleep(0.1)  # give threads time to start

    def process(self, message):
        # Dispatch a command received from the IDE; only snapshot-save
        # requests are currently understood.
        if hasattr(message, 'save_snapshot'):
            self.save_snapshot(message.id, generate_snapshot_filepath(message.save_snapshot.filepath, remote_run), remote_run)
        else:
            raise AssertionError("Unknown request %s" % dir(message))

    def run(self, file):
        # Execute ``file`` as __main__ under the profiler; a final snapshot
        # is always saved, even if the script raises.
        m = save_main_module(file, 'run_profiler')
        globals = m.__dict__
        try:
            globals['__builtins__'] = __builtins__
        except NameError:
            pass  # Not there on Jython...
        self.start_profiling()
        try:
            pydev_imports.execfile(file, globals, globals)  # execute the script
        finally:
            self.stop_profiling()
            self.save_snapshot(0, generate_snapshot_filepath(base_snapshot_path, remote_run), remote_run)

    def start_profiling(self):
        self.profiling_backend.enable()

    def stop_profiling(self):
        self.profiling_backend.disable()

    def get_snapshot(self):
        self.profiling_backend.create_stats()
        return self.profiling_backend.stats

    def dump_snapshot(self, filename):
        # Create the target directory if necessary before dumping stats.
        dir = os.path.dirname(filename)
        if not os.path.exists(dir):
            os.makedirs(dir)
        self.profiling_backend.dump_stats(filename)
        return filename

    def save_snapshot(self, id, filename, send_stat=False):
        # Profiling is paused while the snapshot is serialized, then
        # resumed at the end.
        self.stop_profiling()
        if filename is not None:
            filename = self.dump_snapshot(filename)
            print('Snapshot saved to %s' % filename)
        if not send_stat:
            # Local mode: the IDE reads the snapshot file directly.
            response = ProfilerResponse(id=id, snapshot_filepath=filename)
        else:
            # Remote mode: serialize the stats into the response itself.
            response = ProfilerResponse(id=id)
            statsToResponse(self.get_snapshot(), response)
        self.writer.addCommand(response)
        self.start_profiling()
if __name__ == '__main__':
    # Command line: run_profiler.py <host> <port> <script-to-profile> [args...]
    host = sys.argv[1]
    port = int(sys.argv[2])
    file = sys.argv[3]

    # Drop our own three arguments so the profiled script sees its own argv.
    del sys.argv[0]
    del sys.argv[0]
    del sys.argv[0]

    profiler = Profiler()
    try:
        profiler.connect(host, port)
    except:
        sys.stderr.write("Could not connect to %s: %s\n" % (host, port))
        traceback.print_exc()
        sys.exit(1)

    # add file path to sys.path
    sys.path.insert(0, os.path.split(file)[0])
    profiler.run(file)
| {
"content_hash": "8d321131f32b6983653823556dc43e4f",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 126,
"avg_line_length": 28.220689655172414,
"alnum_prop": 0.6111925708699902,
"repo_name": "Soya93/Extract-Refactoring",
"id": "8c2ed576f815fda3fc9a562f726d3e4e33e32e04",
"size": "4092",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "python/helpers/profiler/run_profiler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AMPL",
"bytes": "20665"
},
{
"name": "AspectJ",
"bytes": "182"
},
{
"name": "Batchfile",
"bytes": "63896"
},
{
"name": "C",
"bytes": "214817"
},
{
"name": "C#",
"bytes": "1538"
},
{
"name": "C++",
"bytes": "191650"
},
{
"name": "CSS",
"bytes": "189895"
},
{
"name": "CoffeeScript",
"bytes": "1759"
},
{
"name": "Cucumber",
"bytes": "14382"
},
{
"name": "Erlang",
"bytes": "10"
},
{
"name": "Groff",
"bytes": "35232"
},
{
"name": "Groovy",
"bytes": "2436826"
},
{
"name": "HLSL",
"bytes": "57"
},
{
"name": "HTML",
"bytes": "1803098"
},
{
"name": "J",
"bytes": "5050"
},
{
"name": "Java",
"bytes": "154798881"
},
{
"name": "JavaScript",
"bytes": "562223"
},
{
"name": "Jupyter Notebook",
"bytes": "92629"
},
{
"name": "Kotlin",
"bytes": "1430452"
},
{
"name": "Lex",
"bytes": "179878"
},
{
"name": "Makefile",
"bytes": "2352"
},
{
"name": "NSIS",
"bytes": "53411"
},
{
"name": "Objective-C",
"bytes": "29064"
},
{
"name": "Perl",
"bytes": "903"
},
{
"name": "Perl6",
"bytes": "26"
},
{
"name": "Protocol Buffer",
"bytes": "6570"
},
{
"name": "Python",
"bytes": "23314398"
},
{
"name": "Ruby",
"bytes": "1213"
},
{
"name": "Scala",
"bytes": "11698"
},
{
"name": "Shell",
"bytes": "68088"
},
{
"name": "Smalltalk",
"bytes": "64"
},
{
"name": "TeX",
"bytes": "62325"
},
{
"name": "TypeScript",
"bytes": "9469"
},
{
"name": "XSLT",
"bytes": "113040"
}
],
"symlink_target": ""
} |
"""
This is an example settings/local.py file.
These settings overrides what's in settings/base.py
"""
import logging
# To extend any settings from settings/base.py here's an example:
#from . import base
#INSTALLED_APPS = base.INSTALLED_APPS + ['debug_toolbar']
# SQLite keeps local development self-contained; swap ENGINE/NAME (and the
# commented OPTIONS) for MySQL/Postgres as needed.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'db/development.sqlite3',
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
        #'OPTIONS': {
        #    'init_command': 'SET storage_engine=InnoDB',
        #    'charset' : 'utf8',
        #    'use_unicode' : True,
        #},
        #'TEST_CHARSET': 'utf8',
        #'TEST_COLLATION': 'utf8_general_ci',
    },
    # 'slave': {
    #   ...
    # },
}

# Uncomment this and set to all slave DBs in use on the site.
# SLAVE_DATABASES = ['slave']

# Recipients of traceback emails and other notifications.
ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'

# Debugging displays nice error messages, but leaks memory. Set this to False
# on all server instances and True only for development.
DEBUG = TEMPLATE_DEBUG = True

# Is this a development instance? Set this to True on development/master
# instances and False on stage/prod.
DEV = True

# Make this unique, and don't share it with anybody. It cannot be blank.
SECRET_KEY = '#v#d*_b!wjst89rsvn*n8@f8&ninuy4&v7jw=_!@5c9&i1$!@t'

# Uncomment these to activate and customize Celery:
# CELERY_ALWAYS_EAGER = False  # required to activate celeryd
# BROKER_HOST = 'localhost'
# BROKER_PORT = 5672
# BROKER_USER = 'django'
# BROKER_PASSWORD = 'django'
# BROKER_VHOST = 'django'
# CELERY_RESULT_BACKEND = 'amqp'

## Log settings
LOG_LEVEL = logging.INFO
HAS_SYSLOG = True
SYSLOG_TAG = "http_app_wtfhack"  # Make this unique to your project.

# Remove this configuration variable to use your custom logging configuration
LOGGING_CONFIG = None
# Minimal dictConfig: only the project logger's level is pinned to DEBUG.
LOGGING = {
    'version': 1,
    'loggers': {
        'wtfhack': {
            'level': "DEBUG"
        }
    }
}
# Common Event Format logging parameters
#CEF_PRODUCT = 'wtfhack'
#CEF_VENDOR = 'Your Company'
#CEF_VERSION = '0'
#CEF_DEVICE_VERSION = '0'
# Must be a tuple/list of addresses. The original ('127.0.0.1') was a plain
# string due to the missing trailing comma, so membership tests matched any
# substring of it (e.g. '7.0.0' in INTERNAL_IPS was True).
INTERNAL_IPS = ('127.0.0.1',)
# Enable these options for memcached
#CACHE_BACKEND= "memcached://127.0.0.1:11211/"
#CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True
# Set this to true if you use a proxy that sets X-Forwarded-Host
#USE_X_FORWARDED_HOST = False
# Sender addresses for error mail and default outgoing mail, plus the
# subject-line prefix used by django.core.mail notifications.
SERVER_EMAIL = "webmaster@example.com"
DEFAULT_FROM_EMAIL = "webmaster@example.com"
SYSTEM_EMAIL_PREFIX = "[wtfhack]"
| {
"content_hash": "60431ce05500e4e4cb714cb8d9f40b04",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 77,
"avg_line_length": 28.225490196078432,
"alnum_prop": 0.6634248002778743,
"repo_name": "sloria/wtfhack",
"id": "b1c9cf7e14171d84f0916275e24b91ec73e2f0b6",
"size": "2879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wtfhack/settings/local-dist.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "575712"
},
{
"name": "PHP",
"bytes": "285"
},
{
"name": "Python",
"bytes": "44966"
},
{
"name": "Ruby",
"bytes": "1462"
},
{
"name": "Shell",
"bytes": "2686"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
import unittest
import set_test_path
from weave_wdm_next_test_base import weave_wdm_next_test_base
import WeaveUtilities
class test_weave_wdm_next_mutual_subscribe_40(weave_wdm_next_test_base):
    """WDM Next test F32: mutual subscribe, root path, flips on both sides,
    publisher in the responder aborts the subscription."""

    def test_weave_wdm_next_mutual_subscribe_40(self):
        wdm_next_args = {}

        wdm_next_args['wdm_option'] = "mutual_subscribe"

        wdm_next_args['total_client_count'] = 2
        wdm_next_args['final_client_status'] = 4
        wdm_next_args['timer_client_period'] = 4000
        wdm_next_args['test_client_iterations'] = 1
        wdm_next_args['test_client_delay'] = 15000
        wdm_next_args['enable_client_flip'] = 1

        wdm_next_args['total_server_count'] = 2
        wdm_next_args['final_server_status'] = 3
        wdm_next_args['timer_server_period'] = 5000
        wdm_next_args['enable_server_flip'] = 1

        # Hoist the repeated dict lookups used in expected log counts.
        iterations = wdm_next_args['test_client_iterations']
        client_notifications = iterations * (wdm_next_args['total_server_count'] + 1)
        server_notifications = iterations * (wdm_next_args['total_client_count'] + 1)

        wdm_next_args['client_log_check'] = [
            ('Handler\[0\] \[(ALIVE|CONFM)\] bound mutual subscription is going away', iterations),
            ('Handler\[0\] \[(ALIVE|CONFM)\] TerminateSubscription ', iterations),
            ('Client->kEvent_OnNotificationProcessed', client_notifications),
            ('Client\[0\] moving to \[ FREE\] Ref\(0\)', iterations),
            ('Handler\[0\] Moving to \[ FREE\] Ref\(0\)', iterations)]
        wdm_next_args['server_log_check'] = [
            ('Client\[0\] \[(ALIVE|CONFM)\] bound mutual subscription is going away', iterations),
            ('Client->kEvent_OnNotificationProcessed', server_notifications),
            ('Handler\[0\] \[(ALIVE|CONFM)\] AbortSubscription Ref\(\d+\)', iterations),
            ('Client\[0\] moving to \[ FREE\] Ref\(0\)', iterations),
            ('Handler\[0\] Moving to \[ FREE\] Ref\(0\)', iterations)]

        # Strip the 'test_weave_wdm_next' prefix (19 chars) to build the tag.
        wdm_next_args['test_tag'] = self.__class__.__name__[19:].upper()
        wdm_next_args['test_case_name'] = ['F32: Mutual Subscribe: Root path. Null Version. Mutate data in initiator and responder. Publisher in responder aborts']

        print('test file: ' + self.__class__.__name__)
        print("weave-wdm-next test F32")
        super(test_weave_wdm_next_mutual_subscribe_40, self).weave_wdm_next_test_base(wdm_next_args)
if __name__ == "__main__":
    # Run through the shared Weave harness when invoked directly.
    WeaveUtilities.run_unittest()
| {
"content_hash": "d4576e853cf23aaa5a83378a13e4c223",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 173,
"avg_line_length": 60.625,
"alnum_prop": 0.5762886597938144,
"repo_name": "openweave/openweave-core",
"id": "1d270389293a3b4776beb9edccc810ae35af3ad5",
"size": "3782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/test-apps/happy/tests/standalone/wdmNext/test_weave_wdm_next_mutual_subscribe_40.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "664311"
},
{
"name": "C++",
"bytes": "13369518"
},
{
"name": "Java",
"bytes": "300780"
},
{
"name": "M4",
"bytes": "115889"
},
{
"name": "Makefile",
"bytes": "354863"
},
{
"name": "Objective-C",
"bytes": "126850"
},
{
"name": "Objective-C++",
"bytes": "302756"
},
{
"name": "Perl",
"bytes": "12136"
},
{
"name": "Python",
"bytes": "2029596"
},
{
"name": "Shell",
"bytes": "122005"
}
],
"symlink_target": ""
} |
"""
Files Pipeline
See documentation in topics/media-pipeline.rst
"""
import hashlib
import os
import os.path
import time
import logging
from email.utils import parsedate_tz, mktime_tz
from six.moves.urllib.parse import urlparse
from collections import defaultdict
import six
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
from twisted.internet import defer, threads
from scrapy.pipelines.media import MediaPipeline
from scrapy.exceptions import NotConfigured, IgnoreRequest
from scrapy.http import Request
from scrapy.utils.misc import md5sum
from scrapy.utils.log import failure_to_exc_info
logger = logging.getLogger(__name__)
class FileException(Exception):
    """General media error exception raised by the files pipeline."""
class FSFilesStore(object):
    """Store downloaded files on the local filesystem under ``basedir``."""

    def __init__(self, basedir):
        # Accept URI-style paths ("file:///tmp/store") by stripping the scheme.
        if '://' in basedir:
            basedir = basedir.split('://', 1)[1]
        self.basedir = basedir
        self._mkdir(self.basedir)
        # Per-domain cache of directories already created, to skip repeated
        # os.path.exists checks.
        self.created_directories = defaultdict(set)

    def persist_file(self, path, buf, info, meta=None, headers=None):
        """Write ``buf`` to ``path`` under the store root (meta/headers unused)."""
        absolute_path = self._get_filesystem_path(path)
        self._mkdir(os.path.dirname(absolute_path), info)
        with open(absolute_path, 'wb') as f:
            f.write(buf.getvalue())

    def stat_file(self, path, info):
        """Return mtime and md5 checksum for ``path``, or {} if unreadable."""
        absolute_path = self._get_filesystem_path(path)
        try:
            last_modified = os.path.getmtime(absolute_path)
        except OSError:
            # Was a bare except (marked FIXME); getmtime raises OSError for
            # missing/inaccessible paths, which is the case we handle here.
            return {}

        with open(absolute_path, 'rb') as f:
            checksum = md5sum(f)

        return {'last_modified': last_modified, 'checksum': checksum}

    def _get_filesystem_path(self, path):
        # Store paths always use '/' separators; translate to the OS form.
        path_comps = path.split('/')
        return os.path.join(self.basedir, *path_comps)

    def _mkdir(self, dirname, domain=None):
        seen = self.created_directories[domain] if domain else set()
        if dirname not in seen:
            if not os.path.exists(dirname):
                os.makedirs(dirname)
            seen.add(dirname)
class S3FilesStore(object):
    """Store files in an S3 bucket via boto, doing I/O in threads."""

    # Credentials are injected by FilesPipeline.from_settings.
    AWS_ACCESS_KEY_ID = None
    AWS_SECRET_ACCESS_KEY = None

    POLICY = 'public-read'
    # Default headers applied to every upload (two-day client cache).
    HEADERS = {
        'Cache-Control': 'max-age=172800',
    }

    def __init__(self, uri):
        assert uri.startswith('s3://')
        # Split "s3://bucket/prefix" into its bucket and key-prefix parts.
        self.bucket, self.prefix = uri[5:].split('/', 1)

    def stat_file(self, path, info):
        # Returns a Deferred firing with checksum + last-modified stamp.
        def _onsuccess(boto_key):
            checksum = boto_key.etag.strip('"')
            last_modified = boto_key.last_modified
            modified_tuple = parsedate_tz(last_modified)
            modified_stamp = int(mktime_tz(modified_tuple))
            return {'checksum': checksum, 'last_modified': modified_stamp}

        return self._get_boto_key(path).addCallback(_onsuccess)

    def _get_boto_bucket(self):
        from boto.s3.connection import S3Connection
        # disable ssl (is_secure=False) because of this python bug:
        # http://bugs.python.org/issue5103
        c = S3Connection(self.AWS_ACCESS_KEY_ID, self.AWS_SECRET_ACCESS_KEY, is_secure=False)
        return c.get_bucket(self.bucket, validate=False)

    def _get_boto_key(self, path):
        b = self._get_boto_bucket()
        key_name = '%s%s' % (self.prefix, path)
        # Blocking boto call moved off the reactor thread.
        return threads.deferToThread(b.get_key, key_name)

    def persist_file(self, path, buf, info, meta=None, headers=None):
        """Upload file to S3 storage"""
        b = self._get_boto_bucket()
        key_name = '%s%s' % (self.prefix, path)
        k = b.new_key(key_name)
        if meta:
            for metakey, metavalue in six.iteritems(meta):
                k.set_metadata(metakey, str(metavalue))
        h = self.HEADERS.copy()
        if headers:
            h.update(headers)
        # Rewind the buffer in case a previous consumer read it.
        buf.seek(0)
        return threads.deferToThread(k.set_contents_from_string, buf.getvalue(),
                                     headers=h, policy=self.POLICY)
class FilesPipeline(MediaPipeline):
    """Abstract pipeline that implement the file downloading
    This pipeline tries to minimize network transfers and file processing,
    doing stat of the files and determining if file is new, uptodate or
    expired.
    `new` files are those that pipeline never processed and needs to be
    downloaded from supplier site the first time.
    `uptodate` files are the ones that the pipeline processed and are still
    valid files.
    `expired` files are those that pipeline already processed but the last
    modification was made long time ago, so a reprocessing is recommended to
    refresh it in case of change.
    """
    MEDIA_NAME = "file"
    # Files older than this many days are re-downloaded (see media_to_download).
    EXPIRES = 90
    # Maps store-URI schemes to storage backends; '' handles bare paths.
    STORE_SCHEMES = {
        '': FSFilesStore,
        'file': FSFilesStore,
        's3': S3FilesStore,
    }
    DEFAULT_FILES_URLS_FIELD = 'file_urls'
    DEFAULT_FILES_RESULT_FIELD = 'files'
    def __init__(self, store_uri, download_func=None):
        # A store URI is mandatory; without it the pipeline is disabled.
        if not store_uri:
            raise NotConfigured
        self.store = self._get_store(store_uri)
        super(FilesPipeline, self).__init__(download_func=download_func)
    @classmethod
    def from_settings(cls, settings):
        # Note: credentials and field names are set on the CLASS, so they are
        # shared by all instances created from settings.
        s3store = cls.STORE_SCHEMES['s3']
        s3store.AWS_ACCESS_KEY_ID = settings['AWS_ACCESS_KEY_ID']
        s3store.AWS_SECRET_ACCESS_KEY = settings['AWS_SECRET_ACCESS_KEY']
        cls.FILES_URLS_FIELD = settings.get('FILES_URLS_FIELD', cls.DEFAULT_FILES_URLS_FIELD)
        cls.FILES_RESULT_FIELD = settings.get('FILES_RESULT_FIELD', cls.DEFAULT_FILES_RESULT_FIELD)
        cls.EXPIRES = settings.getint('FILES_EXPIRES', 90)
        store_uri = settings['FILES_STORE']
        return cls(store_uri)
    def _get_store(self, uri):
        # Pick a storage backend class from the URI scheme.
        if os.path.isabs(uri):  # to support win32 paths like: C:\\some\dir
            scheme = 'file'
        else:
            scheme = urlparse(uri).scheme
        store_cls = self.STORE_SCHEMES[scheme]
        return store_cls(uri)
    def media_to_download(self, request, info):
        """Return a deferred firing with cached file info, or None to force
        a fresh download (missing, stat failed, or older than EXPIRES days)."""
        def _onsuccess(result):
            if not result:
                return  # returning None force download
            last_modified = result.get('last_modified', None)
            if not last_modified:
                return  # returning None force download
            age_seconds = time.time() - last_modified
            age_days = age_seconds / 60 / 60 / 24
            if age_days > self.EXPIRES:
                return  # returning None force download
            referer = request.headers.get('Referer')
            logger.debug(
                'File (uptodate): Downloaded %(medianame)s from %(request)s '
                'referred in <%(referer)s>',
                {'medianame': self.MEDIA_NAME, 'request': request,
                 'referer': referer},
                extra={'spider': info.spider}
            )
            self.inc_stats(info.spider, 'uptodate')
            checksum = result.get('checksum', None)
            # `path` is the closure variable bound below, after the deferred
            # is constructed but before any callback can run.
            return {'url': request.url, 'path': path, 'checksum': checksum}
        path = self.file_path(request, info=info)
        dfd = defer.maybeDeferred(self.store.stat_file, path, info)
        # Any stat failure is swallowed (lambda _: None) so the file is
        # simply re-downloaded; the errback below only logs.
        dfd.addCallbacks(_onsuccess, lambda _: None)
        dfd.addErrback(
            lambda f:
            logger.error(self.__class__.__name__ + '.store.stat_file',
                         exc_info=failure_to_exc_info(f),
                         extra={'spider': info.spider})
        )
        return dfd
    def media_failed(self, failure, request, info):
        # IgnoreRequest failures are expected and not worth a warning.
        if not isinstance(failure.value, IgnoreRequest):
            referer = request.headers.get('Referer')
            logger.warning(
                'File (unknown-error): Error downloading %(medianame)s from '
                '%(request)s referred in <%(referer)s>: %(exception)s',
                {'medianame': self.MEDIA_NAME, 'request': request,
                 'referer': referer, 'exception': failure.value},
                extra={'spider': info.spider}
            )
        raise FileException
    def media_downloaded(self, response, request, info):
        """Validate the response, persist the file, and return its result
        dict (url/path/checksum). Raises FileException on any problem."""
        referer = request.headers.get('Referer')
        if response.status != 200:
            logger.warning(
                'File (code: %(status)s): Error downloading file from '
                '%(request)s referred in <%(referer)s>',
                {'status': response.status,
                 'request': request, 'referer': referer},
                extra={'spider': info.spider}
            )
            raise FileException('download-error')
        if not response.body:
            logger.warning(
                'File (empty-content): Empty file from %(request)s referred '
                'in <%(referer)s>: no-content',
                {'request': request, 'referer': referer},
                extra={'spider': info.spider}
            )
            raise FileException('empty-content')
        status = 'cached' if 'cached' in response.flags else 'downloaded'
        logger.debug(
            'File (%(status)s): Downloaded file from %(request)s referred in '
            '<%(referer)s>',
            {'status': status, 'request': request, 'referer': referer},
            extra={'spider': info.spider}
        )
        self.inc_stats(info.spider, status)
        try:
            path = self.file_path(request, response=response, info=info)
            checksum = self.file_downloaded(response, request, info)
        except FileException as exc:
            logger.warning(
                'File (error): Error processing file from %(request)s '
                'referred in <%(referer)s>: %(errormsg)s',
                {'request': request, 'referer': referer, 'errormsg': str(exc)},
                extra={'spider': info.spider}, exc_info=True
            )
            raise
        except Exception as exc:
            # Wrap unexpected errors so callers only ever see FileException.
            logger.error(
                'File (unknown-error): Error processing file from %(request)s '
                'referred in <%(referer)s>',
                {'request': request, 'referer': referer},
                exc_info=True, extra={'spider': info.spider}
            )
            raise FileException(str(exc))
        return {'url': request.url, 'path': path, 'checksum': checksum}
    def inc_stats(self, spider, status):
        # Track both a global count and a per-status breakdown.
        spider.crawler.stats.inc_value('file_count', spider=spider)
        spider.crawler.stats.inc_value('file_status_count/%s' % status, spider=spider)
    ### Overridable Interface
    def get_media_requests(self, item, info):
        return [Request(x) for x in item.get(self.FILES_URLS_FIELD, [])]
    def file_downloaded(self, response, request, info):
        # Persist the body through the configured store and return its md5.
        path = self.file_path(request, response=response, info=info)
        buf = BytesIO(response.body)
        self.store.persist_file(path, buf, info)
        checksum = md5sum(buf)
        return checksum
    def item_completed(self, results, item, info):
        # Attach only the successful downloads to the item.
        if isinstance(item, dict) or self.FILES_RESULT_FIELD in item.fields:
            item[self.FILES_RESULT_FIELD] = [x for ok, x in results if ok]
        return item
    def file_path(self, request, response=None, info=None):
        """Map a request to a store path: 'full/<sha1-of-url><ext>'."""
        ## start of deprecation warning block (can be removed in the future)
        def _warn():
            from scrapy.exceptions import ScrapyDeprecationWarning
            import warnings
            warnings.warn('FilesPipeline.file_key(url) method is deprecated, please use '
                          'file_path(request, response=None, info=None) instead',
                          category=ScrapyDeprecationWarning, stacklevel=1)
        # check if called from file_key with url as first argument
        if not isinstance(request, Request):
            _warn()
            url = request
        else:
            url = request.url
        # detect if file_key() method has been overridden
        if not hasattr(self.file_key, '_base'):
            _warn()
            return self.file_key(url)
        ## end of deprecation warning block
        media_guid = hashlib.sha1(url).hexdigest()  # change to request.url after deprecation
        media_ext = os.path.splitext(url)[1]  # change to request.url after deprecation
        return 'full/%s%s' % (media_guid, media_ext)
    # deprecated
    def file_key(self, url):
        return self.file_path(url)
    # Marker attribute: its absence on a subclass's file_key signals an override.
    file_key._base = True
| {
"content_hash": "a1880e2a4fa32ac0f84d821f91276df1",
"timestamp": "",
"source": "github",
"line_count": 336,
"max_line_length": 99,
"avg_line_length": 36.56845238095238,
"alnum_prop": 0.595588833726703,
"repo_name": "coderabhishek/scrapy",
"id": "a449793c9a72c84820ed6f4e008006fe849dbddf",
"size": "12287",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "scrapy/pipelines/files.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Groff",
"bytes": "2008"
},
{
"name": "HTML",
"bytes": "1809"
},
{
"name": "Python",
"bytes": "1270132"
},
{
"name": "Shell",
"bytes": "673"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function
import click
from sentry.runner.decorators import configuration
def _get_field(field_name):
    """Return the named field from sentry's User model (imported lazily)."""
    from sentry.models import User
    meta = User._meta
    return meta.get_field(field_name)
def _get_email():
    """Prompt on the console for an email and validate it via the User model."""
    from django.core.exceptions import ValidationError
    entered = click.prompt("Email")
    email_field = _get_field("email")
    try:
        return email_field.clean(entered, None)
    except ValidationError as exc:
        raise click.ClickException("; ".join(exc.messages))
def _get_password():
    """Prompt (twice, hidden) for a password and validate it via the User model."""
    from django.core.exceptions import ValidationError
    entered = click.prompt("Password", hide_input=True, confirmation_prompt=True)
    password_field = _get_field("password")
    try:
        return password_field.clean(entered, None)
    except ValidationError as exc:
        raise click.ClickException("; ".join(exc.messages))
def _get_superuser():
    """Ask interactively whether the new user should be a superuser."""
    answer = click.confirm("Should this user be a superuser?", default=False)
    return answer
@click.command()
@click.option("--email")
@click.option("--password")
@click.option("--superuser/--no-superuser", default=None, is_flag=True)
@click.option("--no-password", default=False, is_flag=True)
@click.option("--no-input", default=False, is_flag=True)
@configuration
def createuser(email, password, superuser, no_password, no_input):
    "Create a new user."
    # Interactively fill in anything missing, unless --no-input was given.
    if not no_input:
        if not email:
            email = _get_email()
        if not (password or no_password):
            password = _get_password()
        if superuser is None:
            superuser = _get_superuser()
    # --superuser/--no-superuser was never passed: default to a regular user.
    if superuser is None:
        superuser = False
    if not email:
        raise click.ClickException("Invalid or missing email address.")
    # TODO(mattrobenolt): Accept password over stdin?
    if not no_password and not password:
        raise click.ClickException("No password set and --no-password not passed.")
    # Imports are deferred so the CLI can load before Django is configured.
    from sentry import roles
    from sentry.models import User
    from django.conf import settings
    # The email doubles as the username; superusers are also made staff.
    user = User(
        email=email, username=email, is_superuser=superuser, is_staff=superuser, is_active=True
    )
    if password:
        user.set_password(password)
    user.save()
    click.echo("User created: %s" % (email,))
    # TODO(dcramer): kill this when we improve flows
    if settings.SENTRY_SINGLE_ORGANIZATION:
        from sentry.models import Organization, OrganizationMember, OrganizationMemberTeam, Team
        org = Organization.get_default()
        # Superusers get the top role in the default org; others the default role.
        if superuser:
            role = roles.get_top_dog().id
        else:
            role = org.default_role
        member = OrganizationMember.objects.create(organization=org, user=user, role=role)
        # if we've only got a single team let's go ahead and give
        # access to that team as its likely the desired outcome
        # (fetching up to 2 rows is enough to tell "exactly one" apart).
        teams = list(Team.objects.filter(organization=org)[0:2])
        if len(teams) == 1:
            OrganizationMemberTeam.objects.create(team=teams[0], organizationmember=member)
        click.echo("Added to organization: %s" % (org.slug,))
| {
"content_hash": "426b97d5e40ecbdc18306c9dfc8836fa",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 96,
"avg_line_length": 30.464646464646464,
"alnum_prop": 0.6651193633952255,
"repo_name": "mvaled/sentry",
"id": "f84305e0b07e25841dd01d2f46dc578f9583a0a0",
"size": "3016",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/runner/commands/createuser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226439"
},
{
"name": "Dockerfile",
"bytes": "6431"
},
{
"name": "HTML",
"bytes": "173429"
},
{
"name": "JavaScript",
"bytes": "9314175"
},
{
"name": "Lua",
"bytes": "65885"
},
{
"name": "Makefile",
"bytes": "9225"
},
{
"name": "Python",
"bytes": "50385401"
},
{
"name": "Ruby",
"bytes": "168"
},
{
"name": "Shell",
"bytes": "5685"
},
{
"name": "TypeScript",
"bytes": "773664"
}
],
"symlink_target": ""
} |
import optparse, sys
from twisted.internet import defer
from twisted.internet.protocol import Protocol, ClientFactory
from twisted.protocols.basic import NetstringReceiver
def parse_args():
    """Parse sys.argv into a list of (host, port) tuples.

    The first address is the transform server; the rest are poetry servers.
    Exits with usage help when fewer than two addresses are supplied.
    """
    usage = """usage: %prog [options] [hostname]:port ...
This is the Get Poetry Now! client, Twisted version 7.0
Run it like this:
python get-poetry.py xform-port port1 port2 ...
If you are in the base directory of the twisted-intro package,
you could run it like this:
python twisted-client-6/get-poetry.py 10001 10002 10003
to grab poetry from servers on ports 10002, and 10003 and transform
it using the server on port 10001.
Of course, there need to be appropriate servers listening on those
ports for that to work.
"""
    parser = optparse.OptionParser(usage)
    _, positional = parser.parse_args()
    if len(positional) < 2:
        print(parser.format_help())
        parser.exit()
    def parse_address(addr):
        # A bare port means localhost; otherwise split on the first colon.
        host, sep, port = addr.partition(':')
        if not sep:
            host, port = '127.0.0.1', addr
        if not port.isdigit():
            parser.error('Ports must be integers.')
        return host, int(port)
    return [parse_address(addr) for addr in positional]
class PoetryProtocol(Protocol):
    """Accumulates bytes until the server closes the connection, then
    hands the completed poem to the factory."""
    poem = b''
    def dataReceived(self, data):
        self.poem = self.poem + data
    def connectionLost(self, reason):
        # The protocol signals completion by closing; deliver what we have.
        self.poemReceived(self.poem)
    def poemReceived(self, poem):
        self.factory.poem_finished(poem)
class PoetryClientFactory(ClientFactory):
    """Fires a one-shot deferred with the poem, or its errback on
    connection failure. The deferred is cleared so it fires only once."""
    protocol = PoetryProtocol
    def __init__(self, deferred):
        self.deferred = deferred
    def poem_finished(self, poem):
        if self.deferred is None:
            return
        pending, self.deferred = self.deferred, None
        pending.callback(poem)
    def clientConnectionFailed(self, connector, reason):
        if self.deferred is None:
            return
        pending, self.deferred = self.deferred, None
        pending.errback(reason)
class TransformClientProtocol(NetstringReceiver):
    """Sends one '<xform>.<poem>' netstring request and delivers the reply."""
    def connectionMade(self):
        self.sendRequest(self.factory.xform_name, self.factory.poem)
    def sendRequest(self, xform_name, poem):
        # The poem is already bytes; the transform name and the '.'
        # separator must be encoded to bytes as well.
        request = b'.'.join([xform_name.encode('utf8'), poem])
        self.sendString(request)
    def stringReceived(self, s):
        self.transport.loseConnection()
        self.poemReceived(s)
    def poemReceived(self, poem):
        self.factory.handlePoem(poem)
class TransformClientFactory(ClientFactory):
    """Owns the deferred for one transform request; fires it exactly once."""
    protocol = TransformClientProtocol
    def __init__(self, xform_name, poem):
        self.xform_name = xform_name
        self.poem = poem
        self.deferred = defer.Deferred()
    def handlePoem(self, poem):
        pending, self.deferred = self.deferred, None
        pending.callback(poem)
    def clientConnectionLost(self, _, reason):
        # Only errback when the poem never arrived (deferred still pending).
        if self.deferred is not None:
            pending, self.deferred = self.deferred, None
            pending.errback(reason)
    clientConnectionFailed = clientConnectionLost
class TransformProxy(object):
    """
    I proxy requests to a transformation service.
    """
    def __init__(self, host, port):
        self.host = host
        self.port = port
    def xform(self, xform_name, poem):
        """Return a deferred firing with the transformed poem."""
        from twisted.internet import reactor
        factory = TransformClientFactory(xform_name, poem)
        reactor.connectTCP(self.host, self.port, factory)
        return factory.deferred
def get_poetry(host, port):
    """
    Download a poem from the given host and port. This function
    returns a Deferred which will be fired with the complete text of
    the poem or a Failure if the poem could not be downloaded.
    """
    from twisted.internet import reactor
    deferred = defer.Deferred()
    reactor.connectTCP(host, port, PoetryClientFactory(deferred))
    return deferred
def poetry_main():
    """Fetch poems from every server, transform each one, and print them."""
    addresses = parse_args()
    # The first address is the transform service; the rest serve poems.
    xform_addr = addresses.pop(0)
    proxy = TransformProxy(*xform_addr)
    from twisted.internet import reactor
    results = []
    @defer.inlineCallbacks
    def get_transformed_poem(host, port):
        try:
            poem = yield get_poetry(host, port)
        except Exception as e:
            print('The poem download failed:', e, file=sys.stderr)
            raise
        try:
            poem = yield proxy.xform('cummingsify', poem)
        except Exception as e:
            # Transform failure is non-fatal: fall through and return the
            # untransformed poem (the exception is not re-raised).
            print('Cummingsify failed!', e, file=sys.stderr)
        defer.returnValue(poem)
    def got_poem(poem):
        print(poem.decode('utf8'))
    def poem_done(_):
        # Runs for success and failure alike (addBoth); stop the reactor
        # once every poem request has completed.
        results.append(_)
        if len(results) == len(addresses):
            reactor.stop()
    for address in addresses:
        host, port = address
        d = get_transformed_poem(host, port)
        # No errback is attached here, so failures skip got_poem and flow
        # straight into poem_done via addBoth.
        d.addCallbacks(got_poem)
        d.addBoth(poem_done)
    reactor.run()
if __name__ == '__main__':
poetry_main()
| {
"content_hash": "4e9af6bf7b3510bb4c3a4c062b5f82e9",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 68,
"avg_line_length": 25.224489795918366,
"alnum_prop": 0.6383495145631068,
"repo_name": "jdavisp3/twisted-intro",
"id": "e430be72ac3fa65dfc68e56da3de3bc75f2de591",
"size": "5003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twisted-client-7/get-poetry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Erlang",
"bytes": "2523"
},
{
"name": "Haskell",
"bytes": "3262"
},
{
"name": "Makefile",
"bytes": "137"
},
{
"name": "Python",
"bytes": "118095"
},
{
"name": "Shell",
"bytes": "86"
}
],
"symlink_target": ""
} |
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import resource_helper
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
def _validate_list_of_port_dicts(values, data):
    """Validate a list of port dicts; return an error message or None."""
    if not isinstance(values, list):
        return _("'%s' is not a list") % data
    for entry in values:
        error = _validate_port_dict(entry)
        if error:
            return error
    # Reject duplicates by comparing each dict's (hashable) item tuples.
    fingerprints = [tuple(entry.items()) for entry in values]
    if len(fingerprints) != len(set(fingerprints)):
        return _("Duplicate items in the list: '%s'") % values
def _validate_port_dict(values):
    """Validate one {'port_id': ..., 'fixed_ip_address': ...} mapping.

    Returns an error message string on failure, None on success.
    """
    if not isinstance(values, dict):
        msg = _("%s is not a valid dictionary") % values
        LOG.debug(msg)
        return msg
    # port_id is mandatory and must be a UUID.
    msg = attr._validate_uuid(values.get('port_id'))
    if msg:
        return msg
    # fixed_ip_address is optional; validate only when present.
    fixed_ip = values.get('fixed_ip_address')
    if fixed_ip is None:
        return
    msg = attr._validate_ip_address(fixed_ip)
    if msg:
        return msg
# Register the custom validator so it can be referenced by name below.
attr.validators['type:validate_list_of_port_dicts'] = (
    _validate_list_of_port_dicts
)
RESOURCE_NAME = "scalingip"
RESOURCE_COLLECTION = RESOURCE_NAME + "s"
# Attribute map consumed by neutron's resource_helper (see Scalingip below):
# allow_post/allow_put control which operations accept each attribute.
RESOURCE_ATTRIBUTE_MAP = {
    RESOURCE_COLLECTION: {
        'id': {
            'allow_post': False, 'allow_put': False,
            'validate': {'type:uuid': None},
            'is_visible': True,
            'primary_key': True
        },
        "scaling_ip_address": {
            'allow_post': True, 'allow_put': False,
            'validate': {'type:ip_address_or_none': None},
            'is_visible': True, 'default': None,
            'enforce_policy': True
        },
        "tenant_id": {
            'allow_post': True, 'allow_put': False,
            'required_by_policy': True,
            'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
            'is_visible': True
        },
        "scaling_network_id": {
            'allow_post': True, 'allow_put': False,
            'validate': {'type:uuid': None},
            'is_visible': True
        },
        # Updatable list of {'port_id', 'fixed_ip_address'} dicts, checked
        # by the validator registered above.
        "ports": {
            'allow_post': True, 'allow_put': True,
            'validate': {
                'type:validate_list_of_port_dicts': None
            },
            'is_visible': True,
            'required_by_policy': True
        }
    }
}
class Scalingip(extensions.ExtensionDescriptor):
    """Neutron API extension descriptor for scaling IP resources."""
    @classmethod
    def get_name(cls):
        return RESOURCE_NAME
    @classmethod
    def get_alias(cls):
        return RESOURCE_NAME
    @classmethod
    def get_description(cls):
        return "Scaling IPs"
    @classmethod
    def get_namespace(cls):
        return ("http://docs.openstack.org/network/ext/"
                "networks_quark/api/v2.0")
    @classmethod
    def get_updated(cls):
        return "2016-01-20T19:00:00-00:00"
    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        plural_mappings = resource_helper.build_plural_mappings(
            {}, RESOURCE_ATTRIBUTE_MAP)
        attr.PLURALS.update(plural_mappings)
        return resource_helper.build_resource_info(plural_mappings,
                                                   RESOURCE_ATTRIBUTE_MAP,
                                                   None,
                                                   register_quota=True)
    def get_extended_resources(self, version):
        # Only API v2.0 exposes these resources.
        return RESOURCE_ATTRIBUTE_MAP if version == "2.0" else {}
| {
"content_hash": "fcab7c5177b4cfddcb3896cc13bd8f68",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 74,
"avg_line_length": 28.411290322580644,
"alnum_prop": 0.5410161793925632,
"repo_name": "lmaycotte/quark",
"id": "637c42f26d4cf9a30b0610bc8d7ec12f1cd60169",
"size": "4113",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quark/api/extensions/scalingip.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1207653"
},
{
"name": "Shell",
"bytes": "861"
}
],
"symlink_target": ""
} |
# NOTE(review): these module-level strings look like metadata consumed by a
# demo harness (desc/setup/cleanup/notes/output) -- confirm against the runner.
desc="""calling a stored procedure"""
setup="""
"""
cleanup="""
"""
notes="""
To call a procedure which is part of a package, or a procedure which
is part of another schema, use dotted notation like this:
<ul>
<li>package_name.procedure_name
<li>schema_name.procedure_name
<li>schema_name.package_name.procedure_name
</ul>
<p>
Parameters are passed as a list. callproc() returns a list
of the parameters passed in. If any parameters are OUT or
IN OUT, the returned list will have the modified values.
<p>
There's nothing special about calling a procedure during a
transaction. If the procedure modifies a table, you will
need to do a commit. It's possible that the procedure may
also do a commit (but that is generally a bad practice).
"""
output="""
"""
import sys
import cx_Oracle
def demo(conn, curs):
    """Call the demo stored procedures: p0 (no args) and p2 (two IN args)."""
    for proc, params in (('cxdemo.p0', None), ('cxdemo.p2', [55, 66])):
        if params is None:
            curs.callproc(proc)
        else:
            curs.callproc(proc, params)
if __name__ == '__main__':
connstr = sys.argv[1]
conn = cx_Oracle.connect(connstr)
curs = conn.cursor()
demo(conn,curs)
conn.close()
| {
"content_hash": "2ee3a602ba22f9154d651f09d7712b2f",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 68,
"avg_line_length": 24.186046511627907,
"alnum_prop": 0.7028846153846153,
"repo_name": "marhar/sqlminus",
"id": "9a8afd4443a53eae9e45570ef1915cb720421e00",
"size": "1281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cx-oracle-demos/storedproc.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "415"
},
{
"name": "HTML",
"bytes": "23121"
},
{
"name": "Makefile",
"bytes": "1569"
},
{
"name": "PLSQL",
"bytes": "899"
},
{
"name": "PLpgSQL",
"bytes": "43"
},
{
"name": "Python",
"bytes": "107916"
},
{
"name": "Shell",
"bytes": "673"
}
],
"symlink_target": ""
} |
"""Unpacker for Dean Edward's p.a.c.k.e.r"""
import re
import string
class UnpackingError(Exception):
    """Raised for badly packed source or a general unpacking error.

    The exception argument carries a meaningful description.
    """
PRIORITY = 1
def detect(source):
    """Detects whether `source` is P.A.C.K.E.R. coded."""
    stripped = source.replace(' ', '')
    return stripped.startswith('eval(function(p,a,c,k,e,r')
def unpack(source):
    """Unpacks P.A.C.K.E.R. packed js code."""
    payload, symtab, radix, count = _filterargs(source)
    if count != len(symtab):
        raise UnpackingError('Malformed p.a.c.k.e.r. symtab.')
    def lookup(match):
        """Look up symbols in the synthetic symtab."""
        # Decode the matched \xa1-\xff run as a base-95 number
        # (0xa1 == 161 is digit zero), then index into the symtab.
        word = 0
        for i, char in enumerate(reversed(match.group(0))):
            word = word + (ord(char)-161)*(95**i)
        # Fall back to the numeric index when the symtab entry is empty.
        return symtab[word] or word
    # NOTE: the `ur'...'` literal is Python 2-only syntax; this module
    # cannot be imported under Python 3 without changing it.
    source = re.sub(ur'[\xa1-\xff]+', lookup, payload)
    return _replacestrings(source)
def _filterargs(source):
"""Juice from a source file the four args needed by decoder."""
argsregex = (r"}\('(.*)', *(\d+), *(\d+), *'(.*)'\."
r"split\('\|'\), *(\d+), *(.*)\)\)")
args = re.search(argsregex, source, re.DOTALL).groups()
try:
return args[0], args[3].split('|'), int(args[1]), int(args[2])
except ValueError:
raise UnpackingError('Corrupted p.a.c.k.e.r. data.')
def _replacestrings(source):
"""Strip string lookup table (list) and replace values in source."""
match = re.search(r'var *(_\w+)\=\["(.*?)"\];', source, re.DOTALL)
if match:
varname, strings = match.groups()
startpoint = len(match.group(0))
lookup = strings.split('","')
variable = '%s[%%d]' % varname
for index, value in enumerate(lookup):
source = source.replace(variable % index, '"%s"' % value)
return source[startpoint:]
return source
| {
"content_hash": "362b857b1d9fd9ef29967e80f01b3979",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 74,
"avg_line_length": 32.258620689655174,
"alnum_prop": 0.5825761624799572,
"repo_name": "rysson/filmkodi",
"id": "9760505678af90a63ef8de2b73e9ea07eda7b331",
"size": "2161",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plugin.video.mrknow/lib/utils/unpack95High.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7510"
},
{
"name": "Python",
"bytes": "8058464"
},
{
"name": "Shell",
"bytes": "18531"
}
],
"symlink_target": ""
} |
"""
File: simpio.h
--------------
This file exports a set of functions that simplify input/output
operations in Python and provide some error-checking on console input.
Modified from Marty Stepp's CPP libraries.
@author sredmond
"""
# Default prompts/reprompts for the console-input helpers below.
# (Fix: removed stray trailing semicolons left over from the C++ original.)
GETINTEGER_DEFAULT_PROMPT = "Enter an integer: "
GETINTEGER_DEFAULT_REPROMPT = "Illegal integer format. Try again."
GETREAL_DEFAULT_PROMPT = "Enter a number: "
GETREAL_DEFAULT_REPROMPT = "Illegal numeric format. Try again."
GETPOSITIVEREAL_DEFAULT_PROMPT = "Enter a positive number: "
GETPOSITIVEREAL_DEFAULT_REPROMPT = "Illegal numeric format. Try again. "
GETYESORNO_DEFAULT_PROMPT = "Yes or No? "
GETYESORNO_DEFAULT_REPROMPT = "Please type a word that starts with 'Y' or 'N': "
# Generic fallbacks used by get_fn_cond()/get_line().
DEFAULT_PROMPT = "> "
DEFAULT_REPROMPT = "Invalid input. Please try again. "
def get_integer(prompt=GETINTEGER_DEFAULT_PROMPT, reprompt=GETINTEGER_DEFAULT_REPROMPT):
    """Prompt until the user enters a valid integer, then return it."""
    def parse(line):
        return int(line.strip())
    return get_fn_cond(parse, lambda _: True, prompt, reprompt)
def get_real(prompt=GETREAL_DEFAULT_PROMPT, reprompt=GETREAL_DEFAULT_REPROMPT):
    """Prompt until the user enters a valid number, then return it as a float."""
    def parse(line):
        return float(line.strip())
    return get_fn_cond(parse, lambda _: True, prompt, reprompt)
def get_positive_real(prompt=GETPOSITIVEREAL_DEFAULT_PROMPT, reprompt=GETPOSITIVEREAL_DEFAULT_REPROMPT):
    """Prompt until the user enters a strictly positive number, then return it."""
    def parse(line):
        return float(line.strip())
    return get_fn_cond(parse, lambda value: value > 0, prompt, reprompt)
def get_yes_or_no(prompt=GETYESORNO_DEFAULT_PROMPT, reprompt=GETYESORNO_DEFAULT_REPROMPT, default=None):
    """Prompt until the answer starts with Y or N; return True for yes.

    An empty line yields *default* (which must be 'Y' or 'N' to be accepted).
    """
    def first_letter(line):
        return line[0].upper() if line else default
    answer = get_fn_cond(first_letter, lambda val: val in ['Y', 'N'], prompt, reprompt)
    return answer == 'Y'
def get_fn_cond(fn, pred, prompt=DEFAULT_PROMPT, reprompt=DEFAULT_REPROMPT):
    """Repeatedly read a line, convert it with *fn*, and return the first
    converted value that *pred* accepts.

    Conversion errors are swallowed and treated as invalid input; after
    every rejected line the (space-padded) reprompt is printed, if any.
    """
    prompt = append_space(prompt)
    while True:
        raw = input(prompt)
        try:
            value = fn(raw)
        except Exception:
            pass
        else:
            if pred(value):
                return value
        if reprompt:
            print(append_space(reprompt))
def get_line(prompt=None):
    """Read and return one line of input, showing *prompt* (space-padded).

    Bug fix: the default prompt is None, and the original passed it straight
    into append_space(), which crashed with AttributeError on None.endswith.
    With no prompt we now simply read a line with no prompt text.
    """
    if prompt is None:
        return input()
    return input(append_space(prompt))
def append_space(prompt):
    """Adds a space to the end of the given string if none is present."""
    return prompt if prompt.endswith(' ') else prompt + ' '
| {
"content_hash": "d1e1bf474ae51a136c09d7ce959abade",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 104,
"avg_line_length": 29.4125,
"alnum_prop": 0.6578835529111772,
"repo_name": "sredmond/acmpy",
"id": "03f524b2327a0f87cddfbd915ddff1f360642f5b",
"size": "2380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "campy/util/simpio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "296890"
}
],
"symlink_target": ""
} |
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class MyHandler(FileSystemEventHandler):
    """Logs a message whenever a watched file or directory is modified."""
    def on_modified(self, event):
        # Bug fix: `print "Got it!"` is Python 2-only statement syntax; the
        # parenthesized call prints the same text on both Python 2 and 3.
        print("Got it!")
# Script entry point: watch the logger-config directory until Ctrl-C.
if __name__ =="__main__":
    event_handler= MyHandler()
    observer = Observer()
    # Non-recursive: only the directory itself is watched, not subtrees.
    observer.schedule(event_handler, path="/home/pi/datalogger/loggerconfigs", recursive=False)
    observer.start()
    try:
        # The observer runs on its own thread; just idle until interrupted.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
| {
"content_hash": "2fba5cd03785d9c2820ca7a0e205f8ce",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 92,
"avg_line_length": 23.047619047619047,
"alnum_prop": 0.75,
"repo_name": "mauerflitza/Probieren2",
"id": "9529b00467ae47e94081ff1a8158301d2a4bba41",
"size": "502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "File_Event_Handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8546"
},
{
"name": "HTML",
"bytes": "9724"
},
{
"name": "JavaScript",
"bytes": "45176"
},
{
"name": "PHP",
"bytes": "1975"
},
{
"name": "Python",
"bytes": "14140"
}
],
"symlink_target": ""
} |
"""Tests for the elementbase class"""
import unittest
from starstruct.elementbase import ElementBase
# pylint: disable=line-too-long,invalid-name,no-self-use
class TestElementBase(unittest.TestCase):
"""ElementBase module tests"""
def test_valid(self):
"""Test field formats that are valid ElementBase elements."""
test_fields = [
('a', 'd'), # double
('b', 'f'), # float
('e', '?'), # bool: 0, 1
]
for field in test_fields:
with self.subTest(field): # pylint: disable=no-member
out = ElementBase.valid(field)
self.assertTrue(out)
def test_not_valid(self):
"""Test field formats that are not valid ElementBase elements."""
test_fields = [
('a', '4x'), # 4 pad bytes
('b', 'z'), # invalid
('c', '1'), # invalid
('d', '9S'), # invalid (must be lowercase)
('e', 'b'), # signed byte: -128, 127
('f', 'H'), # unsigned short: 0, 65535
('g', '10s'), # 10 byte string
('h', 'L'), # unsigned long: 0, 2^32-1
('i', '/'), # invalid
]
for field in test_fields:
with self.subTest(field): # pylint: disable=no-member
out = ElementBase.valid(field)
self.assertFalse(out)
| {
"content_hash": "3d296df34b7a13a287837e2909566b31",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 73,
"avg_line_length": 32.27272727272727,
"alnum_prop": 0.4936619718309859,
"repo_name": "sprout42/StarStruct",
"id": "0de7955e46eb8f4c739b0f2ae91091472b6a9017",
"size": "1444",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "starstruct/tests/test_elementbase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "9319"
},
{
"name": "Python",
"bytes": "168892"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the icon_path and icon_title
    CharField columns to the GlobalRegion model."""
    def forwards(self, orm):
        # Adding field 'GlobalRegion.icon_path'
        db.add_column('location_globalregion', 'icon_path',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True),
                      keep_default=False)
        # Adding field 'GlobalRegion.icon_title'
        db.add_column('location_globalregion', 'icon_title',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True),
                      keep_default=False)
    def backwards(self, orm):
        # Deleting field 'GlobalRegion.icon_path'
        db.delete_column('location_globalregion', 'icon_path')
        # Deleting field 'GlobalRegion.icon_title'
        db.delete_column('location_globalregion', 'icon_title')
    # Frozen ORM state used by South to reconstruct the models at the time
    # of this migration; auto-generated — do not edit by hand.
    models = {
        'location.country': {
            'Meta': {'ordering': "['name']", 'unique_together': "(('parent', 'slug'),)", 'object_name': 'Country'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'gn_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'gn_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'on_delete': 'models.PROTECT', 'to': "orm['location.GlobalRegion']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
        },
        'location.globalregion': {
            'Meta': {'ordering': "['name']", 'object_name': 'GlobalRegion'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'gn_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'gn_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'icon_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'icon_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
        },
        'location.locality': {
            'Meta': {'ordering': "['name']", 'unique_together': "(('parent', 'slug'),)", 'object_name': 'Locality'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'gn_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'gn_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'on_delete': 'models.PROTECT', 'to': "orm['location.RegionDistrict']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
        },
        'location.place': {
            'Meta': {'ordering': "['id']", 'object_name': 'Place'},
            'australian_state': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'country': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'gn_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'gn_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_corrected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
            'region': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'})
        },
        'location.region': {
            'Meta': {'object_name': 'Region'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'})
        },
        'location.regiondistrict': {
            'Meta': {'ordering': "['name']", 'unique_together': "(('parent', 'slug'),)", 'object_name': 'RegionDistrict'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'gn_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'gn_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'on_delete': 'models.PROTECT', 'to': "orm['location.StateProvince']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
        },
        'location.stateprovince': {
            'Meta': {'ordering': "['name']", 'unique_together': "(('parent', 'slug'),)", 'object_name': 'StateProvince'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'gn_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'gn_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'on_delete': 'models.PROTECT', 'to': "orm['location.Country']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
        }
    }
complete_apps = ['location'] | {
"content_hash": "c9d3dd9439cb934214de21f378787522",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 176,
"avg_line_length": 72.25,
"alnum_prop": 0.5432525951557093,
"repo_name": "uq-eresearch/uqam",
"id": "24c074f3217e6db31441071dff497e01603f538e",
"size": "8116",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "location/migrations/0005_auto__add_field_globalregion_icon_path__add_field_globalregion_icon_ti.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "117676"
},
{
"name": "HTML",
"bytes": "108660"
},
{
"name": "JavaScript",
"bytes": "977528"
},
{
"name": "Python",
"bytes": "1297328"
},
{
"name": "Shell",
"bytes": "24566"
}
],
"symlink_target": ""
} |
import argparse
import csv
import re
# parser definition
parser = argparse.ArgumentParser(prog='Make csv file from wikipedia data')
parser.add_argument('--data_dir', type=str, default='wikipedia/',
help='Local dir to the txt files.')
parser.add_argument('--output_path', type=str, default='wiki.csv',
help='Local path to the csv output.')
# constants
FILES = ['A_F.txt', 'G_M.txt', 'N_Z.txt']
def hex_to_rgb(hex_color):
    """Convert a 6-digit hex color string (e.g. 'ff00aa') to an (r, g, b) tuple."""
    value = int(hex_color, 16)
    red = (value >> 16) & 0xFF
    green = (value >> 8) & 0xFF
    blue = value & 0xFF
    return (red, green, blue)
def preprocess_name(name):
    """Lowercase *name*, keeping only letters, digits, spaces and '-'."""
    cleaned = re.sub(r'[^a-zA-Z0-9 -]', r'', name)
    return cleaned.lower()
def read_and_save_text(file_path, csv_writer):
    """Parse one wikipedia color dump file and append its rows to the csv.

    Each input line has the form ``<name>\t#<hex_color>``; the emitted row
    is ``[name, r, g, b]`` with the name preprocessed and the hex color
    split into its RGB components.
    """
    with open(file_path, 'r') as f:
        # Iterate the file lazily instead of f.readlines(), which loads
        # the whole file into memory at once.
        for line in f:
            # between each name and color there's a \t#
            name, hex_color = line.split('\t#')
            hex_color = hex_color.rstrip('\n')  # simpler than re.sub('\n', ...)
            name = preprocess_name(name)
            r, g, b = hex_to_rgb(hex_color)
            csv_writer.writerow([name, r, g, b])
def main():
    """Aggregate every wikipedia dump file listed in FILES into one csv."""
    # argparse already prints a usage message and exits on bad arguments;
    # the previous bare `except:` swallowed SystemExit and then printed
    # `None` (print_help() prints itself and returns None).
    args = parser.parse_args()
    # get args
    data_dir = args.data_dir
    output_path = args.output_path
    # create csv writer for the output; 'a+' keeps the original
    # append-if-exists behaviour, and `with` guarantees the file is closed
    with open(output_path, 'a+') as output_file:
        csv_writer = csv.writer(output_file)
        # read each wikipedia file and save in the csv
        for file_name in FILES:
            file_path = data_dir + file_name
            read_and_save_text(file_path, csv_writer)
if __name__ == '__main__':
main()
| {
"content_hash": "bfbfb584d60d56ef93f5ccc796e7cac6",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 74,
"avg_line_length": 24.17391304347826,
"alnum_prop": 0.6163069544364509,
"repo_name": "random-forests/tensorflow-workshop",
"id": "61847ed1ccda619cc57acb54de3e1f9c50b7fcb7",
"size": "1668",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "archive/extras/colorbot/data/aggregate_wiki.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "8519160"
},
{
"name": "Python",
"bytes": "24893"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
"""
Server side functions for tagging.
- Tags can be added to any record (doctype, name) in the system.
- Items are filtered by tags
- Top tags are shown in the sidebar (?)
- Tags are also identified by the tag_fields property of the DocType
Discussion:
Tags are shown in the docbrowser and ideally where-ever items are searched.
There should also be statistics available for tags (like top tags etc)
Design:
- free tags (user_tags) are stored in __user_tags
- doctype tags are set in tag_fields property of the doctype
- top tags merges the tags from both the lists (only refreshes once an hour (max))
"""
import webnotes
from webnotes.utils import load_json
def check_user_tags(dt):
    "if the user does not have a tags column, then it creates one"
    try:
        webnotes.conn.sql("select `_user_tags` from `tab%s` limit 1" % dt)
    except Exception, e:
        # MySQL error 1054 ("Unknown column"): `_user_tags` does not exist
        # yet, so create it via DocTags.setup(); other errors are ignored.
        if e.args[0] == 1054:
            DocTags(dt).setup()
@webnotes.whitelist()
def add_tag():
    """Whitelisted endpoint: add a tag to a record and return the tag.

    Reads tag, color, dt and dn from the request form dict.
    """
    form = webnotes.form_dict
    tag, color = form.get('tag'), form.get('color')
    dt, dn = form.get('dt'), form.get('dn')
    DocTags(dt).add(dn, tag)
    return tag
@webnotes.whitelist()
def remove_tag():
    """Whitelisted endpoint: remove a tag from a record."""
    form = webnotes.form_dict
    tag = form.get('tag')
    dt, dn = form.get('dt'), form.get('dn')
    DocTags(dt).remove(dn, tag)
class DocTags:
    """Tags for a particular doctype"""
    def __init__(self, dt):
        self.dt = dt
    def get_tag_fields(self):
        """returns tag_fields property"""
        return webnotes.conn.get_value('DocType', self.dt, 'tag_fields')
    def get_tags(self, dn):
        """returns tag for a particular item"""
        return webnotes.conn.get_value(self.dt, dn, '_user_tags', ignore=1) or ''
    def add(self, dn, tag):
        """add a new user tag"""
        tl = self.get_tags(dn).split(',')
        if not tag in tl:
            tl.append(tag)
            self.update(dn, tl)
    def remove(self, dn, tag):
        """remove a user tag"""
        tl = self.get_tags(dn).split(',')
        self.update(dn, filter(lambda x:x!=tag, tl))
    def remove_all(self, dn):
        """remove all user tags (call before delete)"""
        self.update(dn, [])
    def update(self, dn, tl):
        """updates the _user_tag column in the table"""
        if not tl:
            tags = ''
        else:
            # de-duplicate and drop empty entries; the stored value gets a
            # leading comma — presumably so substring searches match whole
            # comma-delimited tags; verify against the query side.
            tl = list(set(filter(lambda x: x, tl)))
            tags = ',' + ','.join(tl)
        try:
            webnotes.conn.sql("update `tab%s` set _user_tags=%s where name=%s" % \
                (self.dt,'%s','%s'), (tags , dn))
        except Exception, e:
            # MySQL error 1054 = unknown column: create `_user_tags`
            # via setup() and retry the same update once.
            if e.args[0]==1054:
                if not tags:
                    # no tags, nothing to do
                    return
                self.setup()
                self.update(dn, tl)
            else: raise e
    def setup(self):
        """adds the _user_tags column if not exists"""
        # commit/begin around the DDL: ALTER TABLE implicitly ends the
        # current transaction in MySQL.
        webnotes.conn.commit()
        webnotes.conn.sql("alter table `tab%s` add column `_user_tags` varchar(180)" % self.dt)
        webnotes.conn.begin()
| {
"content_hash": "8c790a4272cf6028cd975487c7db9bb6",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 89,
"avg_line_length": 25.36936936936937,
"alnum_prop": 0.6502130681818182,
"repo_name": "gangadhar-kadam/sapphite_lib",
"id": "00776f2913fb6cede004b65e9536d6017e02bf69",
"size": "2904",
"binary": false,
"copies": "3",
"ref": "refs/heads/1310",
"path": "webnotes/widgets/tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "74826"
},
{
"name": "HTML",
"bytes": "36644"
},
{
"name": "JavaScript",
"bytes": "1134668"
},
{
"name": "Python",
"bytes": "563769"
}
],
"symlink_target": ""
} |
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as la
import sys
sys.path.append('..')
import uncompiled_floq.helpers as h
import uncompiled_floq.blockmatrix as bm
import uncompiled_floq.fixed_system as fs
import uncompiled_floq.errors as errors
import itertools
import copy
import cmath
def get_u(hf, params):
    """
    Calculate the time evolution operator U given a Fourier
    transformed Hamiltonian Hf.
    """
    k = assemble_k(hf, params)
    energies, eigenvectors = find_eigensystem(k, params)
    phi = calculate_phi(eigenvectors)
    psi = calculate_psi(eigenvectors, params)
    return calculate_u(phi, psi, energies, params)
def get_u_and_du(hf, dhf, params):
    """
    Calculate the time evolution operator U given a Fourier transformed
    Hamiltonian Hf, as well as its derivative dU given dHf.
    """
    k = assemble_k(hf, params)
    energies, eigenvectors = find_eigensystem(k, params)
    phi = calculate_phi(eigenvectors)
    psi = calculate_psi(eigenvectors, params)
    u = calculate_u(phi, psi, energies, params)
    dk = assemble_dk(dhf, params)
    du = calculate_du(dk, psi, energies, eigenvectors, params)
    return [u, du]
def assemble_k(hf, p):
    """Assemble the Floquet matrix K (p.k_dim x p.k_dim) from the Fourier
    components in hf.

    NOTE(review): hf_max = (p.nc-1)/2 assumes p.nc is odd (Python 2
    integer division) — confirm before porting.
    """
    hf_max = (p.nc-1)/2
    nz = p.nz
    nc = p.nc
    dim = p.dim
    omega = p.omega
    k = np.zeros([p.k_dim, p.k_dim], dtype='complex128')
    # Assemble K by placing each component of Hf in turn, which
    # for a fixed Fourier index lie on diagonals, with 0 on the
    # main diagonal, positive numbers on the right and negative on the left
    #
    # The first row is therefore essentially Hf(0) Hf(1) ... Hf(hf_max) 0 0 0 ...
    # The last row is then ... 0 0 0 Hf(-hf_max) ... Hf(0)
    # Note that the main diagonal acquires a factor of omega*identity*(row/column number)
    for n in xrange(-hf_max, hf_max+1):
        start_row = max(0, n)  # if n < 0, start at row 0
        start_col = max(0, -n)  # if n > 0, start at col 0
        stop_row = min((nz-1)+n, nz-1)
        stop_col = min((nz-1)-n, nz-1)
        row = start_row
        col = start_col
        current_component = hf[h.n_to_i(n, nc)]
        # Walk down the n-th block diagonal, copying the component into
        # each block position.
        while row <= stop_row and col <= stop_col:
            if n == 0:
                block = current_component + np.identity(dim)*omega*h.i_to_n(row, nz)
                bm.set_block_in_matrix(block, k, dim, nz, row, col)
            else:
                bm.set_block_in_matrix(current_component, k, dim, nz, row, col)
            row += 1
            col += 1
    return k
def assemble_dk(dhf, p):
    """Assemble dK for each of the p.np control parameters.

    Uses a copy of p with omega = 0 so assemble_k omits the diagonal
    omega term, leaving only the derivative blocks.
    """
    p2 = copy.copy(p)
    p2.omega = 0.0
    return np.array([assemble_k(dhf[i], p2) for i in xrange(0, p.np)])
def find_eigensystem(k, p):
    """Diagonalise k, keep the dim unique quasi-energies, and return them
    together with their eigenvectors split into Fourier components."""
    all_vals, all_vecs = compute_eigensystem(k, p)
    unique_vals = find_unique_vals(all_vals, p)
    rounded_vals = all_vals.round(p.decimals)
    # Pick, for each unique value, the first eigenvector whose (rounded)
    # eigenvalue matches it.
    picked = [all_vecs[:, np.where(rounded_vals == eva)[0][0]]
              for eva in unique_vals]
    unique_vecs = separate_components(np.array(picked), p.nz)
    return [unique_vals, unique_vecs]
def compute_eigensystem(k, p):
    """Diagonalise k with a sparse or dense solver, as selected by p.sparse,
    and return real eigenvalues with their eigenvectors."""
    if p.sparse:
        sparse_k = sp.csc_matrix(k)
        how_many = min(2*p.dim, p.k_dim)
        # shift-invert around sigma=0 targets the eigenvalues nearest zero
        vals, vecs = la.eigs(sparse_k, k=how_many, sigma=0.0)
    else:
        vals, vecs = np.linalg.eig(k)
        vals, vecs = trim_eigensystem(vals, vecs, p)
    vals = vals.real.astype(np.float64, copy=False)
    return vals, vecs
def trim_eigensystem(vals, vecs, p):
    """Trim eigenvalues and eigenvectors to only the 2*dim ones clustered
    around zero.

    NOTE(review): `p.k_dim/2` relies on Python 2 integer division —
    confirm before porting to Python 3 (use // there).
    """
    # Sort eigenvalues and -vectors in increasing order
    idx = vals.argsort()
    vals = vals[idx]
    vecs = vecs[:, idx]
    # Only keep values around 0
    middle = p.k_dim/2
    cutoff_left = max(0, middle - p.dim)
    cutoff_right = min(p.k_dim, cutoff_left + 2*p.dim)
    cut_vals = vals[cutoff_left:cutoff_right]
    cut_vecs = vecs[:, cutoff_left:cutoff_right]
    return cut_vals, cut_vecs
def find_unique_vals(vals, p):
    """Return the dim quasi-energies: values e_i with
    (e_i - e_j) mod omega != 0 for all i != j, clustered around 0.

    Raises errors.EigenvalueNumberError if the count differs from p.dim.
    """
    # Fold all eigenvalues into [0, omega) and round away float noise
    folded = np.mod(vals, p.omega).round(decimals=p.decimals)
    unique_vals = np.unique(folded)
    # np.unique yields sorted non-negative values; shift the upper half of
    # the zone down by omega so the result clusters around 0 instead
    upper_half = unique_vals > p.omega/2.
    unique_vals[upper_half] = (unique_vals[upper_half] - p.omega).round(p.decimals)
    if unique_vals.shape[0] != p.dim:
        raise errors.EigenvalueNumberError(vals, unique_vals)
    return np.sort(unique_vals)
def separate_components(vecs, n):
    """Return an array where every vector in *vecs* is split into n
    equal-length sub-arrays."""
    split_vectors = [np.split(vector, n) for vector in vecs]
    return np.array(split_vectors)
def calculate_phi(vecs):
    """Sum each eigenvector in *vecs* over all its frequency components."""
    summed = [np.sum(components, axis=0) for components in vecs]
    return np.array(summed)
def calculate_psi(vecs, p):
    """Sum each eigenvector over its frequency components, weighting
    component n by exp(+1j*omega*t*n).

    NOTE(review): an earlier comment described the weight as
    exp(-i omega t n), but the code applies exp(+1j*...) — this docstring
    documents what the code actually computes; verify the intended sign.
    """
    psi = np.zeros([p.dim, p.dim], dtype='complex128')
    for k in xrange(0, p.dim):
        partial = np.zeros(p.dim, dtype='complex128')
        for i in xrange(0, p.nz):
            # num is the (signed) Fourier index of component i
            num = h.i_to_n(i, p.nz)
            partial += np.exp(1j*p.omega*p.t*num)*vecs[k][i]
        psi[k, :] = partial
    return psi
def calculate_u(phi, psi, energies, p):
    """Build U = sum_k exp(-i*t*e_k) |psi_k><phi_k| from the eigendata."""
    u = np.zeros([p.dim, p.dim], dtype='complex128')
    for k in xrange(0, p.dim):
        u += np.exp(-1j*p.t*energies[k])*np.outer(psi[k], np.conj(phi[k]))
    return u
def calculate_du(dk, psi, vals, vecs, p):
    """Compute dU for each of the p.np control parameters from dK, the
    eigenvalues and the (Fourier-segmented) eigenvectors."""
    dim = p.dim
    nz_max = p.nz_max
    nz = p.nz
    npm = p.np
    omega = p.omega
    t = p.t
    vecsstar = np.conj(vecs)
    du = np.zeros([npm, dim, dim], dtype='complex128')
    # (i1,n1) & (i2,n2) iterate over the full spectrum of k:
    # i1, i2: unique eigenvalues/-vectors in 0th Brillouin zone
    # n1, n2: related vals/vecs derived by shifting with those offsets (lying in the nth BZs)
    uniques = xrange(0, dim)
    offsets = xrange(-nz_max, nz_max+1)
    # Precompute the alpha factors once per Fourier-index difference dn,
    # so the quadruple loop below only accumulates products.
    alphas = np.empty([npm, 2*nz+1, dim, dim], dtype=np.complex128)
    for dn in xrange(-nz_max*2, 2*nz_max+1):
        idn = h.n_to_i(dn, 2*nz)
        for i1, i2 in itertools.product(uniques, uniques):
            # shift the conjugated eigenvector's components by dn zones
            v1 = np.roll(vecsstar[i1], dn, axis=0)
            for c in xrange(0, npm):
                alphas[c, idn, i1, i2] = (integral_factors(vals[i1], vals[i2], dn, omega, t) *
                                          expectation_value(dk[c], v1, vecs[i2]))
    for n2 in offsets:
        for i1, i2 in itertools.product(uniques, uniques):
            product = np.outer(psi[i1], vecsstar[i2, h.n_to_i(-n2, nz)])
            for n1 in offsets:
                idn = h.n_to_i(n1-n2, 2*nz)
                for c in xrange(0, npm):
                    du[c] += alphas[c, idn, i1, i2]*product
    return du
def integral_factors(e1, e2, dn, omega, t):
    """Closed-form factor of the time integral appearing in dU.

    The degenerate case (e1 == e2 and dn == 0) uses the limit of the
    general expression to avoid a zero denominator.
    """
    if e1 == e2 and dn == 0:
        return -1.0j * t * cmath.exp(-1j * t * e1)
    shifted_e2 = e2 - omega * dn
    numerator = cmath.exp(-1j * t * e1) - cmath.exp(-1j * t * shifted_e2)
    return numerator / (e1 - shifted_e2)
def expectation_value(dk, v1, v2):
    """Return <v1| dk |v2>, flattening the (possibly segmented) vectors."""
    bra = v1.flatten()
    ket = v2.flatten()
    return bra.dot(dk).dot(ket)
| {
"content_hash": "8645fb2b60c9c41388125534ec6a58f0",
"timestamp": "",
"source": "github",
"line_count": 272,
"max_line_length": 97,
"avg_line_length": 28.176470588235293,
"alnum_prop": 0.6052974947807933,
"repo_name": "sirmarcel/floq",
"id": "1a0e32dcef9549f02a6fd968f816398c4c1ab714",
"size": "7664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "benchmark/museum_of_evolution/p4/evolution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "226290"
}
],
"symlink_target": ""
} |
# Gunicorn configuration for the adcap.biz deployment.
from __future__ import unicode_literals
import multiprocessing
# Bind to localhost only; a reverse proxy is expected in front.
bind = "127.0.0.1:8000"
# Common sizing heuristic: two workers per CPU core, plus one.
workers = multiprocessing.cpu_count() * 2 + 1
errorlog = "/opt/adcap.biz/log/adcap.biz_error.log"
loglevel = "info"
# Load the app before forking workers (saves memory via copy-on-write).
preload_app = True
| {
"content_hash": "1ebc469421eeaab43a8ec8e2d0979fff",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 51,
"avg_line_length": 28,
"alnum_prop": 0.7232142857142857,
"repo_name": "RobSpectre/salt-states",
"id": "0726c7f9eafc036248ff44f5bd2dcfcf3748e71d",
"size": "224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "adcap/gunicorn.conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "657"
},
{
"name": "PHP",
"bytes": "34"
},
{
"name": "Python",
"bytes": "33097"
},
{
"name": "SaltStack",
"bytes": "92378"
},
{
"name": "Shell",
"bytes": "25504"
},
{
"name": "Vim script",
"bytes": "2147"
}
],
"symlink_target": ""
} |
"""Subfunction for the Google command line tool, GoogleCL.
This function handles the authentication and storage of
credentials for the services which use OAuth2
"""
import httplib2
import logging
import pickle
import os
import googlecl
from oauth2client.file import Storage
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
LOG = logging.getLogger(googlecl.LOGGER_NAME)
TOKENS_FILENAME_FORMAT = 'dca_%s_%s'
def authenticate(email, servicename, doc, http, client_id,
                 client_secret, force_auth=False):
    """Authenticate the provided http object.

    Prompts for user confirmation if necessary, and stores the credentials.

    Args:
        email: The email address of the user.
        servicename: The service which requires authentication.
        doc: Discovery document for the service (used for its OAuth2 scopes).
        http: The object being authenticated.
        client_id: OAuth2 client id.
        client_secret: OAuth2 client secret.
        force_auth: If True, re-run the authorization flow even when stored
            credentials exist.
    Returns:
        The authorized http object.
    """
    tokens_path = googlecl.get_data_path(TOKENS_FILENAME_FORMAT %
                                         (email, servicename),
                                         create_missing_dir=True)
    storage = Storage(tokens_path)
    credentials = storage.get()
    if credentials is None or credentials.invalid or force_auth:
        # Works with google-api-python-client-1.0beta2, but not with
        # beta4. They're working on a way to allow deleting credentials.
        #storage.put(None)
        # Build the space-separated scope string from the discovery doc
        # (joined instead of quadratic += concatenation; each scope is
        # followed by a space, matching the original output exactly).
        desiredcred = ''.join(scope + ' '
                              for scope in doc['auth']['oauth2']['scopes'])
        flow = OAuth2WebServerFlow(client_id, client_secret,
                                   scope=desiredcred, user_agent='discoverycl')
        credentials = run(flow, storage)
    return credentials.authorize(http)
| {
"content_hash": "36625ee91479051185b8171b8f48e5ec",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 72,
"avg_line_length": 34.673469387755105,
"alnum_prop": 0.7098293113596233,
"repo_name": "charany1/googlecl",
"id": "8fca3b14304e47830af43008a242d917144846a2",
"size": "2281",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/googlecl/discovery/authentication.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import functools
import os
import pytest
from limejudge import anscompare
def test_compare_no_extra_ws(tmpdir):
    """'no-extra-ws' comparison: exact match modulo trailing whitespace."""
    out_file = tmpdir.join('outfile')
    ans_file = tmpdir.join('ansfile')
    out_path = str(out_file)
    ans_path = str(ans_file)
    missing_path = os.path.join(str(tmpdir), 'nonexistent')
    check = functools.partial(anscompare.compare, 'no-extra-ws')
    # Identical content scores full marks; the input argument is ignored
    out_file.write('1234abcd\nefgh5678\n')
    ans_file.write('1234abcd\nefgh5678\n')
    assert check(None, out_path, ans_path, 42) == 42
    assert check('ignored', out_path, ans_path, 42) == 42
    # Any content difference scores zero
    out_file.write('1234abcd\nefgh5678\n')
    ans_file.write('1234abcd\nefgh5679\n')
    assert check(None, out_path, ans_path, 42) == 0
    # Trailing whitespace and trailing blank lines are ignored
    out_file.write('1234abcd \t\r\nefgh')
    ans_file.write('1234abcd\nefgh\n\r\n')
    assert check(None, out_path, ans_path, 42) == 42
    # Leading whitespace is significant
    out_file.write(' 1234abcd\n')
    ans_file.write('1234abcd\n')
    assert check(None, out_path, ans_path, 42) == 0
    # Missing files score zero
    assert check(None, missing_path, ans_path, 42) == 0
    assert check(None, out_path, missing_path, 42) == 0
    assert check(None, '', '', 42) == 0
def test_compare_spj(tmpdir):
    """'special-judge' comparison delegates scoring to an external judge."""
    in_file = tmpdir.join('infile')
    out_file = tmpdir.join('outfile')
    ans_file = tmpdir.join('ansfile')
    in_path = str(in_file)
    out_path = str(out_file)
    ans_path = str(ans_file)
    spj_abs = os.path.join(os.path.dirname(__file__), 'progs', 'spj.py')
    spj_rel = os.path.relpath(spj_abs)
    check_abs = functools.partial(anscompare.compare,
                                  'special-judge:' + spj_abs)
    check_rel = functools.partial(anscompare.compare,
                                  'special-judge:' + spj_rel)

    def write_case(in_text, ans_text, out_text):
        # Helper: set up the three files for one scenario
        in_file.write(in_text)
        ans_file.write(ans_text)
        out_file.write(out_text)

    # Special judge is found via absolute or relative path
    write_case('input', 'answer', 'score 5')
    assert check_abs(in_path, out_path, ans_path, 42) == 5
    assert check_rel(in_path, out_path, ans_path, 42) == 5
    # Input is checked correctly
    write_case('bad-input', 'answer', 'score 5')
    assert check_abs(in_path, out_path, ans_path, 42) == 0
    # Answer is checked correctly
    write_case('input', 'bad-answer', 'score 5')
    assert check_abs(in_path, out_path, ans_path, 42) == 0
    # Output is checked correctly
    write_case('input', 'answer', 'bad-score 5')
    assert check_abs(in_path, out_path, ans_path, 42) == 0
    # Scores out of bound are returned as-is
    write_case('input', 'answer', 'score -1')
    assert check_abs(in_path, out_path, ans_path, 42) == -1
    write_case('input', 'answer', 'score 43')
    assert check_abs(in_path, out_path, ans_path, 42) == 43
def test_compare_invalid():
    """An unknown comparison type must raise ValueError."""
    with pytest.raises(ValueError) as excinfo:
        anscompare.compare('invalid-comp-type', None, None, None, 0)
    assert 'Unknown comparison type:' in str(excinfo)
| {
"content_hash": "e7d333e00a746b353a375e5b2b330fe6",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 72,
"avg_line_length": 31.71,
"alnum_prop": 0.6549984232103437,
"repo_name": "yyt16384/limejudge",
"id": "44d7d45c96a80c687d4fa48905f344c897755ee2",
"size": "3171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_anscompare.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1090"
},
{
"name": "Python",
"bytes": "50806"
}
],
"symlink_target": ""
} |
def bigend_24b(self, b1, b2, b3):
    """Combine three big-endian hex-string bytes into one int.

    The previous docstring claimed the result is 0~255; the combined
    24-bit value is actually in 0..16777215 (2**24 - 1).

    Args:
        b1, b2, b3: hex strings, most significant byte first.
    Returns:
        The combined integer value.
    """
    # `self` is unused (this is a module-level function) but kept for
    # interface compatibility with existing callers.
    high = int(b1, 16)
    mid = int(b2, 16)
    low = int(b3, 16)
    return high*256*256 + mid*256 + low
class Parser(object):
    """The parser class of data from the Mindwave
    It parsers the data according to the mindwave protocol
    """
    def __init__(self, headset, stream):
        self.headset = headset
        self.stream = stream
        self.buffer = []
    def __call__(self):
        return self
    def print_bytes(self, data):
        """Print bytes"""
        for b in data:
            print '0x%s, ' % b.encode('hex'),
    def parser(self, data):
        """This method parse a chunk of bytes
        It splits the incoming bytes in payload data
        """
        # settings = self.stream.getSettingsDict()
        # for i in xrange(2):
        #     settings['rtscts'] = not settings['rtscts']
        #     self.stream.applySettingsDict(settings)
        while len(data)> 1 :
            try:
                byte1, byte2 = data[0], data[1]
                # SYNC | SYNC | PLENGTH | PAYLOAD | CHKSUM
                # PAYLOAD: (EXCODE) | CODE |(VLENGTH) | VALUE
                if byte1 == Bytes.SYNC and byte2 == Bytes.SYNC:
                    #self.buffer.append(byte1)
                    #self.buffer.append(byte2)
                    data = data[2:]
                    # Skip any additional 0xaa sync bytes before PLENGTH.
                    # NOTE(review): when plength == 170 neither branch
                    # advances `data`, so this loop can spin forever on a
                    # run of sync bytes — verify against the real protocol.
                    while True:
                        plength = data[0] # 0-169
                        #self.buffer.append(plength)
                        plength = ord(plength)
                        if plength != 170:
                            break
                        if plength > 170:
                            pass #continue
                    data = data[1:]
                    payload = data[:plength]
                    # One's-complement checksum of the payload.
                    # NOTE(review): this sums payload[:-1], i.e. all but the
                    # last payload byte — confirm the intended checksum span.
                    checksum = 0
                    checksum = sum(ord(b) for b in payload[:-1])
                    checksum &= 0xff
                    checksum = ~checksum & 0xff
                    chksum = data[plength]
                    # Checksum mismatches are currently ignored.
                    if checksum != ord(chksum):
                        pass
                    self.parser_payload(payload)
                    #self.buffer.append(chksum)
                    data = data[plength+1:]
                    # for b in self.buffer:
                    #     if not b == "":
                    #         print '0x%s, ' % b.encode('hex'),
                    # print ""
                    #self.buffer = []
                else:
                    # Not at a sync pair: drop one byte and resynchronise.
                    data = data[1:]
            except IndexError, e:
                pass
    def parser_payload(self, payload):
        """This method gets the eMeter values
        It receives the data payload and parse it to find Concentration and Meditation values
        """
        while payload:
            try:
                code, payload = payload[0], payload[1:]
            except IndexError:
                pass
            #self.buffer.append(code)
            # multibytes
            if ord(code) >= 0x80:
                # Codes >= 0x80 carry a length byte followed by that many
                # value bytes.
                try:
                    vlength, payload = payload[0], payload[1:]
                    value, payload = payload[:ord(vlength)], payload[ord(vlength):]
                except IndexError:
                    pass
                #self.buffer.append(vlength)
                if code == BytesStatus.RESPONSE_CONNECTED:
                    # headset found
                    # format: 0xaa 0xaa 0x04 0xd0 0x02 0x05 0x05 0x23
                    self.headset.status = Status.CONNECTED
                    self.headset.id = value
                elif code == BytesStatus.RESPONSE_NOFOUND: # it can be 0 or 2 bytes
                    # headset no found
                    # format: 0xaa 0xaa 0x04 0xd1 0x02 0x05 0x05 0xf2
                    self.headset.status = Status.NOFOUND
                    # 0xAA 0xAA 0x02 0xD1 0x00 0xD9
                elif code == BytesStatus.RESPONSE_DISCONNECTED: # dongle send 4 bytes
                    # headset found
                    # format: 0xaa 0xaa 0x04 0xd2 0x02 0x05 0x05 0x21
                    self.headset.status = Status.DISCONNECTED
                elif code == BytesStatus.RESPONSE_REQUESTDENIED:
                    # headset found
                    # format: 0xaa 0xaa 0x02 0xd3 0x00 0x2c
                    self.headset.status = Status.DENIED
                # NOTE(review): `code` is a one-character string (ord() is
                # used above), so comparing it to the int 0xd4 can never be
                # true — verify the intended constant here.
                elif code == 0xd4: # waiting for a command the device send a byte 0x00
                    # standby/scanning mode
                    # format: 0xaa 0xaa 0x03 0xd4 0x01 0x00 0x2a
                    print 'scanning'
                    self.headset.status = Status.STANDBY
                elif code == Bytes.RAW_VALUE:
                    hight = value[0]
                    low = value[1]
                    #self.buffer.append(hight)
                    #self.buffer.append(low)
                    # NOTE(review): big-endian combine multiplies by 255;
                    # the commented-out alternative below uses 256 — confirm
                    # which is correct for the protocol.
                    self.headset.raw_value = ord(hight)*255+ord(low)
                    #self.headset.raw_value = int(hight, 16)*256+int(low, 16)
                    if self.headset.raw_value > 32768:
                        self.headset.raw_value = self.headset.raw_value - 65536
                elif code == Bytes.ASIC_EEG_POWER:
                    # ASIC_EEG_POWER_INT
                    # delta, theta, low-alpha, high-alpha, low-beta, high-beta,
                    # low-gamma, high-gamma
                    self.headset.asig_eeg_power = []
                    #print "length egg_power:", len(value)
                    #for i in range(8):
                    #    self.headset.asig_eeg_power.append(
                    #        bigend_24b(value[i], value[i+1], value[i+2]))
                else: #unknow multibyte
                    pass
            else:
                # single byte there isn't vlength
                # 0-127
                value, payload = payload[0], payload[1:]
                #self.buffer.append(value)
                if code == Bytes.POOR_SIGNAL:
                    self.headset.signal = ord(value) #int(value,16)
                elif code == Bytes.ATTENTION:
                    self.headset.attention = ord(value) # int(value,16) # ord(value)
                elif code == Bytes.MEDITATION:
                    self.headset.meditation = ord(value) #int(value,16) #ord(value)
                elif code == Bytes.BLINK:
                    self.headset.blink = ord(value) #int(value,16) # ord(value)
                else:
                    pass
from common import *
| {
"content_hash": "410c6a95c2a418f1a0cc378074f03f36",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 101,
"avg_line_length": 36.55913978494624,
"alnum_prop": 0.44735294117647056,
"repo_name": "osrf/gsoc-ros-neural",
"id": "0c8f816fd66a0135e636e65c604f1afefdbcdf20",
"size": "6801",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mindwave_driver/src/mindwave_driver/parser.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "14785"
},
{
"name": "CMake",
"bytes": "3562"
},
{
"name": "Python",
"bytes": "31421"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import pytest
from .utils import create_test_db # noqa
from mandroobi.models.metrics import ClosingBalance, Metric
def test_metric():
    """A Metric instance accepts all of its dimension ids as attributes."""
    record = Metric()
    record.account_id = '50000'
    record.accounting_period_id = '19991231'
    record.business_unit_id = 'NORTH'
    record.driver_id = 'SALES_VOLUME'
    record.currency_id = 'NO_CURRENCY'
    record.scenario_id = 'PLAN'
def test_closing_balance():
    """ClosingBalance exposes a settable closing_balance attribute."""
    balance = ClosingBalance()
    balance.closing_balance = 1000.50
@pytest.mark.usefixtures('create_test_db')
def test_closing_balance_inc_lia_equ_reverse_sign():
    """Balances on liability/equity accounts get their sign reversed."""
    created = ClosingBalance.create(
        account_id='20000',
        accounting_period_id='20170101',
        business_unit_id='TESTINO_LLC',
        driver_id='NO_DRIVER',
        currency_id='USD',
        scenario_id='ACTUAL',
        amount=100000.00
    )
    assert created.amount == -100000.00
    # The reversed sign is persisted, not just applied in memory
    fetched = ClosingBalance.where(account_id='20000').first()
    assert fetched.amount == -100000.00
| {
"content_hash": "b570fbd0b6f8f33890961be245b47bba",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 70,
"avg_line_length": 28.435897435897434,
"alnum_prop": 0.678990081154193,
"repo_name": "ivansabik/mandroobi",
"id": "bbf6c13f2218f77fdf2eddb4cf9852467f3c9d4a",
"size": "1109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_models_metrics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "31432"
}
],
"symlink_target": ""
} |
import random
import re
import unicodedata
from eventlet import greenthread
from oslo.utils import excutils
from oslo.utils import strutils
import six
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder.volume.drivers.ibm.storwize_svc import ssh as storwize_ssh
from cinder.volume import qos_specs
from cinder.volume import utils
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
class StorwizeHelpers(object):
    """Helper routines that drive an IBM Storwize/SVC array over SSH."""

    # All the supported QoS keys are kept in this dict. When a new key is
    # added, three values MUST be set:
    # 'default': the value used when the parameter is disabled.
    # 'param': the corresponding parameter name in the CLI command.
    # 'type': the Python type used to convert the raw spec value.
    svc_qos_keys = {'IOThrottling': {'default': '0',
                                     'param': 'rate',
                                     'type': int}}
    def __init__(self, run_ssh):
        # `run_ssh` is the callable used to execute CLI commands on the array.
        self.ssh = storwize_ssh.StorwizeSSH(run_ssh)
        # Polling interval (seconds) while waiting on FlashCopy mappings.
        self.check_fcmapping_interval = 3
@staticmethod
def handle_keyerror(cmd, out):
msg = (_('Could not find key in output of command %(cmd)s: %(out)s')
% {'out': out, 'cmd': cmd})
raise exception.VolumeBackendAPIException(data=msg)
def compression_enabled(self):
"""Return whether or not compression is enabled for this system."""
resp = self.ssh.lslicense()
keys = ['license_compression_enclosures',
'license_compression_capacity']
for key in keys:
if resp.get(key, '0') != '0':
return True
return False
def get_system_info(self):
"""Return system's name, ID, and code level."""
resp = self.ssh.lssystem()
level = resp['code_level']
match_obj = re.search('([0-9].){3}[0-9]', level)
if match_obj is None:
msg = _('Failed to get code level (%s).') % level
raise exception.VolumeBackendAPIException(data=msg)
code_level = match_obj.group().split('.')
return {'code_level': tuple([int(x) for x in code_level]),
'system_name': resp['name'],
'system_id': resp['id']}
    def get_pool_attrs(self, pool):
        """Return attributes for the specified pool."""
        # Thin wrapper over 'lsmdiskgrp'; returns whatever the SSH layer
        # parsed for this pool.
        return self.ssh.lsmdiskgrp(pool)
def get_available_io_groups(self):
"""Return list of available IO groups."""
iogrps = []
resp = self.ssh.lsiogrp()
for iogrp in resp:
try:
if int(iogrp['node_count']) > 0:
iogrps.append(int(iogrp['id']))
except KeyError:
self.handle_keyerror('lsiogrp', iogrp)
except ValueError:
msg = (_('Expected integer for node_count, '
'svcinfo lsiogrp returned: %(node)s') %
{'node': iogrp['node_count']})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
return iogrps
def get_node_info(self):
"""Return dictionary containing information on system's nodes."""
nodes = {}
resp = self.ssh.lsnode()
for node_data in resp:
try:
if node_data['status'] != 'online':
continue
node = {}
node['id'] = node_data['id']
node['name'] = node_data['name']
node['IO_group'] = node_data['IO_group_id']
node['iscsi_name'] = node_data['iscsi_name']
node['WWNN'] = node_data['WWNN']
node['status'] = node_data['status']
node['WWPN'] = []
node['ipv4'] = []
node['ipv6'] = []
node['enabled_protocols'] = []
nodes[node['id']] = node
except KeyError:
self.handle_keyerror('lsnode', node_data)
return nodes
    def add_iscsi_ip_addrs(self, storage_nodes):
        """Add iSCSI IP addresses to system node information.

        Mutates the node dicts in `storage_nodes` in place, appending the
        configured/online port addresses to their 'ipv4'/'ipv6' lists.
        """
        resp = self.ssh.lsportip()
        for ip_data in resp:
            try:
                state = ip_data['state']
                # Only count ports that are actually usable.
                if ip_data['node_id'] in storage_nodes and (
                        state == 'configured' or state == 'online'):
                    node = storage_nodes[ip_data['node_id']]
                    # Empty strings mean no address configured on the port.
                    if len(ip_data['IP_address']):
                        node['ipv4'].append(ip_data['IP_address'])
                    if len(ip_data['IP_address_6']):
                        node['ipv6'].append(ip_data['IP_address_6'])
            except KeyError:
                self.handle_keyerror('lsportip', ip_data)
    def add_fc_wwpns(self, storage_nodes):
        """Add FC WWPNs to system node information.

        Mutates each node dict in place: merges the node's configured port
        WWPNs into its 'WWPN' list.
        """
        for key in storage_nodes:
            node = storage_nodes[key]
            resp = self.ssh.lsnode(node_id=node['id'])
            # Merge with any WWPNs already recorded for the node.
            wwpns = set(node['WWPN'])
            for i, s in resp.select('port_id', 'port_status'):
                if 'unconfigured' != s:
                    wwpns.add(i)
            node['WWPN'] = list(wwpns)
            LOG.info(_LI('WWPN on node %(node)s: %(wwpn)s')
                     % {'node': node['id'], 'wwpn': node['WWPN']})
    def add_chap_secret_to_host(self, host_name):
        """Generate and store a randomly-generated CHAP secret for the host.

        :returns: the generated secret so the caller can hand it to the
            initiator side.
        """
        chap_secret = utils.generate_password()
        self.ssh.add_chap_secret(chap_secret, host_name)
        return chap_secret
    def get_chap_secret_for_host(self, host_name):
        """Return the CHAP secret configured for the given host.

        (Previous docstring was copy-pasted from add_chap_secret_to_host;
        this method only reads.)

        :returns: the secret when the host uses CHAP authentication,
            otherwise None.
        :raises VolumeBackendAPIException: if the host is not defined.
        """
        resp = self.ssh.lsiscsiauth()
        host_found = False
        for host_data in resp:
            try:
                if host_data['name'] == host_name:
                    host_found = True
                    if host_data['iscsi_auth_method'] == 'chap':
                        return host_data['iscsi_chap_secret']
            except KeyError:
                self.handle_keyerror('lsiscsiauth', host_data)
        if not host_found:
            msg = _('Failed to find host %s') % host_name
            raise exception.VolumeBackendAPIException(data=msg)
        return None
def get_conn_fc_wwpns(self, host):
wwpns = set()
resp = self.ssh.lsfabric(host=host)
for wwpn in resp.select('local_wwpn'):
if wwpn is not None:
wwpns.add(wwpn)
return list(wwpns)
    def get_host_from_connector(self, connector):
        """Return the Storwize host described by the connector.

        Tries a fast lookup via the FC fabric first (when 'wwpns' is in the
        connector), then falls back to an exhaustive scan of every defined
        host. Returns None when no host matches.
        """
        LOG.debug('enter: get_host_from_connector: %s' % connector)
        # If we have FC information, we have a faster lookup option
        host_name = None
        if 'wwpns' in connector:
            for wwpn in connector['wwpns']:
                resp = self.ssh.lsfabric(wwpn=wwpn)
                for wwpn_info in resp:
                    try:
                        # Match case-insensitively on the remote WWPN.
                        if (wwpn_info['remote_wwpn'] and
                                wwpn_info['name'] and
                                wwpn_info['remote_wwpn'].lower() ==
                                wwpn.lower()):
                            host_name = wwpn_info['name']
                    except KeyError:
                        self.handle_keyerror('lsfabric', wwpn_info)
        if host_name:
            LOG.debug('leave: get_host_from_connector: host %s' % host_name)
            return host_name
        # That didn't work, so try exhaustive search
        hosts_info = self.ssh.lshost()
        found = False
        for name in hosts_info.select('name'):
            # One lshost call per host: compare its initiator/WWPN ports
            # against the connector.
            resp = self.ssh.lshost(host=name)
            if 'initiator' in connector:
                for iscsi in resp.select('iscsi_name'):
                    if iscsi == connector['initiator']:
                        host_name = name
                        found = True
                        break
            elif 'wwpns' in connector and len(connector['wwpns']):
                connector_wwpns = [str(x).lower() for x in connector['wwpns']]
                for wwpn in resp.select('WWPN'):
                    if wwpn and wwpn.lower() in connector_wwpns:
                        host_name = name
                        found = True
                        break
            if found:
                break
        LOG.debug('leave: get_host_from_connector: host %s' % host_name)
        return host_name
    def create_host(self, connector):
        """Create a new host on the storage system.

        We create a host name and associate it with the given connection
        information. The host name will be a cleaned up version of the given
        host name (at most 55 characters), plus a random 8-character suffix to
        avoid collisions. The total length should be at most 63 characters.

        :returns: the name of the host created on the array.
        :raises VolumeDriverException: if the connector has no usable host
            name or supplies neither initiators nor wwpns.
        """
        LOG.debug('enter: create_host: host %s' % connector['host'])
        # Before we start, make sure host name is a string and that we have at
        # least one port.
        host_name = connector['host']
        if not isinstance(host_name, six.string_types):
            msg = _('create_host: Host name is not unicode or string')
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)
        ports = []
        if 'initiator' in connector:
            ports.append(['initiator', '%s' % connector['initiator']])
        if 'wwpns' in connector:
            for wwpn in connector['wwpns']:
                ports.append(['wwpn', '%s' % wwpn])
        if not len(ports):
            msg = _('create_host: No initiators or wwpns supplied.')
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)
        # Build a host name for the Storwize host - first clean up the name
        if isinstance(host_name, unicode):  # NOTE(review): py2-only builtin
            host_name = unicodedata.normalize('NFKD', host_name).encode(
                'ascii', 'replace').decode('ascii')
        # Replace every disallowed ASCII character with '-'.
        for num in range(0, 128):
            ch = str(chr(num))
            if not ch.isalnum() and ch not in [' ', '.', '-', '_']:
                host_name = host_name.replace(ch, '-')
        # Storwize doesn't like hostname that doesn't starts with letter or _.
        if not re.match('^[A-Za-z]', host_name):
            host_name = '_' + host_name
        # Add a random 8-character suffix to avoid collisions
        rand_id = str(random.randint(0, 99999999)).zfill(8)
        host_name = '%s-%s' % (host_name[:55], rand_id)
        # Create a host with one port
        port = ports.pop(0)
        self.ssh.mkhost(host_name, port[0], port[1])
        # Add any additional ports to the host
        for port in ports:
            self.ssh.addhostport(host_name, port[0], port[1])
        LOG.debug('leave: create_host: host %(host)s - %(host_name)s' %
                  {'host': connector['host'], 'host_name': host_name})
        return host_name
    def delete_host(self, host_name):
        """Remove the host definition from the array (rmhost)."""
        self.ssh.rmhost(host_name)
    def map_vol_to_host(self, volume_name, host_name, multihostmap):
        """Create a mapping between a volume to a host.

        :returns: the SCSI LUN id used for the mapping (existing or new).
        """
        LOG.debug('enter: map_vol_to_host: volume %(volume_name)s to '
                  'host %(host_name)s'
                  % {'volume_name': volume_name, 'host_name': host_name})
        # Check if this volume is already mapped to this host
        mapped = False
        luns_used = []
        result_lun = '-1'
        resp = self.ssh.lshostvdiskmap(host_name)
        for mapping_info in resp:
            luns_used.append(int(mapping_info['SCSI_id']))
            if mapping_info['vdisk_name'] == volume_name:
                mapped = True
                result_lun = mapping_info['SCSI_id']
        if not mapped:
            # Find unused lun: take the first gap in the sorted LUN list,
            # or len(luns_used) when the list is gap-free.
            luns_used.sort()
            result_lun = str(len(luns_used))
            for index, n in enumerate(luns_used):
                if n > index:
                    result_lun = str(index)
                    break
            self.ssh.mkvdiskhostmap(host_name, volume_name, result_lun,
                                    multihostmap)
        LOG.debug('leave: map_vol_to_host: LUN %(result_lun)s, volume '
                  '%(volume_name)s, host %(host_name)s' %
                  {'result_lun': result_lun,
                   'volume_name': volume_name,
                   'host_name': host_name})
        return int(result_lun)
def unmap_vol_from_host(self, volume_name, host_name):
"""Unmap the volume and delete the host if it has no more mappings."""
LOG.debug('enter: unmap_vol_from_host: volume %(volume_name)s from '
'host %(host_name)s'
% {'volume_name': volume_name, 'host_name': host_name})
# Check if the mapping exists
resp = self.ssh.lsvdiskhostmap(volume_name)
if not len(resp):
LOG.warning(_LW('unmap_vol_from_host: No mapping of volume '
'%(vol_name)s to any host found.') %
{'vol_name': volume_name})
return
if host_name is None:
if len(resp) > 1:
LOG.warning(_LW('unmap_vol_from_host: Multiple mappings of '
'volume %(vol_name)s found, no host '
'specified.') % {'vol_name': volume_name})
return
else:
host_name = resp[0]['host_name']
else:
found = False
for h in resp.select('host_name'):
if h == host_name:
found = True
if not found:
LOG.warning(_LW('unmap_vol_from_host: No mapping of volume '
'%(vol_name)s to host %(host)s found.') %
{'vol_name': volume_name, 'host': host_name})
# We now know that the mapping exists
self.ssh.rmvdiskhostmap(host_name, volume_name)
# If this host has no more mappings, delete it
resp = self.ssh.lshostvdiskmap(host_name)
if not len(resp):
self.delete_host(host_name)
LOG.debug('leave: unmap_vol_from_host: volume %(volume_name)s from '
'host %(host_name)s'
% {'volume_name': volume_name, 'host_name': host_name})
@staticmethod
def build_default_opts(config):
# Ignore capitalization
protocol = config.storwize_svc_connection_protocol
if protocol.lower() == 'fc':
protocol = 'FC'
elif protocol.lower() == 'iscsi':
protocol = 'iSCSI'
cluster_partner = config.storwize_svc_stretched_cluster_partner
opt = {'rsize': config.storwize_svc_vol_rsize,
'warning': config.storwize_svc_vol_warning,
'autoexpand': config.storwize_svc_vol_autoexpand,
'grainsize': config.storwize_svc_vol_grainsize,
'compression': config.storwize_svc_vol_compression,
'easytier': config.storwize_svc_vol_easytier,
'protocol': protocol,
'multipath': config.storwize_svc_multipath_enabled,
'iogrp': config.storwize_svc_vol_iogrp,
'qos': None,
'stretched_cluster': cluster_partner,
'replication': False}
return opt
@staticmethod
def check_vdisk_opts(state, opts):
# Check that rsize is either -1 or between 0 and 100
if not (opts['rsize'] >= -1 and opts['rsize'] <= 100):
raise exception.InvalidInput(
reason=_('Illegal value specified for storwize_svc_vol_rsize: '
'set to either a percentage (0-100) or -1'))
# Check that warning is either -1 or between 0 and 100
if not (opts['warning'] >= -1 and opts['warning'] <= 100):
raise exception.InvalidInput(
reason=_('Illegal value specified for '
'storwize_svc_vol_warning: '
'set to a percentage (0-100)'))
# Check that grainsize is 32/64/128/256
if opts['grainsize'] not in [32, 64, 128, 256]:
raise exception.InvalidInput(
reason=_('Illegal value specified for '
'storwize_svc_vol_grainsize: set to either '
'32, 64, 128, or 256'))
# Check that compression is supported
if opts['compression'] and not state['compression_enabled']:
raise exception.InvalidInput(
reason=_('System does not support compression'))
# Check that rsize is set if compression is set
if opts['compression'] and opts['rsize'] == -1:
raise exception.InvalidInput(
reason=_('If compression is set to True, rsize must '
'also be set (not equal to -1)'))
# Check that the requested protocol is enabled
if opts['protocol'] not in state['enabled_protocols']:
raise exception.InvalidInput(
reason=_('Illegal value %(prot)s specified for '
'storwize_svc_connection_protocol: '
'valid values are %(enabled)s')
% {'prot': opts['protocol'],
'enabled': ','.join(state['enabled_protocols'])})
if opts['iogrp'] not in state['available_iogrps']:
avail_grps = ''.join(str(e) for e in state['available_iogrps'])
raise exception.InvalidInput(
reason=_('I/O group %(iogrp)d is not valid; available '
'I/O groups are %(avail)s')
% {'iogrp': opts['iogrp'],
'avail': avail_grps})
def _get_opts_from_specs(self, opts, specs):
qos = {}
for k, value in specs.iteritems():
# Get the scope, if using scope format
key_split = k.split(':')
if len(key_split) == 1:
scope = None
key = key_split[0]
else:
scope = key_split[0]
key = key_split[1]
# We generally do not look at capabilities in the driver, but
# protocol is a special case where the user asks for a given
# protocol and we want both the scheduler and the driver to act
# on the value.
if ((not scope or scope == 'capabilities') and
key == 'storage_protocol'):
scope = None
key = 'protocol'
words = value.split()
if not (words and len(words) == 2 and words[0] == '<in>'):
LOG.error(_LE('Protocol must be specified as '
'\'<in> iSCSI\' or \'<in> FC\'.'))
del words[0]
value = words[0]
# We generally do not look at capabilities in the driver, but
# replication is a special case where the user asks for
# a volume to be replicated, and we want both the scheduler and
# the driver to act on the value.
if ((not scope or scope == 'capabilities') and
key == 'replication'):
scope = None
key = 'replication'
words = value.split()
if not (words and len(words) == 2 and words[0] == '<is>'):
LOG.error(_LE('Replication must be specified as '
'\'<is> True\' or \'<is> False\'.'))
del words[0]
value = words[0]
# Add the QoS.
if scope and scope == 'qos':
if key in self.svc_qos_keys.keys():
try:
type_fn = self.svc_qos_keys[key]['type']
value = type_fn(value)
qos[key] = value
except ValueError:
continue
# Any keys that the driver should look at should have the
# 'drivers' scope.
if scope and scope != 'drivers':
continue
if key in opts:
this_type = type(opts[key]).__name__
if this_type == 'int':
value = int(value)
elif this_type == 'bool':
value = strutils.bool_from_string(value)
opts[key] = value
if len(qos) != 0:
opts['qos'] = qos
return opts
def _get_qos_from_volume_metadata(self, volume_metadata):
"""Return the QoS information from the volume metadata."""
qos = {}
for i in volume_metadata:
k = i.get('key', None)
value = i.get('value', None)
key_split = k.split(':')
if len(key_split) == 1:
scope = None
key = key_split[0]
else:
scope = key_split[0]
key = key_split[1]
# Add the QoS.
if scope and scope == 'qos':
if key in self.svc_qos_keys.keys():
try:
type_fn = self.svc_qos_keys[key]['type']
value = type_fn(value)
qos[key] = value
except ValueError:
continue
return qos
    def get_vdisk_params(self, config, state, type_id, volume_type=None,
                         volume_metadata=None):
        """Return the parameters for creating the vdisk.

        Takes volume type and defaults from config options into account.
        Precedence: qos_specs > volume-type extra specs > tenant QoS from
        volume metadata (only when allowed) > config defaults.
        """
        opts = self.build_default_opts(config)
        ctxt = context.get_admin_context()
        if volume_type is None and type_id is not None:
            volume_type = volume_types.get_volume_type(ctxt, type_id)
        if volume_type:
            qos_specs_id = volume_type.get('qos_specs_id')
            specs = dict(volume_type).get('extra_specs')
            # NOTE(vhou): We prefer the qos_specs association
            # and over-ride any existing
            # extra-specs settings if present
            if qos_specs_id is not None:
                kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
                # Merge the qos_specs into extra_specs and qos_specs has higher
                # priority than extra_specs if they have different values for
                # the same key.
                specs.update(kvs)
            opts = self._get_opts_from_specs(opts, specs)
        if (opts['qos'] is None and config.storwize_svc_allow_tenant_qos
                and volume_metadata):
            # No admin-defined QoS; fall back to tenant-supplied metadata.
            qos = self._get_qos_from_volume_metadata(volume_metadata)
            if len(qos) != 0:
                opts['qos'] = qos
        self.check_vdisk_opts(state, opts)
        return opts
@staticmethod
def _get_vdisk_create_params(opts):
easytier = 'on' if opts['easytier'] else 'off'
if opts['rsize'] == -1:
params = []
else:
params = ['-rsize', '%s%%' % str(opts['rsize']),
'-autoexpand', '-warning',
'%s%%' % str(opts['warning'])]
if not opts['autoexpand']:
params.remove('-autoexpand')
if opts['compression']:
params.append('-compressed')
else:
params.extend(['-grainsize', str(opts['grainsize'])])
params.extend(['-easytier', easytier])
return params
    def create_vdisk(self, name, size, units, pool, opts):
        """Create a vdisk of `size` (in `units`) in `pool` with `opts`."""
        LOG.debug('enter: create_vdisk: vdisk %s ' % name)
        params = self._get_vdisk_create_params(opts)
        self.ssh.mkvdisk(name, size, units, pool, opts, params)
        LOG.debug('leave: _create_vdisk: volume %s ' % name)
    def get_vdisk_attributes(self, vdisk):
        """Return the lsvdisk attributes for the vdisk."""
        # is_vdisk_defined() relies on a None return for a missing vdisk.
        attrs = self.ssh.lsvdisk(vdisk)
        return attrs
    def is_vdisk_defined(self, vdisk_name):
        """Check if vdisk is defined.

        :returns: True when lsvdisk reports attributes for the name.
        """
        attrs = self.get_vdisk_attributes(vdisk_name)
        return attrs is not None
    def find_vdisk_copy_id(self, vdisk, pool):
        """Return the id of the vdisk copy located in the given pool.

        :raises VolumeDriverException: when no copy exists in that pool.
        """
        resp = self.ssh.lsvdiskcopy(vdisk)
        for copy_id, mdisk_grp in resp.select('copy_id', 'mdisk_grp_name'):
            if mdisk_grp == pool:
                return copy_id
        msg = _('Failed to find a vdisk copy in the expected pool.')
        LOG.error(msg)
        raise exception.VolumeDriverException(message=msg)
    def get_vdisk_copy_attrs(self, vdisk, copy_id):
        """Return the attributes (first lsvdiskcopy row) of one vdisk copy."""
        return self.ssh.lsvdiskcopy(vdisk, copy_id=copy_id)[0]
def get_vdisk_copies(self, vdisk):
copies = {'primary': None,
'secondary': None}
resp = self.ssh.lsvdiskcopy(vdisk)
for copy_id, status, sync, primary, mdisk_grp in \
resp.select('copy_id', 'status', 'sync',
'primary', 'mdisk_grp_name'):
copy = {'copy_id': copy_id,
'status': status,
'sync': sync,
'primary': primary,
'mdisk_grp_name': mdisk_grp,
'sync_progress': None}
if copy['sync'] != 'yes':
progress_info = self.ssh.lsvdisksyncprogress(vdisk, copy_id)
copy['sync_progress'] = progress_info['progress']
if copy['primary'] == 'yes':
copies['primary'] = copy
else:
copies['secondary'] = copy
return copies
    def check_copy_ok(self, vdisk, pool, copy_type):
        """Classify the state of the vdisk copy in `pool`.

        :returns: (status, extended) where status is 'error', 'active',
            'copying' or None, and extended is a detail string or None.
        """
        try:
            copy_id = self.find_vdisk_copy_id(vdisk, pool)
            attrs = self.get_vdisk_copy_attrs(vdisk, copy_id)
        except (exception.VolumeBackendAPIException,
                exception.VolumeDriverException):
            extended = ('No %(type)s copy in pool %(pool)s' %
                        {'type': copy_type, 'pool': pool})
            return ('error', extended)
        if attrs['status'] != 'online':
            extended = 'The %s copy is offline' % copy_type
            return ('error', extended)
        if copy_type == 'secondary':
            if attrs['sync'] == 'yes':
                return ('active', None)
            else:
                # Secondary still syncing: report the progress percentage.
                progress_info = self.ssh.lsvdisksyncprogress(vdisk, copy_id)
                extended = 'progress: %s%%' % progress_info['progress']
                return ('copying', extended)
        return (None, None)
def _prepare_fc_map(self, fc_map_id, timeout):
self.ssh.prestartfcmap(fc_map_id)
mapping_ready = False
wait_time = 5
max_retries = (timeout / wait_time) + 1
for try_number in range(1, max_retries):
mapping_attrs = self._get_flashcopy_mapping_attributes(fc_map_id)
if (mapping_attrs is None or
'status' not in mapping_attrs):
break
if mapping_attrs['status'] == 'prepared':
mapping_ready = True
break
elif mapping_attrs['status'] == 'stopped':
self.ssh.prestartfcmap(fc_map_id)
elif mapping_attrs['status'] != 'preparing':
msg = (_('Unexecpted mapping status %(status)s for mapping'
'%(id)s. Attributes: %(attr)s')
% {'status': mapping_attrs['status'],
'id': fc_map_id,
'attr': mapping_attrs})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
greenthread.sleep(wait_time)
if not mapping_ready:
msg = (_('Mapping %(id)s prepare failed to complete within the'
'allotted %(to)d seconds timeout. Terminating.')
% {'id': fc_map_id,
'to': timeout})
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
    def run_flashcopy(self, source, target, timeout, full_copy=True):
        """Create a FlashCopy mapping from the source to the target.

        The mapping is created, prepared (waiting up to `timeout` seconds)
        and then started.
        """
        LOG.debug('enter: run_flashcopy: execute FlashCopy from source '
                  '%(source)s to target %(target)s' %
                  {'source': source, 'target': target})
        fc_map_id = self.ssh.mkfcmap(source, target, full_copy)
        self._prepare_fc_map(fc_map_id, timeout)
        self.ssh.startfcmap(fc_map_id)
        LOG.debug('leave: run_flashcopy: FlashCopy started from '
                  '%(source)s to %(target)s' %
                  {'source': source, 'target': target})
def _get_vdisk_fc_mappings(self, vdisk):
"""Return FlashCopy mappings that this vdisk is associated with."""
mapping_ids = []
resp = self.ssh.lsvdiskfcmappings(vdisk)
for id in resp.select('id'):
mapping_ids.append(id)
return mapping_ids
def _get_flashcopy_mapping_attributes(self, fc_map_id):
resp = self.ssh.lsfcmap(fc_map_id)
if not len(resp):
return None
return resp[0]
    def _check_vdisk_fc_mappings(self, name, allow_snaps=True):
        """FlashCopy mapping check helper.

        Invoked repeatedly by a FixedIntervalLoopingCall; raises
        LoopingCallDone(True) when no mappings remain (or none need waiting),
        LoopingCallDone(False) when a snapshot blocks removal and
        allow_snaps is False.
        """
        LOG.debug('Loopcall: _check_vdisk_fc_mappings(), vdisk %s' % name)
        mapping_ids = self._get_vdisk_fc_mappings(name)
        wait_for_copy = False
        for map_id in mapping_ids:
            attrs = self._get_flashcopy_mapping_attributes(map_id)
            if not attrs:
                # Mapping disappeared between listing and inspection.
                continue
            source = attrs['source_vdisk_name']
            target = attrs['target_vdisk_name']
            copy_rate = attrs['copy_rate']
            status = attrs['status']
            if copy_rate == '0':
                # copy_rate 0 means a snapshot-style (no background copy)
                # mapping.
                if source == name:
                    # Vdisk with snapshots. Return False if snapshot
                    # not allowed.
                    if not allow_snaps:
                        raise loopingcall.LoopingCallDone(retvalue=False)
                    # Kick off a background copy so the mapping auto-deletes.
                    self.ssh.chfcmap(map_id, copyrate='50', autodel='on')
                    wait_for_copy = True
                else:
                    # A snapshot
                    if target != name:
                        msg = (_('Vdisk %(name)s not involved in '
                                 'mapping %(src)s -> %(tgt)s') %
                               {'name': name, 'src': source, 'tgt': target})
                        LOG.error(msg)
                        raise exception.VolumeDriverException(message=msg)
                    if status in ['copying', 'prepared']:
                        self.ssh.stopfcmap(map_id)
                        # Need to wait for the fcmap to change to
                        # stopped state before remove fcmap
                        wait_for_copy = True
                    elif status in ['stopping', 'preparing']:
                        wait_for_copy = True
                    else:
                        self.ssh.rmfcmap(map_id)
            # Case 4: Copy in progress - wait and will autodelete
            else:
                if status == 'prepared':
                    self.ssh.stopfcmap(map_id)
                    self.ssh.rmfcmap(map_id)
                elif status == 'idle_or_copied':
                    # Prepare failed
                    self.ssh.rmfcmap(map_id)
                else:
                    wait_for_copy = True
        if not wait_for_copy or not len(mapping_ids):
            raise loopingcall.LoopingCallDone(retvalue=True)
    def ensure_vdisk_no_fc_mappings(self, name, allow_snaps=True):
        """Ensure vdisk has no flashcopy mappings.

        Polls _check_vdisk_fc_mappings until it signals completion.
        :returns: True on success, False when allow_snaps is False and a
            snapshot blocks removal.
        """
        timer = loopingcall.FixedIntervalLoopingCall(
            self._check_vdisk_fc_mappings, name, allow_snaps)
        # Create a timer greenthread. The default volume service heart
        # beat is every 10 seconds. The flashcopy usually takes hours
        # before it finishes. Don't set the sleep interval shorter
        # than the heartbeat. Otherwise volume service heartbeat
        # will not be serviced.
        LOG.debug('Calling _ensure_vdisk_no_fc_mappings: vdisk %s'
                  % name)
        ret = timer.start(interval=self.check_fcmapping_interval).wait()
        timer.stop()
        return ret
def delete_vdisk(self, vdisk, force):
"""Ensures that vdisk is not part of FC mapping and deletes it."""
LOG.debug('enter: delete_vdisk: vdisk %s' % vdisk)
if not self.is_vdisk_defined(vdisk):
LOG.info(_LI('Tried to delete non-existant vdisk %s.') % vdisk)
return
self.ensure_vdisk_no_fc_mappings(vdisk)
self.ssh.rmvdisk(vdisk, force=force)
LOG.debug('leave: delete_vdisk: vdisk %s' % vdisk)
    def create_copy(self, src, tgt, src_id, config, opts,
                    full_copy, pool=None):
        """Create a new snapshot using FlashCopy.

        Creates the target vdisk sized like the source, then runs a
        FlashCopy from src to tgt; the target is deleted on failure.
        """
        LOG.debug('enter: create_copy: snapshot %(src)s to %(tgt)s' %
                  {'tgt': tgt, 'src': src})
        src_attrs = self.get_vdisk_attributes(src)
        if src_attrs is None:
            msg = (_('create_copy: Source vdisk %(src)s (%(src_id)s) '
                     'does not exist') % {'src': src, 'src_id': src_id})
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)
        src_size = src_attrs['capacity']
        # In case we need to use a specific pool
        if not pool:
            pool = config.storwize_svc_volpool_name
        # Target created with the same capacity as the source ('b' units).
        self.create_vdisk(tgt, src_size, 'b', pool, opts)
        timeout = config.storwize_svc_flashcopy_timeout
        try:
            self.run_flashcopy(src, tgt, timeout, full_copy=full_copy)
        except Exception:
            # Clean up the half-created target before re-raising.
            with excutils.save_and_reraise_exception():
                self.delete_vdisk(tgt, True)
        LOG.debug('leave: _create_copy: snapshot %(tgt)s from '
                  'vdisk %(src)s' %
                  {'tgt': tgt, 'src': src})
    def extend_vdisk(self, vdisk, amount):
        """Grow the vdisk by `amount` via expandvdisksize."""
        self.ssh.expandvdisksize(vdisk, amount)
    def add_vdisk_copy(self, vdisk, dest_pool, volume_type, state, config):
        """Add a vdisk copy in the given pool.

        :returns: (orig_copy_id, new_copy_id) tuple.
        :raises VolumeDriverException: when the vdisk already has two
            copies or has no existing copy to mirror from.
        """
        resp = self.ssh.lsvdiskcopy(vdisk)
        if len(resp) > 1:
            msg = (_('add_vdisk_copy failed: A copy of volume %s exists. '
                     'Adding another copy would exceed the limit of '
                     '2 copies.') % vdisk)
            raise exception.VolumeDriverException(message=msg)
        orig_copy_id = resp[0].get("copy_id", None)
        if orig_copy_id is None:
            msg = (_('add_vdisk_copy started without a vdisk copy in the '
                     'expected pool.'))
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)
        # Derive creation parameters from the volume type (or defaults).
        if volume_type is None:
            opts = self.get_vdisk_params(config, state, None)
        else:
            opts = self.get_vdisk_params(config, state, volume_type['id'],
                                         volume_type=volume_type)
        params = self._get_vdisk_create_params(opts)
        new_copy_id = self.ssh.addvdiskcopy(vdisk, dest_pool, params)
        return (orig_copy_id, new_copy_id)
def is_vdisk_copy_synced(self, vdisk, copy_id):
sync = self.ssh.lsvdiskcopy(vdisk, copy_id=copy_id)[0]['sync']
if sync == 'yes':
return True
return False
    def rm_vdisk_copy(self, vdisk, copy_id):
        """Remove the given copy of the vdisk."""
        self.ssh.rmvdiskcopy(vdisk, copy_id)
@staticmethod
def can_migrate_to_host(host, state):
if 'location_info' not in host['capabilities']:
return None
info = host['capabilities']['location_info']
try:
(dest_type, dest_id, dest_pool) = info.split(':')
except ValueError:
return None
if (dest_type != 'StorwizeSVCDriver' or dest_id != state['system_id']):
return None
return dest_pool
    def add_vdisk_qos(self, vdisk, qos):
        """Add the QoS configuration to the volume."""
        # NOTE(review): dict.iteritems() is Python 2 only.
        for key, value in qos.iteritems():
            if key in self.svc_qos_keys.keys():
                # Only keys the driver supports are pushed to the array.
                param = self.svc_qos_keys[key]['param']
                self.ssh.chvdisk(vdisk, ['-' + param, str(value)])
    def update_vdisk_qos(self, vdisk, qos):
        """Update all the QoS in terms of a key and value.

        svc_qos_keys saves all the supported QoS parameters. Going through
        this dict, we set the new values to all the parameters. If QoS is
        available in the QoS configuration, the value is taken from it;
        if not, the value will be set to default.
        """
        # NOTE(review): dict.iteritems() is Python 2 only.
        for key, value in self.svc_qos_keys.iteritems():
            param = value['param']
            if key in qos.keys():
                # If the value is set in QoS, take the value from
                # the QoS configuration.
                v = qos[key]
            else:
                # If not, set the value to default.
                v = value['default']
            self.ssh.chvdisk(vdisk, ['-' + param, str(v)])
    def disable_vdisk_qos(self, vdisk, qos):
        """Disable the QoS.

        Resets each supported parameter present in `qos` back to its
        default value on the array.
        """
        for key, value in qos.iteritems():
            if key in self.svc_qos_keys.keys():
                param = self.svc_qos_keys[key]['param']
                # Take the default value.
                value = self.svc_qos_keys[key]['default']
                self.ssh.chvdisk(vdisk, ['-' + param, value])
    def change_vdisk_options(self, vdisk, changes, opts, state):
        """Apply the option names listed in `changes` to a vdisk via chvdisk."""
        # Convert option values to the CLI's expected representations.
        if 'warning' in opts:
            opts['warning'] = '%s%%' % str(opts['warning'])
        if 'easytier' in opts:
            opts['easytier'] = 'on' if opts['easytier'] else 'off'
        if 'autoexpand' in opts:
            opts['autoexpand'] = 'on' if opts['autoexpand'] else 'off'
        for key in changes:
            self.ssh.chvdisk(vdisk, ['-' + key, opts[key]])
    def change_vdisk_iogrp(self, vdisk, state, iogrp):
        """Move a vdisk to a new IO group.

        `iogrp[0]` is the group moved to / granted access, `iogrp[1]` is
        the group whose access is removed.
        """
        if state['code_level'] < (6, 4, 0, 0):
            # Firmware below 6.4.0.0 cannot move vdisks between IO groups.
            LOG.debug('Ignore change IO group as storage code level is '
                      '%(code_level)s, below the required 6.4.0.0' %
                      {'code_level': state['code_level']})
        else:
            self.ssh.movevdisk(vdisk, str(iogrp[0]))
            self.ssh.addvdiskaccess(vdisk, str(iogrp[0]))
            self.ssh.rmvdiskaccess(vdisk, str(iogrp[1]))
def vdisk_by_uid(self, vdisk_uid):
"""Returns the properties of the vdisk with the specified UID.
Returns None if no such disk exists.
"""
vdisks = self.ssh.lsvdisks_from_filter('vdisk_UID', vdisk_uid)
if len(vdisks) == 0:
return None
if len(vdisks) != 1:
msg = (_('Expected single vdisk returned from lsvdisk when '
'filtering on vdisk_UID. %{count}s were returned.') %
{'count': len(vdisks)})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
vdisk = vdisks.result[0]
return self.ssh.lsvdisk(vdisk['name'])
def is_vdisk_in_use(self, vdisk):
"""Returns True if the specified vdisk is mapped to at least 1 host."""
resp = self.ssh.lsvdiskhostmap(vdisk)
return len(resp) != 0
    def rename_vdisk(self, vdisk, new_name):
        """Rename a vdisk via 'chvdisk -name'."""
        self.ssh.chvdisk(vdisk, ['-name', new_name])
    def change_vdisk_primary_copy(self, vdisk, copy_id):
        """Make the given copy the primary copy of the vdisk."""
        self.ssh.chvdisk(vdisk, ['-primary', copy_id])
| {
"content_hash": "36187469852cc86224abb1b378592b9c",
"timestamp": "",
"source": "github",
"line_count": 956,
"max_line_length": 79,
"avg_line_length": 41.49058577405858,
"alnum_prop": 0.5260809277700744,
"repo_name": "hguemar/cinder",
"id": "acc4de9fae7e1fbe92b0df37fd5e2bcf88f8f362",
"size": "40292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/ibm/storwize_svc/helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3322"
},
{
"name": "Python",
"bytes": "10010542"
},
{
"name": "Shell",
"bytes": "9917"
}
],
"symlink_target": ""
} |
from .schema import Class, Field, Union, Constant, Map, Sequence, Boolean, Integer, String, Base64, SchemaError
class Registry(object):
    """Connection settings for a generic Docker registry."""

    def __init__(self, type, url, verify, user, password, namespace=None):
        # Attribute names deliberately mirror the constructor arguments;
        # other code reads these fields directly.
        (self.type, self.url, self.verify, self.user,
         self.password, self.namespace) = (type, url, verify, user,
                                           password, namespace)
# Schema object describing the "type: docker" registry configuration block;
# validated input is materialized as a Registry instance.
DOCKER = Class(
    "registry:docker",
    """A generic Docker registry.""",
    Registry,
    Field("type", Constant('docker'), docs="This must be 'docker' for docker registries"),
    Field("url", String(), docs="The url of the docker registry."),
    Field("verify", Boolean(), default=True,
          docs="A boolean that indicates whether or not to verify the SSL connection to the registry. This defaults to true. Set this to false if you are using a registry with self-signed certs."),
    Field("user", String(), default=None, docs="The docker user."),
    Field("password", Base64(), default=None, docs="The docker password, base64 encoded."),
    Field("namespace", String(), docs="The namespace for the docker registry. For docker hub this is a user or an organization. This is used as the first path component of the registry URL, for example: registry.hub.docker.com/<namespace>")
)
class GCRRegistry(object):
    """Connection settings for a Google Container Registry (GCR)."""

    def __init__(self, type, url, project, key=None):
        self.key = key
        self.project = project
        self.url = url
        self.type = type
# Schema node for the "gcr" registry block; builds GCRRegistry instances.
GCR = Class(
    "registry:gcr",
    """A Google Cloud registry.""",
    GCRRegistry,
    Field("type", Constant('gcr'), docs="The type of the registry; this will be 'gcr' for Google registries"),
    Field("url", String(), docs="The url of the registry, e.g. `gcr.io`."),
    Field("project", String(), docs="The Google project name."),
    Field("key", Base64(), default=None, docs="The base64 encoded JSON key used for authentication.")
)
class ECRRegistry(object):
    """Connection settings for an Amazon ECR registry.

    All credential fields are optional; when absent the AWS SDK's normal
    credential resolution presumably applies (handled elsewhere).
    """

    def __init__(self, type, account=None, region=None, aws_access_key_id=None, aws_secret_access_key=None):
        self.aws_secret_access_key = aws_secret_access_key
        self.aws_access_key_id = aws_access_key_id
        self.region = region
        self.account = account
        self.type = type
# Schema node for the "ecr" registry block; builds ECRRegistry instances.
ECR = Class(
    "registry:ecr",
    """An Amazon ECR registry.""",
    ECRRegistry,
    Field("type", Constant('ecr'), docs="The type of the registry; this will be 'ecr' for amazon registries"),
    # NOTE(review): String is given ("string", "integer") here, presumably so
    # account ids typed as bare integers in YAML are accepted too — confirm
    # against the schema module's String signature.
    Field("account", String("string", "integer"), default=None, docs="The amazon account id to use."),
    Field("region", String(), default=None, docs="The Amazon region to use."),
    Field("aws_access_key_id", String(), default=None, docs="The id of the AWS access key to use."),
    Field("aws_secret_access_key", String(), default=None, docs="The AWS secret access key.")
)
class LocalRegistry(object):
    """Marker settings object for a local registry; carries only its type tag."""

    def __init__(self, type):
        self.type = type
# Schema node for the "local" registry block; builds LocalRegistry instances.
LOCAL = Class(
    "registry:local",
    """A local registry.""",
    LocalRegistry,
    Field("type", Constant('local'), docs="The type of the registry; this will be 'local' for local registries")
)
class Profile(object):
    """Per-profile settings: a dependency search path plus a registry.

    NOTE(review): a falsy search_path (None, [], …) is coerced to (), so the
    ``p.search_path is None`` fallback in Config can never fire for instances
    built through this constructor — confirm that is intended.
    """

    def __init__(self, search_path=None, registry=None):
        self.registry = registry
        self.search_path = search_path or ()
PROFILE = Class(
"profile",
"""
Profile-specific settings.
""",
Profile,
Field("search-path", Sequence(String()), "search_path", default=None, docs="Search path for service dependencies."),
Field("registry", Union(DOCKER, GCR, ECR, LOCAL), default=None)
)
class Config(object):
    """Top-level forge.yaml configuration object.

    Normalises the deprecated docker-repo/user/password trio into a Registry,
    guarantees a "default" profile exists, and back-fills each profile's
    search path and registry from the top-level values.
    """

    def __init__(self, search_path=None, registry=None, docker_repo=None, user=None, password=None, workdir=None,
                 profiles=None, concurrency=None):
        self.search_path = search_path or ()
        if registry:
            # The modern "registry" form is mutually exclusive with the
            # deprecated docker-repo/user/password fields.
            if docker_repo:
                raise SchemaError("cannot specify both registry and docker-repo")
            if user:
                raise SchemaError("cannot specify both registry and user")
            if password:
                raise SchemaError("cannot specify both registry and password")
        elif docker_repo:
            # Deprecated form: docker-repo is "<registry-url>/<namespace>".
            if "/" not in docker_repo:
                raise SchemaError("docker-repo must be in the form <registry-url>/<namespace>")
            url, namespace = docker_repo.split("/", 1)
            registry = Registry(type="docker",
                                url=url,
                                verify=True,
                                namespace=namespace,
                                user=user,
                                password=password)
        # If neither registry nor docker-repo was supplied, registry stays
        # None. (The original code fell into the docker-repo branch and
        # crashed with ``TypeError: argument of type 'NoneType' is not
        # iterable`` on the '"/" not in docker_repo' test.)
        # Note: workdir is accepted but ignored (deprecated field).
        self.registry = registry
        self.profiles = profiles or {}
        if "default" not in self.profiles:
            self.profiles["default"] = Profile(search_path=self.search_path, registry=self.registry)
        for p in self.profiles.values():
            # Back-fill per-profile gaps from the top-level settings.
            # NOTE(review): Profile.__init__ coerces a None search_path to (),
            # so the "is None" test below may never fire for schema-loaded
            # profiles — confirm intended.
            if p.search_path is None:
                p.search_path = self.search_path
            if p.registry is None:
                p.registry = self.registry
        self.concurrency = concurrency
CONFIG = Class(
"forge.yaml",
"""
The forge.yaml file contains the global Forge configuration information. Currently this consists of Docker Registry configuration and credentials.
A forge.yaml is automatically created as part of the forge setup process; it can also be created by hand.
""",
Config,
*(tuple(PROFILE.fields.values()) +
(Field("docker-repo", String(), "docker_repo", default=None, docs="Deprecated, use registry instead."),
Field("user", String(), default=None, docs="Deprecated, use registry instead."),
Field("password", Base64(), default=None, docs="Deprecated, use registry instead."),
Field("workdir", String(), default=None, docs="deprecated"),
Field("profiles", Map(PROFILE), default=None, docs="A map keyed by profile-name of profile-specific settings."),
Field("concurrency", Integer(), default=5, docs="This controls the maximum number of parallel builds."),
))
)
def load(*args, **kwargs):
    """Parse a forge.yaml document into a Config (delegates to CONFIG.load)."""
    return CONFIG.load(*args, **kwargs)
| {
"content_hash": "d84c2b900bd08ef998312b405402d6d6",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 240,
"avg_line_length": 42.0551724137931,
"alnum_prop": 0.6280747786159396,
"repo_name": "datawire/forge",
"id": "520fa71937fc45871664e98951835ae8adebaa67",
"size": "6693",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "forge/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1775"
},
{
"name": "Makefile",
"bytes": "261"
},
{
"name": "Python",
"bytes": "329355"
},
{
"name": "Ruby",
"bytes": "1398"
},
{
"name": "Shell",
"bytes": "9852"
}
],
"symlink_target": ""
} |
# -- Sphinx configuration for the bachelor-thesis documentation -------------
# Flat config script consumed by sphinx-build; every assignment below is a
# standard Sphinx setting.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# NOTE(review): sphinx.ext.pngmath was removed in Sphinx 1.8 (replaced by
# sphinx.ext.imgmath) — confirm the Sphinx version this project builds with.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'bachelor-thesis'
copyright = '2015, Dusty Wind'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bachelor-thesisdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'bachelor-thesis.tex', 'bachelor-thesis Documentation',
'Dusty Wind', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bachelor-thesis', 'bachelor-thesis Documentation',
['Dusty Wind'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'bachelor-thesis', 'bachelor-thesis Documentation',
'Dusty Wind', 'bachelor-thesis', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
# NOTE(review): the bare-URL mapping is the legacy intersphinx form; newer
# Sphinx expects named entries like {'python': ('https://docs.python.org/3', None)}.
intersphinx_mapping = {'http://docs.python.org/': None}
| {
"content_hash": "ba59eae29e2e811aa87b227ec4e850c8",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 193,
"avg_line_length": 32.94871794871795,
"alnum_prop": 0.703242542153048,
"repo_name": "dustywind/bachelor-thesis",
"id": "03184cc606ae0a260fde89ea5d51556810bab86f",
"size": "8159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "impl/documentation/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2554"
},
{
"name": "HTML",
"bytes": "4363"
},
{
"name": "JavaScript",
"bytes": "26943"
},
{
"name": "Makefile",
"bytes": "10588"
},
{
"name": "Python",
"bytes": "109888"
},
{
"name": "Shell",
"bytes": "3012"
},
{
"name": "TeX",
"bytes": "163624"
}
],
"symlink_target": ""
} |
import os
import re
import sys
from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE
import six
from _version import __version__
# imported lazily to avoid startup performance hit if it isn't used
# (populated by getObj below; the ``compiler`` package exists only on Python 2)
compiler = None
# A dictionary mapping BOM to
# the encoding to decode with, and what to set the
# encoding attribute to.
# The second element is None when the BOM name already matches the codec.
BOMS = {
    BOM_UTF8: ('utf_8', None),
    BOM_UTF16_BE: ('utf16_be', 'utf_16'),
    BOM_UTF16_LE: ('utf16_le', 'utf_16'),
    BOM_UTF16: ('utf_16', 'utf_16'),
}
# All legal variants of the BOM codecs.
# TODO: the list of aliases is not meant to be exhaustive, is there a
# better way ?
# Maps every accepted spelling of an encoding name to its canonical form.
BOM_LIST = {
    'utf_16': 'utf_16',
    'u16': 'utf_16',
    'utf16': 'utf_16',
    'utf-16': 'utf_16',
    'utf16_be': 'utf16_be',
    'utf_16_be': 'utf16_be',
    'utf-16be': 'utf16_be',
    'utf16_le': 'utf16_le',
    'utf_16_le': 'utf16_le',
    'utf-16le': 'utf16_le',
    'utf_8': 'utf_8',
    'u8': 'utf_8',
    'utf': 'utf_8',
    'utf8': 'utf_8',
    'utf-8': 'utf_8',
}
# Map of encodings to the BOM to write.
# The None key is presumably the fallback BOM used when no encoding is
# set — confirm at the write call site.
BOM_SET = {
    'utf_8': BOM_UTF8,
    'utf_16': BOM_UTF16,
    'utf16_be': BOM_UTF16_BE,
    'utf16_le': BOM_UTF16_LE,
    None: BOM_UTF8
}
def match_utf8(encoding):
    """Return True if *encoding* is any recognised alias for UTF-8."""
    normalized = encoding.lower()
    return BOM_LIST.get(normalized) == 'utf_8'
# Quote strings used for writing values
squot = "'%s'"       # single-quoted
dquot = '"%s"'       # double-quoted
noquot = "%s"        # unquoted
wspace_plus = ' \r\n\v\t\'"'   # characters that force quoting
tsquot = '"""%s"""'  # triple-double-quoted
tdquot = "'''%s'''"  # triple-single-quoted
# Sentinel for use in getattr calls to replace hasattr
MISSING = object()
# Public API of the configobj module.
__all__ = (
    'DEFAULT_INDENT_TYPE',
    'DEFAULT_INTERPOLATION',
    'ConfigObjError',
    'NestingError',
    'ParseError',
    'DuplicateError',
    'ConfigspecError',
    'ConfigObj',
    'SimpleVal',
    'InterpolationError',
    'InterpolationLoopError',
    'MissingInterpolationOption',
    'RepeatSectionError',
    'ReloadError',
    'UnreprError',
    'UnknownType',
    'flatten_errors',
    'get_extra_values'
)
DEFAULT_INTERPOLATION = 'configparser'
DEFAULT_INDENT_TYPE = ' '
MAX_INTERPOL_DEPTH = 10
OPTION_DEFAULTS = {
'interpolation': True,
'raise_errors': False,
'list_values': True,
'create_empty': False,
'file_error': False,
'configspec': None,
'stringify': True,
# option may be set to one of ('', ' ', '\t')
'indent_type': None,
'encoding': None,
'default_encoding': None,
'unrepr': False,
'write_empty_values': False,
}
# this could be replaced if six is used for compatibility, or there are no
# more assertions about items being a string
def getObj(s):
    """Parse *s* as the right-hand side of an assignment and return the
    ``compiler`` AST node for the value expression.

    NOTE(review): the lazily imported ``compiler`` package is Python 2
    only (removed in Python 3), so this code path cannot run on Python 3.
    """
    global compiler
    if compiler is None:
        import compiler
    s = "a=" + s
    p = compiler.parse(s)
    # drill down: Module -> Stmt -> Assign -> value node
    return p.getChildren()[1].getChildren()[0].getChildren()[1]
class UnknownType(Exception):
    """Raised by Builder when an AST node type is not supported."""
    pass
class Builder(object):
    """Rebuild plain Python values from ``compiler`` AST nodes.

    ``build`` dispatches on the node's class name to a matching
    ``build_<NodeName>`` method; only the literal-ish node types below are
    supported, anything else raises UnknownType.
    """

    def build(self, o):
        """Build the value represented by node *o*; raise UnknownType if unsupported."""
        # Dispatch by node class name. (The original code used ``m``
        # without ever assigning it, so every call died with NameError;
        # this getattr lookup restores the intended dispatch.)
        m = getattr(self, 'build_' + o.__class__.__name__, None)
        if m is None:
            raise UnknownType(o.__class__.__name__)
        return m(o)

    def build_List(self, o):
        """List literal: build each child element."""
        return list(map(self.build, o.getChildren()))

    def build_Const(self, o):
        """Constant node: carries its value directly."""
        return o.value

    def build_Dict(self, o):
        """Dict literal: children alternate key, value."""
        d = {}
        i = iter(map(self.build, o.getChildren()))
        for el in i:
            d[el] = next(i)
        return d

    def build_Tuple(self, o):
        """Tuple literal: build as a list, then convert."""
        return tuple(self.build_List(o))

    def build_Name(self, o):
        """Only the three literal names None/True/False are allowed."""
        if o.name == 'None':
            return None
        if o.name == 'True':
            return True
        if o.name == 'False':
            return False
        # An undefined Name
        raise UnknownType('Undefined Name')

    def build_Add(self, o):
        """Complex literal parsed as ``real + imag`` where imag is pure-imaginary."""
        real, imag = list(map(self.build_Const, o.getChildren()))
        try:
            real = float(real)
        except TypeError:
            raise UnknownType('Add')
        if not isinstance(imag, complex) or imag.real != 0.0:
            raise UnknownType('Add')
        return real+imag

    def build_Getattr(self, o):
        """Dotted name: resolve the attribute on the built parent value."""
        parent = self.build(o.expr)
        return getattr(parent, o.attrname)

    def build_UnarySub(self, o):
        """Unary minus applied to a constant."""
        return -self.build_Const(o.getChildren()[0])

    def build_UnaryAdd(self, o):
        """Unary plus applied to a constant."""
        return self.build_Const(o.getChildren()[0])
# Shared Builder instance. NOTE(review): apparently unused now that unrepr
# below relies on ast.literal_eval — candidate for removal; confirm no other
# module references it.
_builder = Builder()
def unrepr(s):
    """Parse a Python literal from *s*; empty/falsy input is returned unchanged."""
    if s:
        # literal_eval only evaluates literal syntax, so this is safe on
        # untrusted text (unlike eval).
        import ast
        return ast.literal_eval(s)
    return s
class ConfigObjError(SyntaxError):
    """
    Base class for every error that ConfigObj raises.

    Subclasses SyntaxError and carries the offending line and its number so
    callers can report where parsing failed.
    """
    def __init__(self, message='', line_number=None, line=''):
        SyntaxError.__init__(self, message)
        self.line = line
        self.line_number = line_number
class NestingError(ConfigObjError):
    """
    This error indicates a level of nesting that doesn't match.
    Carries line/line_number context from ConfigObjError.
    """
class ParseError(ConfigObjError):
    """
    This error indicates that a line is badly written.
    It is neither a valid ``key = value`` line,
    nor a valid section marker line.
    Carries line/line_number context from ConfigObjError.
    """
class ReloadError(IOError):
    """
    A 'reload' operation failed.
    This exception is a subclass of ``IOError``.
    """
    def __init__(self):
        # The message is fixed: reload only makes sense once a filename is set.
        IOError.__init__(self, 'reload failed, filename is not set.')
class DuplicateError(ConfigObjError):
    """
    The keyword or section specified already exists.
    Carries line/line_number context from ConfigObjError.
    """
class ConfigspecError(ConfigObjError):
    """
    An error occured whilst parsing a configspec.
    Carries line/line_number context from ConfigObjError.
    """
class InterpolationError(ConfigObjError):
    """Base class for the two interpolation errors (loop / missing option)."""
class InterpolationLoopError(InterpolationError):
    """Maximum interpolation depth exceeded in string interpolation."""
    def __init__(self, option):
        # Build the message first, then delegate to the base class.
        msg = 'interpolation loop detected in value "%s".' % option
        InterpolationError.__init__(self, msg)
class RepeatSectionError(ConfigObjError):
    """
    This error indicates additional sections in a section with a
    ``__many__`` (repeated) section.
    Carries line/line_number context from ConfigObjError.
    """
class MissingInterpolationOption(InterpolationError):
    """A value specified for interpolation was missing."""
    def __init__(self, option):
        InterpolationError.__init__(
            self,
            'missing option "%s" in interpolation.' % option)
class UnreprError(ConfigObjError):
    """An error parsing in unrepr mode."""
class InterpolationEngine(object):
    """
    A helper class to help perform string interpolation.
    This class is an abstract base class; its descendants perform
    the actual work.  Subclasses override ``_cookie``/``_KEYCRE`` and
    implement ``_parse_match``.
    """
    # compiled regexp to use in self.interpolate()
    _KEYCRE = re.compile(r"%\(([^)]*)\)s")
    _cookie = '%'

    def __init__(self, section):
        # the Section instance that "owns" this engine
        self.section = section

    def interpolate(self, key, value):
        """Return *value* with every interpolation reference expanded.

        Loop protection: a (key, section-name) backtrail is threaded through
        the recursion and InterpolationLoopError is raised on revisit.
        """
        # short-cut
        if not self._cookie in value:
            return value

        def recursive_interpolate(key, value, section, backtrail):
            """The function that does the actual work.
            ``value``: the string we're trying to interpolate.
            ``section``: the section in which that string was found
            ``backtrail``: a dict to keep track of where we've been,
            to detect and prevent infinite recursion loops
            This is similar to a depth-first-search algorithm.
            """
            # Have we been here already?
            if (key, section.name) in backtrail:
                # Yes - infinite loop detected
                raise InterpolationLoopError(key)
            # Place a marker on our backtrail so we won't come back here again
            backtrail[(key, section.name)] = 1
            # Now start the actual work
            match = self._KEYCRE.search(value)
            while match:
                # The actual parsing of the match is implementation-dependent,
                # so delegate to our helper function
                k, v, s = self._parse_match(match)
                if k is None:
                    # That's the signal that no further interpolation is needed
                    replacement = v
                else:
                    # Further interpolation may be needed to obtain final value
                    replacement = recursive_interpolate(k, v, s, backtrail)
                # Replace the matched string with its final value
                start, end = match.span()
                value = ''.join((value[:start], replacement, value[end:]))
                new_search_start = start + len(replacement)
                # Pick up the next interpolation key, if any, for next time
                # through the while loop
                match = self._KEYCRE.search(value, new_search_start)
            # Now safe to come back here again; remove marker from backtrail
            del backtrail[(key, section.name)]
            return value

        # Back in interpolate(), all we have to do is kick off the recursive
        # function with appropriate starting values
        value = recursive_interpolate(key, value, self.section, {})
        return value

    def _fetch(self, key):
        """Helper function to fetch values from owning section.
        Returns a 2-tuple: the value, and the section where it was found.
        Raises MissingInterpolationOption if the key resolves nowhere.
        """
        # switch off interpolation before we try and fetch anything !
        save_interp = self.section.main.interpolation
        self.section.main.interpolation = False
        # Start at section that "owns" this InterpolationEngine
        current_section = self.section
        while True:
            # try the current section first
            val = current_section.get(key)
            if val is not None and not isinstance(val, Section):
                break
            # try "DEFAULT" next
            val = current_section.get('DEFAULT', {}).get(key)
            if val is not None and not isinstance(val, Section):
                break
            # move up to parent and try again
            # top-level's parent is itself
            if current_section.parent is current_section:
                # reached top level, time to give up
                break
            current_section = current_section.parent
        # restore interpolation to previous value before returning
        self.section.main.interpolation = save_interp
        if val is None:
            raise MissingInterpolationOption(key)
        return val, current_section

    def _parse_match(self, match):
        """Implementation-dependent helper function.
        Will be passed a match object corresponding to the interpolation
        key we just found (e.g., "%(foo)s" or "$foo"). Should look up that
        key in the appropriate config file section (using the ``_fetch()``
        helper function) and return a 3-tuple: (key, value, section)
        ``key`` is the name of the key we're looking for
        ``value`` is the value found for that key
        ``section`` is a reference to the section where it was found
        ``key`` and ``section`` should be None if no further
        interpolation should be performed on the resulting value
        (e.g., if we interpolated "$$" and returned "$").
        """
        raise NotImplementedError()
class ConfigParserInterpolation(InterpolationEngine):
    """Behaves like ConfigParser: expands ``%(name)s`` references."""
    _cookie = '%'
    _KEYCRE = re.compile(r"%\(([^)]*)\)s")

    def _parse_match(self, match):
        # Group 1 is the name inside %(...)s; always needs a section lookup.
        key = match.group(1)
        value, section = self._fetch(key)
        return key, value, section
class TemplateInterpolation(InterpolationEngine):
    """Behaves like string.Template: expands ``$name`` / ``${name}`` / ``$$``."""
    _cookie = '$'
    _delimiter = '$'
    _KEYCRE = re.compile(r"""
        \$(?:
            (?P<escaped>\$)              |   # Two $ signs
            (?P<named>[_a-z][_a-z0-9]*)  |   # $name format
            {(?P<braced>[^}]*)}              # ${name} format
        )
        """, re.IGNORECASE | re.VERBOSE)

    def _parse_match(self, match):
        # Valid name (in or out of braces): fetch value from section
        key = match.group('named') or match.group('braced')
        if key is not None:
            value, section = self._fetch(key)
            return key, value, section
        # Escaped delimiter (e.g., $$): return single delimiter
        if match.group('escaped') is not None:
            # Return None for key and section to indicate it's time to stop
            return None, self._delimiter, None
        # Anything else: ignore completely, just return it unchanged
        return None, match.group(), None
# Registry of interpolation styles selectable via the 'interpolation' option.
interpolation_engines = {
    'configparser': ConfigParserInterpolation,
    'template': TemplateInterpolation,
}
def __newobj__(cls, *args):
    # Hack for pickle: Section.__reduce__ returns this as the reconstructor
    # so unpickling calls __new__ without running __init__.
    return cls.__new__(cls, *args)
class Section(dict):
    """
    A dictionary-like object that represents a section in a config file.
    It does string interpolation if the 'interpolation' attribute
    of the 'main' object is set to True.
    Interpolation is tried first from this object, then from the 'DEFAULT'
    section of this object, next from the parent and its 'DEFAULT' section,
    and so on until the main object is reached.
    A Section will behave like an ordered dictionary - following the
    order of the ``scalars`` and ``sections`` attributes.
    You can use this to change the order of members.
    Iteration follows the order: scalars, then sections.
    Pickling is supported via __reduce__/__setstate__.
    """
def __setstate__(self, state):
    # Unpickle: state is (dict contents, instance __dict__) as produced
    # by __reduce__.
    dict.update(self, state[0])
    self.__dict__.update(state[1])
def __reduce__(self):
    # Pickle support: recreate the instance with __newobj__ (bare __new__,
    # skipping __init__), then restore dict contents and attributes in
    # __setstate__.
    state = (dict(self), self.__dict__)
    return (__newobj__, (self.__class__,), state)
def __init__(self, parent, depth, main, indict=None, name=None):
    """
    * parent is the section above
    * depth is the depth level of this section
    * main is the main ConfigObj
    * indict is a dictionary to initialise the section with
    * name is the section's name (purely informational)
    """
    if indict is None:
        indict = {}
    dict.__init__(self)
    # used for nesting level *and* interpolation
    self.parent = parent
    # used for the interpolation attribute
    self.main = main
    # level of nesting depth of this Section
    self.depth = depth
    # purely for information
    self.name = name
    #
    self._initialise()
    # we do this explicitly so that __setitem__ is used properly
    # (rather than just passing to ``dict.__init__``)
    for entry, value in indict.items():
        self[entry] = value
def _initialise(self):
    # Reset all bookkeeping attributes to their empty state; called from
    # __init__ (and, in the full module, on reload paths).
    # the sequence of scalar values in this Section
    self.scalars = []
    # the sequence of sections in this Section
    self.sections = []
    # for comments :-)
    self.comments = {}
    self.inline_comments = {}
    # the configspec
    self.configspec = None
    # for defaults
    self.defaults = []
    self.default_values = {}
    self.extra_values = []
    self._created = False
def _interpolate(self, key, value):
    """Interpolate *value* with the engine selected by main.interpolation,
    caching the engine on the instance after the first use."""
    try:
        # do we already have an interpolation engine?
        engine = self._interpolation_engine
    except AttributeError:
        # not yet: first time running _interpolate(), so pick the engine
        name = self.main.interpolation
        if name == True:  # note that "if name:" would be incorrect here
            # backwards-compatibility: interpolation=True means use default
            name = DEFAULT_INTERPOLATION
        name = name.lower()  # so that "Template", "template", etc. all work
        class_ = interpolation_engines.get(name, None)
        if class_ is None:
            # invalid value for self.main.interpolation
            self.main.interpolation = False
            return value
        else:
            # save reference to engine so we don't have to do this again
            engine = self._interpolation_engine = class_(self)
    # let the engine do the actual work
    return engine.interpolate(key, value)
def __getitem__(self, key):
    """Fetch the item and do string interpolation."""
    val = dict.__getitem__(self, key)
    if self.main.interpolation:
        if isinstance(val, six.string_types):
            return self._interpolate(key, val)
        if isinstance(val, list):
            # interpolate string members of a list value; return a new
            # list only if anything actually changed
            def _check(entry):
                if isinstance(entry, six.string_types):
                    return self._interpolate(key, entry)
                return entry
            new = [_check(entry) for entry in val]
            if new != val:
                return new
    return val
def __setitem__(self, key, value, unrepr=False):
    """
    Correctly set a value.
    Making dictionary values Section instances.
    (We have to special case 'Section' instances - which are also dicts)
    Keys must be strings.
    Values need only be strings (or lists of strings) if
    ``main.stringify`` is set.
    ``unrepr`` must be set when setting a value to a dictionary, without
    creating a new sub-section.
    """
    if not isinstance(key, six.string_types):
        raise ValueError('The key "%s" is not a string.' % key)
    # add the comment
    if key not in self.comments:
        self.comments[key] = []
        self.inline_comments[key] = ''
    # remove the entry from defaults
    if key in self.defaults:
        self.defaults.remove(key)
    #
    if isinstance(value, Section):
        # already a Section: just record ordering and store
        if key not in self:
            self.sections.append(key)
        dict.__setitem__(self, key, value)
    elif isinstance(value, dict) and not unrepr:
        # First create the new depth level,
        # then create the section
        if key not in self:
            self.sections.append(key)
        new_depth = self.depth + 1
        dict.__setitem__(
            self,
            key,
            Section(
                self,
                new_depth,
                self.main,
                indict=value,
                name=key))
    else:
        # scalar value: optionally enforce string-ness when stringify is off
        if key not in self:
            self.scalars.append(key)
        if not self.main.stringify:
            if isinstance(value, six.string_types):
                pass
            elif isinstance(value, (list, tuple)):
                for entry in value:
                    if not isinstance(entry, six.string_types):
                        raise TypeError('Value is not a string "%s".' % entry)
            else:
                raise TypeError('Value is not a string "%s".' % value)
        dict.__setitem__(self, key, value)
def __delitem__(self, key):
    """Delete *key*, dropping it from the ordering and comment maps too."""
    dict.__delitem__(self, key)
    try:
        self.scalars.remove(key)
    except ValueError:
        self.sections.remove(key)
    del self.comments[key]
    del self.inline_comments[key]
def get(self, key, default=None):
    """``dict.get``, but routed through ``__getitem__`` so string
    interpolation still applies."""
    try:
        result = self[key]
    except KeyError:
        result = default
    return result
def update(self, indict):
    """
    Update from *indict* via our ``__setitem__`` so ordering and
    comments stay consistent.
    """
    for key in indict:
        self[key] = indict[key]
def pop(self, key, default=MISSING):
    """
    Remove *key* and return its (interpolated) value.
    If the key is absent, return *default* when given, else re-raise
    the KeyError.
    """
    try:
        value = self[key]
    except KeyError:
        if default is MISSING:
            raise
        return default
    del self[key]
    return value
def popitem(self):
    """Remove and return the first ``(key, value)`` pair (scalars first)."""
    ordered = self.scalars + self.sections
    if not ordered:
        raise KeyError(": 'popitem(): dictionary is empty'")
    first = ordered[0]
    value = self[first]
    del self[first]
    return first, value
def clear(self):
    """
    A version of clear that also affects scalars/sections
    Also clears comments and configspec.
    Leaves other attributes alone :
    depth/main/parent are not affected
    NOTE(review): default_values is also left untouched — confirm intended.
    """
    dict.clear(self)
    self.scalars = []
    self.sections = []
    self.comments = {}
    self.inline_comments = {}
    self.configspec = None
    self.defaults = []
    self.extra_values = []
def setdefault(self, key, default=None):
    """Like ``dict.setdefault`` but inserts through our ``__setitem__``
    so ordering/comments are maintained."""
    try:
        return self[key]
    except KeyError:
        self[key] = default
    return self[key]
def items(self):
    """D.items() -> list of D's (key, value) pairs, as 2-tuples"""
    ordered = self.scalars + self.sections
    return list(zip(ordered, [self[key] for key in ordered]))
def keys(self):
    """D.keys() -> list of D's keys (scalars first, then sections)"""
    return self.scalars + self.sections
def values(self):
    """D.values() -> list of D's values, in key order"""
    return [self[key] for key in self.keys()]
def iteritems(self):
    """D.iteritems() -> an iterator over the (key, value) items of D"""
    return iter(self.items())
def iterkeys(self):
    """D.iterkeys() -> an iterator over the keys of D"""
    return iter(self.keys())

# iterating a Section iterates its keys, scalars first
__iter__ = iterkeys
def itervalues(self):
    """D.itervalues() -> an iterator over the values of D"""
    return iter(self.values())
def __repr__(self):
    """x.__repr__() <==> repr(x)"""
    def _getval(key):
        # fall back to the raw stored value when interpolation references
        # a missing option, so repr never raises
        try:
            return self[key]
        except MissingInterpolationOption:
            return dict.__getitem__(self, key)
    return '{%s}' % ', '.join([('%s: %s' % (repr(key), repr(_getval(key))))
                               for key in (self.scalars + self.sections)])

__str__ = __repr__
__str__.__doc__ = "x.__str__() <==> str(x)"
# Extra methods - not in a normal dictionary

def dict(self):
    """
    Return a deepcopy of self as a dictionary.
    All members that are ``Section`` instances are recursively turned to
    ordinary dictionaries - by calling their ``dict`` method; list and
    tuple values are copied so the result shares no containers with self.
    """
    result = {}
    for key in self:
        value = self[key]
        if isinstance(value, Section):
            value = value.dict()
        elif isinstance(value, list):
            # create a copy rather than a reference
            value = list(value)
        elif isinstance(value, tuple):
            # create a copy rather than a reference
            value = tuple(value)
        result[key] = value
    return result
def merge(self, indict):
    """
    A recursive update - useful for merging config files.
    Dict-valued keys present on both sides are merged recursively;
    everything else is overwritten.
    >>> a = '''[section1]
    ...     option1 = True
    ...     [[subsection]]
    ...     more_options = False
    ...     # end of file'''.splitlines()
    >>> b = '''# File is user.ini
    ...     [section1]
    ...     option1 = False
    ...     # end of file'''.splitlines()
    >>> c1 = ConfigObj(b)
    >>> c2 = ConfigObj(a)
    >>> c2.merge(c1)
    >>> c2
    ConfigObj({'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}})
    """
    for key, val in list(indict.items()):
        if (key in self and isinstance(self[key], dict) and
                isinstance(val, dict)):
            self[key].merge(val)
        else:
            self[key] = val
def rename(self, oldkey, newkey):
"""
Change a keyname to another, without changing position in sequence.
Implemented so that transformations can be made on keys,
as well as on values. (used by encode and decode)
Also renames comments.
"""
if oldkey in self.scalars:
the_list = self.scalars
elif oldkey in self.sections:
the_list = self.sections
else:
raise KeyError('Key "%s" not found.' % oldkey)
pos = the_list.index(oldkey)
#
val = self[oldkey]
dict.__delitem__(self, oldkey)
dict.__setitem__(self, newkey, val)
the_list.remove(oldkey)
the_list.insert(pos, newkey)
comm = self.comments[oldkey]
inline_comment = self.inline_comments[oldkey]
del self.comments[oldkey]
del self.inline_comments[oldkey]
self.comments[newkey] = comm
self.inline_comments[newkey] = inline_comment
    def walk(self, function, raise_errors=True,
             call_on_sections=False, **keywargs):
        """
        Walk every member and call a function on the keyword and value.
        Return a dictionary of the return values
        If the function raises an exception, raise the errror
        unless ``raise_errors=False``, in which case set the return value to
        ``False``.
        Any unrecognised keyword arguments you pass to walk, will be pased on
        to the function you pass in.
        Note: if ``call_on_sections`` is ``True`` then - on encountering a
        subsection, *first* the function is called for the *whole* subsection,
        and then recurses into it's members. This means your function must be
        able to handle strings, dictionaries and lists. This allows you
        to change the key of subsections as well as for ordinary members. The
        return value when called on the whole subsection has to be discarded.
        See the encode and decode methods for examples, including functions.
        .. admonition:: caution
            You can use ``walk`` to transform the names of members of a section
            but you mustn't add or delete members.
        >>> config = '''[XXXXsection]
        ... XXXXkey = XXXXvalue'''.splitlines()
        >>> cfg = ConfigObj(config)
        >>> cfg
        ConfigObj({'XXXXsection': {'XXXXkey': 'XXXXvalue'}})
        >>> def transform(section, key):
        ...     val = section[key]
        ...     newkey = key.replace('XXXX', 'CLIENT1')
        ...     section.rename(key, newkey)
        ...     if isinstance(val, (tuple, list, dict)):
        ...         pass
        ...     else:
        ...         val = val.replace('XXXX', 'CLIENT1')
        ...         section[newkey] = val
        >>> cfg.walk(transform, call_on_sections=True)
        {'CLIENT1section': {'CLIENT1key': None}}
        >>> cfg
        ConfigObj({'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}})
        """
        out = {}
        # scalars first
        # Index-based iteration is deliberate: ``function`` may rename the
        # current entry, so the (possibly new) name is re-read from the list
        # after each call before it is used as a result key.
        for i in range(len(self.scalars)):
            entry = self.scalars[i]
            try:
                val = function(self, entry, **keywargs)
                # bound again in case name has changed
                entry = self.scalars[i]
                out[entry] = val
            except Exception:
                if raise_errors:
                    raise
                else:
                    entry = self.scalars[i]
                    out[entry] = False
        # then sections
        for i in range(len(self.sections)):
            entry = self.sections[i]
            if call_on_sections:
                try:
                    function(self, entry, **keywargs)
                except Exception:
                    if raise_errors:
                        raise
                    else:
                        entry = self.sections[i]
                        out[entry] = False
                # bound again in case name has changed
                entry = self.sections[i]
            # previous result is discarded
            out[entry] = self[entry].walk(
                function,
                raise_errors=raise_errors,
                call_on_sections=call_on_sections,
                **keywargs)
        return out
    def as_bool(self, key):
        """
        Accepts a key as input. The corresponding value must be a string or
        the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to
        retain compatibility with Python 2.2.
        If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns
        ``True``.
        If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns
        ``False``.
        ``as_bool`` is not case sensitive.
        Any other input will raise a ``ValueError``.
        >>> a = ConfigObj()
        >>> a['a'] = 'fish'
        >>> a.as_bool('a')
        Traceback (most recent call last):
        ValueError: Value "fish" is neither True nor False
        >>> a['b'] = 'True'
        >>> a.as_bool('b')
        1
        >>> a['b'] = 'off'
        >>> a.as_bool('b')
        0
        """
        val = self[key]
        # ``==`` (not ``is``) is deliberate here: per the docstring, the ints
        # 1 and 0 must be accepted alongside the real booleans.
        if val == True:
            return True
        elif val == False:
            return False
        else:
            try:
                if not isinstance(val, six.string_types):
                    # TODO: Why do we raise a KeyError here?
                    raise KeyError()
                else:
                    # Look the lowercased string up in the shared bool table
                    # on the root ConfigObj (``_bools``).
                    return self.main._bools[val.lower()]
            except KeyError:
                raise ValueError('Value "%s" is neither True nor False' % val)
def as_int(self, key):
"""
A convenience method which coerces the specified value to an integer.
If the value is an invalid literal for ``int``, a ``ValueError`` will
be raised.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_int('a')
Traceback (most recent call last):
ValueError: invalid literal for int() with base 10: 'fish'
>>> a['b'] = '1'
>>> a.as_int('b')
1
>>> a['b'] = '3.2'
>>> a.as_int('b')
Traceback (most recent call last):
ValueError: invalid literal for int() with base 10: '3.2'
"""
return int(self[key])
def as_float(self, key):
"""
A convenience method which coerces the specified value to a float.
If the value is an invalid literal for ``float``, a ``ValueError`` will
be raised.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_float('a') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ValueError: invalid literal for float(): fish
>>> a['b'] = '1'
>>> a.as_float('b')
1.0
>>> a['b'] = '3.2'
>>> a.as_float('b') #doctest: +ELLIPSIS
3.2...
"""
return float(self[key])
def as_list(self, key):
"""
A convenience method which fetches the specified value, guaranteeing
that it is a list.
>>> a = ConfigObj()
>>> a['a'] = 1
>>> a.as_list('a')
[1]
>>> a['a'] = (1,)
>>> a.as_list('a')
[1]
>>> a['a'] = [1]
>>> a.as_list('a')
[1]
"""
result = self[key]
if isinstance(result, (tuple, list)):
return list(result)
return [result]
def restore_default(self, key):
"""
Restore (and return) default value for the specified key.
This method will only work for a ConfigObj that was created
with a configspec and has been validated.
If there is no default value for this key, ``KeyError`` is raised.
"""
default = self.default_values[key]
dict.__setitem__(self, key, default)
if key not in self.defaults:
self.defaults.append(key)
return default
def restore_defaults(self):
"""
Recursively restore default values to all members
that have them.
This method will only work for a ConfigObj that was created
with a configspec and has been validated.
It doesn't delete or modify entries without default values.
"""
for key in self.default_values:
self.restore_default(key)
for section in self.sections:
self[section].restore_defaults()
class ConfigObj(Section):
    """An object to read, create, and write config files."""
    # Matches a ``key = value`` line; groups: indent, key, raw value.
    _keyword = re.compile(r'''^ # line start
        (\s*)                   # indentation
        (                       # keyword
            (?:".*?")|          # double quotes
            (?:'.*?')|          # single quotes
            (?:[^'"=].*?)       # no quotes
        )
        \s*=\s*                 # divider
        (.*)                    # value (including list values and comments)
        $   # line end
        ''',
        re.VERBOSE)
    # Matches a ``[section]`` marker line; nesting depth is the bracket count.
    _sectionmarker = re.compile(r'''^
        (\s*)                     # 1: indentation
        ((?:\[\s*)+)              # 2: section marker open
        (                         # 3: section name open
            (?:"\s*\S.*?\s*")|    # at least one non-space with double quotes
            (?:'\s*\S.*?\s*')|    # at least one non-space with single quotes
            (?:[^'"\s].*?)        # at least one non-space unquoted
        )                         # section name close
        ((?:\s*\])+)              # 4: section marker close
        \s*(\#.*)?                # 5: optional comment
        $''',
        re.VERBOSE)
    # this regexp pulls list values out as a single string
    # or single values and comments
    # FIXME: this regex adds a '' to the end of comma terminated lists
    #   workaround in ``_handle_value``
    _valueexp = re.compile(r'''^
        (?:
            (?:
                (
                    (?:
                        (?:
                            (?:".*?")|              # double quotes
                            (?:'.*?')|              # single quotes
                            (?:[^'",\#][^,\#]*?)    # unquoted
                        )
                        \s*,\s*                     # comma
                    )*      # match all list items ending in a comma (if any)
                )
                (
                    (?:".*?")|                      # double quotes
                    (?:'.*?')|                      # single quotes
                    (?:[^'",\#\s][^,]*?)|           # unquoted
                    (?:(?<!,))                      # Empty value
                )?          # last item in a list - or string value
            )|
            (,)             # alternatively a single comma - empty list
        )
        \s*(\#.*)?          # optional comment
        $''',
        re.VERBOSE)
    # use findall to get the members of a list value
    _listvalueexp = re.compile(r'''
        (
            (?:".*?")|          # double quotes
            (?:'.*?')|          # single quotes
            (?:[^'",\#]?.*?)    # unquoted
        )
        \s*,\s*                 # comma
        ''',
        re.VERBOSE)
    # this regexp is used for the value
    # when lists are switched off
    _nolistvalue = re.compile(r'''^
        (
            (?:".*?")|          # double quotes
            (?:'.*?')|          # single quotes
            (?:[^'"\#].*?)|     # unquoted
            (?:)                # Empty value
        )
        \s*(\#.*)?              # optional comment
        $''',
        re.VERBOSE)
    # regexes for finding triple quoted values on one line
    _single_line_single = re.compile(r"^'''(.*?)'''\s*(#.*)?$")
    _single_line_double = re.compile(r'^"""(.*?)"""\s*(#.*)?$')
    _multi_line_single = re.compile(r"^(.*?)'''\s*(#.*)?$")
    _multi_line_double = re.compile(r'^(.*?)"""\s*(#.*)?$')
    # Maps an opening triple quote to its (single line, multi line) regexes.
    _triple_quote = {
        "'''": (_single_line_single, _multi_line_single),
        '"""': (_single_line_double, _multi_line_double),
    }
    # Used by the ``istrue`` Section method
    _bools = {
        'yes': True, 'no': False,
        'on': True, 'off': False,
        '1': True, '0': False,
        'true': True, 'false': False,
    }
    def __init__(self, infile=None, options=None, configspec=None, encoding=None,
                 interpolation=True, raise_errors=False, list_values=True,
                 create_empty=False, file_error=False, stringify=True,
                 indent_type=None, default_encoding=None, unrepr=False,
                 write_empty_values=False, _inspec=False):
        """
        Parse a config file or create a config file object.
        ``ConfigObj(infile=None, configspec=None, encoding=None,
                    interpolation=True, raise_errors=False, list_values=True,
                    create_empty=False, file_error=False, stringify=True,
                    indent_type=None, default_encoding=None, unrepr=False,
                    write_empty_values=False, _inspec=False)``
        """
        self._inspec = _inspec
        # init the superclass
        Section.__init__(self, self, 0, self)
        infile = infile or []
        # Collect the keyword arguments into a single options dict.
        _options = {'configspec': configspec,
                    'encoding': encoding, 'interpolation': interpolation,
                    'raise_errors': raise_errors, 'list_values': list_values,
                    'create_empty': create_empty, 'file_error': file_error,
                    'stringify': stringify, 'indent_type': indent_type,
                    'default_encoding': default_encoding, 'unrepr': unrepr,
                    'write_empty_values': write_empty_values}
        if options is None:
            options = _options
        else:
            # Passing ``options`` as a dict is the deprecated legacy API.
            import warnings
            warnings.warn('Passing in an options dictionary to ConfigObj() is '
                          'deprecated. Use **options instead.',
                          DeprecationWarning, stacklevel=2)
            # TODO: check the values too.
            for entry in options:
                if entry not in OPTION_DEFAULTS:
                    raise TypeError('Unrecognised option "%s".' % entry)
            for entry, value in list(OPTION_DEFAULTS.items()):
                if entry not in options:
                    options[entry] = value
                keyword_value = _options[entry]
                if value != keyword_value:
                    # A keyword argument that differs from the default wins
                    # over the entry in the legacy options dict.
                    options[entry] = keyword_value
        # XXXX this ignores an explicit list_values = True in combination
        # with _inspec. The user should *never* do that anyway, but still...
        if _inspec:
            options['list_values'] = False
        self._initialise(options)
        configspec = options['configspec']
        self._original_configspec = configspec
        self._load(infile, configspec)
    def _load(self, infile, configspec):
        """
        Load and parse ``infile``, which may be a filename, a list/tuple of
        lines, a dict (or ConfigObj), or a file-like object with ``read``.
        Accumulated parse errors are raised as a single error at the end.
        """
        if isinstance(infile, six.string_types):
            # ``infile`` is a filename.
            self.filename = infile
            if os.path.isfile(infile):
                with open(infile, 'rb') as h:
                    content = h.readlines() or []
            elif self.file_error:
                # raise an error if the file doesn't exist
                raise IOError('Config file not found: "%s".' % self.filename)
            else:
                # file doesn't already exist
                if self.create_empty:
                    # this is a good test that the filename specified
                    # isn't impossible - like on a non-existent device
                    with open(infile, 'w') as h:
                        h.write('')
                content = []
        elif isinstance(infile, (list, tuple)):
            content = list(infile)
        elif isinstance(infile, dict):
            # initialise self
            # the Section class handles creating subsections
            if isinstance(infile, ConfigObj):
                # get a copy of our ConfigObj
                def set_section(in_section, this_section):
                    for entry in in_section.scalars:
                        this_section[entry] = in_section[entry]
                    for section in in_section.sections:
                        this_section[section] = {}
                        set_section(in_section[section], this_section[section])
                set_section(infile, self)
            else:
                for entry in infile:
                    self[entry] = infile[entry]
            # Nothing was parsed, so there can be no parse errors to keep.
            del self._errors
            if configspec is not None:
                self._handle_configspec(configspec)
            else:
                self.configspec = None
            return
        elif getattr(infile, 'read', MISSING) is not MISSING:
            # This supports file like objects
            content = infile.read() or []
            # needs splitting into lines - but needs doing *after* decoding
            # in case it's not an 8 bit encoding
        else:
            raise TypeError('infile must be a filename, file like object, or list of lines.')
        if content:
            # don't do it for the empty ConfigObj
            content = self._handle_bom(content)
            # infile is now *always* a list
            #
            # Set the newlines attribute (first line ending it finds)
            # and strip trailing '\n' or '\r' from lines
            for line in content:
                if (not line) or (line[-1] not in ('\r', '\n')):
                    continue
                for end in ('\r\n', '\n', '\r'):
                    if line.endswith(end):
                        self.newlines = end
                        break
                # Only the first line with an ending is examined.
                break
        assert all(isinstance(line, six.string_types) for line in content), repr(content)
        content = [line.rstrip('\r\n') for line in content]
        self._parse(content)
        # if we had any errors, now is the time to raise them
        if self._errors:
            info = "at line %s." % self._errors[0].line_number
            if len(self._errors) > 1:
                msg = "Parsing failed with several errors.\nFirst error %s" % info
                error = ConfigObjError(msg)
            else:
                error = self._errors[0]
            # set the errors attribute; it's a list of tuples:
            # (error_type, message, line_number)
            error.errors = self._errors
            # set the config attribute
            error.config = self
            raise error
        # delete private attributes
        del self._errors
        if configspec is None:
            self.configspec = None
        else:
            self._handle_configspec(configspec)
def _initialise(self, options=None):
if options is None:
options = OPTION_DEFAULTS
# initialise a few variables
self.filename = None
self._errors = []
self.raise_errors = options['raise_errors']
self.interpolation = options['interpolation']
self.list_values = options['list_values']
self.create_empty = options['create_empty']
self.file_error = options['file_error']
self.stringify = options['stringify']
self.indent_type = options['indent_type']
self.encoding = options['encoding']
self.default_encoding = options['default_encoding']
self.BOM = False
self.newlines = None
self.write_empty_values = options['write_empty_values']
self.unrepr = options['unrepr']
self.initial_comment = []
self.final_comment = []
self.configspec = None
if self._inspec:
self.list_values = False
# Clear section attributes as well
Section._initialise(self)
def __repr__(self):
def _getval(key):
try:
return self[key]
except MissingInterpolationOption:
return dict.__getitem__(self, key)
return ('ConfigObj({%s})' %
', '.join([('%s: %s' % (repr(key), repr(_getval(key))))
for key in (self.scalars + self.sections)]))
    def _handle_bom(self, infile):
        """
        Handle any BOM, and decode if necessary.
        If an encoding is specified, that *must* be used - but the BOM should
        still be removed (and the BOM attribute set).
        (If the encoding is wrongly specified, then a BOM for an alternative
        encoding won't be discovered or removed.)
        If an encoding is not specified, UTF8 or UTF16 BOM will be detected and
        removed. The BOM attribute will be set. UTF16 will be decoded to
        unicode.
        NOTE: This method must not be called with an empty ``infile``.
        Specifying the *wrong* encoding is likely to cause a
        ``UnicodeDecodeError``.
        ``infile`` must always be returned as a list of lines, but may be
        passed in as a single string.
        """
        if ((self.encoding is not None) and
            (self.encoding.lower() not in BOM_LIST)):
            # No need to check for a BOM
            # the encoding specified doesn't have one
            # just decode
            return self._decode(infile, self.encoding)
        # Only the first line can carry a BOM.
        if isinstance(infile, (list, tuple)):
            line = infile[0]
        else:
            line = infile
        if isinstance(line, six.text_type):
            # it's already decoded and there's no need to do anything
            # else, just use the _decode utility method to handle
            # listifying appropriately
            return self._decode(infile, self.encoding)
        if self.encoding is not None:
            # encoding explicitly supplied
            # And it could have an associated BOM
            # TODO: if encoding is just UTF16 - we ought to check for both
            # TODO: big endian and little endian versions.
            enc = BOM_LIST[self.encoding.lower()]
            if enc == 'utf_16':
                # For UTF16 we try big endian and little endian
                for BOM, (encoding, final_encoding) in list(BOMS.items()):
                    if not final_encoding:
                        # skip UTF8
                        continue
                    if infile.startswith(BOM):
                        ### BOM discovered
                        ##self.BOM = True
                        # Don't need to remove BOM
                        return self._decode(infile, encoding)
                # If we get this far, will *probably* raise a DecodeError
                # As it doesn't appear to start with a BOM
                return self._decode(infile, self.encoding)
            # Must be UTF8
            BOM = BOM_SET[enc]
            if not line.startswith(BOM):
                return self._decode(infile, self.encoding)
            # Strip the UTF8 BOM from the first line before decoding.
            newline = line[len(BOM):]
            # BOM removed
            if isinstance(infile, (list, tuple)):
                infile[0] = newline
            else:
                infile = newline
            self.BOM = True
            return self._decode(infile, self.encoding)
        # No encoding specified - so we need to check for UTF8/UTF16
        for BOM, (encoding, final_encoding) in list(BOMS.items()):
            if not isinstance(line, six.binary_type) or not line.startswith(BOM):
                # didn't specify a BOM, or it's not a bytestring
                continue
            else:
                # BOM discovered
                self.encoding = final_encoding
                if not final_encoding:
                    self.BOM = True
                    # UTF8
                    # remove BOM
                    newline = line[len(BOM):]
                    if isinstance(infile, (list, tuple)):
                        infile[0] = newline
                    else:
                        infile = newline
                    # UTF-8
                    if isinstance(infile, six.text_type):
                        return infile.splitlines(True)
                    elif isinstance(infile, six.binary_type):
                        return infile.decode('utf-8').splitlines(True)
                    else:
                        return self._decode(infile, 'utf-8')
                # UTF16 - have to decode
                return self._decode(infile, encoding)
        if six.PY2 and isinstance(line, str):
            # don't actually do any decoding, since we're on python 2 and
            # returning a bytestring is fine
            return self._decode(infile, None)
        # No BOM discovered and no encoding specified, default to UTF-8
        if isinstance(infile, six.binary_type):
            return infile.decode('utf-8').splitlines(True)
        else:
            return self._decode(infile, 'utf-8')
def _a_to_u(self, aString):
"""Decode ASCII strings to unicode if a self.encoding is specified."""
if isinstance(aString, six.binary_type) and self.encoding:
return aString.decode(self.encoding)
else:
return aString
def _decode(self, infile, encoding):
"""
Decode infile to unicode. Using the specified encoding.
if is a string, it also needs converting to a list.
"""
if isinstance(infile, six.string_types):
return infile.splitlines(True)
if isinstance(infile, six.binary_type):
# NOTE: Could raise a ``UnicodeDecodeError``
if encoding:
return infile.decode(encoding).splitlines(True)
else:
return infile.splitlines(True)
if encoding:
for i, line in enumerate(infile):
if isinstance(line, six.binary_type):
# NOTE: The isinstance test here handles mixed lists of unicode/string
# NOTE: But the decode will break on any non-string values
# NOTE: Or could raise a ``UnicodeDecodeError``
infile[i] = line.decode(encoding)
return infile
def _decode_element(self, line):
"""Decode element to unicode if necessary."""
if isinstance(line, six.binary_type) and self.default_encoding:
return line.decode(self.default_encoding)
else:
return line
# TODO: this may need to be modified
def _str(self, value):
"""
Used by ``stringify`` within validate, to turn non-string values
into strings.
"""
if not isinstance(value, six.string_types):
# intentially 'str' because it's just whatever the "normal"
# string type is for the python version we're dealing with
return str(value)
else:
return value
def _parse(self, infile):
"""Actually parse the config file."""
temp_list_values = self.list_values
if self.unrepr:
self.list_values = False
comment_list = []
done_start = False
this_section = self
maxline = len(infile) - 1
cur_index = -1
reset_comment = False
while cur_index < maxline:
if reset_comment:
comment_list = []
cur_index += 1
line = infile[cur_index]
sline = line.strip()
# do we have anything on the line ?
if not sline or sline.startswith('#'):
reset_comment = False
comment_list.append(line)
continue
if not done_start:
# preserve initial comment
self.initial_comment = comment_list
comment_list = []
done_start = True
reset_comment = True
# first we check if it's a section marker
mat = self._sectionmarker.match(line)
if mat is not None:
# is a section line
(indent, sect_open, sect_name, sect_close, comment) = mat.groups()
if indent and (self.indent_type is None):
self.indent_type = indent
cur_depth = sect_open.count('[')
if cur_depth != sect_close.count(']'):
self._handle_error("Cannot compute the section depth",
NestingError, infile, cur_index)
continue
if cur_depth < this_section.depth:
# the new section is dropping back to a previous level
try:
parent = self._match_depth(this_section,
cur_depth).parent
except SyntaxError:
self._handle_error("Cannot compute nesting level",
NestingError, infile, cur_index)
continue
elif cur_depth == this_section.depth:
# the new section is a sibling of the current section
parent = this_section.parent
elif cur_depth == this_section.depth + 1:
# the new section is a child the current section
parent = this_section
else:
self._handle_error("Section too nested",
NestingError, infile, cur_index)
continue
sect_name = self._unquote(sect_name)
if sect_name in parent:
self._handle_error('Duplicate section name',
DuplicateError, infile, cur_index)
continue
# create the new section
this_section = Section(
parent,
cur_depth,
self,
name=sect_name)
parent[sect_name] = this_section
parent.inline_comments[sect_name] = comment
parent.comments[sect_name] = comment_list
continue
#
# it's not a section marker,
# so it should be a valid ``key = value`` line
mat = self._keyword.match(line)
if mat is None:
self._handle_error(
'Invalid line ({0!r}) (matched as neither section nor keyword)'.format(line),
ParseError, infile, cur_index)
else:
# is a keyword value
# value will include any inline comment
(indent, key, value) = mat.groups()
if indent and (self.indent_type is None):
self.indent_type = indent
# check for a multiline value
if value[:3] in ['"""', "'''"]:
try:
value, comment, cur_index = self._multiline(
value, infile, cur_index, maxline)
except SyntaxError:
self._handle_error(
'Parse error in multiline value',
ParseError, infile, cur_index)
continue
else:
if self.unrepr:
comment = ''
try:
value = unrepr(value)
except Exception as e:
if type(e) == UnknownType:
msg = 'Unknown name or type in value'
else:
msg = 'Parse error from unrepr-ing multiline value'
self._handle_error(msg, UnreprError, infile,
cur_index)
continue
else:
if self.unrepr:
comment = ''
try:
value = unrepr(value)
except Exception as e:
if isinstance(e, UnknownType):
msg = 'Unknown name or type in value'
else:
msg = 'Parse error from unrepr-ing value'
self._handle_error(msg, UnreprError, infile,
cur_index)
continue
else:
# extract comment and lists
try:
(value, comment) = self._handle_value(value)
except SyntaxError:
self._handle_error(
'Parse error in value',
ParseError, infile, cur_index)
continue
#
key = self._unquote(key)
if key in this_section:
self._handle_error(
'Duplicate keyword name',
DuplicateError, infile, cur_index)
continue
# add the key.
# we set unrepr because if we have got this far we will never
# be creating a new section
this_section.__setitem__(key, value, unrepr=True)
this_section.inline_comments[key] = comment
this_section.comments[key] = comment_list
continue
#
if self.indent_type is None:
# no indentation used, set the type accordingly
self.indent_type = ''
# preserve the final comment
if not self and not self.initial_comment:
self.initial_comment = comment_list
elif not reset_comment:
self.final_comment = comment_list
self.list_values = temp_list_values
def _match_depth(self, sect, depth):
"""
Given a section and a depth level, walk back through the sections
parents to see if the depth level matches a previous section.
Return a reference to the right section,
or raise a SyntaxError.
"""
while depth < sect.depth:
if sect is sect.parent:
# we've reached the top level already
raise SyntaxError()
sect = sect.parent
if sect.depth == depth:
return sect
# shouldn't get here
raise SyntaxError()
def _handle_error(self, text, ErrorClass, infile, cur_index):
"""
Handle an error according to the error settings.
Either raise the error or store it.
The error will have occured at ``cur_index``
"""
line = infile[cur_index]
cur_index += 1
message = '{0} at line {1}.'.format(text, cur_index)
error = ErrorClass(message, cur_index, line)
if self.raise_errors:
# raise the error - parsing stops here
raise error
# store the error
# reraise when parsing has finished
self._errors.append(error)
def _unquote(self, value):
"""Return an unquoted version of a value"""
if not value:
# should only happen during parsing of lists
raise SyntaxError
if (value[0] == value[-1]) and (value[0] in ('"', "'")):
value = value[1:-1]
return value
    def _quote(self, value, multiline=True):
        """
        Return a safely quoted version of a value.
        Raise a ConfigObjError if the value cannot be safely quoted.
        If multiline is ``True`` (default) then use triple quotes
        if necessary.
        * Don't quote values that don't need it.
        * Recursively quote members of a list and return a comma joined list.
        * Multiline is ``False`` for lists.
        * Obey list syntax for empty and single member lists.
        If ``list_values=False`` then the value is only quoted if it contains
        a ``\\n`` (is multiline) or '#'.
        If ``write_empty_values`` is set, and the value is an empty string, it
        won't be quoted.
        """
        if multiline and self.write_empty_values and value == '':
            # Only if multiline is set, so that it is used for values not
            # keys, and not values that are part of a list
            return ''
        if multiline and isinstance(value, (list, tuple)):
            if not value:
                # a single comma is the syntax for an empty list
                return ','
            elif len(value) == 1:
                # single member lists are written with a trailing comma
                return self._quote(value[0], multiline=False) + ','
            return ', '.join([self._quote(val, multiline=False)
                              for val in value])
        if not isinstance(value, six.string_types):
            if self.stringify:
                # intentially 'str' because it's just whatever the "normal"
                # string type is for the python version we're dealing with
                value = str(value)
            else:
                raise TypeError('Value "%s" is not a string.' % value)
        if not value:
            return '""'
        # Decide which quoting style (none / single / triple) is required.
        no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value
        need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value ))
        hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value)
        check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote
        if check_for_single:
            if not self.list_values:
                # we don't quote if ``list_values=False``
                quot = noquot
            # for normal values either single or double quotes will do
            elif '\n' in value:
                # will only happen if multiline is off - e.g. '\n' in key
                raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
            elif ((value[0] not in wspace_plus) and
                  (value[-1] not in wspace_plus) and
                  (',' not in value)):
                quot = noquot
            else:
                quot = self._get_single_quote(value)
        else:
            # if value has '\n' or "'" *and* '"', it will need triple quotes
            quot = self._get_triple_quote(value)
        if quot == noquot and '#' in value and self.list_values:
            quot = self._get_single_quote(value)
        return quot % value
def _get_single_quote(self, value):
if ("'" in value) and ('"' in value):
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
elif '"' in value:
quot = squot
else:
quot = dquot
return quot
def _get_triple_quote(self, value):
if (value.find('"""') != -1) and (value.find("'''") != -1):
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
if value.find('"""') == -1:
quot = tdquot
else:
quot = tsquot
return quot
    def _handle_value(self, value):
        """
        Given a value string, unquote, remove comment,
        handle lists. (including empty and single member lists)
        Returns a ``(value, comment)`` tuple.
        """
        if self._inspec:
            # Parsing a configspec so don't handle comments
            return (value, '')
        # do we look for lists in values ?
        if not self.list_values:
            mat = self._nolistvalue.match(value)
            if mat is None:
                raise SyntaxError()
            # NOTE: we don't unquote here
            return mat.groups()
        #
        mat = self._valueexp.match(value)
        if mat is None:
            # the value is badly constructed, probably badly quoted,
            # or an invalid list
            raise SyntaxError()
        # Groups: comma-joined list items, last/only value, lone comma,
        # optional inline comment.
        (list_values, single, empty_list, comment) = mat.groups()
        if (list_values == '') and (single is None):
            # change this if you want to accept empty values
            raise SyntaxError()
        # NOTE: note there is no error handling from here if the regex
        # is wrong: then incorrect values will slip through
        if empty_list is not None:
            # the single comma - meaning an empty list
            return ([], comment)
        if single is not None:
            # handle empty values
            if list_values and not single:
                # FIXME: the '' is a workaround because our regex now matches
                # '' at the end of a list if it has a trailing comma
                single = None
            else:
                single = single or '""'
                single = self._unquote(single)
        if list_values == '':
            # not a list value
            return (single, comment)
        the_list = self._listvalueexp.findall(list_values)
        the_list = [self._unquote(val) for val in the_list]
        if single is not None:
            the_list += [single]
        return (the_list, comment)
    def _multiline(self, value, infile, cur_index, maxline):
        """Extract the value, where we are in a multiline situation."""
        # ``value`` begins with the opening triple quote; the return value is
        # ``(text, comment, index-of-the-closing-line)``.
        quot = value[:3]
        newvalue = value[3:]
        single_line = self._triple_quote[quot][0]
        multi_line = self._triple_quote[quot][1]
        mat = single_line.match(value)
        if mat is not None:
            # Opening and closing quotes are on the same line.
            retval = list(mat.groups())
            retval.append(cur_index)
            return retval
        elif newvalue.find(quot) != -1:
            # somehow the triple quote is missing
            raise SyntaxError()
        #
        # Accumulate lines until one contains the closing triple quote.
        while cur_index < maxline:
            cur_index += 1
            newvalue += '\n'
            line = infile[cur_index]
            if line.find(quot) == -1:
                newvalue += line
            else:
                # end of multiline, process it
                break
        else:
            # we've got to the end of the config, oops...
            raise SyntaxError()
        mat = multi_line.match(line)
        if mat is None:
            # a badly formed line
            raise SyntaxError()
        (value, comment) = mat.groups()
        return (newvalue + value, comment, cur_index)
def _handle_configspec(self, configspec):
"""Parse the configspec."""
# FIXME: Should we check that the configspec was created with the
# correct settings ? (i.e. ``list_values=False``)
if not isinstance(configspec, ConfigObj):
try:
configspec = ConfigObj(configspec,
raise_errors=True,
file_error=True,
_inspec=True)
except ConfigObjError as e:
# FIXME: Should these errors have a reference
# to the already parsed ConfigObj ?
raise ConfigspecError('Parsing configspec failed: %s' % e)
except IOError as e:
raise IOError('Reading configspec failed: %s' % e)
self.configspec = configspec
    def _set_configspec(self, section, copy):
        """
        Called by validate. Handles setting the configspec on subsections
        including sections to be validated by __many__
        """
        configspec = section.configspec
        many = configspec.get('__many__')
        if isinstance(many, dict):
            # Subsections not named explicitly in the configspec inherit
            # the ``__many__`` spec.
            for entry in section.sections:
                if entry not in configspec:
                    section[entry].configspec = many
        for entry in configspec.sections:
            if entry == '__many__':
                continue
            if entry not in section:
                # Create sections named in the configspec but missing here,
                # and mark them as synthesised.
                section[entry] = {}
                section[entry]._created = True
                if copy:
                    # copy comments
                    section.comments[entry] = configspec.comments.get(entry, [])
                    section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
            # Could be a scalar when we expect a section
            if isinstance(section[entry], Section):
                section[entry].configspec = configspec[entry]
def _write_line(self, indent_string, entry, this_entry, comment):
"""Write an individual line, for the write method"""
# NOTE: the calls to self._quote here handles non-StringType values.
if not self.unrepr:
val = self._decode_element(self._quote(this_entry))
else:
val = repr(this_entry)
return '%s%s%s%s%s' % (indent_string,
self._decode_element(self._quote(entry, multiline=False)),
self._a_to_u(' = '),
val,
self._decode_element(comment))
def _write_marker(self, indent_string, depth, entry, comment):
"""Write a section marker line"""
return '%s%s%s%s%s' % (indent_string,
self._a_to_u('[' * depth),
self._quote(self._decode_element(entry), multiline=False),
self._a_to_u(']' * depth),
self._decode_element(comment))
def _handle_comment(self, comment):
"""Deal with a comment."""
if not comment:
return ''
start = self.indent_type
if not comment.startswith('#'):
start += self._a_to_u(' # ')
return (start + comment)
# Public methods
    def write(self, outfile=None, section=None):
        """
        Write the current ConfigObj as a file
        tekNico: FIXME: use StringIO instead of real files
        >>> filename = a.filename
        >>> a.filename = 'test.ini'
        >>> a.write()
        >>> a.filename = filename
        >>> a == ConfigObj('test.ini', raise_errors=True)
        1
        >>> import os
        >>> os.remove('test.ini')
        """
        if self.indent_type is None:
            # this can be true if initialised from a dictionary
            self.indent_type = DEFAULT_INDENT_TYPE
        out = []
        cs = self._a_to_u('#')
        csp = self._a_to_u('# ')
        if section is None:
            # Top-level call: switch interpolation off so raw values are
            # written, and start recursion from the root section (self).
            int_val = self.interpolation
            self.interpolation = False
            section = self
            for line in self.initial_comment:
                line = self._decode_element(line)
                stripped_line = line.strip()
                # Prefix non-blank comment lines with '# ' if unmarked.
                if stripped_line and not stripped_line.startswith(cs):
                    line = csp + line
                out.append(line)
        indent_string = self.indent_type * section.depth
        for entry in (section.scalars + section.sections):
            if entry in section.defaults:
                # don't write out default values
                continue
            # Standalone comments attached to this entry come first.
            for comment_line in section.comments[entry]:
                comment_line = self._decode_element(comment_line.lstrip())
                if comment_line and not comment_line.startswith(cs):
                    comment_line = csp + comment_line
                out.append(indent_string + comment_line)
            this_entry = section[entry]
            comment = self._handle_comment(section.inline_comments[entry])
            if isinstance(this_entry, Section):
                # a section
                out.append(self._write_marker(
                    indent_string,
                    this_entry.depth,
                    entry,
                    comment))
                # Recursive call collects the subsection's lines.
                out.extend(self.write(section=this_entry))
            else:
                out.append(self._write_line(
                    indent_string,
                    entry,
                    this_entry,
                    comment))
        if section is self:
            for line in self.final_comment:
                line = self._decode_element(line)
                stripped_line = line.strip()
                if stripped_line and not stripped_line.startswith(cs):
                    line = csp + line
                out.append(line)
            # Restore the interpolation setting disabled above.
            self.interpolation = int_val
        if section is not self:
            # Recursive call: hand the rendered lines back to the parent.
            return out
        if (self.filename is None) and (outfile is None):
            # output a list of lines
            # might need to encode
            # NOTE: This will *screw* UTF16, each line will start with the BOM
            if self.encoding:
                out = [l.encode(self.encoding) for l in out]
            if (self.BOM and ((self.encoding is None) or
                (BOM_LIST.get(self.encoding.lower()) == 'utf_8'))):
                # Add the UTF8 BOM
                if not out:
                    out.append('')
                out[0] = BOM_UTF8 + out[0]
            return out
        # Turn the list to a string, joined with correct newlines
        newline = self.newlines or os.linesep
        if (getattr(outfile, 'mode', None) is not None and outfile.mode == 'w'
            and sys.platform == 'win32' and newline == '\r\n'):
            # Windows specific hack to avoid writing '\r\r\n'
            newline = '\n'
        output = self._a_to_u(newline).join(out)
        if not output.endswith(newline):
            output += newline
        if isinstance(output, six.binary_type):
            output_bytes = output
        else:
            # Fall back through encoding preferences, ending at ascii.
            output_bytes = output.encode(self.encoding or
                                         self.default_encoding or
                                         'ascii')
        if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)):
            # Add the UTF8 BOM
            output_bytes = BOM_UTF8 + output_bytes
        if outfile is not None:
            outfile.write(output_bytes)
        else:
            with open(self.filename, 'wb') as h:
                h.write(output_bytes)
    def validate(self, validator, preserve_errors=False, copy=False,
                 section=None):
        """
        Test the ConfigObj against a configspec.
        It uses the ``validator`` object from *validate.py*.
        To run ``validate`` on the current ConfigObj, call: ::
            test = config.validate(validator)
        (Normally having previously passed in the configspec when the ConfigObj
        was created - you can dynamically assign a dictionary of checks to the
        ``configspec`` attribute of a section though).
        It returns ``True`` if everything passes, or a dictionary of
        pass/fails (True/False). If every member of a subsection passes, it
        will just have the value ``True``. (It also returns ``False`` if all
        members fail).
        In addition, it converts the values from strings to their native
        types if their checks pass (and ``stringify`` is set).
        If ``preserve_errors`` is ``True`` (``False`` is default) then instead
        of a marking a fail with a ``False``, it will preserve the actual
        exception object. This can contain info about the reason for failure.
        For example the ``VdtValueTooSmallError`` indicates that the value
        supplied was too small. If a value (or section) is missing it will
        still be marked as ``False``.
        You must have the validate module to use ``preserve_errors=True``.
        You can then use the ``flatten_errors`` function to turn your nested
        results dictionary into a flattened list of failures - useful for
        displaying meaningful error messages.
        """
        if section is None:
            # Top-level call: resolve the configspec and prime shared state.
            if self.configspec is None:
                raise ValueError('No configspec supplied.')
            if preserve_errors:
                # We do this once to remove a top level dependency on the validate module
                # Which makes importing configobj faster
                from validate import VdtMissingValue
                self._vdtMissingValue = VdtMissingValue
            section = self
            if copy:
                # Copy mode also pulls formatting/encoding details over
                # from the configspec document.
                section.initial_comment = section.configspec.initial_comment
                section.final_comment = section.configspec.final_comment
                section.encoding = section.configspec.encoding
                section.BOM = section.configspec.BOM
                section.newlines = section.configspec.newlines
                section.indent_type = section.configspec.indent_type
        #
        # section.default_values.clear() #??
        configspec = section.configspec
        self._set_configspec(section, copy)
        def validate_entry(entry, spec, val, missing, ret_true, ret_false):
            # Validate one scalar: records the outcome in the enclosing
            # ``out`` dict and returns the updated all-pass/all-fail flags.
            section.default_values.pop(entry, None)
            try:
                section.default_values[entry] = validator.get_default_value(configspec[entry])
            except (KeyError, AttributeError, validator.baseErrorClass):
                # No default, bad default or validator has no 'get_default_value'
                # (e.g. SimpleVal)
                pass
            try:
                check = validator.check(spec,
                                        val,
                                        missing=missing
                                        )
            except validator.baseErrorClass as e:
                if not preserve_errors or isinstance(e, self._vdtMissingValue):
                    out[entry] = False
                else:
                    # preserve the error
                    out[entry] = e
                    ret_false = False
                ret_true = False
            else:
                ret_false = False
                out[entry] = True
                if self.stringify or missing:
                    # if we are doing type conversion
                    # or the value is a supplied default
                    if not self.stringify:
                        if isinstance(check, (list, tuple)):
                            # preserve lists
                            check = [self._str(item) for item in check]
                        elif missing and check is None:
                            # convert the None from a default to a ''
                            check = ''
                        else:
                            check = self._str(check)
                    if (check != val) or missing:
                        section[entry] = check
                if not copy and missing and entry not in section.defaults:
                    section.defaults.append(entry)
            return ret_true, ret_false
        #
        out = {}
        ret_true = True
        ret_false = True
        # Scalars present in the section but absent from the spec.
        unvalidated = [k for k in section.scalars if k not in configspec]
        # Spec/section shape mismatches in either direction.
        incorrect_sections = [k for k in configspec.sections if k in section.scalars]
        incorrect_scalars = [k for k in configspec.scalars if k in section.sections]
        for entry in configspec.scalars:
            if entry in ('__many__', '___many___'):
                # reserved names
                continue
            if (not entry in section.scalars) or (entry in section.defaults):
                # missing entries
                # or entries from defaults
                missing = True
                val = None
                if copy and entry not in section.scalars:
                    # copy comments
                    section.comments[entry] = (
                        configspec.comments.get(entry, []))
                    section.inline_comments[entry] = (
                        configspec.inline_comments.get(entry, ''))
                #
            else:
                missing = False
                val = section[entry]
            ret_true, ret_false = validate_entry(entry, configspec[entry], val,
                                                 missing, ret_true, ret_false)
        many = None
        if '__many__' in configspec.scalars:
            many = configspec['__many__']
        elif '___many___' in configspec.scalars:
            many = configspec['___many___']
        if many is not None:
            # Scalars without an explicit spec are checked against __many__.
            for entry in unvalidated:
                val = section[entry]
                ret_true, ret_false = validate_entry(entry, many, val, False,
                                                     ret_true, ret_false)
            unvalidated = []
        for entry in incorrect_scalars:
            ret_true = False
            if not preserve_errors:
                out[entry] = False
            else:
                ret_false = False
                msg = 'Value %r was provided as a section' % entry
                out[entry] = validator.baseErrorClass(msg)
        for entry in incorrect_sections:
            ret_true = False
            if not preserve_errors:
                out[entry] = False
            else:
                ret_false = False
                msg = 'Section %r was provided as a single value' % entry
                out[entry] = validator.baseErrorClass(msg)
        # Missing sections will have been created as empty ones when the
        # configspec was read.
        for entry in section.sections:
            # FIXME: this means DEFAULT is not copied in copy mode
            if section is self and entry == 'DEFAULT':
                continue
            if section[entry].configspec is None:
                unvalidated.append(entry)
                continue
            if copy:
                section.comments[entry] = configspec.comments.get(entry, [])
                section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
            # Recurse into the subsection; the result folds into out/flags.
            check = self.validate(validator, preserve_errors=preserve_errors, copy=copy, section=section[entry])
            out[entry] = check
            if check == False:
                ret_true = False
            elif check == True:
                ret_false = False
            else:
                ret_true = False
        section.extra_values = unvalidated
        if preserve_errors and not section._created:
            # If the section wasn't created (i.e. it wasn't missing)
            # then we can't return False, we need to preserve errors
            ret_false = False
        #
        if ret_false and preserve_errors and out:
            # If we are preserving errors, but all
            # the failures are from missing sections / values
            # then we can return False. Otherwise there is a
            # real failure that we need to preserve.
            ret_false = not any(out.values())
        if ret_true:
            return True
        elif ret_false:
            return False
        return out
    def reset(self):
        """Clear ConfigObj instance and restore to 'freshly created' state."""
        # Remove all members first, then rebuild the default attributes.
        self.clear()
        self._initialise()
        # FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload)
        # requires an empty dictionary
        self.configspec = None
        # Just to be sure ;-)
        self._original_configspec = None
def reload(self):
"""
Reload a ConfigObj from file.
This method raises a ``ReloadError`` if the ConfigObj doesn't have
a filename attribute pointing to a file.
"""
if not isinstance(self.filename, six.string_types):
raise ReloadError()
filename = self.filename
current_options = {}
for entry in OPTION_DEFAULTS:
if entry == 'configspec':
continue
current_options[entry] = getattr(self, entry)
configspec = self._original_configspec
current_options['configspec'] = configspec
self.clear()
self._initialise(current_options)
self._load(filename, configspec)
class SimpleVal(object):
    """
    A minimal validator.
    Useful for checking that every expected member is present: supply a
    configspec listing your members (the spec values are ignored) and pass
    an instance of ``SimpleVal`` to ``ConfigObj.validate``. The result is
    ``True`` when all members are present, otherwise a dictionary of
    True/False flags meaning present/missing. (Entirely missing sections
    are replaced with ``False``.)
    """
    def __init__(self):
        self.baseErrorClass = ConfigObjError
    def check(self, check, member, missing=False):
        """Dummy check: raise when the member is missing, else pass it through."""
        if not missing:
            return member
        raise self.baseErrorClass()
def flatten_errors(cfg, res, levels=None, results=None):
    """
    Turn the nested results dictionary produced by ``ConfigObj.validate``
    into a flat, sorted list of failures.
    ``cfg`` is the ConfigObj instance that was checked and ``res`` the
    results dictionary returned by ``validate``. (``levels`` and
    ``results`` are recursion state - do not pass them yourself.)
    Each member of the returned list is a tuple::
        ([list of sections...], key, result)
    ``key`` is ``None`` when a whole section was missing or of the wrong
    kind; ``result`` is ``False`` for a missing value/section, or - when
    ``validate`` ran with ``preserve_errors=True`` - the exception object
    describing why the check failed.
    """
    if levels is None:
        # First (non-recursive) call: initialise the recursion state.
        levels = []
        results = []
    if res == True:
        return sorted(results)
    if res == False or isinstance(res, Exception):
        # The whole (sub)section failed or was missing.
        results.append((levels[:], None, res))
        if levels:
            levels.pop()
        return sorted(results)
    for key, val in list(res.items()):
        if val == True:
            continue
        if isinstance(cfg.get(key), dict):
            # Descend into the subsection.
            levels.append(key)
            flatten_errors(cfg[key], val, levels, results)
        else:
            results.append((levels[:], key, val))
    #
    # Done with this level - go back up one.
    if levels:
        levels.pop()
    #
    return sorted(results)
def get_extra_values(conf, _prepend=()):
    """
    Return every value and section of a *validated* ConfigObj that was not
    present in its configspec.
    The result is a list of ``(section_path, name)`` tuples. For top-level
    extras ``section_path`` is ``()``; for extras inside the 'foo' section
    it is ``('foo',)``; inside 'foo'/'bar' it is ``('foo', 'bar')``, etc.
    NOTE: On a ConfigObj that has not been validated this returns an empty
    list.
    """
    extras = [(_prepend, name) for name in conf.extra_values]
    for name in conf.sections:
        # Extra sections are already reported above; only recurse into
        # sections the configspec knew about.
        if name in conf.extra_values:
            continue
        extras.extend(get_extra_values(conf[name], _prepend + (name,)))
    return extras
"""*A programming language is a medium of expression.* - Paul Graham"""
| {
"content_hash": "a7c3968dd866dfd23e91e125b669ab21",
"timestamp": "",
"source": "github",
"line_count": 2468,
"max_line_length": 114,
"avg_line_length": 36.09724473257698,
"alnum_prop": 0.5180832435344828,
"repo_name": "mudbungie/NetExplorer",
"id": "ba886e8639291feabd045dcdf2aedb5ba5a7667a",
"size": "89627",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "env/lib/python3.4/site-packages/configobj.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34672"
}
],
"symlink_target": ""
} |
from openflow.optin_manager.sfa.rspecs.elements.element import Element
class OpenFlowSwitch(Element):
    """RSpec element describing an OpenFlow switch.
    ``fields`` lists the attribute names this element carries: component
    identifiers, the datapath id (dpid) and a port.
    """
    fields = [
        'component_id',
        'component_manager_id',
        'dpid',
        'port',
    ]
| {
"content_hash": "65047f72ac03fd0ad69abe6e8ad48e5f",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 70,
"avg_line_length": 18.181818181818183,
"alnum_prop": 0.65,
"repo_name": "ict-felix/stack",
"id": "38ac938b0f43396a07f45e9a25dda0b6ca6d79e7",
"size": "200",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "optin_manager/src/python/openflow/optin_manager/sfa/rspecs/elements/openflow_switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "337811"
},
{
"name": "Elixir",
"bytes": "17243"
},
{
"name": "Emacs Lisp",
"bytes": "1098"
},
{
"name": "Groff",
"bytes": "1735"
},
{
"name": "HTML",
"bytes": "660363"
},
{
"name": "Java",
"bytes": "18362"
},
{
"name": "JavaScript",
"bytes": "838960"
},
{
"name": "Makefile",
"bytes": "11581"
},
{
"name": "Perl",
"bytes": "5416"
},
{
"name": "Python",
"bytes": "8073455"
},
{
"name": "Shell",
"bytes": "259720"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class ApplicationGatewayBackendHealthServer(Model):
    """Health information for one backend server of an application gateway.

    :param address: IP address or FQDN of backend server.
    :type address: str
    :param ip_configuration: Reference of IP configuration of backend server.
    :type ip_configuration: ~azure.mgmt.network.v2016_09_01.models.SubResource
    :param health: Health of backend server. Possible values are: 'Unknown',
     'Up', 'Down', and 'Partial'. Possible values include: 'Unknown', 'Up',
     'Down', 'Partial'
    :type health: str or
     ~azure.mgmt.network.v2016_09_01.models.ApplicationGatewayBackendHealthServerHealth
    """

    # Maps each model attribute to its wire name and serialization type.
    _attribute_map = {
        'address': {'key': 'address', 'type': 'str'},
        'ip_configuration': {'key': 'ipConfiguration', 'type': 'SubResource'},
        'health': {'key': 'health', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ApplicationGatewayBackendHealthServer, self).__init__(**kwargs)
        # Populate the model fields from keyword arguments; absent keys
        # default to None.
        for name in ('address', 'ip_configuration', 'health'):
            setattr(self, name, kwargs.get(name, None))
| {
"content_hash": "c59cd5fad201d5e97898b0a7931615ad",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 87,
"avg_line_length": 42.214285714285715,
"alnum_prop": 0.6683587140439933,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "94f4dee82abeb2e71dbb391bb2f6e9bf0c610ded",
"size": "1656",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/application_gateway_backend_health_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
# BUG FIX: this file had committed git merge-conflict markers
# (<<<<<<< / ======= / >>>>>>>), which make it a SyntaxError. All three
# conflicted copies were byte-identical, so the resolution is one clean copy.
# Check that multiple features can be enabled.
from __future__ import unicode_literals, print_function

import sys
import unittest
from test import support


class TestMultipleFeatures(unittest.TestCase):

    def test_unicode_literals(self):
        # With unicode_literals enabled, a bare literal is text (str in py3).
        self.assertIsInstance("", str)

    def test_print_function(self):
        # With print_function enabled, print accepts the file= keyword.
        with support.captured_output("stderr") as s:
            print("foo", file=sys.stderr)
        self.assertEqual(s.getvalue(), "foo\n")


if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "3faa1c4499ba2b3d1d483123a2986cb5",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 55,
"avg_line_length": 24.202898550724637,
"alnum_prop": 0.6706586826347305,
"repo_name": "ArcherSys/ArcherSys",
"id": "228469643f4b3340230152274b7cd8d8bfc2107b",
"size": "1670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/test/test_future5.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""Live value resolution.
Live values are extracted from the known execution context.
Requires activity analysis annotations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.contrib.py2tf.pyct import anno
from tensorflow.contrib.py2tf.pyct import transformer
from tensorflow.contrib.py2tf.pyct.static_analysis.annos import NodeAnno
class LiveValueResolver(transformer.Base):
  """Annotates nodes with live values.
  Walks the AST and, where a symbol can be resolved against the supplied
  literals, the context namespace, or the context's argument values,
  attaches a 'live_val' (and usually 'fqn') annotation to the node.
  Requires activity-analysis annotations (NodeAnno.*) to be present.
  """
  def __init__(self, context, literals):
    # literals: mapping of symbol name -> literal live value, consulted
    # before the context namespace.
    super(LiveValueResolver, self).__init__(context)
    self.literals = literals
  def visit_ClassDef(self, node):
    # Class definitions resolve directly to the class object registered in
    # the context namespace under the class's name.
    self.generic_visit(node)
    anno.setanno(node, 'live_val', self.context.namespace[node.name])
    return node
  def visit_Name(self, node):
    # Only Load contexts are resolved; Store/Del names are left untouched.
    self.generic_visit(node)
    if isinstance(node.ctx, gast.Load):
      assert anno.hasanno(node, NodeAnno.IS_LOCAL), node
      symbol_is_local = anno.getanno(node, NodeAnno.IS_LOCAL)
      assert anno.hasanno(node, NodeAnno.IS_MODIFIED_SINCE_ENTRY), node
      symbol_is_modified = anno.getanno(node, NodeAnno.IS_MODIFIED_SINCE_ENTRY)
      assert anno.hasanno(node, NodeAnno.IS_PARAM), node
      symbol_is_param = anno.getanno(node, NodeAnno.IS_PARAM)
      if not symbol_is_local and not symbol_is_param:
        # Nonlocal, non-parameter symbol: literals take precedence over the
        # enclosing namespace.
        if node.id in self.literals:
          anno.setanno(node, 'live_val', self.literals[node.id])
          # TODO(mdan): Could live values have FQNs? i.e. 'a'.join()
        elif node.id in self.context.namespace:
          obj = self.context.namespace[node.id]
          anno.setanno(node, 'live_val', obj)
          anno.setanno(node, 'fqn', (obj.__name__,))
        else:
          pass
          # TODO(mdan): Should we raise an error here?
          # Can encounter this when:
          #  * a symbol truly lacks reference
          #  * a symbol is new, like the new name of a function we just renamed.
      else:
        pass
        # TODO(mdan): Attempt to trace its value through the local chain.
        # TODO(mdan): Use type annotations as fallback.
      if not symbol_is_modified:
        # Unmodified symbols may additionally resolve to a supplied
        # argument value; note the fqn here is the value's class name.
        if node.id in self.context.arg_values:
          obj = self.context.arg_values[node.id]
          anno.setanno(node, 'live_val', obj)
          anno.setanno(node, 'fqn', (obj.__class__.__name__,))
    return node
  def visit_Attribute(self, node):
    self.generic_visit(node)
    if anno.hasanno(node.value, 'live_val'):
      # Parent resolved to a concrete object: resolve the attribute on it.
      assert anno.hasanno(node.value, 'fqn')
      parent_object = anno.getanno(node.value, 'live_val')
      if not hasattr(parent_object, node.attr):
        raise AttributeError('%s has no attribute %s' % (parent_object,
                                                         node.attr))
      anno.setanno(node, 'live_val', getattr(parent_object, node.attr))
      anno.setanno(node, 'fqn', anno.getanno(node.value, 'fqn') + (node.attr,))
    # TODO(mdan): Investigate the role built-in annotations can play here.
    elif anno.hasanno(node.value, 'type'):
      parent_type = anno.getanno(node.value, 'type')
      if hasattr(parent_type, node.attr):
        # This should hold for static members like methods.
        # This would not hold for dynamic members like function attributes.
        # For the dynamic case, we simply leave the node without an annotation,
        # and let downstream consumers figure out what to do.
        anno.setanno(node, 'live_val', getattr(parent_type, node.attr))
        anno.setanno(node, 'fqn',
                     anno.getanno(node.value, 'type_fqn') + (node.attr,))
    elif isinstance(node.value, gast.Name):
      stem_name = node.value
      # All nonlocal symbols should be fully resolved.
      assert anno.hasanno(stem_name, NodeAnno.IS_LOCAL), stem_name
      # TODO(mdan): Figure out what to do when calling attribute on local object
      # Maybe just leave as-is?
    return node
def resolve(node, context, literals):
  """Annotate ``node`` with live values resolved from ``context``/``literals``."""
  resolver = LiveValueResolver(context, literals)
  return resolver.visit(node)
| {
"content_hash": "b6f7916fffa05a7efacfe459595267de",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 80,
"avg_line_length": 40.89795918367347,
"alnum_prop": 0.655938123752495,
"repo_name": "av8ramit/tensorflow",
"id": "9c0a9a9e74eccb3d22840032e8f0c2b81e051e7e",
"size": "4697",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/py2tf/pyct/static_analysis/live_values.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9096"
},
{
"name": "C",
"bytes": "332331"
},
{
"name": "C++",
"bytes": "37144977"
},
{
"name": "CMake",
"bytes": "193247"
},
{
"name": "Go",
"bytes": "1061627"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "544069"
},
{
"name": "Jupyter Notebook",
"bytes": "1940884"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48122"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "1487"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "32711532"
},
{
"name": "Ruby",
"bytes": "547"
},
{
"name": "Shell",
"bytes": "422931"
}
],
"symlink_target": ""
} |
from underscore.underscore import _
from ctf import *
import json
def handle(data, *a):
    # Map each course entry of one school's record to a
    # (school, course name) pair.
    return _(data['courses']).map(lambda x, *a: (data['school'], x['name']))
# Load the crawled JSON, flatten each school's course list into
# (school, course-name) pairs, and print them tab-separated.
data = json.load(open('courses.json'))
# BUG FIX: the fluent chain was written as bare continuation lines, which
# is a SyntaxError in Python; wrapping it in parentheses keeps the style
# while making the file parse.
result = (_(data).chain()
          .pluck('response')
          .map(lambda x, *a: json.loads(x))
          .map(handle)
          .flatten(True)
          .map(lambda x, *a: '\t'.join(x))
          .value())
# print(result) works identically under both Python 2 and 3 for a single
# argument, unlike the original py2-only "print result" statement.
print(result)
| {
"content_hash": "672802d7eebc0e64f735756efeade056",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 76,
"avg_line_length": 23.058823529411764,
"alnum_prop": 0.6096938775510204,
"repo_name": "ripples-alive/Crawler",
"id": "0f632d7e0b81e8584910a05fa1c5371d4c57f24f",
"size": "392",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kechenggezi/format.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "97364"
}
],
"symlink_target": ""
} |
import sys
from itertools import ifilter
import requests
import json
from pprint import pprint
class FacebookDataIngestSource:
    """Ingest data from Facebook.
    Iterable source: __iter__ authenticates against the Graph API, finds
    pages matching the configured search terms and collects their video
    posts; next() then yields one post dict at a time.
    NOTE(review): uses Python 2 print statements - py2-only as written.
    """
    def __init__(self, config):
        # config: dict with 'consumer_key', 'consumer_secret' and an
        # optional 'track' list of search terms.
        self.config = config
        self.pages = []   # (page_id, page_name) tuples found via search
        self.post = []    # (page_name, video_data) tuples collected
        self.index = 0    # cursor for next()
    def __iter__(self):
        # Fall back to default search terms when none are configured.
        if 'track' in self.config:
            self.track = self.config['track']
        else:
            self.track = ['ski', 'surf', 'board']
        #### Retrieve the consumer key and secret
        consumer_key = self.config['consumer_key']
        consumer_secret = self.config['consumer_secret']
        #### Define url for http request for access token
        auth_url = 'https://graph.facebook.com/oauth/access_token?grant_type=client_credentials&client_id=%s&client_secret=%s'%(consumer_key,consumer_secret)
        #### Get authorization token from Facebook and store it for future use
        # NOTE(review): assumes the token response body has the form
        # "access_token=<value>" - verify against the Graph API version used.
        token_req = requests.get(auth_url)
        self.access_token = token_req.text.split('=')[1]
        #### Request id for pages associated to search term
        page_fields='page&fields=id,name'
        #### Retrieve term to search
        for term in self.track:
            #### Define url for http request to get pages id associated to search term
            page_request_url = 'https://graph.facebook.com/search?q=%s&type=%s&access_token=%s'%(term, page_fields, self.access_token)
            page_request = requests.get(page_request_url).json()
            #### Get a list of pages id and names associated to search term
            for i in range(len(page_request['data'])):
                self.pages.append((page_request['data'][i]['id'],page_request['data'][i]['name']))
        for page in self.pages:
            # Fetch each page's videos with engagement fields attached.
            video_url = 'https://graph.facebook.com/v2.5/%s/videos?&fields=permalink_url,sharedposts,likes,comments&access_token=%s'%(page[0],self.access_token)
            request = requests.get(video_url).json()
            if 'data' in request:
                print "Updating records from page:", page[1]
                print "Number of videos published:", len(request['data'])
                for i in range(len(request['data'])):
                    self.post.append((page[1], request['data'][i]))
        return self
    def next(self):
        # Python 2 iterator protocol: emit collected posts one by one.
        if self.index < len(self.post):
            post = self.post[self.index]
            self.index = self.index + 1
            pprint(post)
            return {'post' : {'page': post[0], 'data': post[1]}}
        else:
            raise StopIteration()
| {
"content_hash": "18c1aa6a2d59bbc445e7e7c93d4b7a8c",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 156,
"avg_line_length": 33.541666666666664,
"alnum_prop": 0.6244306418219462,
"repo_name": "W205-Social-Media/w205-data-ingest",
"id": "5d51ec424101e6f4adbc528af305134dd479419c",
"size": "2415",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "FacebookDataIngestSource.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1968823"
},
{
"name": "Python",
"bytes": "17531"
}
],
"symlink_target": ""
} |
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.httpclient
import time
from tornado.options import define, options
define("port", default=8000, help="run on the given port", type=int)
class SleepHandler(tornado.web.RequestHandler):
    """Deliberately *blocking* handler: time.sleep(5) stalls the single
    IOLoop thread, so every other request waits too (the demo's point)."""
    def get(self):
        time.sleep(5)
        self.write("when i sleep 5s")
class JustNowHandler(tornado.web.RequestHandler):
    """Fast handler used to show it gets blocked while /sleep is running."""
    def get(self):
        self.write("i hope just now see you")
if __name__ == "__main__":
tornado.options.parse_command_line()
app = tornado.web.Application(handlers=[
(r"/sleep", SleepHandler), (r"/justnow", JustNowHandler) ])
http_server = tornado.httpserver.HTTPServer(app)
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
| {
"content_hash": "efe7c4bdbc0c6f1fe9e53f434b1df2f0",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 73,
"avg_line_length": 30,
"alnum_prop": 0.7037037037037037,
"repo_name": "dongweiming/speakerdeck",
"id": "271359c1f51ed43a6c4bd59b41b5da52833dbf21",
"size": "829",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tornado/1_blocking_sleep.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "1235"
},
{
"name": "Python",
"bytes": "34275"
}
],
"symlink_target": ""
} |
"""Common functions for skydoc."""
import collections
import re
import textwrap
from xml.sax.saxutils import escape
# Docstring section headings recognised by parse_docstring below.
ARGS_HEADING = "Args:"
EXAMPLES_HEADING = "Examples:"
EXAMPLE_HEADING = "Example:"
OUTPUTS_HEADING = "Outputs:"
class InputError(Exception):
  """Raised for invalid skydoc inputs (e.g. a bad --strip_prefix)."""
  pass
class ExtractedDocs(object):
  """Groups the documentation pieces extracted from a single docstring."""

  def __init__(self, doc, attr_docs, example_doc, output_docs):
    self.doc = doc                  # main free-form documentation text
    self.attr_docs = attr_docs      # mapping: attribute name -> description
    self.example_doc = example_doc  # dedented example snippet
    self.output_docs = output_docs  # mapping: output name -> description
def leading_whitespace(line):
  """Returns the count of whitespace characters at the start of the line."""
  for offset, char in enumerate(line):
    if not char.isspace():
      return offset
  # Entirely whitespace (or empty): every character counts.
  return len(line)
def validate_strip_prefix(strip_prefix, bzl_files):
  """Normalizes strip_prefix and checks it is common to all input files.

  Args:
    strip_prefix: The prefix to strip (may be empty; '/' is appended if
      missing).
    bzl_files: Paths of the input files that must all share the prefix.

  Returns:
    The prefix with a trailing '/' (or the original falsy value unchanged).

  Raises:
    InputError: If any input file does not start with the prefix.
  """
  if not strip_prefix:
    return strip_prefix
  prefix = strip_prefix
  if not prefix.endswith('/'):
    prefix += '/'
  offenders = [p for p in bzl_files if not p.startswith(prefix)]
  if offenders:
    raise InputError(
        'Input file %s does not have path prefix %s. Directory prefix set '
        'with --strip_prefix must be common to all input files.'
        % (offenders[0], strip_prefix))
  return prefix
def _parse_attribute_docs(attr_docs, lines, index):
  """Extracts documentation in the form of name: description.
  This includes documentation for attributes and outputs.
  Args:
    attr_docs: A dict used to store the extracted documentation.
    lines: List containing the input docstring split into lines.
    index: The index in lines containing the heading that begins the
        documentation, such as "Args:" or "Outputs:".
  Returns:
    Returns the next index after the documentation to resume processing
    documentation in the caller.
  """
  attr = None  # Current attribute name
  desc = None  # Description for current attribute
  args_leading_ws = leading_whitespace(lines[index])
  i = index + 1
  while i < len(lines):
    line = lines[i]
    # If a blank line is encountered, we have finished parsing the "Args"
    # section.
    if line.strip() and leading_whitespace(line) == args_leading_ws:
      break
    # In practice, users sometimes add a "-" prefix, so we strip it even
    # though it is not recommended by the style guide
    pattern = re.compile(r"""
        # Any amount of leading whitespace, plus an optional "-" prefix.
        ^\s*-?\s*
        # The attribute name, plus an optional "**" prefix for a **kwargs
        # attribute.
        ((?:\*\*)?[`\{\}\%\.\w\*]+)
        # A colon plus any amount of whitespace to separate the attribute name
        # from the description text.
        :\s*
        # The attribute description text.
        (.*)
        """, re.VERBOSE)
    match = re.search(pattern, line)
    if match:  # We have found a new attribute
      # Flush the previous attribute (if any) before starting the new one.
      if attr:
        attr_docs[attr] = escape(desc)
      attr, desc = match.group(1), match.group(2)
    elif attr:
      # Merge documentation when it is multiline
      desc = desc + "\n" + line.strip()
    i += + 1
  # Flush the final attribute collected by the loop above.
  if attr:
    attr_docs[attr] = escape(desc).strip()
  return i
def _parse_example_docs(examples, lines, index):
  """Collects the example block following an "Example[s]:" heading.

  Args:
    examples: List that receives the raw example lines.
    lines: The input docstring split into lines.
    index: Index in lines of the heading that opens the example section.

  Returns:
    The index of the first line after the example section, so the caller
    can resume processing there.
  """
  heading_indent = leading_whitespace(lines[index])
  pos = index + 1
  total = len(lines)
  while pos < total:
    current = lines[pos]
    # A non-blank line back at the heading's indentation ends the section.
    if current.strip() and leading_whitespace(current) == heading_indent:
      break
    examples.append(current)
    pos += 1
  return pos
def parse_docstring(doc):
    """Analyzes the documentation string for attributes.

    This looks for the "Args:" separator to fetch documentation for each
    attribute. The "Args" section ends at the first blank line.

    Args:
      doc: The documentation string

    Returns:
      An ExtractedDocs holding the remaining documentation string, the
      attribute docs, the example text and the output docs.
    """
    # collections.OrderedDict has existed since Python 2.7, so the previous
    # `hasattr(collections, 'OrderedDict')` fallback to a plain dict was dead
    # code (and the fallback would silently have lost attribute ordering).
    attr_docs = collections.OrderedDict()
    output_docs = collections.OrderedDict()
    examples = []
    lines = doc.split("\n")
    docs = []
    i = 0
    while i < len(lines):
        line = lines[i]
        if line.strip() == ARGS_HEADING:
            # The helpers return the index to resume at, so skip `i += 1`.
            i = _parse_attribute_docs(attr_docs, lines, i)
            continue
        elif line.strip() == EXAMPLES_HEADING or line.strip() == EXAMPLE_HEADING:
            i = _parse_example_docs(examples, lines, i)
            continue
        elif line.strip() == OUTPUTS_HEADING:
            i = _parse_attribute_docs(output_docs, lines, i)
            continue
        docs.append(line)
        i += 1
    doc = "\n".join(docs).strip()
    examples_doc = textwrap.dedent("\n".join(examples)).strip()
    return ExtractedDocs(doc, attr_docs, examples_doc, output_docs)
| {
"content_hash": "b35f450d9367605cbdfff69bd1d5b7b2",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 88,
"avg_line_length": 30.790123456790123,
"alnum_prop": 0.6672012830793905,
"repo_name": "bazelbuild/skydoc",
"id": "8ec4bd5982fe3959ee8310b3090de30ecc64259e",
"size": "5591",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skydoc/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16208"
},
{
"name": "HTML",
"bytes": "25410"
},
{
"name": "Python",
"bytes": "95857"
},
{
"name": "Shell",
"bytes": "9187"
},
{
"name": "Starlark",
"bytes": "42067"
}
],
"symlink_target": ""
} |
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from django.core.urlresolvers import reverse, reverse_lazy
from django.http import HttpResponseRedirect, JsonResponse
from django.shortcuts import get_object_or_404
from django.shortcuts import redirect, render
from django.views import generic
from braces import views
from wye.organisations.models import Organisation
from wye.profiles.models import Profile
from wye.social.sites.twitter import send_tweet
from wye.base.views import (
verify_user_profile)
from wye.base.constants import WorkshopStatus
from .forms import (
WorkshopForm,
WorkshopEditForm,
WorkshopFeedbackForm,
WorkshopListForm,
WorkshopVolunteer)
from .mixins import (
WorkshopEmailMixin,
WorkshopAccessMixin
)
from .utils import send_mail_to_group
from .models import Workshop, WorkshopFeedBack
@login_required
@verify_user_profile
def workshop_list(request):
    """List active REQUESTED workshops the current user may act on.

    Workshops are pre-filtered to the states the user marked as interested;
    optional GET parameters (location, level, section) narrow the list.
    """
    template_name = 'workshops/workshop_list.html'
    # BUG FIX: get_or_create() with a `user__id` lookup crashes on the create
    # path (Profile(user__id=...) is not a valid constructor argument). Pass
    # the user instance instead: it filters identically and can also create.
    user_profile, _created = Profile.objects.get_or_create(user=request.user)
    if not user_profile.is_profile_filled:
        return redirect('profiles:profile-edit', slug=request.user.username)
    context_dict = {}
    workshop_list = Workshop.objects.filter(
        is_active=True, status__in=[
            WorkshopStatus.REQUESTED]).order_by('-expected_date', 'status')
    workshop_list = workshop_list.filter(
        requester__location__state__id__in=[
            x.id for x in request.user.profile.interested_states.all()]
    )
    location_list = request.GET.getlist("location")
    if location_list:
        workshop_list = workshop_list.filter(
            requester__location__id__in=location_list
        )
    workshop_level_list = request.GET.getlist("level")
    if workshop_level_list:
        workshop_list = workshop_list.filter(
            workshop_level__in=workshop_level_list
        )
    workshop_section_list = request.GET.getlist("section")
    if workshop_section_list:
        workshop_list = workshop_list.filter(
            workshop_section__id__in=workshop_section_list
        )
    context_dict['workshop_list'] = workshop_list
    context_dict['user'] = request.user
    context_dict['is_not_tutor'] = False
    # A user can be both tutor and regional lead, hence the explicit
    # role checks rather than a simple "not tutor" test.
    if (Profile.is_regional_lead(request.user) or
            Profile.is_organiser(request.user) or
            Profile.is_admin(request.user)):
        context_dict['is_not_tutor'] = True
    context_dict['form'] = WorkshopListForm(user=request.user)
    return render(request, template_name, context_dict)
def workshop_details(request, pk):
    """Render the detail page for a single workshop.

    Contact details are shown to presenters, requesters, superusers and
    coordinators; the edit button is shown only to presenters.
    """
    template_name = 'workshops/workshop_detail.html'
    workshop_obj = get_object_or_404(Workshop, id=pk)
    user = request.user
    user_is_presenter = any(user == u for u in workshop_obj.presenter.all())
    user_is_requester = any(
        user == u for u in workshop_obj.requester.user.all())
    show_contact_flag = False
    # Coordinator check kept last so it only runs when the cheaper tests fail.
    if (user_is_presenter or user_is_requester or user.is_superuser or (
            (not user.is_anonymous()) and Profile.is_coordinator(user))):
        show_contact_flag = True
    display_edit_button = bool(user_is_presenter)
    is_admin = bool(user.is_superuser)
    form = WorkshopVolunteer(initial={
        'number_of_volunteers': workshop_obj.number_of_volunteers or 0})
    context_dict = {
        'workshop': workshop_obj,
        'show_contact_flag': show_contact_flag,
        'display_edit_button': display_edit_button,
        'is_admin': is_admin,
        'form': form,
        'user': request.user
    }
    return render(request, template_name, context_dict)
@login_required
@verify_user_profile
def workshop_create(request):
    """Create a new workshop request for one of the user's organisations.

    GET renders an empty form; POST validates, saves the workshop and
    notifies the interested group by mail and tweet.
    """
    template_name = 'workshops/workshop_create.html'
    context_dict = {}
    if not Organisation.list_user_organisations(request.user).exists():
        # Typo fix in the user-facing message ("organisaiton").
        msg = """
        To request workshop you need to create organisation.\n\n
        Please use organisation tab above to create your organisation
        """
        return render(request, 'error.html', {'message': msg})
    if request.method == 'GET':
        form = WorkshopForm(user=request.user)
        context_dict['form'] = form
        return render(request, template_name, context_dict)
    form = WorkshopForm(user=request.user, data=request.POST)
    if not form.is_valid():
        context_dict['form'] = form
        context_dict['errors'] = form.errors
        return render(request, template_name, context_dict)
    workshop = form.save()
    # domain = Site.objects.get_current().domain
    if workshop and workshop.id:
        context = {
            'workshop': workshop,
            'date': workshop.expected_date,
            # BUG FIX: build_absolute_uri() is a method of the request, not
            # of the Workshop model (cf. WorkshopAction.send_mail below).
            'workshop_url': request.build_absolute_uri(
                reverse('workshops:workshop_detail', args=[workshop.pk]))
        }
        send_mail_to_group(context, workshop)
        send_tweet(context)
    success_url = reverse_lazy('workshops:workshop_list')
    return HttpResponseRedirect(success_url)
class WorkshopUpdate(views.LoginRequiredMixin, WorkshopAccessMixin,
                     generic.UpdateView):
    """Edit an existing workshop (login and workshop access required)."""

    model = Workshop
    form_class = WorkshopEditForm
    template_name = 'workshops/workshop_update.html'

    def get_success_url(self):
        """Always return to the workshop list after a successful edit."""
        # pk = self.kwargs.get(self.pk_url_kwarg, None)
        self.success_url = reverse("workshops:workshop_list")
        return super(WorkshopUpdate, self).get_success_url()

    def get_initial(self):
        """Pre-fill the requester field with the organisation's name."""
        return {
            "requester": self.object.requester.name,
        }

    def get_form_kwargs(self):
        """Pass the request through to the form.

        NOTE(review): WorkshopEditForm presumably uses request.user for
        permission-aware behaviour — confirm in forms.py.
        """
        kwargs = super(WorkshopUpdate, self).get_form_kwargs()
        kwargs['request'] = self.request
        return kwargs
class WorkshopToggleActive(views.LoginRequiredMixin, views.CsrfExemptMixin,
                           views.JSONResponseMixin, WorkshopAccessMixin,
                           generic.UpdateView):
    """AJAX endpoint flipping a workshop's is_active flag."""

    model = Workshop
    fields = ('is_active', 'id')

    def post(self, request, *args, **kwargs):
        self.object = self.get_object()
        # NOTE(review): toggle_active() is assumed to both flip the flag and
        # build the JSON-serialisable response dict — confirm in the model.
        response = self.object.toggle_active(request.user, **kwargs)
        return self.render_json_response(response)
@login_required
def workshop_feedback_view(request, pk):
    """Collect feedback for a workshop; redirect to the list on success."""
    template_name = "workshops/workshop_feedback.html"
    context_dict = {'workshop': Workshop.objects.get(pk=pk)}
    if request.method == 'POST':
        form = WorkshopFeedbackForm(
            data=request.POST, user=request.user, id=pk)
        if form.is_valid():
            WorkshopFeedBack.save_feedback(
                request.user, pk, **request.POST)
            return HttpResponseRedirect(
                reverse_lazy('workshops:workshop_list'))
    else:
        form = WorkshopFeedbackForm(user=request.user, id=pk)
    # Invalid POST and plain GET both fall through to re-render the form.
    context_dict['form'] = form
    context_dict['user'] = request.user
    return render(request, template_name, context_dict)
class WorkshopAction(views.CsrfExemptMixin, views.LoginRequiredMixin,
                     views.JSONResponseMixin, WorkshopEmailMixin,
                     generic.UpdateView):
    """AJAX endpoint for workshop actions (assignment, per email_dir).

    The action itself is delegated to Workshop.manage_action(); this view
    serialises the result and sends notification mail when requested.
    """

    model = Workshop
    # Directory holding the notification e-mail templates.
    email_dir = 'email_messages/workshop/assign_me/'

    def post(self, request, *args, **kwargs):
        self.object = self.get_object()
        response = self.object.manage_action(request.user, **kwargs)
        # Only notify when the action succeeded and explicitly asked for it;
        # 'notify' is internal and stripped before the JSON goes out.
        if response['status'] and response.get('notify') is not None:
            self.send_mail(request.user, response['assigned'])
            del response['notify']
        return self.render_json_response(response)

    def send_mail(self, user, assigned):
        """Send email to presenter and org users."""
        workshop = self.object
        context = {
            'presenter': True,
            'assigned': assigned,
            'date': workshop.expected_date,
            'presenter_name': user.username,
            'workshop_organization': workshop.requester,
            'workshop_url': self.request.build_absolute_uri(reverse(
                'workshops:workshop_detail', args=[workshop.pk]
            ))
        }
        # email to presenter first, then the same context is reused for the
        # group mail, flagged as non-presenter and excluding the acting
        # user's own address.
        self.send_mail_to_presenter(user, context)
        context['presenter'] = False
        send_mail_to_group(context, workshop, exclude_emails=[user.email])
@csrf_exempt
@login_required
def workshop_update_volunteer(request, pk):
    """Update volunteer-related fields of a workshop via AJAX.

    GET returns the selectable volunteer counts; POST updates the number of
    volunteers and, optionally, the reimbursement flag and comments.
    """
    # BUG FIX: test request.method rather than the GET/POST dicts — a GET
    # request without query parameters has a falsy request.GET and used to
    # fall through to the error response. Matches the sibling volunteer
    # views below, which already check request.method.
    if request.method == 'GET':
        # BUG FIX: range() is not JSON-serialisable on Python 3.
        return JsonResponse({"items": list(range(1, 6))})
    if request.method == 'POST':
        volunteers = request.POST.get('number_of_volunteers')
        tutor_reimbursement_flag = request.POST.get('tutor_reimbursement_flag')
        comments = request.POST.get('comments')
        # .get() may return None and .strip() never does, so test both
        # (the old `volunteers.strip() not in ('', None)` could also raise
        # AttributeError when the field was missing).
        if volunteers is not None and volunteers.strip() != '':
            workshop_volunteer = Workshop.objects.filter(pk=pk)
            workshop_volunteer.update(number_of_volunteers=volunteers)
            if tutor_reimbursement_flag:
                workshop_volunteer.update(
                    tutor_reimbursement_flag=tutor_reimbursement_flag)
            if comments:
                workshop_volunteer.update(comments=comments)
            return JsonResponse({
                "status": True,
                "msg": "Updated successfully"})
    return JsonResponse({"status": False, "msg": "Something went wrong"})
@csrf_exempt
@login_required
def workshop_accept_as_volunteer(request, pk):
    """Register the current user as a volunteer for a workshop (POST only)."""
    if request.method != 'POST':
        return JsonResponse({"status": False, "msg": "Something went wrong"})
    workshop = Workshop.objects.get(pk=pk)
    user = request.user
    if workshop.number_of_volunteers == 0:
        # No volunteers were requested for this workshop at all.
        return JsonResponse({
            "status": False,
            "msg": "Volunteer not request for this workshop."})
    if workshop.number_of_volunteers - workshop.volunteer.count() >= 1:
        # There is still an open slot; guard against double registration.
        if user in workshop.volunteer.all():
            return JsonResponse({
                "status": False,
                "msg": "You are already registered as volunteer."})
        workshop.volunteer.add(user)
        return JsonResponse({
            "status": True,
            "msg": "Registered successfully."})
    return JsonResponse({
        "status": False,
        "msg": "Sorry, We have got required volunteers already"})
@csrf_exempt
@login_required
def workshop_opt_out_as_volunteer(request, pk):
    """Remove the current user from a workshop's volunteer list (POST only)."""
    if request.method != 'POST':
        return JsonResponse({"status": False, "msg": "Something went wrong"})
    workshop = Workshop.objects.get(pk=pk)
    user = request.user
    if user not in workshop.volunteer.all():
        return JsonResponse({
            "status": False,
            "msg": "You are not registered as volunteer."})
    workshop.volunteer.remove(user)
    workshop.save()
    return JsonResponse({
        "status": True,
        "msg": "Opt-out successfully."})
| {
"content_hash": "fbbf1ce17702e56e38c0e23f34ce7a74",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 79,
"avg_line_length": 36.670967741935485,
"alnum_prop": 0.6340605207600282,
"repo_name": "pythonindia/wye",
"id": "d28e9f661eeb36275908ae5ac762f87fc435efe3",
"size": "11368",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "wye/workshops/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20768"
},
{
"name": "HTML",
"bytes": "214025"
},
{
"name": "JavaScript",
"bytes": "26852"
},
{
"name": "Python",
"bytes": "250073"
},
{
"name": "Shell",
"bytes": "248"
}
],
"symlink_target": ""
} |
from pyramid import httpexceptions
from . import BaseTest
class ModelTest(BaseTest):
    """Read operations on a collection holding a single record."""

    def setUp(self):
        super(ModelTest, self).setUp()
        self.record = self.model.create_record({'field': 'value'})

    def test_list_gives_number_of_results_in_headers(self):
        self.resource.collection_get()
        headers = self.last_response.headers
        count = headers['Total-Records']
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(int(count), 1)

    def test_list_returns_all_records_in_data(self):
        result = self.resource.collection_get()
        records = result['data']
        self.assertEqual(len(records), 1)
        self.assertDictEqual(records[0], self.record)
class CreateTest(BaseTest):
    """Record creation through the resource's collection_post()."""

    def setUp(self):
        super(CreateTest, self).setUp()
        self.resource.request.validated = {'data': {'field': 'new'}}

    def test_new_records_are_linked_to_owner(self):
        created = self.resource.collection_post()['data']
        # get_record() raises if the record is not reachable by the caller.
        self.model.get_record(created['id'])

    def test_create_record_returns_at_least_id_and_last_modified(self):
        created = self.resource.collection_post()['data']
        for expected_key in (self.resource.model.id_field,
                             self.resource.model.modified_field,
                             'field'):
            self.assertIn(expected_key, created)
class DeleteModelTest(BaseTest):
    """Collection-wide DELETE behaviour."""

    def setUp(self):
        super(DeleteModelTest, self).setUp()
        self.patch_known_field.start()
        for value in ('a', 'b'):
            self.model.create_record({'field': value})

    def test_delete_on_list_removes_all_records(self):
        self.resource.collection_delete()
        remaining = self.resource.collection_get()['data']
        self.assertEqual(len(remaining), 0)

    def test_delete_returns_deleted_version_of_records(self):
        deleted = self.resource.collection_delete()['data'][0]
        self.assertIn('deleted', deleted)

    def test_delete_supports_collection_filters(self):
        # Delete only the records matching the filter, then verify the
        # other record survived.
        self.resource.request.GET = {'field': 'a'}
        self.resource.collection_delete()
        self.resource.request.GET = {}
        remaining = self.resource.collection_get()['data']
        self.assertEqual(len(remaining), 1)
class IsolatedModelsTest(BaseTest):
    """Records are partitioned per user: alice must not see bob's record."""

    def setUp(self):
        super(IsolatedModelsTest, self).setUp()
        self.stored = self.model.create_record({}, parent_id='bob')
        self.resource.record_id = self.stored['id']

    def get_request(self):
        request = super(IsolatedModelsTest, self).get_request()
        request.prefixed_userid = 'basicauth:alice'
        return request

    def get_context(self):
        context = super(IsolatedModelsTest, self).get_context()
        context.prefixed_userid = 'basicauth:alice'
        return context

    def test_list_is_filtered_by_user(self):
        resp = self.resource.collection_get()
        records = resp['data']
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(len(records), 0)

    def test_update_record_of_another_user_will_create_it(self):
        self.resource.request.validated = {'data': {'some': 'record'}}
        self.resource.put()
        self.model.get_record(record_id=self.stored['id'],
                              parent_id='basicauth:alice')  # not raising

    def test_cannot_modify_record_of_other_user(self):
        self.assertRaises(httpexceptions.HTTPNotFound, self.resource.patch)

    def test_cannot_delete_record_of_other_user(self):
        self.assertRaises(httpexceptions.HTTPNotFound, self.resource.delete)
| {
"content_hash": "9b27ec88b4e9f1da0f022ad2614d4eda",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 76,
"avg_line_length": 36.111111111111114,
"alnum_prop": 0.6483916083916084,
"repo_name": "monikagrabowska/osf.io",
"id": "e676c6276c62068c856fc229ad3a9e6640484741",
"size": "3575",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "kinto/tests/core/resource/test_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "176566"
},
{
"name": "HTML",
"bytes": "183119"
},
{
"name": "JavaScript",
"bytes": "2017358"
},
{
"name": "Jupyter Notebook",
"bytes": "8510"
},
{
"name": "Makefile",
"bytes": "6905"
},
{
"name": "Mako",
"bytes": "755899"
},
{
"name": "PLpgSQL",
"bytes": "22144"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "9632033"
},
{
"name": "Shell",
"bytes": "436"
}
],
"symlink_target": ""
} |
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
import os
import sys
import numpy as np
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
sys.path.append('..')
import linearsieve
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')
# Output directories for the rendered component galleries.
if not os.path.exists('faces'):
    os.makedirs('faces')
if not os.path.exists('faces/remainder'):
    os.makedirs('faces/remainder')
# Gallery layout: n_components images arranged in an n_row x n_col grid.
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)  # each face is reshaped to 64x64 for display
rng = RandomState(0)  # fixed seed for reproducible shuffling/decomposition
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering (subtract the per-pixel mean over all faces)
faces_centered = faces - faces.mean(axis=0)
# local centering (additionally remove each image's own mean)
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
    """Plot a titled grid of images, one subplot per component.

    A symmetric blue/white/red colour scale centred on zero is used so that
    positive and negative component values are visually comparable.
    """
    plt.figure(figsize=(2. * n_col, 2.26 * n_row))
    plt.suptitle(title, size=16)
    for idx, component in enumerate(images):
        plt.subplot(n_row, n_col, idx + 1)
        # Symmetric colour limits around zero.
        limit = max(component.max(), -component.min())
        plt.imshow(component.reshape(image_shape), cmap=plt.cm.bwr,
                   interpolation='nearest',
                   vmin=-limit, vmax=limit)
        plt.xticks(())
        plt.yticks(())
    plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
# Each entry is (display name, estimator instance, use centered data).
estimators = [
    ('Linear Sieve',
     linearsieve.Sieve(n_hidden=n_components), False),
    ('Eigenfaces - PCA',
     decomposition.PCA(n_components=n_components), True),
    ('Non-negative components - NMF',
     # NOTE(review): the `sparseness` parameter was removed from sklearn's
     # NMF in later releases — this script targets an older sklearn.
     decomposition.NMF(n_components=n_components, init='nndsvda', beta=5.0,
                       tol=5e-3, sparseness='components'), False),
    ('Independent components - FastICA',
     decomposition.FastICA(n_components=n_components, whiten=True), True),
    ('Sparse comp. - MiniBatchSparsePCA',
     decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
                                      n_iter=100, batch_size=3,
                                      random_state=rng), True),
    ('MiniBatchDictionaryLearning',
     decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
                                               n_iter=50, batch_size=3,
                                               random_state=rng), True),
    ('Cluster centers - MiniBatchKMeans',
     MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
                     max_iter=50, random_state=rng), True),
    ('Factor Analysis components - FA',
     decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
     True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
    print("Extracting the top %d %s..." % (n_components, name))
    t0 = time()
    data = faces
    if center:
        data = faces_centered
    estimator.fit(data)
    train_time = (time() - t0)
    print("done in %0.3fs" % train_time)
    # The learned components live under different attribute names depending
    # on the estimator type.
    if hasattr(estimator, 'cluster_centers_'):
        components_ = estimator.cluster_centers_
    elif hasattr(estimator, 'components_'):
        components_ = estimator.components_
    else:
        # The Linear Sieve stores its weights in `ws`; keep only the first
        # n_features entries of each row.
        components_ = np.array([estimator.ws[i][:n_features]
                                for i in range(n_components)])
    plot_gallery('%s - Train time %.1fs' % (name, train_time),
                 components_[:n_components])
    plt.savefig('faces/%s.pdf' % name)
    plt.clf()
# Fit a larger (48-component) sieve on the raw faces and plot all components.
data = faces
n_components = 48
out = linearsieve.Sieve(n_hidden=n_components)
out.fit(data)
components_ = np.array([out.ws[i][:n_features] for i in range(n_components)])
plot_gallery('%s' % 'Linear Sieve Components', components_[:n_components],
             n_col=10, n_row=5)
plt.savefig('faces/big_components.pdf')
plt.clf()
# For each sieve level, collect the remainder of every face, then plot how
# each face's remainder evolves across levels.
xs = []
for i in range(n_components):
    # NOTE(review): transform(..., remainder=True) is assumed to return
    # (latent factors, remainder) — confirm against linearsieve's API.
    ys, xbar = out.transform(data, level=i, remainder=True)
    xs.append(xbar[:, :n_features])
xs = np.array(xs)
for l in range(30):
    plot_gallery('Face %d' % l, xs[:, l, :], n_col=8, n_row=6)
    plt.savefig('faces/remainder/%d.pdf' % l)
    plt.close('all')
| {
"content_hash": "ea1b4069a0c4b287f4afc3310e61947b",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 112,
"avg_line_length": 34.94202898550725,
"alnum_prop": 0.6061800082953132,
"repo_name": "gregversteeg/LinearSieve",
"id": "1e9dc965a5bfc6f11b14862b703e832408c79cc0",
"size": "4822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_faces.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1660"
},
{
"name": "Python",
"bytes": "39446"
}
],
"symlink_target": ""
} |
"""Base implementation of event loop.
The event loop can be broken up into a multiplexer (the part
responsible for notifying us of IO events) and the event loop proper,
which wraps a multiplexer with functionality for scheduling callbacks,
immediately or at a given time in the future.
Whenever a public API takes a callback, subsequent positional
arguments will be passed to the callback if/when it is called. This
avoids the proliferation of trivial lambdas implementing closures.
Keyword arguments for the callback are not supported; this is a
conscious design decision, leaving the door open for keyword arguments
to modify the meaning of the API call itself.
"""
import collections
import concurrent.futures
import heapq
import logging
import socket
import subprocess
import time
import os
import sys
from . import events
from . import futures
from . import tasks
from .log import logger
__all__ = ['BaseEventLoop', 'Server']
# Argument for default thread pool executor creation.
_MAX_WORKERS = 5
class _StopError(BaseException):
    """Raised to stop the event loop."""
    # Derives from BaseException (not Exception) so ordinary
    # `except Exception` handlers in user callbacks cannot swallow it.
def _check_resolved_address(sock, address):
# Ensure that the address is already resolved to avoid the trap of hanging
# the entire event loop when the address requires doing a DNS lookup.
family = sock.family
if family == socket.AF_INET:
host, port = address
elif family == socket.AF_INET6:
host, port = address[:2]
else:
return
type_mask = 0
if hasattr(socket, 'SOCK_NONBLOCK'):
type_mask |= socket.SOCK_NONBLOCK
if hasattr(socket, 'SOCK_CLOEXEC'):
type_mask |= socket.SOCK_CLOEXEC
# Use getaddrinfo(AI_NUMERICHOST) to ensure that the address is
# already resolved.
try:
socket.getaddrinfo(host, port,
family=family,
type=(sock.type & ~type_mask),
proto=sock.proto,
flags=socket.AI_NUMERICHOST)
except socket.gaierror as err:
raise ValueError("address must be resolved (IP address), got %r: %s"
% (address, err))
def _raise_stop_error(*args):
    """Callback helper: unconditionally raise _StopError (args are ignored)."""
    raise _StopError
class Server(events.AbstractServer):
    """Tracks a server's listening sockets and its active transports.

    Transports attach/detach themselves; wait_closed() lets callers block
    until close() has been called and the last active transport detached.
    """

    def __init__(self, loop, sockets):
        self.loop = loop
        self.sockets = sockets      # becomes None once close() is called
        self.active_count = 0       # number of currently attached transports
        self.waiters = []           # futures resolved by _wakeup()

    def attach(self, transport):
        # Attaching after close() would be a logic error.
        assert self.sockets is not None
        self.active_count += 1

    def detach(self, transport):
        assert self.active_count > 0
        self.active_count -= 1
        # If the server was already closed, the last detach wakes waiters.
        if self.active_count == 0 and self.sockets is None:
            self._wakeup()

    def close(self):
        sockets = self.sockets
        if sockets is not None:
            # Mark closed first so attach() asserts and detach() can wake us.
            self.sockets = None
            for sock in sockets:
                self.loop._stop_serving(sock)
            if self.active_count == 0:
                self._wakeup()

    def _wakeup(self):
        # Resolve all pending wait_closed() futures exactly once;
        # self.waiters is set to None so a second wakeup cannot happen.
        waiters = self.waiters
        self.waiters = None
        for waiter in waiters:
            if not waiter.done():
                waiter.set_result(waiter)

    @tasks.coroutine
    def wait_closed(self):
        """Coroutine: wait until the server has been closed and drained."""
        # NOTE(review): this returns immediately once close() has been
        # called even if transports are still active (sockets is None) —
        # confirm that is the intended contract.
        if self.sockets is None or self.waiters is None:
            return
        waiter = futures.Future(loop=self.loop)
        self.waiters.append(waiter)
        yield from waiter
class BaseEventLoop(events.AbstractEventLoop):
    def __init__(self):
        # FIFO of Handles ready to run on the next loop iteration.
        self._ready = collections.deque()
        # Heap of TimerHandles ordered by scheduled time (see call_at()).
        self._scheduled = []
        # Lazily-created executor used by run_in_executor(None, ...).
        self._default_executor = None
        self._internal_fds = 0
        self._running = False
        # Granularity of the monotonic clock backing time().
        self._clock_resolution = time.get_clock_info('monotonic').resolution
        self._exception_handler = None
        self._debug = False
    def _make_socket_transport(self, sock, protocol, waiter=None, *,
                               extra=None, server=None):
        """Create socket transport."""
        # Provided by the selector/proactor event-loop subclass.
        raise NotImplementedError
    def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter, *,
                            server_side=False, server_hostname=None,
                            extra=None, server=None):
        """Create SSL transport."""
        # Provided by the selector/proactor event-loop subclass.
        raise NotImplementedError
    def _make_datagram_transport(self, sock, protocol,
                                 address=None, extra=None):
        """Create datagram transport."""
        # Provided by the selector/proactor event-loop subclass.
        raise NotImplementedError
    def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
                                  extra=None):
        """Create read pipe transport."""
        # Provided by the selector/proactor event-loop subclass.
        raise NotImplementedError
    def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
                                   extra=None):
        """Create write pipe transport."""
        # Provided by the selector/proactor event-loop subclass.
        raise NotImplementedError
    @tasks.coroutine
    def _make_subprocess_transport(self, protocol, args, shell,
                                   stdin, stdout, stderr, bufsize,
                                   extra=None, **kwargs):
        """Create subprocess transport."""
        # Provided by the selector/proactor event-loop subclass.
        raise NotImplementedError
    def _read_from_self(self):
        """Drain the wake-up signal written by _write_to_self() (subclass hook)."""
        raise NotImplementedError
    def _write_to_self(self):
        """Wake the loop up, e.g. from another thread (see call_soon_threadsafe)."""
        raise NotImplementedError
    def _process_events(self, event_list):
        """Process selector events."""
        # Provided by the selector/proactor event-loop subclass.
        raise NotImplementedError
def run_forever(self):
"""Run until stop() is called."""
if self._running:
raise RuntimeError('Event loop is running.')
self._running = True
try:
while True:
try:
self._run_once()
except _StopError:
break
finally:
self._running = False
    def run_until_complete(self, future):
        """Run until the Future is done.

        If the argument is a coroutine, it is wrapped in a Task.

        WARNING: calling run_until_complete() twice with the same coroutine
        would wrap it in two different Tasks.

        Return the Future's result, or raise its exception.
        """
        # NOTE(review): `tasks.async` is a syntax error on Python 3.7+
        # (async became a keyword); this module targets older Python.
        future = tasks.async(future, loop=self)
        future.add_done_callback(_raise_stop_error)
        self.run_forever()
        future.remove_done_callback(_raise_stop_error)
        if not future.done():
            # run_forever() returned for some other reason (e.g. a second
            # stop() call) before the future finished.
            raise RuntimeError('Event loop stopped before Future completed.')
        return future.result()
    def stop(self):
        """Stop running the event loop.

        Every callback scheduled before stop() is called will run.
        Callbacks scheduled after stop() is called won't; however,
        those callbacks will run if run_forever() is called again later.
        """
        # _raise_stop_error raises _StopError, which run_forever() catches.
        self.call_soon(_raise_stop_error)
def close(self):
"""Close the event loop.
This clears the queues and shuts down the executor,
but does not wait for the executor to finish.
"""
self._ready.clear()
self._scheduled.clear()
executor = self._default_executor
if executor is not None:
self._default_executor = None
executor.shutdown(wait=False)
    def is_running(self):
        """Return True while run_forever() is executing."""
        return self._running
    def time(self):
        """Return the time according to the event loop's clock.

        This is a monotonic clock, unrelated to wall-clock time.
        """
        return time.monotonic()
    def call_later(self, delay, callback, *args):
        """Arrange for a callback to be called at a given time.

        Return a Handle: an opaque object with a cancel() method that
        can be used to cancel the call.

        The delay can be an int or float, expressed in seconds. It is
        always a relative time.

        Each callback will be called exactly once. If two callbacks are
        scheduled for exactly the same time, it is undefined which will
        be called first.

        Any positional arguments after the callback will be passed to
        the callback when it is called.
        """
        # Implemented in terms of call_at() against the loop's own clock.
        return self.call_at(self.time() + delay, callback, *args)
def call_at(self, when, callback, *args):
"""Like call_later(), but uses an absolute time."""
if tasks.iscoroutinefunction(callback):
raise TypeError("coroutines cannot be used with call_at()")
if self._debug:
self._assert_is_current_event_loop()
timer = events.TimerHandle(when, callback, args, self)
heapq.heappush(self._scheduled, timer)
return timer
    def call_soon(self, callback, *args):
        """Arrange for a callback to be called as soon as possible.

        This operates as a FIFO queue, callbacks are called in the
        order in which they are registered. Each callback will be
        called exactly once.

        Any positional arguments after the callback will be passed to
        the callback when it is called.
        """
        # check_loop=True: in debug mode, verify we run on the current loop.
        return self._call_soon(callback, args, check_loop=True)
def _call_soon(self, callback, args, check_loop):
if tasks.iscoroutinefunction(callback):
raise TypeError("coroutines cannot be used with call_soon()")
if self._debug and check_loop:
self._assert_is_current_event_loop()
handle = events.Handle(callback, args, self)
self._ready.append(handle)
return handle
    def _assert_is_current_event_loop(self):
        """Asserts that this event loop is the current event loop.

        Non-threadsafe methods of this class make this assumption and will
        likely behave incorrectly when the assumption is violated.

        Should only be called when (self._debug == True). The caller is
        responsible for checking this condition for performance reasons.
        """
        if events.get_event_loop() is not self:
            raise RuntimeError(
                "non-threadsafe operation invoked on an event loop other "
                "than the current one")
    def call_soon_threadsafe(self, callback, *args):
        """Like call_soon(), but safe to call from another thread."""
        handle = self._call_soon(callback, args, check_loop=False)
        # Wake the loop up in case it is blocked waiting for events.
        self._write_to_self()
        return handle
    def run_in_executor(self, executor, callback, *args):
        """Run callback(*args) in an executor; return a Future for the result.

        If executor is None, a shared default ThreadPoolExecutor (created
        lazily with _MAX_WORKERS threads) is used.
        """
        if tasks.iscoroutinefunction(callback):
            raise TypeError("coroutines cannot be used with run_in_executor()")
        if isinstance(callback, events.Handle):
            # Unwrap a Handle into its callback/args; a cancelled handle
            # resolves immediately to None without touching the executor.
            assert not args
            assert not isinstance(callback, events.TimerHandle)
            if callback._cancelled:
                f = futures.Future(loop=self)
                f.set_result(None)
                return f
            callback, args = callback._callback, callback._args
        if executor is None:
            executor = self._default_executor
            if executor is None:
                executor = concurrent.futures.ThreadPoolExecutor(_MAX_WORKERS)
                self._default_executor = executor
        # Bridge the concurrent.futures.Future into an asyncio Future.
        return futures.wrap_future(executor.submit(callback, *args), loop=self)
    def set_default_executor(self, executor):
        """Set the executor used by run_in_executor() when none is given."""
        self._default_executor = executor
    def getaddrinfo(self, host, port, *,
                    family=0, type=0, proto=0, flags=0):
        """Resolve host/port in the default executor; returns a Future."""
        return self.run_in_executor(None, socket.getaddrinfo,
                                    host, port, family, type, proto, flags)
    def getnameinfo(self, sockaddr, flags=0):
        """Reverse-resolve sockaddr in the default executor; returns a Future."""
        return self.run_in_executor(None, socket.getnameinfo, sockaddr, flags)
    @tasks.coroutine
    def create_connection(self, protocol_factory, host=None, port=None, *,
                          ssl=None, family=0, proto=0, flags=0, sock=None,
                          local_addr=None, server_hostname=None):
        """Coroutine: connect a (transport, protocol) pair to host/port.

        Either host/port or an already-connected `sock` must be given, not
        both. Every address returned by getaddrinfo() is tried in turn; if
        all fail, a representative OSError is raised.
        """
        if server_hostname is not None and not ssl:
            raise ValueError('server_hostname is only meaningful with ssl')
        if server_hostname is None and ssl:
            # Use host as default for server_hostname. It is an error
            # if host is empty or not set, e.g. when an
            # already-connected socket was passed or when only a port
            # is given. To avoid this error, you can pass
            # server_hostname='' -- this will bypass the hostname
            # check. (This also means that if host is a numeric
            # IP/IPv6 address, we will attempt to verify that exact
            # address; this will probably fail, but it is possible to
            # create a certificate for a specific IP address, so we
            # don't judge it here.)
            if not host:
                raise ValueError('You must set server_hostname '
                                 'when using ssl without a host')
            server_hostname = host
        if host is not None or port is not None:
            if sock is not None:
                raise ValueError(
                    'host/port and sock can not be specified at the same time')
            f1 = self.getaddrinfo(
                host, port, family=family,
                type=socket.SOCK_STREAM, proto=proto, flags=flags)
            fs = [f1]
            if local_addr is not None:
                f2 = self.getaddrinfo(
                    *local_addr, family=family,
                    type=socket.SOCK_STREAM, proto=proto, flags=flags)
                fs.append(f2)
            else:
                f2 = None
            # Resolve remote and (optionally) local addresses concurrently.
            yield from tasks.wait(fs, loop=self)
            infos = f1.result()
            if not infos:
                raise OSError('getaddrinfo() returned empty list')
            if f2 is not None:
                laddr_infos = f2.result()
                if not laddr_infos:
                    raise OSError('getaddrinfo() returned empty list')
            exceptions = []
            for family, type, proto, cname, address in infos:
                try:
                    sock = socket.socket(family=family, type=type, proto=proto)
                    sock.setblocking(False)
                    if f2 is not None:
                        # Bind to the first local address that works; if
                        # none does, skip this remote candidate entirely.
                        for _, _, _, _, laddr in laddr_infos:
                            try:
                                sock.bind(laddr)
                                break
                            except OSError as exc:
                                exc = OSError(
                                    exc.errno, 'error while '
                                    'attempting to bind on address '
                                    '{!r}: {}'.format(
                                        laddr, exc.strerror.lower()))
                                exceptions.append(exc)
                        else:
                            sock.close()
                            sock = None
                            continue
                    yield from self.sock_connect(sock, address)
                except OSError as exc:
                    if sock is not None:
                        sock.close()
                    exceptions.append(exc)
                else:
                    break
            else:
                # Every candidate address failed; surface the error(s).
                if len(exceptions) == 1:
                    raise exceptions[0]
                else:
                    # If they all have the same str(), raise one.
                    model = str(exceptions[0])
                    if all(str(exc) == model for exc in exceptions):
                        raise exceptions[0]
                    # Raise a combined exception so the user can see all
                    # the various error messages.
                    raise OSError('Multiple exceptions: {}'.format(
                        ', '.join(str(exc) for exc in exceptions)))
        elif sock is None:
            raise ValueError(
                'host and port was not specified and no sock specified')
        sock.setblocking(False)
        transport, protocol = yield from self._create_connection_transport(
            sock, protocol_factory, ssl, server_hostname)
        return transport, protocol
@tasks.coroutine
def _create_connection_transport(self, sock, protocol_factory, ssl,
server_hostname):
protocol = protocol_factory()
waiter = futures.Future(loop=self)
if ssl:
sslcontext = None if isinstance(ssl, bool) else ssl
transport = self._make_ssl_transport(
sock, protocol, sslcontext, waiter,
server_side=False, server_hostname=server_hostname)
else:
transport = self._make_socket_transport(sock, protocol, waiter)
yield from waiter
return transport, protocol
    @tasks.coroutine
    def create_datagram_endpoint(self, protocol_factory,
                                 local_addr=None, remote_addr=None, *,
                                 family=0, proto=0, flags=0):
        """Create datagram connection.

        Resolves *local_addr* and/or *remote_addr* with getaddrinfo(),
        groups the results by (family, protocol), and tries each candidate
        pair in turn until a socket can be bound (and, when *remote_addr*
        is given, connected).  Returns ``(transport, protocol)`` on
        success; if every candidate fails, re-raises the first OSError
        collected.  When neither address is given, *family* must be set
        explicitly and an unbound, unconnected socket is created.
        """
        if not (local_addr or remote_addr):
            if family == 0:
                raise ValueError('unexpected address family')
            # Single candidate: no concrete local/remote addresses.
            addr_pairs_info = (((family, proto), (None, None)),)
        else:
            # Group resolved addresses by (family, protocol) so each
            # candidate pair is internally consistent.
            addr_infos = collections.OrderedDict()
            # idx 0 = local side, idx 1 = remote side.
            for idx, addr in ((0, local_addr), (1, remote_addr)):
                if addr is not None:
                    assert isinstance(addr, tuple) and len(addr) == 2, (
                        '2-tuple is expected')
                    infos = yield from self.getaddrinfo(
                        *addr, family=family, type=socket.SOCK_DGRAM,
                        proto=proto, flags=flags)
                    if not infos:
                        raise OSError('getaddrinfo() returned empty list')
                    for fam, _, pro, _, address in infos:
                        key = (fam, pro)
                        if key not in addr_infos:
                            addr_infos[key] = [None, None]
                        addr_infos[key][idx] = address
            # Each requested side must have an address for a
            # (family, proto) pair to remain a usable candidate.
            addr_pairs_info = [
                (key, addr_pair) for key, addr_pair in addr_infos.items()
                if not ((local_addr and addr_pair[0] is None) or
                        (remote_addr and addr_pair[1] is None))]
            if not addr_pairs_info:
                raise ValueError('can not get address information')
        exceptions = []
        for ((family, proto),
             (local_address, remote_address)) in addr_pairs_info:
            sock = None
            r_addr = None
            try:
                sock = socket.socket(
                    family=family, type=socket.SOCK_DGRAM, proto=proto)
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                sock.setblocking(False)
                if local_addr:
                    sock.bind(local_address)
                if remote_addr:
                    yield from self.sock_connect(sock, remote_address)
                    r_addr = remote_address
            except OSError as exc:
                if sock is not None:
                    sock.close()
                exceptions.append(exc)
            else:
                # Candidate succeeded; stop trying.
                break
        else:
            # Loop exhausted without a break: every candidate failed.
            raise exceptions[0]
        protocol = protocol_factory()
        transport = self._make_datagram_transport(sock, protocol, r_addr)
        return transport, protocol
    @tasks.coroutine
    def create_server(self, protocol_factory, host=None, port=None,
                      *,
                      family=socket.AF_UNSPEC,
                      flags=socket.AI_PASSIVE,
                      sock=None,
                      backlog=100,
                      ssl=None,
                      reuse_address=None):
        """Create a TCP server listening on *host*/*port*.

        Resolves *host*/*port* with getaddrinfo() and binds one listening
        socket per resolved address; alternatively a pre-made *sock* may
        be supplied instead of host/port (the two forms are mutually
        exclusive).  Each socket is set non-blocking and handed to
        ``_start_serving``.  Returns a ``Server`` wrapping the listening
        sockets.  *reuse_address* defaults to True on POSIX (except
        Cygwin); *ssl* must be an SSLContext or None.
        """
        if isinstance(ssl, bool):
            raise TypeError('ssl argument must be an SSLContext or None')
        if host is not None or port is not None:
            if sock is not None:
                raise ValueError(
                    'host/port and sock can not be specified at the same time')
            # 0 fallback keeps the later comparison safe on platforms
            # without IPv6 support.
            AF_INET6 = getattr(socket, 'AF_INET6', 0)
            if reuse_address is None:
                reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
            sockets = []
            if host == '':
                host = None
            infos = yield from self.getaddrinfo(
                host, port, family=family,
                type=socket.SOCK_STREAM, proto=0, flags=flags)
            if not infos:
                raise OSError('getaddrinfo() returned empty list')
            completed = False
            try:
                for res in infos:
                    af, socktype, proto, canonname, sa = res
                    try:
                        sock = socket.socket(af, socktype, proto)
                    except socket.error:
                        # Assume it's a bad family/type/protocol combination.
                        continue
                    sockets.append(sock)
                    if reuse_address:
                        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,
                                        True)
                    # Disable IPv4/IPv6 dual stack support (enabled by
                    # default on Linux) which makes a single socket
                    # listen on both address families.
                    if af == AF_INET6 and hasattr(socket, 'IPPROTO_IPV6'):
                        sock.setsockopt(socket.IPPROTO_IPV6,
                                        socket.IPV6_V6ONLY,
                                        True)
                    try:
                        sock.bind(sa)
                    except OSError as err:
                        raise OSError(err.errno, 'error while attempting '
                                      'to bind on address %r: %s'
                                      % (sa, err.strerror.lower()))
                completed = True
            finally:
                # On any failure above, close every socket opened so far.
                if not completed:
                    for sock in sockets:
                        sock.close()
        else:
            if sock is None:
                raise ValueError(
                    'host and port was not specified and no sock specified')
            sockets = [sock]
        server = Server(self, sockets)
        for sock in sockets:
            sock.listen(backlog)
            sock.setblocking(False)
            self._start_serving(protocol_factory, sock, ssl, server)
        return server
@tasks.coroutine
def connect_read_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = futures.Future(loop=self)
transport = self._make_read_pipe_transport(pipe, protocol, waiter)
yield from waiter
return transport, protocol
@tasks.coroutine
def connect_write_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = futures.Future(loop=self)
transport = self._make_write_pipe_transport(pipe, protocol, waiter)
yield from waiter
return transport, protocol
@tasks.coroutine
def subprocess_shell(self, protocol_factory, cmd, *, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=False, shell=True, bufsize=0,
**kwargs):
if not isinstance(cmd, (bytes, str)):
raise ValueError("cmd must be a string")
if universal_newlines:
raise ValueError("universal_newlines must be False")
if not shell:
raise ValueError("shell must be True")
if bufsize != 0:
raise ValueError("bufsize must be 0")
protocol = protocol_factory()
transport = yield from self._make_subprocess_transport(
protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
return transport, protocol
@tasks.coroutine
def subprocess_exec(self, protocol_factory, program, *args,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=False,
shell=False, bufsize=0, **kwargs):
if universal_newlines:
raise ValueError("universal_newlines must be False")
if shell:
raise ValueError("shell must be False")
if bufsize != 0:
raise ValueError("bufsize must be 0")
popen_args = (program,) + args
for arg in popen_args:
if not isinstance(arg, (str, bytes)):
raise TypeError("program arguments must be "
"a bytes or text string, not %s"
% type(arg).__name__)
protocol = protocol_factory()
transport = yield from self._make_subprocess_transport(
protocol, popen_args, False, stdin, stdout, stderr,
bufsize, **kwargs)
return transport, protocol
def set_exception_handler(self, handler):
"""Set handler as the new event loop exception handler.
If handler is None, the default exception handler will
be set.
If handler is a callable object, it should have a
matching signature to '(loop, context)', where 'loop'
will be a reference to the active event loop, 'context'
will be a dict object (see `call_exception_handler()`
documentation for details about context).
"""
if handler is not None and not callable(handler):
raise TypeError('A callable object or None is expected, '
'got {!r}'.format(handler))
self._exception_handler = handler
def default_exception_handler(self, context):
"""Default exception handler.
This is called when an exception occurs and no exception
handler is set, and can be called by a custom exception
handler that wants to defer to the default behavior.
context parameter has the same meaning as in
`call_exception_handler()`.
"""
message = context.get('message')
if not message:
message = 'Unhandled exception in event loop'
exception = context.get('exception')
if exception is not None:
exc_info = (type(exception), exception, exception.__traceback__)
else:
exc_info = False
log_lines = [message]
for key in sorted(context):
if key in {'message', 'exception'}:
continue
log_lines.append('{}: {!r}'.format(key, context[key]))
logger.error('\n'.join(log_lines), exc_info=exc_info)
    def call_exception_handler(self, context):
        """Call the current event loop exception handler.
        context is a dict object containing the following keys
        (new keys maybe introduced later):
        - 'message': Error message;
        - 'exception' (optional): Exception object;
        - 'future' (optional): Future instance;
        - 'handle' (optional): Handle instance;
        - 'protocol' (optional): Protocol instance;
        - 'transport' (optional): Transport instance;
        - 'socket' (optional): Socket instance.
        Note: this method should not be overloaded in subclassed
        event loops. For any custom exception handling, use
        `set_exception_handler()` method.
        """
        if self._exception_handler is None:
            # No custom handler installed: run the default one, guarded
            # so a buggy override cannot take down the loop.
            try:
                self.default_exception_handler(context)
            except Exception:
                # Second protection layer for unexpected errors
                # in the default implementation, as well as for subclassed
                # event loops with overloaded "default_exception_handler".
                logger.error('Exception in default exception handler',
                             exc_info=True)
        else:
            try:
                self._exception_handler(self, context)
            except Exception as exc:
                # Exception in the user set custom exception handler.
                try:
                    # Let's try default handler.
                    self.default_exception_handler({
                        'message': 'Unhandled error in exception handler',
                        'exception': exc,
                        'context': context,
                    })
                except Exception:
                    # Guard 'default_exception_handler' in case it's
                    # overloaded.
                    logger.error('Exception in default exception handler '
                                 'while handling an unexpected error '
                                 'in custom exception handler',
                                 exc_info=True)
def _add_callback(self, handle):
"""Add a Handle to ready or scheduled."""
assert isinstance(handle, events.Handle), 'A Handle is required here'
if handle._cancelled:
return
if isinstance(handle, events.TimerHandle):
heapq.heappush(self._scheduled, handle)
else:
self._ready.append(handle)
    def _add_callback_signalsafe(self, handle):
        """Like _add_callback() but called from a signal handler."""
        self._add_callback(handle)
        # Wake the selector so the newly queued handle is noticed even if
        # the loop is currently blocked in a poll.
        self._write_to_self()
def _run_once(self):
"""Run one full iteration of the event loop.
This calls all currently ready callbacks, polls for I/O,
schedules the resulting callbacks, and finally schedules
'call_later' callbacks.
"""
# Remove delayed calls that were cancelled from head of queue.
while self._scheduled and self._scheduled[0]._cancelled:
heapq.heappop(self._scheduled)
timeout = None
if self._ready:
timeout = 0
elif self._scheduled:
# Compute the desired timeout.
when = self._scheduled[0]._when
deadline = max(0, when - self.time())
if timeout is None:
timeout = deadline
else:
timeout = min(timeout, deadline)
# TODO: Instrumentation only in debug mode?
if logger.isEnabledFor(logging.INFO):
t0 = self.time()
event_list = self._selector.select(timeout)
t1 = self.time()
if t1-t0 >= 1:
level = logging.INFO
else:
level = logging.DEBUG
if timeout is not None:
logger.log(level, 'poll %.3f took %.3f seconds',
timeout, t1-t0)
else:
logger.log(level, 'poll took %.3f seconds', t1-t0)
else:
event_list = self._selector.select(timeout)
self._process_events(event_list)
# Handle 'later' callbacks that are ready.
end_time = self.time() + self._clock_resolution
while self._scheduled:
handle = self._scheduled[0]
if handle._when >= end_time:
break
handle = heapq.heappop(self._scheduled)
self._ready.append(handle)
# This is the only place where callbacks are actually *called*.
# All other places just add them to ready.
# Note: We run all currently scheduled callbacks, but not any
# callbacks scheduled by callbacks run this time around --
# they will be run the next time (after another I/O poll).
# Use an idiom that is threadsafe without using locks.
ntodo = len(self._ready)
for i in range(ntodo):
handle = self._ready.popleft()
if not handle._cancelled:
handle._run()
handle = None # Needed to break cycles when an exception occurs.
    def get_debug(self):
        """Return the loop's debug flag (set via set_debug())."""
        return self._debug
    def set_debug(self, enabled):
        """Set the loop's debug flag (read via get_debug())."""
        self._debug = enabled
| {
"content_hash": "3dbda64efe86531868dddf185f52528e",
"timestamp": "",
"source": "github",
"line_count": 828,
"max_line_length": 79,
"avg_line_length": 38.51932367149758,
"alnum_prop": 0.5510440835266821,
"repo_name": "leetreveil/tulip",
"id": "d2bdc07d36c23b1d963f088a8e8d44ec75481055",
"size": "31894",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asyncio/base_events.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "38501"
},
{
"name": "Python",
"bytes": "779115"
},
{
"name": "Shell",
"bytes": "1223"
}
],
"symlink_target": ""
} |
from __future__ import print_function, unicode_literals
from django.core.cache import cache
from django.db.models import signals
from cmstemplates import models as m
def delete_templategroup_cache(sender, instance, **kwargs):
    """Drop the cache entry keyed by the TemplateGroup instance.

    Django signal receiver; *instance* is the ``TemplateGroup`` that was
    saved or deleted.  Parameter names follow the signal-dispatch
    convention and must not change.
    """
    cache.delete(instance.cache_key)
def delete_template_templategroup_cache(sender, instance, **kwargs):
    """Drop the cache entry of the group owning a Template instance.

    Django signal receiver; *instance* is the ``Template`` that was
    saved or deleted, and the invalidated key is its ``group``'s.
    """
    cache.delete(instance.group.cache_key)
# Saving or deleting a TemplateGroup invalidates its own cache entry.
signals.post_save.connect(
    delete_templategroup_cache,
    sender=m.TemplateGroup,
    dispatch_uid='delete_templategroup_cache_post_save',
)
signals.post_delete.connect(
    delete_templategroup_cache,
    sender=m.TemplateGroup,
    dispatch_uid='delete_templategroup_cache_post_delete',
)

# Saving or deleting a Template invalidates its parent group's entry.
signals.post_save.connect(
    delete_template_templategroup_cache,
    sender=m.Template,
    dispatch_uid='delete_template_templategroup_cache_post_save',
)
signals.post_delete.connect(
    delete_template_templategroup_cache,
    sender=m.Template,
    dispatch_uid='delete_template_templategroup_cache_post_delete',
)
| {
"content_hash": "9c61bb72a991c837c87ca3217c19a54d",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 68,
"avg_line_length": 25.743589743589745,
"alnum_prop": 0.7599601593625498,
"repo_name": "asyncee/django-cmstemplates",
"id": "f6e121b371e7eefd158cc502bf4e33d05f4c52c2",
"size": "1021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmstemplates/signals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "373"
},
{
"name": "Python",
"bytes": "21239"
},
{
"name": "Shell",
"bytes": "217"
}
],
"symlink_target": ""
} |
from warnings import warn
from itertools import product
import numpy as np
from skbio.alignment import TabularMSA
from skbio.alignment._ssw_wrapper import StripedSmithWaterman
from skbio.sequence import DNA, RNA, Protein
from skbio.sequence import GrammaredSequence
from skbio.util import EfficiencyWarning
from skbio.util._decorator import experimental, deprecated
# This is temporary: blosum50 does not exist in skbio yet as per
# issue 161. When the issue is resolved, this should be removed in favor
# of an import.
# The table is symmetric: blosum50[a][b] == blosum50[b][a] for all keys.
blosum50 = \
    {
        '*': {'*': 1, 'A': -5, 'C': -5, 'B': -5, 'E': -5, 'D': -5, 'G': -5,
              'F': -5, 'I': -5, 'H': -5, 'K': -5, 'M': -5, 'L': -5,
              'N': -5, 'Q': -5, 'P': -5, 'S': -5, 'R': -5, 'T': -5,
              'W': -5, 'V': -5, 'Y': -5, 'X': -5, 'Z': -5},
        'A': {'*': -5, 'A': 5, 'C': -1, 'B': -2, 'E': -1, 'D': -2, 'G': 0,
              'F': -3, 'I': -1, 'H': -2, 'K': -1, 'M': -1, 'L': -2,
              'N': -1, 'Q': -1, 'P': -1, 'S': 1, 'R': -2, 'T': 0, 'W': -3,
              'V': 0, 'Y': -2, 'X': -1, 'Z': -1},
        'C': {'*': -5, 'A': -1, 'C': 13, 'B': -3, 'E': -3, 'D': -4,
              'G': -3, 'F': -2, 'I': -2, 'H': -3, 'K': -3, 'M': -2,
              'L': -2, 'N': -2, 'Q': -3, 'P': -4, 'S': -1, 'R': -4,
              'T': -1, 'W': -5, 'V': -1, 'Y': -3, 'X': -1, 'Z': -3},
        'B': {'*': -5, 'A': -2, 'C': -3, 'B': 6, 'E': 1, 'D': 6, 'G': -1,
              'F': -4, 'I': -4, 'H': 0, 'K': 0, 'M': -3, 'L': -4, 'N': 5,
              'Q': 0, 'P': -2, 'S': 0, 'R': -1, 'T': 0, 'W': -5, 'V': -3,
              'Y': -3, 'X': -1, 'Z': 1},
        'E': {'*': -5, 'A': -1, 'C': -3, 'B': 1, 'E': 6, 'D': 2, 'G': -3,
              'F': -3, 'I': -4, 'H': 0, 'K': 1, 'M': -2, 'L': -3, 'N': 0,
              'Q': 2, 'P': -1, 'S': -1, 'R': 0, 'T': -1, 'W': -3, 'V': -3,
              'Y': -2, 'X': -1, 'Z': 5},
        'D': {'*': -5, 'A': -2, 'C': -4, 'B': 6, 'E': 2, 'D': 8, 'G': -1,
              'F': -5, 'I': -4, 'H': -1, 'K': -1, 'M': -4, 'L': -4, 'N': 2,
              'Q': 0, 'P': -1, 'S': 0, 'R': -2, 'T': -1, 'W': -5, 'V': -4,
              'Y': -3, 'X': -1, 'Z': 1},
        'G': {'*': -5, 'A': 0, 'C': -3, 'B': -1, 'E': -3, 'D': -1, 'G': 8,
              'F': -4, 'I': -4, 'H': -2, 'K': -2, 'M': -3, 'L': -4, 'N': 0,
              'Q': -2, 'P': -2, 'S': 0, 'R': -3, 'T': -2, 'W': -3, 'V': -4,
              'Y': -3, 'X': -1, 'Z': -2},
        'F': {'*': -5, 'A': -3, 'C': -2, 'B': -4, 'E': -3, 'D': -5,
              'G': -4, 'F': 8, 'I': 0, 'H': -1, 'K': -4, 'M': 0, 'L': 1,
              'N': -4, 'Q': -4, 'P': -4, 'S': -3, 'R': -3, 'T': -2, 'W': 1,
              'V': -1, 'Y': 4, 'X': -1, 'Z': -4},
        'I': {'*': -5, 'A': -1, 'C': -2, 'B': -4, 'E': -4, 'D': -4,
              'G': -4, 'F': 0, 'I': 5, 'H': -4, 'K': -3, 'M': 2, 'L': 2,
              'N': -3, 'Q': -3, 'P': -3, 'S': -3, 'R': -4, 'T': -1,
              'W': -3, 'V': 4, 'Y': -1, 'X': -1, 'Z': -3},
        'H': {'*': -5, 'A': -2, 'C': -3, 'B': 0, 'E': 0, 'D': -1, 'G': -2,
              'F': -1, 'I': -4, 'H': 10, 'K': 0, 'M': -1, 'L': -3, 'N': 1,
              'Q': 1, 'P': -2, 'S': -1, 'R': 0, 'T': -2, 'W': -3, 'V': -4,
              'Y': 2, 'X': -1, 'Z': 0},
        'K': {'*': -5, 'A': -1, 'C': -3, 'B': 0, 'E': 1, 'D': -1, 'G': -2,
              'F': -4, 'I': -3, 'H': 0, 'K': 6, 'M': -2, 'L': -3, 'N': 0,
              'Q': 2, 'P': -1, 'S': 0, 'R': 3, 'T': -1, 'W': -3, 'V': -3,
              'Y': -2, 'X': -1, 'Z': 1},
        'M': {'*': -5, 'A': -1, 'C': -2, 'B': -3, 'E': -2, 'D': -4,
              'G': -3, 'F': 0, 'I': 2, 'H': -1, 'K': -2, 'M': 7, 'L': 3,
              'N': -2, 'Q': 0, 'P': -3, 'S': -2, 'R': -2, 'T': -1, 'W': -1,
              'V': 1, 'Y': 0, 'X': -1, 'Z': -1},
        'L': {'*': -5, 'A': -2, 'C': -2, 'B': -4, 'E': -3, 'D': -4,
              'G': -4, 'F': 1, 'I': 2, 'H': -3, 'K': -3, 'M': 3, 'L': 5,
              'N': -4, 'Q': -2, 'P': -4, 'S': -3, 'R': -3, 'T': -1,
              'W': -2, 'V': 1, 'Y': -1, 'X': -1, 'Z': -3},
        'N': {'*': -5, 'A': -1, 'C': -2, 'B': 5, 'E': 0, 'D': 2, 'G': 0,
              'F': -4, 'I': -3, 'H': 1, 'K': 0, 'M': -2, 'L': -4, 'N': 7,
              'Q': 0, 'P': -2, 'S': 1, 'R': -1, 'T': 0, 'W': -4, 'V': -3,
              'Y': -2, 'X': -1, 'Z': 0},
        'Q': {'*': -5, 'A': -1, 'C': -3, 'B': 0, 'E': 2, 'D': 0, 'G': -2,
              'F': -4, 'I': -3, 'H': 1, 'K': 2, 'M': 0, 'L': -2, 'N': 0,
              'Q': 7, 'P': -1, 'S': 0, 'R': 1, 'T': -1, 'W': -1, 'V': -3,
              'Y': -1, 'X': -1, 'Z': 4},
        'P': {'*': -5, 'A': -1, 'C': -4, 'B': -2, 'E': -1, 'D': -1,
              'G': -2, 'F': -4, 'I': -3, 'H': -2, 'K': -1, 'M': -3,
              'L': -4, 'N': -2, 'Q': -1, 'P': 10, 'S': -1, 'R': -3,
              'T': -1, 'W': -4, 'V': -3, 'Y': -3, 'X': -1, 'Z': -1},
        'S': {'*': -5, 'A': 1, 'C': -1, 'B': 0, 'E': -1, 'D': 0, 'G': 0,
              'F': -3, 'I': -3, 'H': -1, 'K': 0, 'M': -2, 'L': -3, 'N': 1,
              'Q': 0, 'P': -1, 'S': 5, 'R': -1, 'T': 2, 'W': -4, 'V': -2,
              'Y': -2, 'X': -1, 'Z': 0},
        'R': {'*': -5, 'A': -2, 'C': -4, 'B': -1, 'E': 0, 'D': -2, 'G': -3,
              'F': -3, 'I': -4, 'H': 0, 'K': 3, 'M': -2, 'L': -3, 'N': -1,
              'Q': 1, 'P': -3, 'S': -1, 'R': 7, 'T': -1, 'W': -3, 'V': -3,
              'Y': -1, 'X': -1, 'Z': 0},
        'T': {'*': -5, 'A': 0, 'C': -1, 'B': 0, 'E': -1, 'D': -1, 'G': -2,
              'F': -2, 'I': -1, 'H': -2, 'K': -1, 'M': -1, 'L': -1, 'N': 0,
              'Q': -1, 'P': -1, 'S': 2, 'R': -1, 'T': 5, 'W': -3, 'V': 0,
              'Y': -2, 'X': -1, 'Z': -1},
        'W': {'*': -5, 'A': -3, 'C': -5, 'B': -5, 'E': -3, 'D': -5,
              'G': -3, 'F': 1, 'I': -3, 'H': -3, 'K': -3, 'M': -1, 'L': -2,
              'N': -4, 'Q': -1, 'P': -4, 'S': -4, 'R': -3, 'T': -3,
              'W': 15, 'V': -3, 'Y': 2, 'X': -1, 'Z': -2},
        'V': {'*': -5, 'A': 0, 'C': -1, 'B': -3, 'E': -3, 'D': -4, 'G': -4,
              'F': -1, 'I': 4, 'H': -4, 'K': -3, 'M': 1, 'L': 1, 'N': -3,
              'Q': -3, 'P': -3, 'S': -2, 'R': -3, 'T': 0, 'W': -3, 'V': 5,
              'Y': -1, 'X': -1, 'Z': -3},
        'Y': {'*': -5, 'A': -2, 'C': -3, 'B': -3, 'E': -2, 'D': -3,
              'G': -3, 'F': 4, 'I': -1, 'H': 2, 'K': -2, 'M': 0, 'L': -1,
              'N': -2, 'Q': -1, 'P': -3, 'S': -2, 'R': -1, 'T': -2, 'W': 2,
              'V': -1, 'Y': 8, 'X': -1, 'Z': -2},
        'X': {'*': -5, 'A': -1, 'C': -1, 'B': -1, 'E': -1, 'D': -1,
              'G': -1, 'F': -1, 'I': -1, 'H': -1, 'K': -1, 'M': -1,
              'L': -1, 'N': -1, 'Q': -1, 'P': -1, 'S': -1, 'R': -1,
              'T': -1, 'W': -1, 'V': -1, 'Y': -1, 'X': -1, 'Z': -1},
        'Z': {'*': -5, 'A': -1, 'C': -3, 'B': 1, 'E': 5, 'D': 1, 'G': -2,
              'F': -4, 'I': -3, 'H': 0, 'K': 1, 'M': -1, 'L': -3, 'N': 0,
              'Q': 4, 'P': -1, 'S': 0, 'R': 0, 'T': -1, 'W': -2, 'V': -3,
              'Y': -2, 'X': -1, 'Z': 5}}
@experimental(as_of="0.4.0")
def local_pairwise_align_nucleotide(seq1, seq2, gap_open_penalty=5,
gap_extend_penalty=2,
match_score=2, mismatch_score=-3,
substitution_matrix=None):
"""Locally align exactly two nucleotide seqs with Smith-Waterman
Parameters
----------
seq1 : DNA or RNA
The first unaligned sequence.
seq2 : DNA or RNA
The second unaligned sequence.
gap_open_penalty : int or float, optional
Penalty for opening a gap (this is substracted from previous best
alignment score, so is typically positive).
gap_extend_penalty : int or float, optional
Penalty for extending a gap (this is substracted from previous best
alignment score, so is typically positive).
match_score : int or float, optional
The score to add for a match between a pair of bases (this is added
to the previous best alignment score, so is typically positive).
mismatch_score : int or float, optional
The score to add for a mismatch between a pair of bases (this is
added to the previous best alignment score, so is typically
negative).
substitution_matrix: 2D dict (or similar)
Lookup for substitution scores (these values are added to the
previous best alignment score). If provided, this overrides
``match_score`` and ``mismatch_score``.
Returns
-------
tuple
``TabularMSA`` object containing the aligned sequences, alignment score
(float), and start/end positions of each input sequence (iterable
of two-item tuples). Note that start/end positions are indexes into the
unaligned sequences.
See Also
--------
local_pairwise_align
local_pairwise_align_protein
skbio.alignment.local_pairwise_align_ssw
global_pairwise_align
global_pairwise_align_protein
global_pairwise_align_nucelotide
Notes
-----
Default ``match_score``, ``mismatch_score``, ``gap_open_penalty`` and
``gap_extend_penalty`` parameters are derived from the NCBI BLAST
Server [1]_.
References
----------
.. [1] http://blast.ncbi.nlm.nih.gov/Blast.cgi
"""
for seq in seq1, seq2:
if not isinstance(seq, (DNA, RNA)):
raise TypeError(
"`seq1` and `seq2` must be DNA or RNA, not type %r"
% type(seq).__name__)
# use the substitution matrix provided by the user, or compute from
# match_score and mismatch_score if a substitution matrix was not provided
if substitution_matrix is None:
substitution_matrix = \
make_identity_substitution_matrix(match_score, mismatch_score)
return local_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix)
@experimental(as_of="0.4.0")
def local_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
gap_extend_penalty=1,
substitution_matrix=None):
"""Locally align exactly two protein seqs with Smith-Waterman
Parameters
----------
seq1 : Protein
The first unaligned sequence.
seq2 : Protein
The second unaligned sequence.
gap_open_penalty : int or float, optional
Penalty for opening a gap (this is substracted from previous best
alignment score, so is typically positive).
gap_extend_penalty : int or float, optional
Penalty for extending a gap (this is substracted from previous best
alignment score, so is typically positive).
substitution_matrix: 2D dict (or similar), optional
Lookup for substitution scores (these values are added to the
previous best alignment score); default is BLOSUM 50.
Returns
-------
tuple
``TabularMSA`` object containing the aligned sequences, alignment score
(float), and start/end positions of each input sequence (iterable
of two-item tuples). Note that start/end positions are indexes into the
unaligned sequences.
See Also
--------
local_pairwise_align
local_pairwise_align_nucleotide
skbio.alignment.local_pairwise_align_ssw
global_pairwise_align
global_pairwise_align_protein
global_pairwise_align_nucelotide
Notes
-----
Default ``gap_open_penalty`` and ``gap_extend_penalty`` parameters are
derived from the NCBI BLAST Server [1]_.
The BLOSUM (blocks substitution matrices) amino acid substitution matrices
were originally defined in [2]_.
References
----------
.. [1] http://blast.ncbi.nlm.nih.gov/Blast.cgi
.. [2] Amino acid substitution matrices from protein blocks.
S Henikoff and J G Henikoff.
Proc Natl Acad Sci U S A. Nov 15, 1992; 89(22): 10915-10919.
"""
for seq in seq1, seq2:
if not isinstance(seq, Protein):
raise TypeError(
"`seq1` and `seq2` must be Protein, not type %r"
% type(seq).__name__)
if substitution_matrix is None:
substitution_matrix = blosum50
return local_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix)
@experimental(as_of="0.4.0")
def local_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix):
"""Locally align exactly two seqs with Smith-Waterman
Parameters
----------
seq1 : GrammaredSequence
The first unaligned sequence.
seq2 : GrammaredSequence
The second unaligned sequence.
gap_open_penalty : int or float
Penalty for opening a gap (this is substracted from previous best
alignment score, so is typically positive).
gap_extend_penalty : int or float
Penalty for extending a gap (this is substracted from previous best
alignment score, so is typically positive).
substitution_matrix: 2D dict (or similar)
Lookup for substitution scores (these values are added to the
previous best alignment score).
Returns
-------
tuple
``TabularMSA`` object containing the aligned sequences, alignment score
(float), and start/end positions of each input sequence (iterable
of two-item tuples). Note that start/end positions are indexes into the
unaligned sequences.
See Also
--------
local_pairwise_align_protein
local_pairwise_align_nucleotide
skbio.alignment.local_pairwise_align_ssw
global_pairwise_align
global_pairwise_align_protein
global_pairwise_align_nucelotide
Notes
-----
This algorithm was originally described in [1]_. The scikit-bio
implementation was validated against the EMBOSS water web server [2]_.
References
----------
.. [1] Identification of common molecular subsequences.
Smith TF, Waterman MS.
J Mol Biol. 1981 Mar 25;147(1):195-7.
.. [2] http://www.ebi.ac.uk/Tools/psa/emboss_water/
"""
warn("You're using skbio's python implementation of Smith-Waterman "
"alignment. This will be very slow (e.g., thousands of times slower) "
"than skbio.alignment.local_pairwise_align_ssw.",
EfficiencyWarning)
for seq in seq1, seq2:
if not isinstance(seq, GrammaredSequence):
raise TypeError(
"`seq1` and `seq2` must be %r subclasses, not type %r" %
(GrammaredSequence.__name__, type(seq).__name__))
if type(seq1) is not type(seq2):
raise TypeError(
"`seq1` and `seq2` must be the same type: %r != %r"
% (type(seq1).__name__, type(seq2).__name__))
seq1 = _coerce_alignment_input_type(seq1)
seq2 = _coerce_alignment_input_type(seq2)
score_matrix, traceback_matrix = _compute_score_and_traceback_matrices(
seq1, seq2, gap_open_penalty, gap_extend_penalty,
substitution_matrix, new_alignment_score=0.0,
init_matrices_f=_init_matrices_sw)
end_row_position, end_col_position =\
np.unravel_index(np.argmax(score_matrix), score_matrix.shape)
aligned1, aligned2, score, seq1_start_position, seq2_start_position = \
_traceback(traceback_matrix, score_matrix, seq1, seq2,
end_row_position, end_col_position)
start_end_positions = [(seq1_start_position, end_col_position-1),
(seq2_start_position, end_row_position-1)]
msa = TabularMSA(aligned1 + aligned2)
return msa, score, start_end_positions
@experimental(as_of="0.4.0")
def global_pairwise_align_nucleotide(seq1, seq2, gap_open_penalty=5,
gap_extend_penalty=2,
match_score=1, mismatch_score=-2,
substitution_matrix=None,
penalize_terminal_gaps=False):
"""Globally align nucleotide seqs or alignments with Needleman-Wunsch
Parameters
----------
seq1 : DNA, RNA, or TabularMSA[DNA|RNA]
The first unaligned sequence(s).
seq2 : DNA, RNA, or TabularMSA[DNA|RNA]
The second unaligned sequence(s).
gap_open_penalty : int or float, optional
Penalty for opening a gap (this is substracted from previous best
alignment score, so is typically positive).
gap_extend_penalty : int or float, optional
Penalty for extending a gap (this is substracted from previous best
alignment score, so is typically positive).
match_score : int or float, optional
The score to add for a match between a pair of bases (this is added
to the previous best alignment score, so is typically positive).
mismatch_score : int or float, optional
The score to add for a mismatch between a pair of bases (this is
added to the previous best alignment score, so is typically
negative).
substitution_matrix: 2D dict (or similar)
Lookup for substitution scores (these values are added to the
previous best alignment score). If provided, this overrides
``match_score`` and ``mismatch_score``.
penalize_terminal_gaps: bool, optional
If True, will continue to penalize gaps even after one sequence has
been aligned through its end. This behavior is true Needleman-Wunsch
alignment, but results in (biologically irrelevant) artifacts when
the sequences being aligned are of different length. This is ``False``
by default, which is very likely to be the behavior you want in all or
nearly all cases.
Returns
-------
tuple
``TabularMSA`` object containing the aligned sequences, alignment score
(float), and start/end positions of each input sequence (iterable
of two-item tuples). Note that start/end positions are indexes into the
unaligned sequences.
See Also
--------
local_pairwise_align
local_pairwise_align_protein
local_pairwise_align_nucleotide
skbio.alignment.local_pairwise_align_ssw
global_pairwise_align
global_pairwise_align_protein
Notes
-----
Default ``match_score``, ``mismatch_score``, ``gap_open_penalty`` and
``gap_extend_penalty`` parameters are derived from the NCBI BLAST
Server [1]_.
This function can be use to align either a pair of sequences, a pair of
alignments, or a sequence and an alignment.
References
----------
.. [1] http://blast.ncbi.nlm.nih.gov/Blast.cgi
"""
for seq in seq1, seq2:
if not isinstance(seq, (DNA, RNA, TabularMSA)):
raise TypeError(
"`seq1` and `seq2` must be DNA, RNA, or TabularMSA, not type "
"%r" % type(seq).__name__)
if isinstance(seq, TabularMSA) and not issubclass(seq.dtype,
(DNA, RNA)):
raise TypeError(
"`seq1` and `seq2` must be TabularMSA with DNA or RNA dtype, "
"not dtype %r" % seq.dtype.__name__)
# use the substitution matrix provided by the user, or compute from
# match_score and mismatch_score if a substitution matrix was not provided
if substitution_matrix is None:
substitution_matrix = \
make_identity_substitution_matrix(match_score, mismatch_score)
return global_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix,
penalize_terminal_gaps=penalize_terminal_gaps)
@experimental(as_of="0.4.0")
def global_pairwise_align_protein(seq1, seq2, gap_open_penalty=11,
gap_extend_penalty=1,
substitution_matrix=None,
penalize_terminal_gaps=False):
"""Globally align pair of protein seqs or alignments with Needleman-Wunsch
Parameters
----------
seq1 : Protein or TabularMSA[Protein]
The first unaligned sequence(s).
seq2 : Protein or TabularMSA[Protein]
The second unaligned sequence(s).
gap_open_penalty : int or float, optional
Penalty for opening a gap (this is substracted from previous best
alignment score, so is typically positive).
gap_extend_penalty : int or float, optional
Penalty for extending a gap (this is substracted from previous best
alignment score, so is typically positive).
substitution_matrix: 2D dict (or similar), optional
Lookup for substitution scores (these values are added to the
previous best alignment score); default is BLOSUM 50.
penalize_terminal_gaps: bool, optional
If True, will continue to penalize gaps even after one sequence has
been aligned through its end. This behavior is true Needleman-Wunsch
alignment, but results in (biologically irrelevant) artifacts when
the sequences being aligned are of different length. This is ``False``
by default, which is very likely to be the behavior you want in all or
nearly all cases.
Returns
-------
tuple
``TabularMSA`` object containing the aligned sequences, alignment score
(float), and start/end positions of each input sequence (iterable
of two-item tuples). Note that start/end positions are indexes into the
unaligned sequences.
See Also
--------
local_pairwise_align
local_pairwise_align_protein
local_pairwise_align_nucleotide
skbio.alignment.local_pairwise_align_ssw
global_pairwise_align
global_pairwise_align_nucelotide
Notes
-----
Default ``gap_open_penalty`` and ``gap_extend_penalty`` parameters are
derived from the NCBI BLAST Server [1]_.
The BLOSUM (blocks substitution matrices) amino acid substitution matrices
were originally defined in [2]_.
This function can be use to align either a pair of sequences, a pair of
alignments, or a sequence and an alignment.
References
----------
.. [1] http://blast.ncbi.nlm.nih.gov/Blast.cgi
.. [2] Amino acid substitution matrices from protein blocks.
S Henikoff and J G Henikoff.
Proc Natl Acad Sci U S A. Nov 15, 1992; 89(22): 10915-10919.
"""
for seq in seq1, seq2:
if not isinstance(seq, (Protein, TabularMSA)):
raise TypeError(
"`seq1` and `seq2` must be Protein or TabularMSA, not type %r"
% type(seq).__name__)
if isinstance(seq, TabularMSA) and not issubclass(seq.dtype, Protein):
raise TypeError(
"`seq1` and `seq2` must be TabularMSA with Protein dtype, "
"not dtype %r" % seq.dtype.__name__)
if substitution_matrix is None:
substitution_matrix = blosum50
return global_pairwise_align(seq1, seq2, gap_open_penalty,
gap_extend_penalty, substitution_matrix,
penalize_terminal_gaps=penalize_terminal_gaps)
@experimental(as_of="0.4.0")
def global_pairwise_align(seq1, seq2, gap_open_penalty, gap_extend_penalty,
                          substitution_matrix, penalize_terminal_gaps=False):
    """Globally align a pair of seqs or alignments with Needleman-Wunsch

    Parameters
    ----------
    seq1 : GrammaredSequence or TabularMSA
        The first unaligned sequence(s).
    seq2 : GrammaredSequence or TabularMSA
        The second unaligned sequence(s).
    gap_open_penalty : int or float
        Penalty for opening a gap (this is subtracted from previous best
        alignment score, so is typically positive).
    gap_extend_penalty : int or float
        Penalty for extending a gap (this is subtracted from previous best
        alignment score, so is typically positive).
    substitution_matrix : 2D dict (or similar)
        Lookup for substitution scores (these values are added to the
        previous best alignment score).
    penalize_terminal_gaps : bool, optional
        If True, will continue to penalize gaps even after one sequence has
        been aligned through its end. This behavior is true Needleman-Wunsch
        alignment, but results in (biologically irrelevant) artifacts when
        the sequences being aligned are of different length. This is
        ``False`` by default, which is very likely to be the behavior you
        want in all or nearly all cases.

    Returns
    -------
    tuple
        ``TabularMSA`` object containing the aligned sequences, alignment
        score (float), and start/end positions of each input sequence
        (iterable of two-item tuples). Note that start/end positions are
        indexes into the unaligned sequences.

    See Also
    --------
    local_pairwise_align
    local_pairwise_align_protein
    local_pairwise_align_nucleotide
    skbio.alignment.local_pairwise_align_ssw
    global_pairwise_align_protein
    global_pairwise_align_nucleotide

    Notes
    -----
    This algorithm (in a slightly more basic form) was originally described
    in [1]_. The scikit-bio implementation was validated against the
    EMBOSS needle web server [2]_.

    This function can be used to align either a pair of sequences, a pair of
    alignments, or a sequence and an alignment.

    References
    ----------
    .. [1] A general method applicable to the search for similarities in
       the amino acid sequence of two proteins.
       Needleman SB, Wunsch CD.
       J Mol Biol. 1970 Mar;48(3):443-53.
    .. [2] http://www.ebi.ac.uk/Tools/psa/emboss_needle/

    """
    # Pure-Python DP implementation; warn about its speed relative to
    # native implementations.
    warn("You're using skbio's python implementation of Needleman-Wunsch "
         "alignment. This is known to be very slow (e.g., thousands of times "
         "slower than a native C implementation). We'll be adding a faster "
         "version soon (see https://github.com/biocore/scikit-bio/issues/254 "
         "to track progress on this).", EfficiencyWarning)
    for seq in seq1, seq2:
        # We don't need to check the case where `seq` is a `TabularMSA` with a
        # dtype that isn't a subclass of `GrammaredSequence`, this is
        # guaranteed by `TabularMSA`.
        if not isinstance(seq, (GrammaredSequence, TabularMSA)):
            raise TypeError(
                "`seq1` and `seq2` must be GrammaredSequence subclasses or "
                "TabularMSA, not type %r" % type(seq).__name__)
    # Wrap bare sequences in single-row MSAs so the DP code handles both
    # cases uniformly.
    seq1 = _coerce_alignment_input_type(seq1)
    seq2 = _coerce_alignment_input_type(seq2)
    if seq1.dtype is not seq2.dtype:
        raise TypeError(
            "`seq1` and `seq2` must have the same dtype: %r != %r"
            % (seq1.dtype.__name__, seq2.dtype.__name__))
    # The choice of matrix initializer determines whether leading gaps are
    # penalized (true NW) or free.
    if penalize_terminal_gaps:
        init_matrices_f = _init_matrices_nw
    else:
        init_matrices_f = _init_matrices_nw_no_terminal_gap_penalty
    score_matrix, traceback_matrix = \
        _compute_score_and_traceback_matrices(
            seq1, seq2, gap_open_penalty, gap_extend_penalty,
            substitution_matrix, new_alignment_score=-np.inf,
            init_matrices_f=init_matrices_f,
            penalize_terminal_gaps=penalize_terminal_gaps)
    # Global alignment always traces back from the bottom-right cell.
    end_row_position = traceback_matrix.shape[0] - 1
    end_col_position = traceback_matrix.shape[1] - 1
    aligned1, aligned2, score, seq1_start_position, seq2_start_position = \
        _traceback(traceback_matrix, score_matrix, seq1, seq2,
                   end_row_position, end_col_position)
    # Start/end positions index into the unaligned inputs.
    start_end_positions = [(seq1_start_position, end_col_position-1),
                           (seq2_start_position, end_row_position-1)]
    msa = TabularMSA(aligned1 + aligned2)
    return msa, score, start_end_positions
@experimental(as_of="0.4.0")
def local_pairwise_align_ssw(sequence1, sequence2, **kwargs):
    """Align query and target sequences with Striped Smith-Waterman.

    Parameters
    ----------
    sequence1 : DNA, RNA, or Protein
        The first unaligned sequence
    sequence2 : DNA, RNA, or Protein
        The second unaligned sequence

    Returns
    -------
    tuple
        ``TabularMSA`` object containing the aligned sequences, alignment
        score (float), and start/end positions of each input sequence
        (iterable of two-item tuples). Note that start/end positions are
        indexes into the unaligned sequences.

    Notes
    -----
    This is a wrapper for the SSW package [1]_.

    For a complete list of optional keyword-arguments that can be provided,
    see ``skbio.alignment.StripedSmithWaterman``. The kwargs
    `suppress_sequences`, `zero_index`, and `protein` are always overridden
    here and will not have any effect.

    If an alignment does not meet a provided filter, `None` will be returned.

    References
    ----------
    .. [1] Zhao, Mengyao, Wan-Ping Lee, Erik P. Garrison, & Gabor T.
       Marth. "SSW Library: An SIMD Smith-Waterman C/C++ Library for
       Applications". PLOS ONE (2013). Web. 11 July 2014.
       http://www.plosone.org/article/info:doi/10.1371/journal.pone.0082138

    See Also
    --------
    skbio.alignment.StripedSmithWaterman

    """
    for seq in (sequence1, sequence2):
        if not isinstance(seq, (DNA, RNA, Protein)):
            raise TypeError(
                "`sequence1` and `sequence2` must be DNA, RNA, or Protein, "
                "not type %r" % type(seq).__name__)
    if type(sequence1) is not type(sequence2):
        raise TypeError(
            "`sequence1` and `sequence2` must be the same type: %r != %r"
            % (type(sequence1).__name__, type(sequence2).__name__))

    # The sequences are required to build the TabularMSA and positions are
    # reported zero-indexed, so these settings always override any
    # caller-supplied values.
    kwargs.update(
        suppress_sequences=False,
        zero_index=True,
        protein=isinstance(sequence1, Protein),
    )

    alignment = StripedSmithWaterman(str(sequence1), **kwargs)(str(sequence2))

    # A missing cigar means the alignment failed a caller-provided filter.
    if not alignment.cigar:
        return None

    start_end = None
    if alignment.query_begin != -1:
        start_end = [
            (alignment.query_begin, alignment.query_end),
            (alignment.target_begin, alignment.target_end_optimal),
        ]

    seq_constructor = type(sequence1)
    msa = TabularMSA([
        seq_constructor(alignment.aligned_query_sequence),
        seq_constructor(alignment.aligned_target_sequence),
    ])
    return msa, alignment.optimal_alignment_score, start_end
@deprecated(as_of="0.4.0", until="0.5.0",
            reason="Will be replaced by a SubstitutionMatrix class. To track "
                   "progress, see [#161]"
                   "(https://github.com/biocore/scikit-bio/issues/161).")
def make_identity_substitution_matrix(match_score, mismatch_score,
                                      alphabet='ACGTU'):
    """Generate substitution matrix where all matches are scored equally

    Parameters
    ----------
    match_score : int, float
        The score that should be assigned for all matches. This value is
        typically positive.
    mismatch_score : int, float
        The score that should be assigned for all mismatches. This value is
        typically negative.
    alphabet : iterable of str, optional
        The characters that should be included in the substitution matrix.

    Returns
    -------
    dict of dicts
        All characters in alphabet are keys in both dictionaries, so that any
        pair of characters can be looked up to get their match or mismatch
        score.

    """
    # match_score on the diagonal, mismatch_score everywhere else.
    return {
        c_outer: {c_inner: match_score if c_outer == c_inner
                  else mismatch_score
                  for c_inner in alphabet}
        for c_outer in alphabet
    }
# Functions from here allow for generalized (global or local) alignment. I
# will likely want to put these in a single object to make the naming a little
# less clunky.
def _coerce_alignment_input_type(seq):
    """Wrap a bare sequence in a single-row ``TabularMSA``; pass anything
    else through unchanged."""
    if not isinstance(seq, GrammaredSequence):
        return seq
    return TabularMSA([seq])
# Integer codes stored in the traceback matrix: each cell records which
# neighbor produced its score, or that the alignment ends at that cell.
_traceback_encoding = {'match': 1, 'vertical-gap': 2, 'horizontal-gap': 3,
                       'uninitialized': -1, 'alignment-end': 0}
def _init_matrices_sw(aln1, aln2, gap_open_penalty, gap_extend_penalty):
    """Create zeroed score and traceback matrices for Smith-Waterman.

    Rows correspond to positions in ``aln2`` plus a header row; columns to
    positions in ``aln1`` plus a header column. The first row and column
    are marked as alignment ends so a local traceback can stop there.
    """
    shape = (aln2.shape.position+1, aln1.shape.position+1)
    score_matrix = np.zeros(shape)
    # Fix: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``int`` is the equivalent dtype.
    traceback_matrix = np.zeros(shape, dtype=int)
    traceback_matrix += _traceback_encoding['uninitialized']
    traceback_matrix[0, :] = _traceback_encoding['alignment-end']
    traceback_matrix[:, 0] = _traceback_encoding['alignment-end']
    return score_matrix, traceback_matrix
def _init_matrices_nw(aln1, aln2, gap_open_penalty, gap_extend_penalty):
    """Create score and traceback matrices for full Needleman-Wunsch.

    The first row and column are seeded with affine terminal-gap penalties
    (``-open - (i-1)*extend``) and the corresponding gap traceback codes,
    so leading gaps in either input are penalized.
    """
    shape = (aln2.shape.position+1, aln1.shape.position+1)
    score_matrix = np.zeros(shape)
    # Fix: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``int`` is the equivalent dtype.
    traceback_matrix = np.zeros(shape, dtype=int)
    traceback_matrix += _traceback_encoding['uninitialized']
    traceback_matrix[0, 0] = _traceback_encoding['alignment-end']
    # cache some values for quicker access
    vgap = _traceback_encoding['vertical-gap']
    hgap = _traceback_encoding['horizontal-gap']
    for i in range(1, shape[0]):
        score_matrix[i, 0] = -gap_open_penalty - ((i-1) * gap_extend_penalty)
        traceback_matrix[i, 0] = vgap
    for i in range(1, shape[1]):
        score_matrix[0, i] = -gap_open_penalty - ((i-1) * gap_extend_penalty)
        traceback_matrix[0, i] = hgap
    return score_matrix, traceback_matrix
def _init_matrices_nw_no_terminal_gap_penalty(
        aln1, aln2, gap_open_penalty, gap_extend_penalty):
    """Create score and traceback matrices for Needleman-Wunsch without
    terminal gap penalties.

    Like ``_init_matrices_nw`` the first row/column record gap traceback
    codes, but their scores stay zero so leading gaps are free.
    """
    shape = (aln2.shape.position+1, aln1.shape.position+1)
    score_matrix = np.zeros(shape)
    # Fix: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``int`` is the equivalent dtype.
    traceback_matrix = np.zeros(shape, dtype=int)
    traceback_matrix += _traceback_encoding['uninitialized']
    traceback_matrix[0, 0] = _traceback_encoding['alignment-end']
    # cache some values for quicker access
    vgap = _traceback_encoding['vertical-gap']
    hgap = _traceback_encoding['horizontal-gap']
    for i in range(1, shape[0]):
        traceback_matrix[i, 0] = vgap
    for i in range(1, shape[1]):
        traceback_matrix[0, i] = hgap
    return score_matrix, traceback_matrix
def _compute_substitution_score(aln1_chars, aln2_chars, substitution_matrix,
gap_substitution_score, gap_chars):
substitution_score = 0
for aln1_char, aln2_char in product(aln1_chars, aln2_chars):
if aln1_char in gap_chars or aln2_char in gap_chars:
substitution_score += gap_substitution_score
else:
try:
substitution_score += \
substitution_matrix[aln1_char][aln2_char]
except KeyError:
offending_chars = \
[c for c in (aln1_char, aln2_char)
if c not in substitution_matrix]
raise ValueError(
"One of the sequences contains a character that is "
"not contained in the substitution matrix. Are you "
"using an appropriate substitution matrix for your "
"sequence type (e.g., a nucleotide substitution "
"matrix does not make sense for aligning protein "
"sequences)? Does your sequence contain invalid "
"characters? The offending character(s) is: "
" %s." % ', '.join(offending_chars))
substitution_score /= (len(aln1_chars) * len(aln2_chars))
return substitution_score
def _compute_score_and_traceback_matrices(
        aln1, aln2, gap_open_penalty, gap_extend_penalty, substitution_matrix,
        new_alignment_score=-np.inf, init_matrices_f=_init_matrices_nw,
        penalize_terminal_gaps=True, gap_substitution_score=0):
    """Return dynamic programming (score) and traceback matrices.

    ``aln1`` corresponds to the horizontal axis (columns) and ``aln2`` to
    the vertical axis (rows); both matrices have one extra leading row and
    column. ``new_alignment_score`` is the score assigned to starting a new
    alignment at a cell (``-inf`` for global alignment, ``0`` for local).

    A note on the ``penalize_terminal_gaps`` parameter. When this value is
    ``False``, this function is no longer true Smith-Waterman/Needleman-Wunsch
    scoring, but when ``True`` it can result in biologically irrelevant
    artifacts in Needleman-Wunsch (global) alignments. Specifically, if one
    sequence is longer than the other (e.g., if aligning a primer sequence to
    an amplification product, or searching for a gene in a genome) the shorter
    sequence will have a long gap inserted. The parameter is ``True`` by
    default (so that this function computes the score and traceback matrices as
    described by the original authors) but the global alignment wrappers pass
    ``False`` by default, so that the global alignment API returns the result
    that users are most likely to be looking for.
    """
    aln1_length = aln1.shape.position
    aln2_length = aln2.shape.position
    # cache some values for quicker/simpler access
    aend = _traceback_encoding['alignment-end']
    match = _traceback_encoding['match']
    vgap = _traceback_encoding['vertical-gap']
    hgap = _traceback_encoding['horizontal-gap']
    # Pair the "start a new alignment" score with its traceback code so it
    # can compete directly in the max computation below.
    new_alignment_score = (new_alignment_score, aend)
    # Initialize a matrix to use for scoring the alignment and for tracing
    # back the best alignment
    score_matrix, traceback_matrix = init_matrices_f(
        aln1, aln2, gap_open_penalty, gap_extend_penalty)
    # Iterate over the characters in aln2 (which corresponds to the vertical
    # sequence in the matrix)
    for aln2_pos, aln2_chars in enumerate(aln2.iter_positions(), 1):
        aln2_chars = str(aln2_chars)
        # Iterate over the characters in aln1 (which corresponds to the
        # horizontal sequence in the matrix)
        for aln1_pos, aln1_chars in enumerate(aln1.iter_positions(), 1):
            aln1_chars = str(aln1_chars)
            # compute the score for a match/mismatch
            substitution_score = _compute_substitution_score(
                aln1_chars, aln2_chars, substitution_matrix,
                gap_substitution_score, aln1.dtype.gap_chars)
            diag_score = \
                (score_matrix[aln2_pos-1, aln1_pos-1] + substitution_score,
                 match)
            # compute the score for adding a gap in aln2 (vertical)
            if not penalize_terminal_gaps and (aln1_pos == aln1_length):
                # we've reached the end of aln1, so adding vertical gaps
                # (which become gaps in aln1) should no longer
                # be penalized (if penalize_terminal_gaps == False)
                up_score = (score_matrix[aln2_pos-1, aln1_pos], vgap)
            elif traceback_matrix[aln2_pos-1, aln1_pos] == vgap:
                # gap extend, because the cell above was also a gap
                up_score = \
                    (score_matrix[aln2_pos-1, aln1_pos] - gap_extend_penalty,
                     vgap)
            else:
                # gap open, because the cell above was not a gap
                up_score = \
                    (score_matrix[aln2_pos-1, aln1_pos] - gap_open_penalty,
                     vgap)
            # compute the score for adding a gap in aln1 (horizontal)
            if not penalize_terminal_gaps and (aln2_pos == aln2_length):
                # we've reached the end of aln2, so adding horizontal gaps
                # (which become gaps in aln2) should no longer
                # be penalized (if penalize_terminal_gaps == False)
                left_score = (score_matrix[aln2_pos, aln1_pos-1], hgap)
            elif traceback_matrix[aln2_pos, aln1_pos-1] == hgap:
                # gap extend, because the cell to the left was also a gap
                left_score = \
                    (score_matrix[aln2_pos, aln1_pos-1] - gap_extend_penalty,
                     hgap)
            else:
                # gap open, because the cell to the left was not a gap
                left_score = \
                    (score_matrix[aln2_pos, aln1_pos-1] - gap_open_penalty,
                     hgap)
            # identify the largest score, and use that information to populate
            # the score and traceback matrices
            best_score = _first_largest([new_alignment_score, left_score,
                                         diag_score, up_score])
            score_matrix[aln2_pos, aln1_pos] = best_score[0]
            traceback_matrix[aln2_pos, aln1_pos] = best_score[1]
    return score_matrix, traceback_matrix
def _traceback(traceback_matrix, score_matrix, aln1, aln2, start_row,
               start_col):
    """Follow the traceback matrix from (start_row, start_col) to an
    alignment end, reconstructing the aligned sequences.

    Returns the aligned rows of ``aln1`` and ``aln2`` (as lists of sequence
    objects of each alignment's dtype), the score at the starting cell, and
    the column/row where the traceback stopped (i.e., the start positions of
    the alignment in ``aln1`` and ``aln2`` respectively).
    """
    # cache some values for simpler reference
    aend = _traceback_encoding['alignment-end']
    match = _traceback_encoding['match']
    vgap = _traceback_encoding['vertical-gap']
    hgap = _traceback_encoding['horizontal-gap']
    gap_character = aln1.dtype.default_gap_char
    # initialize the result alignments
    aligned_seqs1 = [[] for e in range(aln1_sequence_count)]
    aln2_sequence_count = aln2.shape.sequence
    aligned_seqs2 = [[] for e in range(aln2_sequence_count)]
    current_row = start_row
    current_col = start_col
    best_score = score_matrix[current_row, current_col]
    current_value = None
    # Walk backwards through the matrix; characters are appended in reverse
    # order and flipped at the end.
    while current_value != aend:
        current_value = traceback_matrix[current_row, current_col]
        if current_value == match:
            for aligned_seq, input_seq in zip(aligned_seqs1, aln1):
                aligned_seq.append(str(input_seq[current_col-1]))
            for aligned_seq, input_seq in zip(aligned_seqs2, aln2):
                aligned_seq.append(str(input_seq[current_row-1]))
            current_row -= 1
            current_col -= 1
        elif current_value == vgap:
            # vertical gap: consume a position of aln2, gap in aln1
            for aligned_seq in aligned_seqs1:
                aligned_seq.append(gap_character)
            for aligned_seq, input_seq in zip(aligned_seqs2, aln2):
                aligned_seq.append(str(input_seq[current_row-1]))
            current_row -= 1
        elif current_value == hgap:
            # horizontal gap: consume a position of aln1, gap in aln2
            for aligned_seq, input_seq in zip(aligned_seqs1, aln1):
                aligned_seq.append(str(input_seq[current_col-1]))
            for aligned_seq in aligned_seqs2:
                aligned_seq.append(gap_character)
            current_col -= 1
        elif current_value == aend:
            # reached an alignment end; the while condition terminates
            continue
        else:
            raise ValueError(
                "Invalid value in traceback matrix: %s" % current_value)
    # Reverse the accumulated characters and wrap each row in a sequence
    # object of the alignment's dtype.
    for i in range(aln1_sequence_count):
        aligned_seq = ''.join(aligned_seqs1[i][::-1])
        constructor = aln1.dtype
        aligned_seqs1[i] = constructor(aligned_seq)
    for i in range(aln2_sequence_count):
        aligned_seq = ''.join(aligned_seqs2[i][::-1])
        constructor = aln2.dtype
        aligned_seqs2[i] = constructor(aligned_seq)
    return aligned_seqs1, aligned_seqs2, best_score, current_col, current_row
def _first_largest(scores):
""" Similar to max, but returns the first element achieving the high score
If max receives a tuple, it will break a tie for the highest value
of entry[i] with entry[i+1]. We don't want that here - to better match
with the results of other tools, we want to be able to define which
entry is returned in the case of a tie.
"""
result = scores[0]
for score, direction in scores[1:]:
if score > result[0]:
result = (score, direction)
return result
| {
"content_hash": "bbcced1e3977338905b2d0dd28b1fc6a",
"timestamp": "",
"source": "github",
"line_count": 1029,
"max_line_length": 79,
"avg_line_length": 42.2575315840622,
"alnum_prop": 0.5672331715842973,
"repo_name": "anderspitman/scikit-bio",
"id": "9669962e70df8fb10ea77017427dc67a7bd1194f",
"size": "43837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skbio/alignment/_pairwise.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "39087"
},
{
"name": "CSS",
"bytes": "4379"
},
{
"name": "Groff",
"bytes": "377"
},
{
"name": "Jupyter Notebook",
"bytes": "210926"
},
{
"name": "Makefile",
"bytes": "1054"
},
{
"name": "Python",
"bytes": "2502133"
}
],
"symlink_target": ""
} |
from traceback import format_exc
from logging import getLogger
# pylint: disable=F0401
from paste.deploy.converters import asbool
# pylint: enable=F0401
from simplejson import JSONEncoder
# pylint: disable=C0103
_json_encoder = JSONEncoder(encoding='utf-8', separators=(',',':'))
# pylint: enable=C0103
from turbulenz_local.lib.exceptions import PostOnlyException, ApiUnavailable, ApiNotImplemented, ApiException
LOG = getLogger(__name__)
class ErrorMiddleware(object):
"""
Catch errors and report.
"""
error_response = ['{"ok":false,"msg":"Request could not be processed!"}']
error_headers = [('Content-Type', 'application/json; charset=utf-8'),
('Content-Length', str(len(error_response[0])))]
postonly_response = ['{"ok":false,"msg":"Post Only!"}']
postonly_headers = [('Content-Type', 'application/json; charset=utf-8'),
('Cache-Control', 'no-store'),
('Content-Length', str(len(postonly_response[0]))),
('Allow', 'POST')]
def __init__(self, app, config):
self.app = app
self.config = config
def __call__(self, environ, start_response):
try:
# To see exceptions thrown above this call (i.e. higher in the middleware stack
# and exceptions in this file) see the devserver/devserver.log file
return self.app(environ, start_response)
except ApiUnavailable as e:
json_data = _json_encoder.encode(e.value)
msg = '{"ok":false,"msg":"Service Unavailable","data":%s}' % json_data
headers = [('Content-Type', 'application/json; charset=utf-8'),
('Content-Length', str(len(msg)))]
start_response('503 Service Unavailable', headers)
return [msg]
except ApiNotImplemented:
start_response('501 Not Implemented', self.error_headers)
return self.error_headers
except ApiException as e:
json_msg_data = _json_encoder.encode(e.value)
if e.json_data:
msg = '{"ok":false,"msg":%s,"data":%s}' % (json_msg_data, _json_encoder.encode(e.json_data))
else:
msg = '{"ok":false,"msg":%s}' % json_msg_data
headers = [('Content-Type', 'application/json; charset=utf-8'),
('Content-Length', str(len(msg)))]
start_response(e.status, headers)
return [msg]
except PostOnlyException:
start_response('405 Method Not Allowed', self.postonly_headers)
return self.postonly_response
except:
log_msg = 'Exception when processing request: %s' % environ['PATH_INFO']
trace_string = format_exc()
LOG.error(log_msg)
LOG.error(trace_string)
if asbool(self.config.get('debug')):
print log_msg
print trace_string
start_response('500 Internal Server Error', self.error_headers)
return self.error_response
| {
"content_hash": "409e8f1e618f777f2a57e92dbc8c20a3",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 109,
"avg_line_length": 41.54054054054054,
"alnum_prop": 0.5858815875081327,
"repo_name": "turbulenz/turbulenz_local",
"id": "8b9b712dfde2b042907b560fe1ad84e4577709d4",
"size": "3119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "turbulenz_local/middleware/error.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "282"
},
{
"name": "CSS",
"bytes": "29719"
},
{
"name": "HTML",
"bytes": "54841"
},
{
"name": "JavaScript",
"bytes": "200107"
},
{
"name": "Python",
"bytes": "459206"
}
],
"symlink_target": ""
} |
from contextlib import contextmanager
import filecmp
import os
import posix
import stat
import sys
import sysconfig
import time
import unittest
from ..helpers import st_mtime_ns
from ..xattr import get_all
# llfuse is an optional dependency; record whether it is available with
# nanosecond-precision mtime support so tests can adapt.
try:
    import llfuse
    # Does this version of llfuse support ns precision?
    have_fuse_mtime_ns = hasattr(llfuse.EntryAttributes, 'st_mtime_ns')
except ImportError:
    have_fuse_mtime_ns = False
# Feature flags describing what the current platform/Python can do; used by
# the directory-comparison helpers below.
has_lchflags = hasattr(os, 'lchflags')

# The mtime get/set precision varies on different OS and Python versions:
# futimens gives nanoseconds, utimes microseconds, plain utime only seconds.
if 'HAVE_FUTIMENS' in getattr(posix, '_have_functions', []):
    st_mtime_ns_round = 0
elif 'HAVE_UTIMES' in sysconfig.get_config_vars():
    st_mtime_ns_round = -6
else:
    st_mtime_ns_round = -9
if sys.platform.startswith('netbsd'):
    st_mtime_ns_round = -4  # only >1 microsecond resolution here?

# Bug fix: compare version_info tuples instead of the version string. The
# old string comparison (sys.version >= '3.3') is wrong on Python 3.10+
# because '3.10...' < '3.3' lexicographically.
has_mtime_ns = sys.version_info >= (3, 3)
utime_supports_fd = os.utime in getattr(os, 'supports_fd', {})
class BaseTestCase(unittest.TestCase):
    """
    Base class for borg's test suite: snake_case aliases for unittest's
    assertion helpers plus filesystem comparison utilities.
    """
    # snake_case aliases for the camelCase unittest assertion methods.
    assert_in = unittest.TestCase.assertIn
    assert_not_in = unittest.TestCase.assertNotIn
    assert_equal = unittest.TestCase.assertEqual
    assert_not_equal = unittest.TestCase.assertNotEqual
    assert_raises = unittest.TestCase.assertRaises
    assert_true = unittest.TestCase.assertTrue

    @contextmanager
    def assert_creates_file(self, path):
        """Assert `path` does not exist on entry and does exist on exit."""
        self.assert_true(not os.path.exists(path), '{} should not exist'.format(path))
        yield
        self.assert_true(os.path.exists(path), '{} should exist'.format(path))

    def assert_dirs_equal(self, dir1, dir2):
        """Recursively compare two directory trees, contents and metadata."""
        diff = filecmp.dircmp(dir1, dir2)
        self._assert_dirs_equal_cmp(diff)

    def _assert_dirs_equal_cmp(self, diff):
        # No entries may exist on only one side, differ, or be uncomparable.
        self.assert_equal(diff.left_only, [])
        self.assert_equal(diff.right_only, [])
        self.assert_equal(diff.diff_files, [])
        self.assert_equal(diff.funny_files, [])
        for filename in diff.common:
            path1 = os.path.join(diff.left, filename)
            path2 = os.path.join(diff.right, filename)
            s1 = os.lstat(path1)
            s2 = os.lstat(path2)
            # Assume path2 is on FUSE if st_dev is different
            fuse = s1.st_dev != s2.st_dev
            attrs = ['st_mode', 'st_uid', 'st_gid', 'st_rdev']
            if has_lchflags:
                attrs.append('st_flags')
            if not fuse or not os.path.isdir(path1):
                # dir nlink is always 1 on our fuse filesystem
                attrs.append('st_nlink')
            d1 = [filename] + [getattr(s1, a) for a in attrs]
            d2 = [filename] + [getattr(s2, a) for a in attrs]
            # ignore st_rdev if file is not a block/char device, fixes #203
            if not stat.S_ISCHR(d1[1]) and not stat.S_ISBLK(d1[1]):
                d1[4] = None
            if not stat.S_ISCHR(d2[1]) and not stat.S_ISBLK(d2[1]):
                d2[4] = None
            if not os.path.islink(path1) or utime_supports_fd:
                # Older versions of llfuse do not support ns precision properly
                # NOTE(review): when `fuse and not have_fuse_mtime_ns`, BOTH
                # the -4-rounded and the st_mtime_ns_round-rounded values are
                # appended — this looks like a missing `else`; confirm intent.
                if fuse and not have_fuse_mtime_ns:
                    d1.append(round(st_mtime_ns(s1), -4))
                    d2.append(round(st_mtime_ns(s2), -4))
                d1.append(round(st_mtime_ns(s1), st_mtime_ns_round))
                d2.append(round(st_mtime_ns(s2), st_mtime_ns_round))
            # Extended attributes must match as well.
            d1.append(get_all(path1, follow_symlinks=False))
            d2.append(get_all(path2, follow_symlinks=False))
            self.assert_equal(d1, d2)
        # Recurse into common subdirectories.
        for sub_diff in diff.subdirs.values():
            self._assert_dirs_equal_cmp(sub_diff)

    def wait_for_mount(self, path, timeout=5):
        """Wait until a filesystem is mounted on `path`
        """
        # Poll every 100ms until the mount appears or the deadline passes.
        timeout += time.time()
        while timeout > time.time():
            if os.path.ismount(path):
                return
            time.sleep(.1)
        raise Exception('wait_for_mount(%s) timeout' % path)
| {
"content_hash": "ede9b707e17327cd33827425c88a1a6e",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 86,
"avg_line_length": 37.73076923076923,
"alnum_prop": 0.6083078491335372,
"repo_name": "level323/borg",
"id": "cd790b57149091c7b61e98fbe63ec36d8ecc42cc",
"size": "3924",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "borg/testsuite/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "21707"
},
{
"name": "Python",
"bytes": "433887"
},
{
"name": "Shell",
"bytes": "1852"
}
],
"symlink_target": ""
} |
import logging
# Module-level logger shared by the functions in this example.
logger = logging.getLogger(__name__)


def configure_logging():
    """Initial definitions and configuration: attach a console handler with
    a timestamp/level format and enable DEBUG-level output."""
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(
        logging.Formatter('%(asctime)s | %(levelname)-8s | %(message)s'))
    logger.addHandler(console_handler)
    logger.setLevel("DEBUG")
def main():
    """Emit one message at each of the standard severity levels."""
    logger.log(logging.DEBUG, "Detalles de implementación...")
    logger.log(logging.INFO, "%s", "Mensajes informativos")
    logger.log(logging.WARNING, "Aviso")
    logger.log(logging.ERROR, "Error")
    logger.log(logging.CRITICAL, "Fallo crítico")
if __name__ == '__main__':
    # Configure logging first so every message from main() is emitted.
    configure_logging()
    main()
| {
"content_hash": "6d89ca41298e0972393699566f4bc6f7",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 80,
"avg_line_length": 23.033333333333335,
"alnum_prop": 0.6685962373371924,
"repo_name": "javiromero/pycones2017",
"id": "5161890f5dba6ce6281b0937fee0a98dbd42cb8d",
"size": "741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "01_levels.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1230675"
},
{
"name": "Python",
"bytes": "11864"
}
],
"symlink_target": ""
} |
"""Generates a toy v1 saved model for testing."""
import shutil
from absl import app
from absl import flags
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.saved_model import builder
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model import utils
# Command-line flag: destination directory for the generated SavedModel.
flags.DEFINE_string('saved_model_path', '', 'Path to save the model to.')
FLAGS = flags.FLAGS
def main(argv):
    """Build and export a toy v1 SavedModel whose output uses tf.cond."""
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')
    # Remove any previously generated model at the target path.
    # NOTE(review): raises if the path does not exist yet — presumably the
    # surrounding build step guarantees it exists; confirm before reuse.
    shutil.rmtree(FLAGS.saved_model_path)
    # Create the graph
    zero = constant_op.constant(0)
    one = variable_scope.get_variable(name='y', initializer=[1])
    neg_one = variable_scope.get_variable(name='z', initializer=[-1])
    x = array_ops.placeholder(dtypes.int32, shape=(), name='input')
    # r = int32(x > y) when x < 0, otherwise int32(x > z).
    r = control_flow_ops.cond(
        x < zero, lambda: math_ops.cast(math_ops.greater(x, one), dtypes.int32),
        lambda: math_ops.cast(math_ops.greater(x, neg_one), dtypes.int32))
    sess = session.Session()
    sess.run(variables.global_variables_initializer())
    sm_builder = builder.SavedModelBuilder(FLAGS.saved_model_path)
    tensor_info_x = utils.build_tensor_info(x)
    tensor_info_r = utils.build_tensor_info(r)
    # Single predict signature mapping input 'x' to output 'r'.
    func_signature = (
        signature_def_utils.build_signature_def(
            inputs={'x': tensor_info_x},
            outputs={'r': tensor_info_r},
            method_name=signature_constants.PREDICT_METHOD_NAME))
    # Register the signature under both 'serving_default' and the default
    # serving key, then write the model to disk.
    sm_builder.add_meta_graph_and_variables(
        sess, [tag_constants.SERVING],
        signature_def_map={
            'serving_default': func_signature,
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: func_signature,
        },
        strip_default_attrs=True)
    sm_builder.save()


if __name__ == '__main__':
    app.run(main)
| {
"content_hash": "4210e756698e0dc4e683d2e6fc4b9763",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 80,
"avg_line_length": 34.671875,
"alnum_prop": 0.7278053177106805,
"repo_name": "Intel-Corporation/tensorflow",
"id": "a425b6c6e2fed5f1afa0b636f9ac15d87339cbc6",
"size": "2928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/core/tfrt/saved_model/tests/gen_if_v1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "183416"
},
{
"name": "C++",
"bytes": "24549804"
},
{
"name": "CMake",
"bytes": "160888"
},
{
"name": "Go",
"bytes": "849081"
},
{
"name": "HTML",
"bytes": "681293"
},
{
"name": "Java",
"bytes": "307123"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37393"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64142"
},
{
"name": "Protocol Buffer",
"bytes": "218430"
},
{
"name": "Python",
"bytes": "21875003"
},
{
"name": "Shell",
"bytes": "337846"
},
{
"name": "TypeScript",
"bytes": "849555"
}
],
"symlink_target": ""
} |
"""Test the -alertnotify, -blocknotify and -walletnotify options."""
import os
from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE, keyhash_to_p2pkh
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
hex_str_to_bytes,
)
# Character ranges used to build a wallet name that stresses filename
# handling in the notify options.
# Linux allow all characters other than \x00
# Windows disallow control characters (0-31) and /\?%:|"<>
FILE_CHAR_START = 32 if os.name == 'nt' else 1
FILE_CHAR_END = 128
FILE_CHARS_DISALLOWED = '/\\?%*:|"<>' if os.name == 'nt' else '/'
def notify_outputname(walletname, txid):
    """Return the filename -walletnotify writes: the txid alone on Windows,
    '<wallet>_<txid>' elsewhere."""
    if os.name == 'nt':
        return txid
    return '{}_{}'.format(walletname, txid)
class NotificationsTest(BitcoinTestFramework):
def set_test_params(self):
    # Two nodes on a fresh (empty) chain so the notification counts
    # asserted later are deterministic.
    self.num_nodes = 2
    self.setup_clean_chain = True
def setup_network(self):
    """Create the notification output dirs and wire up the notify options
    before starting the nodes."""
    # Wallet name containing every allowed filename character, to exercise
    # %w substitution with unusual names.
    self.wallet = ''.join(chr(i) for i in range(FILE_CHAR_START, FILE_CHAR_END) if chr(i) not in FILE_CHARS_DISALLOWED)
    self.alertnotify_dir = os.path.join(self.options.tmpdir, "alertnotify")
    self.blocknotify_dir = os.path.join(self.options.tmpdir, "blocknotify")
    self.walletnotify_dir = os.path.join(self.options.tmpdir, "walletnotify")
    os.mkdir(self.alertnotify_dir)
    os.mkdir(self.blocknotify_dir)
    os.mkdir(self.walletnotify_dir)
    # -alertnotify and -blocknotify on node0, walletnotify on node1
    self.extra_args = [[
        "-alertnotify=echo > {}".format(os.path.join(self.alertnotify_dir, '%s')),
        "-blocknotify=echo > {}".format(os.path.join(self.blocknotify_dir, '%s')),
    ], [
        "-rescan",
        "-walletnotify=echo > {}".format(os.path.join(self.walletnotify_dir, notify_outputname('%w', '%s'))),
    ]]
    self.wallet_names = [self.default_wallet_name, self.wallet]
    super().setup_network()
def run_test(self):
self.log.info("test -blocknotify")
block_count = 10
blocks = self.nodes[1].generatetoaddress(block_count, self.nodes[1].getnewaddress() if self.is_wallet_compiled() else ADDRESS_BCRT1_UNSPENDABLE)
# wait at most 10 seconds for expected number of files before reading the content
self.wait_until(lambda: len(os.listdir(self.blocknotify_dir)) == block_count, timeout=10)
# directory content should equal the generated blocks hashes
assert_equal(sorted(blocks), sorted(os.listdir(self.blocknotify_dir)))
if self.is_wallet_compiled():
self.log.info("test -walletnotify")
# wait at most 10 seconds for expected number of files before reading the content
self.wait_until(lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10)
# directory content should equal the generated transaction hashes
txids_rpc = list(map(lambda t: notify_outputname(self.wallet, t['txid']), self.nodes[1].listtransactions("*", block_count)))
assert_equal(sorted(txids_rpc), sorted(os.listdir(self.walletnotify_dir)))
self.stop_node(1)
for tx_file in os.listdir(self.walletnotify_dir):
os.remove(os.path.join(self.walletnotify_dir, tx_file))
self.log.info("test -walletnotify after rescan")
# restart node to rescan to force wallet notifications
self.start_node(1)
self.connect_nodes(0, 1)
self.wait_until(lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10)
# directory content should equal the generated transaction hashes
txids_rpc = list(map(lambda t: notify_outputname(self.wallet, t['txid']), self.nodes[1].listtransactions("*", block_count)))
assert_equal(sorted(txids_rpc), sorted(os.listdir(self.walletnotify_dir)))
for tx_file in os.listdir(self.walletnotify_dir):
os.remove(os.path.join(self.walletnotify_dir, tx_file))
# Conflicting transactions tests. Give node 0 same wallet seed as
# node 1, generate spends from node 0, and check notifications
# triggered by node 1
self.log.info("test -walletnotify with conflicting transactions")
self.nodes[0].sethdseed(seed=self.nodes[1].dumpprivkey(keyhash_to_p2pkh(hex_str_to_bytes(self.nodes[1].getwalletinfo()['hdseedid'])[::-1])))
self.nodes[0].rescanblockchain()
self.nodes[0].generatetoaddress(100, ADDRESS_BCRT1_UNSPENDABLE)
self.sync_blocks()
# Generate transaction on node 0, sync mempools, and check for
# notification on node 1.
tx1 = self.nodes[0].sendtoaddress(address=ADDRESS_BCRT1_UNSPENDABLE, amount=1, replaceable=True)
assert_equal(tx1 in self.nodes[0].getrawmempool(), True)
self.sync_mempools()
self.expect_wallet_notify([tx1])
# Generate bump transaction, sync mempools, and check for bump1
# notification. In the future, per
# https://github.com/bitcoin/bitcoin/pull/9371, it might be better
# to have notifications for both tx1 and bump1.
bump1 = self.nodes[0].bumpfee(tx1)["txid"]
assert_equal(bump1 in self.nodes[0].getrawmempool(), True)
self.sync_mempools()
self.expect_wallet_notify([bump1])
# Add bump1 transaction to new block, checking for a notification
# and the correct number of confirmations.
self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_UNSPENDABLE)
self.sync_blocks()
self.expect_wallet_notify([bump1])
assert_equal(self.nodes[1].gettransaction(bump1)["confirmations"], 1)
# Generate a second transaction to be bumped.
tx2 = self.nodes[0].sendtoaddress(address=ADDRESS_BCRT1_UNSPENDABLE, amount=1, replaceable=True)
assert_equal(tx2 in self.nodes[0].getrawmempool(), True)
self.sync_mempools()
self.expect_wallet_notify([tx2])
# Bump tx2 as bump2 and generate a block on node 0 while
# disconnected, then reconnect and check for notifications on node 1
# about newly confirmed bump2 and newly conflicted tx2.
self.disconnect_nodes(0, 1)
bump2 = self.nodes[0].bumpfee(tx2)["txid"]
self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_UNSPENDABLE)
assert_equal(self.nodes[0].gettransaction(bump2)["confirmations"], 1)
assert_equal(tx2 in self.nodes[1].getrawmempool(), True)
self.connect_nodes(0, 1)
self.sync_blocks()
self.expect_wallet_notify([bump2, tx2])
assert_equal(self.nodes[1].gettransaction(bump2)["confirmations"], 1)
# TODO: add test for `-alertnotify` large fork notifications
def expect_wallet_notify(self, tx_ids):
self.wait_until(lambda: len(os.listdir(self.walletnotify_dir)) >= len(tx_ids), timeout=10)
assert_equal(sorted(notify_outputname(self.wallet, tx_id) for tx_id in tx_ids), sorted(os.listdir(self.walletnotify_dir)))
for tx_file in os.listdir(self.walletnotify_dir):
os.remove(os.path.join(self.walletnotify_dir, tx_file))
# Entry point when executed directly as a functional test script.
if __name__ == '__main__':
    NotificationsTest().main()
| {
"content_hash": "99cc0d38a2578ebd302fbed692b936d9",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 152,
"avg_line_length": 50.59722222222222,
"alnum_prop": 0.6437002470491353,
"repo_name": "jnewbery/bitcoin",
"id": "f2313bac13c868ed039d7e348cbdfd958424af3b",
"size": "7500",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/functional/feature_notifications.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28173"
},
{
"name": "C",
"bytes": "959143"
},
{
"name": "C++",
"bytes": "8134257"
},
{
"name": "CMake",
"bytes": "29132"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "M4",
"bytes": "218255"
},
{
"name": "Makefile",
"bytes": "124030"
},
{
"name": "Objective-C",
"bytes": "113876"
},
{
"name": "Objective-C++",
"bytes": "5497"
},
{
"name": "Python",
"bytes": "2246986"
},
{
"name": "QMake",
"bytes": "798"
},
{
"name": "Sage",
"bytes": "35184"
},
{
"name": "Scheme",
"bytes": "9339"
},
{
"name": "Shell",
"bytes": "166312"
}
],
"symlink_target": ""
} |
from setuptools import setup
# auto-convert README.md
# auto-convert README.md to reStructuredText for PyPI rendering; fall back to
# the raw Markdown when pypandoc (or pandoc itself) is unavailable.
try:
    import pypandoc
    long_description = pypandoc.convert('README.md', 'rst')
except (ImportError, OSError):
    # we'll just use the poorly formatted Markdown file instead
    # (use a context manager so the file handle is closed deterministically)
    with open('README.md') as readme_file:
        long_description = readme_file.read()

# Read the license text once, closing the handle afterwards (the original
# left both this and the README handle open).
with open('LICENSE') as license_file:
    license_text = license_file.read()

setup(
    name='wireless',
    version='0.3.3',
    description='A dead simple, cross-platform Python library to connect to ' +
                'wireless networks.',
    long_description=long_description,
    url='https://github.com/joshvillbrandt/wireless',
    author='Josh Villbrandt',
    author_email='josh@javconcepts.com',
    license=license_text,
    packages=['wireless'],
    setup_requires=[
        'tox',
        'nose',
        'flake8',
        'packaging'
    ],
    install_requires=[
    ],
    scripts=[],
    test_suite='tests',
    zip_safe=False
)
| {
"content_hash": "42b7138a365c180ba7b19a132dc501cb",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 79,
"avg_line_length": 25.87878787878788,
"alnum_prop": 0.639344262295082,
"repo_name": "joshvillbrandt/wireless",
"id": "fc0a98672febefc92bf5587debfa4abf2a8d9069",
"size": "877",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16541"
}
],
"symlink_target": ""
} |
"""Test the fundrawtransaction RPC."""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_fee_amount,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises_rpc_error,
connect_nodes,
count_bytes,
find_vout_for_address,
)
def get_unspent(listunspent, amount):
    """Return the first UTXO in *listunspent* whose 'amount' equals *amount*.

    Raises AssertionError when no matching UTXO exists.
    """
    match = next((utxo for utxo in listunspent if utxo['amount'] == amount), None)
    if match is None:
        raise AssertionError('Could not find unspent with amount={}'.format(amount))
    return match
class RawTransactionsTest(BitcoinTestFramework):
    """Functional tests for the fundrawtransaction RPC (4-node setup)."""
    def set_test_params(self):
        self.num_nodes = 4
        self.setup_clean_chain = True
        # This test isn't testing tx relay. Set whitelist on the peers for
        # instant tx relay.
        self.extra_args = [['-whitelist=noban@127.0.0.1']] * self.num_nodes
    def skip_test_if_missing_module(self):
        """Skip the whole test when the build has no wallet support."""
        self.skip_if_no_wallet()
    def setup_network(self):
        """Start the nodes and connect node0 to every other node (hub topology)."""
        self.setup_nodes()
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[1], 2)
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[0], 3)
    def run_test(self):
        """Prime fees and coin maturity, then run every sub-test in order.

        Later sub-tests depend on state created by earlier ones (e.g.
        test_change_position() sets up the watch-only funds used by
        test_watchonly()), so the order below matters.
        """
        self.log.info("Connect nodes, set fees, generate blocks, and sync")
        self.min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
        # This test is not meant to test fee estimation and we'd like
        # to be sure all txs are sent at a consistent desired feerate
        for node in self.nodes:
            node.settxfee(self.min_relay_tx_fee)
        # if the fee's positive delta is higher than this value tests will fail,
        # neg. delta always fail the tests.
        # The size of the signature of every input may be at most 2 bytes larger
        # than a minimum sized signature.
        # = 2 bytes * minRelayTxFeePerByte
        self.fee_tolerance = 2 * self.min_relay_tx_fee / 1000
        # 121 additional blocks make the first coinbases spendable (100-block maturity).
        self.nodes[2].generate(1)
        self.sync_all()
        self.nodes[0].generate(121)
        self.sync_all()
        self.test_change_position()
        self.test_simple()
        self.test_simple_two_coins()
        self.test_simple_two_outputs()
        self.test_change()
        self.test_no_change()
        self.test_invalid_option()
        self.test_invalid_change_address()
        self.test_valid_change_address()
        self.test_change_type()
        self.test_coin_selection()
        self.test_two_vin()
        self.test_two_vin_two_vout()
        self.test_invalid_input()
        self.test_fee_p2pkh()
        self.test_fee_p2pkh_multi_out()
        self.test_fee_p2sh()
        self.test_fee_4of5()
        self.test_spend_2of2()
        self.test_locked_wallet()
        self.test_many_inputs_fee()
        self.test_many_inputs_send()
        self.test_op_return()
        self.test_watchonly()
        self.test_all_watched_funds()
        self.test_option_feerate()
        self.test_address_reuse()
        self.test_option_subtract_fee_from_outputs()
        self.test_subtract_fee_with_presets()
    def test_change_position(self):
        """Ensure setting changePosition in fundraw with an exact match is handled properly."""
        self.log.info("Test fundrawtxn changePosition option")
        # Exact match (fee subtracted from the output) must produce no change output.
        rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
        rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
        assert_equal(rawmatch["changepos"], -1)
        # The remainder is setup for later sub-tests: import a watch-only key
        # on node3 and distribute small UTXOs to node2/node3.
        watchonly_address = self.nodes[0].getnewaddress()
        watchonly_pubkey = self.nodes[0].getaddressinfo(watchonly_address)["pubkey"]
        self.watchonly_amount = Decimal(200)
        self.nodes[3].importpubkey(watchonly_pubkey, "", True)
        self.watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, self.watchonly_amount)
        # Lock UTXO so nodes[0] doesn't accidentally spend it
        self.watchonly_vout = find_vout_for_address(self.nodes[0], self.watchonly_txid, watchonly_address)
        self.nodes[0].lockunspent(False, [{"txid": self.watchonly_txid, "vout": self.watchonly_vout}])
        self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), self.watchonly_amount / 10)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
        self.nodes[0].generate(1)
        self.sync_all()
    def test_simple(self):
        """Fund a 1-output transaction starting from an empty input list."""
        self.log.info("Test fundrawtxn")
        inputs  = [ ]
        outputs = { self.nodes[0].getnewaddress() : 1.0 }
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtx)
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        assert len(dec_tx['vin']) > 0 #test that we have enough inputs
    def test_simple_two_coins(self):
        """Fund an output larger than any single UTXO so two coins are needed."""
        self.log.info("Test fundrawtxn with 2 coins")
        inputs  = [ ]
        outputs = { self.nodes[0].getnewaddress() : 2.2 }
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtx)
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        assert len(dec_tx['vin']) > 0 #test if we have enough inputs
        assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
    def test_simple_two_outputs(self):
        """Fund a transaction that pays two different destination outputs."""
        self.log.info("Test fundrawtxn with 2 outputs")
        inputs  = [ ]
        outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtx)
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        for out in dec_tx['vout']:
            totalOut += out['value']
        assert len(dec_tx['vin']) > 0
        assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
    def test_change(self):
        """Fund with a pre-selected vin worth more than the output; expect change."""
        self.log.info("Test fundrawtxn with a vin > required amount")
        utx = get_unspent(self.nodes[2].listunspent(), 5)
        inputs  = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
        outputs = { self.nodes[0].getnewaddress() : 1.0 }
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        self.test_no_change_fee = fee  # Use the same fee for the next tx
        dec_tx  = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        for out in dec_tx['vout']:
            totalOut += out['value']
        assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
    def test_no_change(self):
        """Fund with an output sized so no change output is created (changepos == -1)."""
        self.log.info("Test fundrawtxn not having a change output")
        utx = get_unspent(self.nodes[2].listunspent(), 5)
        inputs  = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
        # Output amount chosen so the input is consumed entirely by
        # output + fee (within fee_tolerance) and no change is needed.
        outputs = {self.nodes[0].getnewaddress(): Decimal(5.0) - self.test_no_change_fee - self.fee_tolerance}
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx  = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        for out in dec_tx['vout']:
            totalOut += out['value']
        assert_equal(rawtxfund['changepos'], -1)
        assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
    def test_invalid_option(self):
        """Unknown option keys must be rejected with RPC error -3."""
        self.log.info("Test fundrawtxn with an invalid option")
        utx = get_unspent(self.nodes[2].listunspent(), 5)
        inputs  = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
        outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        assert_raises_rpc_error(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
        # reserveChangeKey was deprecated and is now removed
        assert_raises_rpc_error(-3, "Unexpected key reserveChangeKey", lambda: self.nodes[2].fundrawtransaction(hexstring=rawtx, options={'reserveChangeKey': True}))
    def test_invalid_change_address(self):
        """A malformed changeAddress must be rejected with RPC error -5."""
        self.log.info("Test fundrawtxn with an invalid change address")
        utx = get_unspent(self.nodes[2].listunspent(), 5)
        inputs  = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
        outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        assert_raises_rpc_error(-5, "changeAddress must be a valid bitcoin address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
    def test_valid_change_address(self):
        """changeAddress + changePosition must place change at the requested index."""
        self.log.info("Test fundrawtxn with a provided change address")
        utx = get_unspent(self.nodes[2].listunspent(), 5)
        inputs  = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
        outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        change = self.nodes[2].getnewaddress()
        # Position 2 is out of bounds for a tx that will have 2 outputs.
        assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
        dec_tx  = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        out = dec_tx['vout'][0]
        assert_equal(change, out['scriptPubKey']['addresses'][0])
    def test_change_type(self):
        """change_type option must control the script type of the change output."""
        self.log.info("Test fundrawtxn with a provided change type")
        utx = get_unspent(self.nodes[2].listunspent(), 5)
        inputs  = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
        outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[2].fundrawtransaction, rawtx, {'change_type': None})
        assert_raises_rpc_error(-5, "Unknown change type ''", self.nodes[2].fundrawtransaction, rawtx, {'change_type': ''})
        rawtx = self.nodes[2].fundrawtransaction(rawtx, {'change_type': 'bech32'})
        dec_tx = self.nodes[2].decoderawtransaction(rawtx['hex'])
        assert_equal('witness_v0_keyhash', dec_tx['vout'][rawtx['changepos']]['scriptPubKey']['type'])
    def test_coin_selection(self):
        """A pre-selected vin smaller than the output forces extra input selection."""
        self.log.info("Test fundrawtxn with a vin < required amount")
        utx = get_unspent(self.nodes[2].listunspent(), 1)
        inputs  = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
        outputs = { self.nodes[0].getnewaddress() : 1.0 }
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        # 4-byte version + 1-byte vin count + 36-byte prevout then script_len
        # Patch the hex to give the input a 1-byte "00" scriptSig so we can
        # later verify fundrawtransaction leaves it untouched.
        rawtx = rawtx[:82] + "0100" + rawtx[84:]
        dec_tx  = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
        # Should fail without add_inputs:
        assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx, {"add_inputs": False})
        # add_inputs is enabled by default
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        matchingOuts = 0
        for i, out in enumerate(dec_tx['vout']):
            totalOut += out['value']
            if out['scriptPubKey']['addresses'][0] in outputs:
                matchingOuts+=1
            else:
                assert_equal(i, rawtxfund['changepos'])
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
        assert_equal(matchingOuts, 1)
        assert_equal(len(dec_tx['vout']), 2)
    def test_two_vin(self):
        """Two pre-selected vins must both survive funding; change covers the rest."""
        self.log.info("Test fundrawtxn with 2 vins")
        utx = get_unspent(self.nodes[2].listunspent(), 1)
        utx2 = get_unspent(self.nodes[2].listunspent(), 5)
        inputs  = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
        outputs = { self.nodes[0].getnewaddress() : 6.0 }
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        # Should fail without add_inputs:
        assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx, {"add_inputs": False})
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {"add_inputs": True})
        dec_tx  = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        matchingOuts = 0
        for out in dec_tx['vout']:
            totalOut += out['value']
            if out['scriptPubKey']['addresses'][0] in outputs:
                matchingOuts+=1
        assert_equal(matchingOuts, 1)
        assert_equal(len(dec_tx['vout']), 2)
        matchingIns = 0
        for vinOut in dec_tx['vin']:
            for vinIn in inputs:
                if vinIn['txid'] == vinOut['txid']:
                    matchingIns+=1
        assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
    def test_two_vin_two_vout(self):
        """Two pre-selected vins and two destination vouts, plus a change output."""
        self.log.info("Test fundrawtxn with 2 vins and 2 vouts")
        utx = get_unspent(self.nodes[2].listunspent(), 1)
        utx2 = get_unspent(self.nodes[2].listunspent(), 5)
        inputs  = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
        outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        # Should fail without add_inputs:
        assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx, {"add_inputs": False})
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {"add_inputs": True})
        dec_tx  = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        matchingOuts = 0
        for out in dec_tx['vout']:
            totalOut += out['value']
            if out['scriptPubKey']['addresses'][0] in outputs:
                matchingOuts+=1
        assert_equal(matchingOuts, 2)
        assert_equal(len(dec_tx['vout']), 3)
    def test_invalid_input(self):
        """A vin referencing a nonexistent UTXO must fail with 'Insufficient funds'."""
        self.log.info("Test fundrawtxn with an invalid vin")
        inputs  = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
        outputs = { self.nodes[0].getnewaddress() : 1.0}
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
    def test_fee_p2pkh(self):
        """Compare fee of a standard pubkeyhash transaction."""
        self.log.info("Test fundrawtxn p2pkh fee")
        inputs = []
        outputs = {self.nodes[1].getnewaddress():1.1}
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[0].fundrawtransaction(rawtx)
        # Create same transaction over sendtoaddress.
        txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
        signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
        # Compare fee.
        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
        assert feeDelta >= 0 and feeDelta <= self.fee_tolerance
    def test_fee_p2pkh_multi_out(self):
        """Compare fee of a standard pubkeyhash transaction with multiple outputs."""
        self.log.info("Test fundrawtxn p2pkh fee with multiple outputs")
        inputs = []
        outputs = {
            self.nodes[1].getnewaddress():1.1,
            self.nodes[1].getnewaddress():1.2,
            self.nodes[1].getnewaddress():0.1,
            self.nodes[1].getnewaddress():1.3,
            self.nodes[1].getnewaddress():0.2,
            self.nodes[1].getnewaddress():0.3,
        }
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[0].fundrawtransaction(rawtx)
        # Create same transaction over sendtoaddress.
        txId = self.nodes[0].sendmany("", outputs)
        signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
        # Compare fee.
        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
        assert feeDelta >= 0 and feeDelta <= self.fee_tolerance
    def test_fee_p2sh(self):
        """Compare fee of a 2-of-2 multisig p2sh transaction."""
        # Create 2-of-2 addr.
        addr1 = self.nodes[1].getnewaddress()
        addr2 = self.nodes[1].getnewaddress()
        addr1Obj = self.nodes[1].getaddressinfo(addr1)
        addr2Obj = self.nodes[1].getaddressinfo(addr2)
        mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
        inputs = []
        outputs = {mSigObj:1.1}
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[0].fundrawtransaction(rawtx)
        # Create same transaction over sendtoaddress.
        txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
        signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
        # Compare fee.
        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
        assert feeDelta >= 0 and feeDelta <= self.fee_tolerance
    def test_fee_4of5(self):
        """Compare fee of a transaction paying a 4-of-5 multisig address."""
        self.log.info("Test fundrawtxn fee with 4-of-5 addresses")
        # Create 4-of-5 addr.
        addr1 = self.nodes[1].getnewaddress()
        addr2 = self.nodes[1].getnewaddress()
        addr3 = self.nodes[1].getnewaddress()
        addr4 = self.nodes[1].getnewaddress()
        addr5 = self.nodes[1].getnewaddress()
        addr1Obj = self.nodes[1].getaddressinfo(addr1)
        addr2Obj = self.nodes[1].getaddressinfo(addr2)
        addr3Obj = self.nodes[1].getaddressinfo(addr3)
        addr4Obj = self.nodes[1].getaddressinfo(addr4)
        addr5Obj = self.nodes[1].getaddressinfo(addr5)
        mSigObj = self.nodes[1].addmultisigaddress(
            4,
            [
                addr1Obj['pubkey'],
                addr2Obj['pubkey'],
                addr3Obj['pubkey'],
                addr4Obj['pubkey'],
                addr5Obj['pubkey'],
            ]
        )['address']
        inputs = []
        outputs = {mSigObj:1.1}
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[0].fundrawtransaction(rawtx)
        # Create same transaction over sendtoaddress.
        txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
        signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
        # Compare fee.
        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
        assert feeDelta >= 0 and feeDelta <= self.fee_tolerance
    def test_spend_2of2(self):
        """Spend a 2-of-2 multisig transaction over fundraw."""
        self.log.info("Test fundrawtxn spending 2-of-2 multisig")
        # Create 2-of-2 addr.
        addr1 = self.nodes[2].getnewaddress()
        addr2 = self.nodes[2].getnewaddress()
        addr1Obj = self.nodes[2].getaddressinfo(addr1)
        addr2Obj = self.nodes[2].getaddressinfo(addr2)
        mSigObj = self.nodes[2].addmultisigaddress(
            2,
            [
                addr1Obj['pubkey'],
                addr2Obj['pubkey'],
            ]
        )['address']
        # Send 1.2 BTC to msig addr.
        self.nodes[0].sendtoaddress(mSigObj, 1.2)
        self.nodes[0].generate(1)
        self.sync_all()
        oldBalance = self.nodes[1].getbalance()
        inputs = []
        outputs = {self.nodes[1].getnewaddress():1.1}
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[2].fundrawtransaction(rawtx)
        signedTx = self.nodes[2].signrawtransactionwithwallet(fundedTx['hex'])
        self.nodes[2].sendrawtransaction(signedTx['hex'])
        self.nodes[2].generate(1)
        self.sync_all()
        # Make sure funds are received at node1.
        assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
    def test_locked_wallet(self):
        """fundrawtransaction behavior on an encrypted wallet, locked and unlocked."""
        self.log.info("Test fundrawtxn with locked wallet")
        self.nodes[1].encryptwallet("test")
        # Drain the keypool.
        self.nodes[1].getnewaddress()
        self.nodes[1].getrawchangeaddress()
        inputs = []
        outputs = {self.nodes[0].getnewaddress():1.09999500}
        rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
        # fund a transaction that does not require a new key for the change output
        self.nodes[1].fundrawtransaction(rawtx)
        # fund a transaction that requires a new key for the change output
        # creating the key must be impossible because the wallet is locked
        outputs = {self.nodes[0].getnewaddress():1.1}
        rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
        assert_raises_rpc_error(-4, "Transaction needs a change address, but we can't generate it. Please call keypoolrefill first.", self.nodes[1].fundrawtransaction, rawtx)
        # Refill the keypool.
        self.nodes[1].walletpassphrase("test", 100)
        self.nodes[1].keypoolrefill(8) #need to refill the keypool to get an internal change address
        self.nodes[1].walletlock()
        assert_raises_rpc_error(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
        oldBalance = self.nodes[0].getbalance()
        inputs = []
        outputs = {self.nodes[0].getnewaddress():1.1}
        rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[1].fundrawtransaction(rawtx)
        # Now we need to unlock.
        self.nodes[1].walletpassphrase("test", 600)
        signedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
        self.nodes[1].sendrawtransaction(signedTx['hex'])
        self.nodes[1].generate(1)
        self.sync_all()
        # Make sure funds are received at node1.
        assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
    def test_many_inputs_fee(self):
        """Multiple (~19) inputs tx test | Compare fee."""
        self.log.info("Test fundrawtxn fee with many inputs")
        # Empty node1, send some small coins from node0 to node1.
        self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
        self.nodes[1].generate(1)
        self.sync_all()
        for i in range(0,20):
            self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
        self.nodes[0].generate(1)
        self.sync_all()
        # Fund a tx with ~20 small inputs.
        inputs = []
        outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
        rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[1].fundrawtransaction(rawtx)
        # Create same transaction over sendtoaddress.
        txId = self.nodes[1].sendmany("", outputs)
        signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
        # Compare fee.
        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
        assert feeDelta >= 0 and feeDelta <= self.fee_tolerance * 19  #~19 inputs
    def test_many_inputs_send(self):
        """Multiple (~19) inputs tx test | sign/send."""
        self.log.info("Test fundrawtxn sign+send with many inputs")
        # Again, empty node1, send some small coins from node0 to node1.
        self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
        self.nodes[1].generate(1)
        self.sync_all()
        for i in range(0,20):
            self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
        self.nodes[0].generate(1)
        self.sync_all()
        # Fund a tx with ~20 small inputs.
        oldBalance = self.nodes[0].getbalance()
        inputs = []
        outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
        rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[1].fundrawtransaction(rawtx)
        fundedAndSignedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
        self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
        self.nodes[1].generate(1)
        self.sync_all()
        assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
    def test_op_return(self):
        """Fund a transaction whose only output is an OP_RETURN and has no vin."""
        self.log.info("Test fundrawtxn with OP_RETURN and no vin")
        # Hand-built raw tx: version 1, 0 vins, 1 vout with an OP_RETURN 'test' payload.
        rawtx   = "0100000000010000000000000000066a047465737400000000"
        dec_tx  = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(len(dec_tx['vin']), 0)
        assert_equal(len(dec_tx['vout']), 1)
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
        assert_equal(len(dec_tx['vout']), 2) # one change output added
    def test_watchonly(self):
        """Fund from watch-only balance with the includeWatching option."""
        self.log.info("Test fundrawtxn using only watchonly")
        inputs = []
        outputs = {self.nodes[2].getnewaddress(): self.watchonly_amount / 2}
        rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
        result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
        res_dec = self.nodes[0].decoderawtransaction(result["hex"])
        assert_equal(len(res_dec["vin"]), 1)
        assert_equal(res_dec["vin"][0]["txid"], self.watchonly_txid)
        assert "fee" in result.keys()
        assert_greater_than(result["changepos"], -1)
    def test_all_watched_funds(self):
        """Fund a tx needing the full watch-only amount; sign cooperatively."""
        self.log.info("Test fundrawtxn using entirety of watched funds")
        inputs = []
        outputs = {self.nodes[2].getnewaddress(): self.watchonly_amount}
        rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
        # Backward compatibility test (2nd param is includeWatching).
        result = self.nodes[3].fundrawtransaction(rawtx, True)
        res_dec = self.nodes[0].decoderawtransaction(result["hex"])
        assert_equal(len(res_dec["vin"]), 2)
        assert res_dec["vin"][0]["txid"] == self.watchonly_txid or res_dec["vin"][1]["txid"] == self.watchonly_txid
        assert_greater_than(result["fee"], 0)
        assert_greater_than(result["changepos"], -1)
        assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], self.watchonly_amount / 10)
        # node3 holds only the watch-only pubkey, so it cannot fully sign;
        # node0 (which owns the key) completes the signature.
        signedtx = self.nodes[3].signrawtransactionwithwallet(result["hex"])
        assert not signedtx["complete"]
        signedtx = self.nodes[0].signrawtransactionwithwallet(signedtx["hex"])
        assert signedtx["complete"]
        self.nodes[0].sendrawtransaction(signedtx["hex"])
        self.nodes[0].generate(1)
        self.sync_all()
def test_option_feerate(self):
self.log.info("Test fundrawtxn feeRate option")
# Make sure there is exactly one input so coin selection can't skew the result.
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[3].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx) # uses self.min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2 * self.min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10 * self.min_relay_tx_fee})
assert_raises_rpc_error(-4, "Fee exceeds maximum configured by -maxtxfee", self.nodes[3].fundrawtransaction, rawtx, {"feeRate": 1})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
def test_address_reuse(self):
    """Test no address reuse occurs."""
    self.log.info("Test fundrawtxn does not reuse addresses")

    rawtx = self.nodes[3].createrawtransaction(inputs=[], outputs={self.nodes[3].getnewaddress(): 1})
    result3 = self.nodes[3].fundrawtransaction(rawtx)
    res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
    # Find the change output: the payment output is exactly 1, so only the
    # change output can exceed 1.0.
    changeaddress = ""
    for out in res_dec['vout']:
        if out['value'] > 1.0:
            changeaddress += out['scriptPubKey']['addresses'][0]
    assert changeaddress != ""

    nextaddr = self.nodes[3].getnewaddress()
    # Now the change address key should be removed from the keypool,
    # so a fresh address must differ from the change address.
    assert changeaddress != nextaddr
def test_option_subtract_fee_from_outputs(self):
    """Exercise the subtractFeeFromOutputs option.

    Part 1: single-output transactions at two fee rates, with and
    without fee subtraction. Part 2: a four-output transaction where the
    fee is split across outputs 0, 2 and 3 but not output 1.
    """
    self.log.info("Test fundrawtxn subtractFeeFromOutputs option")

    # Make sure there is exactly one input so coin selection can't skew the result.
    assert_equal(len(self.nodes[3].listunspent(1)), 1)

    inputs = []
    outputs = {self.nodes[2].getnewaddress(): 1}
    rawtx = self.nodes[3].createrawtransaction(inputs, outputs)

    result = [self.nodes[3].fundrawtransaction(rawtx),  # uses self.min_relay_tx_fee (set by settxfee)
              self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}),  # empty subtraction list
              self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}),  # uses self.min_relay_tx_fee (set by settxfee)
              self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2 * self.min_relay_tx_fee}),
              self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2 * self.min_relay_tx_fee, "subtractFeeFromOutputs": [0]}),]

    dec_tx = [self.nodes[3].decoderawtransaction(tx_['hex']) for tx_ in result]
    # With two outputs, the non-change output index is 1 - changepos.
    output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
    change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]

    # Same fee whether the subtraction list is absent, empty, or [0].
    assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
    assert_equal(result[3]['fee'], result[4]['fee'])
    assert_equal(change[0], change[1])
    assert_equal(output[0], output[1])
    # Subtracting moves the fee out of the payment output into change.
    assert_equal(output[0], output[2] + result[2]['fee'])
    assert_equal(change[0] + result[0]['fee'], change[2])
    assert_equal(output[3], output[4] + result[4]['fee'])
    assert_equal(change[3] + result[3]['fee'], change[4])

    inputs = []
    outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
    rawtx = self.nodes[3].createrawtransaction(inputs, outputs)

    result = [self.nodes[3].fundrawtransaction(rawtx),
              # Split the fee between outputs 0, 2, and 3, but not output 1.
              self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]

    dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
              self.nodes[3].decoderawtransaction(result[1]['hex'])]

    # Nested list of non-change output amounts for each transaction.
    output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
              for d, r in zip(dec_tx, result)]

    # List of differences in output amounts between normal and subtractFee transactions.
    share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]

    # Output 1 is the same in both transactions.
    assert_equal(share[1], 0)

    # The other 3 outputs are smaller as a result of subtractFeeFromOutputs.
    assert_greater_than(share[0], 0)
    assert_greater_than(share[2], 0)
    assert_greater_than(share[3], 0)

    # Outputs 2 and 3 take the same share of the fee.
    assert_equal(share[2], share[3])

    # Output 0 takes at least as much share of the fee, and no more than 2
    # satoshis more, than outputs 2 and 3.
    assert_greater_than_or_equal(share[0], share[2])
    assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])

    # The fee is the same in both transactions.
    assert_equal(result[0]['fee'], result[1]['fee'])

    # The total subtracted from the outputs is equal to the fee.
    assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
def test_subtract_fee_with_presets(self):
    """Subtract fee from an output while the inputs are preset.

    The preset 10-coin input exceeds the 5-coin payment, so funding
    should succeed and the resulting transaction must be fully signable
    and relayable by node 0.
    """
    self.log.info("Test fundrawtxn subtract fee from outputs with preset inputs that are sufficient")

    addr = self.nodes[0].getnewaddress()
    txid = self.nodes[0].sendtoaddress(addr, 10)
    vout = find_vout_for_address(self.nodes[0], txid, addr)

    rawtx = self.nodes[0].createrawtransaction([{'txid': txid, 'vout': vout}], [{self.nodes[0].getnewaddress(): 5}])
    fundedtx = self.nodes[0].fundrawtransaction(rawtx, {'subtractFeeFromOutputs': [0]})
    signedtx = self.nodes[0].signrawtransactionwithwallet(fundedtx['hex'])
    self.nodes[0].sendrawtransaction(signedtx['hex'])
# Script entry point: run the functional test when executed directly.
if __name__ == '__main__':
    RawTransactionsTest().main()
| {
"content_hash": "7bb7c36dff765b21ffbe2860383f7220",
"timestamp": "",
"source": "github",
"line_count": 769,
"max_line_length": 174,
"avg_line_length": 44.77113133940182,
"alnum_prop": 0.6216271166748961,
"repo_name": "midnightmagic/bitcoin",
"id": "57c8f511acd1f2766296480680a42b4eaeacf95c",
"size": "34643",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/rpc_fundrawtransaction.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "534165"
},
{
"name": "C++",
"bytes": "3705952"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "19797"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "Makefile",
"bytes": "65360"
},
{
"name": "Objective-C",
"bytes": "2022"
},
{
"name": "Objective-C++",
"bytes": "7238"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "447889"
},
{
"name": "QMake",
"bytes": "2019"
},
{
"name": "Shell",
"bytes": "40702"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from .mock import _all_auth_mock_
from .mock import public_incursion
from .mock import public_incursion_no_expires
from .mock import public_incursion_no_expires_second
from .mock import public_incursion_warning
from .mock import public_incursion_server_error
from esipy import App
from esipy import EsiClient
from esipy import EsiSecurity
from esipy.cache import BaseCache
from esipy.cache import DictCache
from esipy.cache import DummyCache
from requests.adapters import HTTPAdapter
import httmock
import mock
import time
import six
import unittest
import warnings
import logging
# Silence pyswagger: at its default level it logs far more detail than
# these tests need, so raise its threshold to ERROR.
pyswagger_logger = logging.getLogger('pyswagger')
pyswagger_logger.setLevel(logging.ERROR)
class TestEsiPy(unittest.TestCase):
    """Integration-style tests for EsiClient against mocked HTTP endpoints."""

    # Fake SSO/ESI credentials shared by all test cases.
    CALLBACK_URI = "https://foo.bar/baz/callback"
    LOGIN_EVE = "https://login.eveonline.com"
    OAUTH_VERIFY = "%s/oauth/verify" % LOGIN_EVE
    OAUTH_TOKEN = "%s/oauth/token" % LOGIN_EVE
    CLIENT_ID = 'foo'
    SECRET_KEY = 'bar'
    BASIC_TOKEN = six.u('Zm9vOmJhcg==')  # base64("foo:bar")
    SECURITY_NAME = 'evesso'

    @mock.patch('six.moves.urllib.request.urlopen')
    def setUp(self, urlopen_mock):
        # pyswagger fetches the spec through urllib (not requests), so patch
        # urlopen and serve the swagger spec from a local file instead.
        urlopen_mock.return_value = open('test/resources/swagger.json')
        self.app = App.create(
            'https://esi.tech.ccp.is/latest/swagger.json'
        )

        self.security = EsiSecurity(
            self.app,
            TestEsiPy.CALLBACK_URI,
            TestEsiPy.CLIENT_ID,
            TestEsiPy.SECRET_KEY
        )

        self.cache = DictCache()
        # One authenticated client and one anonymous, retrying client,
        # both sharing the same cache instance.
        self.client = EsiClient(self.security, cache=self.cache)
        self.client_no_auth = EsiClient(cache=self.cache, retry_requests=True)

    def tearDown(self):
        """ clear the cache so we don't have residual data """
        self.cache._dict = {}

    def test_esipy_client_no_args(self):
        # Default construction: no security, a DictCache, and the
        # stock User-Agent header.
        client_no_args = EsiClient()
        self.assertIsNone(client_no_args.security)
        self.assertTrue(isinstance(client_no_args.cache, DictCache))
        self.assertEqual(
            client_no_args._session.headers['User-Agent'],
            'EsiPy/Client - https://github.com/Kyria/EsiPy'
        )
        self.assertEqual(client_no_args.raw_body_only, False)

    def test_esipy_client_with_headers(self):
        # Custom headers must override the default User-Agent.
        client_with_headers = EsiClient(headers={'User-Agent': 'foobar'})
        self.assertEqual(
            client_with_headers._session.headers['User-Agent'],
            'foobar'
        )

    def test_esipy_client_with_adapter(self):
        # A supplied transport adapter is mounted for both http and https.
        transport_adapter = HTTPAdapter()
        client_with_adapters = EsiClient(
            transport_adapter=transport_adapter
        )
        self.assertEqual(
            client_with_adapters._session.get_adapter('http://'),
            transport_adapter
        )
        self.assertEqual(
            client_with_adapters._session.get_adapter('https://'),
            transport_adapter
        )

    def test_esipy_client_without_cache(self):
        # cache=None means "no caching": a DummyCache is substituted.
        client_without_cache = EsiClient(cache=None)
        self.assertTrue(isinstance(client_without_cache.cache, DummyCache))

    def test_esipy_client_with_cache(self):
        # A provided cache instance is used as-is.
        cache = DictCache()
        client_with_cache = EsiClient(cache=cache)
        self.assertTrue(isinstance(client_with_cache.cache, BaseCache))
        self.assertEqual(client_with_cache.cache, cache)

    def test_esipy_client_wrong_cache(self):
        # Passing the cache *class* (instead of an instance) is rejected.
        with self.assertRaises(ValueError):
            EsiClient(cache=DictCache)

    def test_esipy_request_public(self):
        # Anonymous request against a public (unauthenticated) endpoint.
        with httmock.HTTMock(public_incursion):
            incursions = self.client_no_auth.request(
                self.app.op['get_incursions']()
            )
            self.assertEqual(incursions.data[0].type, 'Incursion')
            self.assertEqual(incursions.data[0].faction_id, 500019)

    def test_esipy_request_authed(self):
        # Authenticated request, then the same request again after forcing
        # the token to expire, exercising the transparent refresh path.
        with httmock.HTTMock(*_all_auth_mock_):
            self.security.auth('let it bee')
            char_location = self.client.request(
                self.app.op['get_characters_character_id_location'](
                    character_id=123456789
                )
            )
            self.assertEqual(char_location.data.station_id, 60004756)

            # force expire
            self.security.token_expiry = 0
            char_location_with_refresh = self.client.request(
                self.app.op['get_characters_character_id_location'](
                    character_id=123456789
                )
            )
            self.assertEqual(
                char_location_with_refresh.data.station_id,
                60004756
            )

    def test_client_cache_request(self):
        @httmock.all_requests
        def fail_if_request(url, request):
            self.fail('Cached data is not supposed to do requests')

        incursion_operation = self.app.op['get_incursions']

        # Responses without an expiration are not cached: each call hits
        # the (changing) mock and sees the new state.
        with httmock.HTTMock(public_incursion_no_expires):
            incursions = self.client_no_auth.request(incursion_operation())
            self.assertEqual(incursions.data[0].state, 'mobilizing')

        with httmock.HTTMock(public_incursion_no_expires_second):
            incursions = self.client_no_auth.request(incursion_operation())
            self.assertEqual(incursions.data[0].state, 'established')

        # This mock's response is cacheable; the value is stored...
        with httmock.HTTMock(public_incursion):
            incursions = self.client_no_auth.request(incursion_operation())
            self.assertEqual(incursions.data[0].state, 'mobilizing')

        # ...so the next call must be served from cache without any HTTP
        # request (fail_if_request would fail the test otherwise).
        with httmock.HTTMock(fail_if_request):
            incursions = self.client_no_auth.request(incursion_operation())
            self.assertEqual(incursions.data[0].state, 'mobilizing')

    def test_client_warning_header(self):
        # A "warning" header in the response is surfaced as a UserWarning.
        with httmock.HTTMock(public_incursion_warning):
            warnings.simplefilter('error')
            incursion_operation = self.app.op['get_incursions']

            with self.assertRaises(UserWarning):
                self.client_no_auth.request(incursion_operation())

    def test_client_raw_body_only(self):
        # raw_body_only=True skips response parsing (data is None, raw is
        # populated); the per-request flag can override the client default.
        client = EsiClient(raw_body_only=True)
        self.assertEqual(client.raw_body_only, True)

        with httmock.HTTMock(public_incursion):
            incursions = client.request(self.app.op['get_incursions']())
            self.assertIsNone(incursions.data)
            self.assertTrue(len(incursions.raw) > 0)

            incursions = client.request(
                self.app.op['get_incursions'](),
                raw_body_only=False
            )
            self.assertIsNotNone(incursions.data)

    def test_esipy_reuse_operation(self):
        # The same prepared operation object can be submitted twice.
        operation = self.app.op['get_incursions']()
        with httmock.HTTMock(public_incursion):
            incursions = self.client_no_auth.request(operation)
            self.assertEqual(incursions.data[0].faction_id, 500019)

            # this shouldn't create any errors
            incursions = self.client_no_auth.request(operation)
            self.assertEqual(incursions.data[0].faction_id, 500019)

    def test_esipy_multi_request(self):
        # multi_request fans the operations out over a thread pool and
        # yields (request, response) pairs.
        operation = self.app.op['get_incursions']()
        with httmock.HTTMock(public_incursion):
            count = 0
            for req, incursions in self.client_no_auth.multi_request(
                    [operation, operation, operation], threads=2):
                self.assertEqual(incursions.data[0].faction_id, 500019)
                count += 1
            # Check we made 3 requests
            self.assertEqual(count, 3)

    def test_esipy_backoff(self):
        # On server errors the retrying client backs off between attempts.
        operation = self.app.op['get_incursions']()
        start_calls = time.time()

        with httmock.HTTMock(public_incursion_server_error):
            incursions = self.client_no_auth.request(operation)
            self.assertEqual(incursions.data.error, 'broke')

        end_calls = time.time()

        # Check we retried 5 times
        self.assertEqual(incursions.data.count, 5)

        # Check that backoff slept for a sum > 2 seconds
        self.assertTrue(end_calls - start_calls > 2)
| {
"content_hash": "fe55227ce8f54e87636a776e3348f678",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 78,
"avg_line_length": 36.375,
"alnum_prop": 0.6311978399607265,
"repo_name": "a-tal/EsiPy",
"id": "171b6e3810a757de3c8c9c53741600a3b5540f6a",
"size": "8174",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_client.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "57733"
}
],
"symlink_target": ""
} |
"""
Feature file parser.
One Feature file parser instance is able to parse one feature file.
"""
import os
import io
import re
import json
import filecmp
import copy
import string
from .compat import RecursionError
from .exceptions import RadishError, FeatureFileSyntaxError, LanguageNotSupportedError
from .feature import Feature
from .scenario import Scenario
from .scenariooutline import ScenarioOutline
from .scenarioloop import ScenarioLoop
from .stepmodel import Step
from .background import Background
from .model import Tag
from . import utils
class Keywords(object):
    """Container for the localized gherkin keywords of one language.

    Each constructor argument is exposed as an attribute of the same
    name (e.g. ``keywords.feature``, ``keywords.scenario_outline``).
    """

    #: Ordered attribute names mirroring the constructor signature.
    _FIELDS = (
        "feature",
        "background",
        "scenario",
        "scenario_outline",
        "examples",
        "scenario_loop",
        "iterations",
    )

    def __init__(
        self,
        feature,
        background,
        scenario,
        scenario_outline,
        examples,
        scenario_loop,
        iterations,
    ):
        # Bind every keyword translation as an attribute of the same name.
        values = (
            feature,
            background,
            scenario,
            scenario_outline,
            examples,
            scenario_loop,
            iterations,
        )
        for attr_name, keyword in zip(self._FIELDS, values):
            setattr(self, attr_name, keyword)
class FeatureParser(object):
    """
    Class to parse a feature file.
    A feature file contains just one feature.

    Parsing is implemented as a state machine: each line is dispatched to
    ``_parse_<state>`` according to ``self._current_state`` (see
    :meth:`_parse_context`), and the ``_parse_*`` handlers advance the
    state as they recognize keywords.
    """

    #: Directory containing the per-language keyword JSON files.
    LANGUAGE_LOCATION = os.path.join(os.path.dirname(__file__), "languages")
    DEFAULT_LANGUAGE = "en"
    #: Step prefixes that establish a "context class"; an "And" step
    #: inherits the context class of the preceding step.
    CONTEXT_CLASSES = ["given", "when", "then", "but"]

    class State(object):
        """
        Represents the parser state
        """

        INIT = "init"
        FEATURE = "feature"
        BACKGROUND = "background"
        SCENARIO = "scenario"
        STEP = "step"
        EXAMPLES = "examples"
        EXAMPLES_ROW = "examples_row"
        STEP_TEXT = "step_text"
        SKIP_SCENARIO = "skip_scenario"

    def __init__(
        self,
        core,
        featurefile,
        featureid,
        tag_expr=None,
        inherited_tags=None,
        language="en",
    ):
        if not os.path.exists(featurefile):
            raise OSError("Feature file at '{0}' does not exist".format(featurefile))

        self._core = core
        self._featureid = featureid
        self._featurefile = featurefile
        self._tag_expr = tag_expr
        self.keywords = {}
        self._keywords_delimiter = ":"

        self._inherited_tags = inherited_tags or []

        # Parsing starts by looking for the Feature sentence.
        self._current_state = FeatureParser.State.FEATURE
        self._current_line = 0
        # Tags/preconditions/constants collected from tag lines are held
        # here until the Feature or Scenario they belong to is created.
        self._current_tags = []
        self._current_preconditions = []
        self._current_constants = []
        self._current_scenario = None
        #: Holds the current context class for a Step.
        #  eg. If a step is: 'And I have the number'
        #  and this step was preceeded by 'Given I have the number
        #  it's context class is 'Given'. This is used to correctly
        #  match the 'And' sentences
        self._current_context_class = None
        # used to save text indention
        # - negative number indicates that there is now step text parsing
        self._in_step_text_index = -1
        self.feature = None

        self._load_language(language)

    def _load_language(self, language=None):
        """
        Loads all keywords of the given language

        :param string language: the lanugage to use for the feature files.
                                if None is given `radish` tries to detect the language.

        :returns: if the language could be loaded or not
        :rtype: bool

        :raises LanguageNotSupportedError: if the given language is not supported by radish
        """
        if not language:  # try to detect language
            raise NotImplementedError("Auto detect language is not implemented yet")

        language_path = os.path.join(self.LANGUAGE_LOCATION, language + ".json")
        try:
            with io.open(language_path, "r", encoding="utf-8") as f:
                language_pkg = json.load(f)
        except IOError:
            raise LanguageNotSupportedError(language)

        self.keywords = Keywords(**language_pkg["keywords"])

    def parse(self):
        """
        Parses the feature file of this `FeatureParser` instance

        :returns: if the parsing was successful or not
        :rtype: bool
        """
        with io.open(self._featurefile, "r", encoding="utf-8") as f:
            for line in f.readlines():
                self._current_line += 1
                line_strip = line.strip()
                if not line_strip:  # line is empty
                    continue

                if line_strip.startswith("#"):
                    # try to detect feature file language
                    language = self._detect_language(line)
                    if language:
                        self._load_language(language)
                    continue

                if self.feature:
                    # Guard against malformed files: only one Feature per
                    # file and at most one Background, before any Scenario.
                    if self._detect_feature(line_strip):
                        raise FeatureFileSyntaxError(
                            "radish supports only one Feature per feature file"
                        )

                    if self._detect_background(line_strip):
                        if self.feature.background:
                            raise FeatureFileSyntaxError(
                                "The Background block may only appear once in a Feature"
                            )

                        if self.feature.scenarios:
                            raise FeatureFileSyntaxError(
                                "The Background block must be placed before any Scenario block"
                            )

                result = self._parse_context(line)
                if result is False:
                    raise FeatureFileSyntaxError(
                        "Syntax error in feature file {0} on line {1}".format(
                            self._featurefile, self._current_line
                        )
                    )

        if not self.feature:
            raise FeatureFileSyntaxError(
                "No Feature found in file {0}".format(self._featurefile)
            )

        if not self.feature.scenarios:
            return None

        if (
            self._current_scenario and not self._current_scenario.complete
        ):  # for the last scenario
            self._current_scenario.after_parse()
        return self.feature

    def _parse_context(self, line):
        """
        Parses arbitrary context from a line

        :param string line: the line to parse from
        """
        # Dispatch to the handler matching the current state, e.g.
        # "_parse_feature" while in State.FEATURE.
        parse_context_func = getattr(self, "_parse_" + self._current_state, None)
        if not parse_context_func:
            raise RadishError(
                "FeatureParser state {0} is not supported".format(self._current_state)
            )

        return parse_context_func(line)

    def _parse_feature(self, line):
        """
        Parses a Feature Sentence

        The `INIT` state is used as initiale state.

        :param string line: the line to parse from
        """
        line = line.strip()
        detected_feature = self._detect_feature(line)
        if not detected_feature:
            # Not the Feature sentence yet: it may be a tag line that
            # belongs to the upcoming Feature.
            tag = self._detect_tag(line)
            if tag:
                self._current_tags.append(Tag(tag[0], tag[1]))
                if tag[0] == "constant":
                    name, value = self._parse_constant(tag[1])
                    self._current_constants.append((name, value))
                return True

            return False

        self.feature = Feature(
            self._featureid,
            self.keywords.feature,
            detected_feature,
            self._featurefile,
            self._current_line,
            self._current_tags,
        )
        self.feature.context.constants = self._current_constants
        self._current_state = FeatureParser.State.BACKGROUND
        # Reset collectors: they now belong to the created Feature.
        self._current_tags = []
        self._current_constants = []
        return True

    def _parse_background(self, line):
        """
        Parses a background context

        :param str line: the line to parse the background
        """
        line = line.strip()
        detected_background = self._detect_background(line)
        if detected_background is None:
            # try to find a scenario
            if self._detect_scenario_type(line):
                return self._parse_scenario(line)

            # this line is interpreted as a feature description line
            self.feature.description.append(line)
            return True

        self.feature.background = Background(
            self.keywords.background,
            detected_background,
            self._featurefile,
            self._current_line,
            self.feature,
        )
        self._current_scenario = self.feature.background
        self._current_state = FeatureParser.State.STEP
        return True

    def _parse_scenario(self, line):
        """
        Parses a Feature context

        :param string line: the line to parse from
        """
        line = line.strip()
        # Try each scenario flavor in turn: Scenario, Scenario Outline,
        # then Scenario Loop; fall back to tag handling.
        detected_scenario = self._detect_scenario(line)
        scenario_type = Scenario
        keywords = (self.keywords.scenario,)
        if not detected_scenario:
            detected_scenario = self._detect_scenario_outline(line)
            scenario_type = ScenarioOutline
            keywords = (self.keywords.scenario_outline, self.keywords.examples)

        if not detected_scenario:
            detected_scenario = self._detect_scenario_loop(line)
            if not detected_scenario:
                tag = self._detect_tag(line)
                if tag:
                    self._current_tags.append(Tag(tag[0], tag[1]))
                    if tag[0] == "precondition":
                        scenario = self._parse_precondition(tag[1])
                        if scenario is not None:
                            self._current_preconditions.append(scenario)
                    elif tag[0] == "constant":
                        name, value = self._parse_constant(tag[1])
                        self._current_constants.append((name, value))
                    return True

                raise FeatureFileSyntaxError(
                    "The parser expected a scenario or a tag on this line. Given: '{0}'".format(
                        line
                    )
                )

            # A scenario loop detection returns a (sentence, iterations) pair.
            detected_scenario, iterations = (
                detected_scenario
            )  # pylint: disable=unpacking-non-sequence
            scenario_type = ScenarioLoop
            keywords = (self.keywords.scenario_loop, self.keywords.iterations)

        if detected_scenario in self.feature:
            raise FeatureFileSyntaxError(
                "Scenario with name '{0}' defined twice in feature '{1}'".format(
                    detected_scenario, self.feature.path
                )
            )

        # Scenario ids continue from the previous scenario (or its last
        # sub-scenario, for outlines/loops).
        scenario_id = 1
        if self.feature.scenarios:
            previous_scenario = self._current_scenario
            if hasattr(previous_scenario, "scenarios") and previous_scenario.scenarios:
                scenario_id = previous_scenario.scenarios[-1].id + 1
            else:
                scenario_id = previous_scenario.id + 1

        # all tags of this scenario have been consumed so we can
        # check if this scenario has to be evaluated or not
        if self._tag_expr:
            # inherit the tags from the current feature and the explicitely
            # inherited tags given to the parser. This tags are coming from precondition scenarios
            current_tags = self._current_tags + self.feature.tags + self._inherited_tags
            scenario_in_tags = self._tag_expr.evaluate([t.name for t in current_tags])
            if (
                not scenario_in_tags
            ):  # this scenario does not match the given tag expression
                self._current_tags = []
                self._current_preconditions = []
                self._current_constants = []
                self._current_state = FeatureParser.State.SKIP_SCENARIO
                return True

        background = self._create_scenario_background(
            steps_runable=scenario_type is Scenario
        )

        scenario = scenario_type(
            scenario_id,
            *keywords,
            sentence=detected_scenario,
            path=self._featurefile,
            line=self._current_line,
            parent=self.feature,
            tags=self._current_tags,
            preconditions=self._current_preconditions,
            background=background
        )
        self.feature.scenarios.append(scenario)
        self._current_scenario = scenario
        self._current_scenario.context.constants = self._current_constants
        self._current_tags = []
        self._current_preconditions = []
        self._current_constants = []

        if scenario_type == ScenarioLoop:
            self._current_scenario.iterations = iterations

        self._current_state = FeatureParser.State.STEP
        return True

    def _parse_examples(self, line):
        """
        Parses the Examples header line

        :param string line: the line to parse from
        """
        line = line.strip()
        if not isinstance(self._current_scenario, ScenarioOutline):
            raise FeatureFileSyntaxError(
                "Scenario does not support Examples. Use 'Scenario Outline'"
            )

        # Header cells are the pipe-separated values, outer pipes dropped.
        self._current_scenario.examples_header = [
            x.strip() for x in line.split("|")[1:-1]
        ]
        self._current_state = FeatureParser.State.EXAMPLES_ROW
        return True

    def _parse_examples_row(self, line):
        """
        Parses an Examples row

        :param string line: the line to parse from
        """
        line = line.strip()
        # detect next keyword
        if self._detect_scenario_type(line):
            self._current_scenario.after_parse()
            return self._parse_scenario(line)

        example = ScenarioOutline.Example(
            [x.strip() for x in utils.split_unescape(line, "|")[1:-1]],
            self._featurefile,
            self._current_line,
        )
        self._current_scenario.examples.append(example)
        return True

    def _parse_step(self, line):
        """
        Parses a single step

        :param string line: the line to parse from
        """
        line_strip = line.strip()
        # detect next keyword
        if self._detect_scenario_type(line_strip):
            self._current_scenario.after_parse()
            return self._parse_scenario(line_strip)

        if self._detect_step_text(line_strip):
            self._current_state = self.State.STEP_TEXT
            return self._parse_step_text(line)

        if self._detect_table(line_strip):
            self._parse_table(line_strip)
            return True

        if self._detect_examples(line_strip):
            self._current_state = FeatureParser.State.EXAMPLES
            return True

        # get context class
        step_context_class = line_strip.split()[0].lower()
        if step_context_class in FeatureParser.CONTEXT_CLASSES:
            self._current_context_class = step_context_class

        step_id = len(self._current_scenario.all_steps) + 1
        # Steps inside outlines/loops/backgrounds are templates and are
        # not directly runnable.
        not_runable = isinstance(
            self._current_scenario, (ScenarioOutline, ScenarioLoop, Background)
        )
        step = Step(
            step_id,
            line_strip,
            self._featurefile,
            self._current_line,
            self._current_scenario,
            not not_runable,
            context_class=self._current_context_class,
        )
        self._current_scenario.steps.append(step)
        return True

    def _parse_table(self, line):
        """
        Parses a step table row

        :param string line: the line to parse from
        """
        line = line.strip()
        if not self._current_scenario.steps:
            raise FeatureFileSyntaxError(
                "Found step table without previous step definition on line {0}".format(
                    self._current_line
                )
            )

        current_step = self._current_scenario.steps[-1]
        table_columns = [x.strip() for x in utils.split_unescape(line, "|")[1:-1]]

        if not current_step.table_header:  # it's the table heading
            current_step.table_header = table_columns
        else:  # it's a table data row
            table_data = {
                k: v for k, v in zip(current_step.table_header, table_columns)
            }
            current_step.table_data.append(table_columns)
            current_step.table.append(table_data)

        return True

    def _parse_step_text(self, line):
        """
        Parses additional step text

        :param str line: the line to parse
        """

        def dedent(_str):
            # Strip at most `_in_step_text_index` leading whitespace chars,
            # i.e. the indentation level of the opening triple quote.
            ret_line = ''
            for char_index in range(len(_str)):
                if not ret_line and char_index < self._in_step_text_index and _str[char_index] in string.whitespace:
                    continue
                else:
                    ret_line += _str[char_index]
            return ret_line.rstrip()

        line_strip = line.strip()
        if line_strip.startswith('"""') and self._in_step_text_index == -1:
            # Opening delimiter: remember its indentation for dedenting.
            self._in_step_text_index = line.index('"')
            line_strip = line_strip[3:]
            if line_strip:
                self._current_scenario.steps[-1].raw_text.append(line_strip.rstrip())
        elif line_strip.endswith('"""') and self._in_step_text_index >= 0:
            # Closing delimiter: leave STEP_TEXT mode.
            self._current_state = self.State.STEP
            line = line.rstrip()[:-3]
            line_dedent = dedent(line)
            self._in_step_text_index = -1
            if line_dedent:
                self._current_scenario.steps[-1].raw_text.append(line_dedent)
        else:
            line_dedent = dedent(line)
            self._current_scenario.steps[-1].raw_text.append(line_dedent)

        return True

    def _parse_precondition(self, arguments):
        """
        Parses scenario preconditions

        The arguments must be in format:
            File.feature: Some scenario

        :param str arguments: the raw arguments
        """
        match = re.search(r"(.*?\.feature): (.*)", arguments)
        if not match:
            raise FeatureFileSyntaxError(
                "Scenario @precondition tag must have argument in format: 'test.feature: Some scenario'"
            )

        feature_file_name, scenario_sentence = match.groups()
        feature_file = os.path.join(
            os.path.dirname(self._featurefile), feature_file_name
        )

        # check if the precondition Scenario is in the same feature file.
        # If this happens to be the case the current feature is just copied as is.
        if filecmp.cmp(self._featurefile, feature_file):
            if scenario_sentence not in self.feature:
                raise FeatureFileSyntaxError(
                    "Cannot import precondition scenario '{0}' from feature '{1}': No such scenario".format(
                        scenario_sentence, feature_file
                    )
                )
            feature = copy.deepcopy(self.feature)
            self._core.features.append(feature)
        else:
            try:
                current_tags = (
                    self._current_tags + self.feature.tags + self._inherited_tags
                )
                feature = self._core.parse_feature(
                    feature_file, self._tag_expr, inherited_tags=current_tags
                )
            except (RuntimeError, RecursionError) as e:
                # A precondition chain that loops back on itself manifests
                # as a recursion limit error while re-parsing.
                if str(e).startswith(
                    "maximum recursion depth exceeded"
                ):  # precondition cycling
                    raise FeatureFileSyntaxError(
                        "Your feature '{0}' has cycling preconditions with '{1}: {2}' starting at line {3}".format(
                            self._featurefile,
                            feature_file_name,
                            scenario_sentence,
                            self._current_line,
                        )
                    )
                raise

        if feature is None:
            return None

        if scenario_sentence not in feature:
            raise FeatureFileSyntaxError(
                "Cannot import precondition scenario '{0}' from feature '{1}': No such scenario".format(
                    scenario_sentence, feature_file
                )
            )

        return feature[scenario_sentence]

    def _parse_constant(self, arguments):
        """
        Parses tag arguments as a constant containing name and value

        The arguments must be in format:
            ConstantName: SomeValue
            ConstantName: 5

        :param str arguments: the raw arguments to parse
        """
        name, value = arguments.split(":", 1)
        return name.strip(), value.strip()

    def _parse_skip_scenario(self, line):
        """
        Parses the next lines until the next scenario is reached
        """
        line = line.strip()
        if self._detect_scenario_type(line):
            return self._parse_scenario(line)

        return True

    def _detect_keyword(self, keyword, line):
        """
        Detects a keyword on a given line

        :param keyword: the keyword to detect
        :param line: the line in which we want to detect the keyword

        :return: the line without the detected keyword
        :rtype: string or None
        """
        pattern = r"^{keyword}\s*{delimiter}(.*)$".format(
            keyword=keyword, delimiter=self._keywords_delimiter
        )
        match = re.match(pattern, line)
        if match:
            return match.group(1).strip()

        return None

    def _detect_feature(self, line):
        """
        Detects a feature on the given line

        :param string line: the line to detect a feature

        :returns: the detected feature on the given line
        :rtype: string or None
        """
        return self._detect_keyword(self.keywords.feature, line)

    def _detect_background(self, line):
        """
        Detects a background on the given line

        :param string line: the line to detect a background

        :returns: the detected background on the given line
        :rtype: string or None
        """
        return self._detect_keyword(self.keywords.background, line)

    def _detect_scenario_type(self, line):
        """
        Detect a Scenario/ScenarioOutline/ScenarioLoop/Tag on the given line.

        NOTE: on a positive detection this also switches the parser state
        to ``State.SCENARIO`` as a side effect.

        :returns: if a scenario of any type is present on the given line
        :rtype: bool
        """
        if (
            self._detect_scenario(line)
            or self._detect_scenario_outline(line)
            or self._detect_scenario_loop(line)
            or self._detect_tag(line)
        ):
            self._current_state = FeatureParser.State.SCENARIO
            return True

        return False

    def _detect_scenario(self, line):
        """
        Detects a scenario on the given line

        :param string line: the line to detect a scenario

        :returns: the scenario detected on the given line
        :rtype: string or None
        """
        return self._detect_keyword(self.keywords.scenario, line)

    def _detect_scenario_outline(self, line):
        """
        Detects a scenario outline on the given line

        :param string line: the line to detect a scenario outline

        :returns: the scenario outline detected on the given line
        :rtype: string or None
        """
        return self._detect_keyword(self.keywords.scenario_outline, line)

    def _detect_examples(self, line):
        """
        Detects an Examples block on the given line

        :param string line: the line to detect the Examples

        :returns: if an Examples block was found on the given line
        :rtype: bool
        """
        return self._detect_keyword(self.keywords.examples, line) is not None

    def _detect_scenario_loop(self, line):
        """
        Detects a scenario loop on the given line

        :param string line: the line to detect a scenario loop

        :returns: if a scenario loop was found on the given line
        :rtype: string
        """
        match = re.search(r"^{0} (\d+):(.*)".format(self.keywords.scenario_loop), line)
        if match:
            # Returns a (sentence, iteration count) pair.
            return match.group(2).strip(), int(match.group(1))

        return None

    def _detect_table(self, line):
        """
        Detects a step table row on the given line

        :param string line: the line to detect the table row

        :returns: if an step table row was found or not
        :rtype: bool
        """
        return line.startswith("|")

    def _detect_step_text(self, line):
        """
        Detects the beginning of an additional step text block

        :param str line: the line to detect the step text block

        :returns: if a step text block was found or not
        :rtype: bool
        """
        return line.startswith('"""')

    def _detect_language(self, line):
        """
        Detects a language on the given line

        :param string line: the line to detect the language

        :returns: the language or None
        :rtype: str or None
        """
        match = re.search("^# language: (.*)", line)
        if match:
            return match.group(1)

        return None

    def _detect_tag(self, line):
        """
        Detects a tag on the given line

        :param string line: the line to detect the tag

        :returns: the tag or None
        :rtype: str or None
        """
        match = re.search(r"^@([^\s(]+)(?:\((.*?)\))?", line)
        if match:
            # Returns (tag name, optional argument inside parentheses).
            return match.group(1), match.group(2)

        return None

    def _create_scenario_background(self, steps_runable):
        """
        Creates a new instance of the features current
        Background to assign to a new Scenario.
        """
        if not self.feature.background:
            return None

        return self.feature.background.create_instance(steps_runable=steps_runable)
| {
"content_hash": "c3a6f5a4748263d7d2c17d6ad00edd86",
"timestamp": "",
"source": "github",
"line_count": 782,
"max_line_length": 116,
"avg_line_length": 33.39897698209719,
"alnum_prop": 0.5567424764530209,
"repo_name": "radish-bdd/radish",
"id": "181fa6fed6d0894a953c038419c1f4c8fc832614",
"size": "26143",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "radish/parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "44053"
},
{
"name": "Python",
"bytes": "340136"
},
{
"name": "Shell",
"bytes": "1839"
}
],
"symlink_target": ""
} |
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from random import randint
Test.Summary = '''
Test transactions and sessions, making sure they open and close in the proper order.
'''
# need Apache Benchmark. For RHEL7, this is httpd-tools
Test.SkipUnless(
    Condition.HasProgram("ab", "apache benchmark (httpd-tools) needs to be installed on system for this test to work")
)
Test.ContinueOnFail = True
# Define default ATS
ts = Test.MakeATSProcess("ts", command="traffic_manager")
server = Test.MakeOriginServer("server")
Test.testName = ""
# Request the origin server will receive (proxied through ATS).
request_header = {"headers": "GET / HTTP/1.1\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
# expected response from the origin server
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
# Build and install the plugin that counts session/transaction open and close events.
Test.prepare_plugin(os.path.join(Test.Variables.AtsTestToolsDir, 'plugins', 'continuations_verify.cc'), ts)
# add response to the server dictionary
server.addResponse("sessionfile.log", request_header, response_header)
ts.Disk.records_config.update({
    'proxy.config.diags.debug.enabled': 1,
    'proxy.config.diags.debug.tags': 'continuations_verify.*',
    'proxy.config.http.cache.http' : 0,  # disable cache to simplify the test
    'proxy.config.cache.enable_read_while_writer' : 0
})
ts.Disk.remap_config.AddLine(
    'map http://127.0.0.1:{0} http://127.0.0.1:{1}'.format(ts.Variables.port, server.Variables.Port)
)
# Randomized so repeated runs exercise slightly different request loads.
numberOfRequests = randint(1000, 1500)
# Make a *ton* of calls to the proxy!
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'ab -n {0} -c 10 http://127.0.0.1:{1}/;sleep 5'.format(numberOfRequests, ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
# time delay as proxy.config.http.wait_for_cache could be broken
tr.Processes.Default.StartBefore(server, ready=When.PortOpen(server.Variables.Port))
tr.Processes.Default.StartBefore(ts, ready=When.PortOpen(ts.Variables.port))
tr.StillRunningAfter = ts
# Shell snippet that prints "yes" when the close.1 and close.2 metrics for a
# given stat prefix ('ssn' or 'txn') are equal, and "no" otherwise.
comparator_command = '''
if [ "`traffic_ctl metric get continuations_verify.{0}.close.1 | cut -d ' ' -f 2`" == "`traffic_ctl metric get continuations_verify.{0}.close.2 | cut -d ' ' -f 2`" ]; then\
echo yes;\
else \
echo no; \
fi;
'''
records = ts.Disk.File(os.path.join(ts.Variables.RUNTIMEDIR, "records.snap"))
def file_is_ready():
    # Readiness probe: ATS has persisted its metrics snapshot to disk.
    return os.path.exists(records.AbsPath)
# number of sessions/transactions opened and closed are equal
tr = Test.AddTestRun()
# Give ATS extra time to settle its metrics before comparing them.
tr.Processes.Process("filesleeper", "python -c 'from time import sleep; sleep(10)'")
tr.Processes.Default.Command = comparator_command.format('ssn')
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Env = ts.Env
tr.Processes.Default.StartBefore(tr.Processes.filesleeper, ready=file_is_ready)
tr.Processes.Default.Streams.stdout = Testers.ContainsExpression("yes", 'should verify contents')
tr.StillRunningAfter = ts
# for debugging session number
ssn1 = tr.Processes.Process("session1", 'traffic_ctl metric get continuations_verify.ssn.close.1 > ssn1')
ssn2 = tr.Processes.Process("session2", 'traffic_ctl metric get continuations_verify.ssn.close.2 > ssn2')
ssn1.Env = ts.Env
ssn2.Env = ts.Env
tr.Processes.Default.StartBefore(ssn1)
tr.Processes.Default.StartBefore(ssn2)
# Same comparison for transactions.
tr = Test.AddTestRun()
tr.Processes.Default.Command = comparator_command.format('txn')
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Env = ts.Env
tr.Processes.Default.Streams.stdout = Testers.ContainsExpression("yes", 'should verify contents')
tr.StillRunningAfter = ts
# for debugging transaction number
txn1 = tr.Processes.Process("transaction1", 'traffic_ctl metric get continuations_verify.txn.close.1 > txn1')
txn2 = tr.Processes.Process("transaction2", 'traffic_ctl metric get continuations_verify.txn.close.2 > txn2')
txn1.Env = ts.Env
txn2.Env = ts.Env
tr.Processes.Default.StartBefore(txn1)
tr.Processes.Default.StartBefore(txn2)
# session count is positive,
tr = Test.AddTestRun()
tr.Processes.Default.Command = "traffic_ctl metric get continuations_verify.ssn.close.1"
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Env = ts.Env
tr.Processes.Default.Streams.stdout = Testers.ExcludesExpression(" 0", 'should be nonzero')
tr.StillRunningAfter = ts
# and we receive the same number of transactions as we asked it to make
tr = Test.AddTestRun()
tr.Processes.Default.Command = "traffic_ctl metric get continuations_verify.txn.close.1"
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Env = ts.Env
tr.Processes.Default.Streams.stdout = Testers.ContainsExpression(
    "continuations_verify.txn.close.1 {}".format(numberOfRequests), 'should be the number of transactions we made')
tr.StillRunningAfter = ts
| {
"content_hash": "4bfde01f656ee7cd61a6b6d5e6c46220",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 172,
"avg_line_length": 43.75,
"alnum_prop": 0.7496774193548387,
"repo_name": "clearswift/trafficserver",
"id": "15b845d9cc7de3aa1902d07379e03a1dc899e093",
"size": "5425",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/gold_tests/continuations/double.test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13053"
},
{
"name": "C",
"bytes": "3363353"
},
{
"name": "C++",
"bytes": "11399274"
},
{
"name": "CSS",
"bytes": "8089"
},
{
"name": "HTML",
"bytes": "238770"
},
{
"name": "Java",
"bytes": "9881"
},
{
"name": "JavaScript",
"bytes": "1609"
},
{
"name": "Lex",
"bytes": "4029"
},
{
"name": "Lua",
"bytes": "380105"
},
{
"name": "M4",
"bytes": "271199"
},
{
"name": "Makefile",
"bytes": "196400"
},
{
"name": "Objective-C",
"bytes": "13254"
},
{
"name": "Perl",
"bytes": "67408"
},
{
"name": "Perl 6",
"bytes": "1163"
},
{
"name": "Protocol Buffer",
"bytes": "4013"
},
{
"name": "Python",
"bytes": "365710"
},
{
"name": "Roff",
"bytes": "2339"
},
{
"name": "Shell",
"bytes": "87299"
},
{
"name": "Vim script",
"bytes": "192"
},
{
"name": "Yacc",
"bytes": "3251"
}
],
"symlink_target": ""
} |
from oslo_log import log as logging
from neutron.api import extensions
from neutron.api.v2 import attributes
LOG = logging.getLogger(__name__)

# Attribute names this extension injects into the API resources.
FLAVOR_NETWORK = 'flavor:network'
FLAVOR_ROUTER = 'flavor:router'

# Extended attribute map: networks and routers each gain a write-once
# (POST only, no PUT) flavor attribute that defaults to "not specified".
FLAVOR_ATTRIBUTE = {
    'networks': {
        FLAVOR_NETWORK: {'allow_post': True,
                         'allow_put': False,
                         'is_visible': True,
                         'default': attributes.ATTR_NOT_SPECIFIED}
    },
    'routers': {
        FLAVOR_ROUTER: {'allow_post': True,
                        'allow_put': False,
                        'is_visible': True,
                        'default': attributes.ATTR_NOT_SPECIFIED}
    }
}
class Flavor(extensions.ExtensionDescriptor):
    """API extension exposing flavor attributes on networks and routers."""

    @classmethod
    def get_name(cls):
        return "Flavor support for network and router"

    @classmethod
    def get_alias(cls):
        return "flavor"

    @classmethod
    def get_description(cls):
        return "Flavor"

    @classmethod
    def get_updated(cls):
        return "2012-07-20T10:00:00-00:00"

    def get_extended_resources(self, version):
        # Only the v2.0 API carries the flavor attributes.
        return FLAVOR_ATTRIBUTE if version == "2.0" else {}
| {
"content_hash": "1354b64a553af973a7843acb1d51809d",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 66,
"avg_line_length": 24.53061224489796,
"alnum_prop": 0.559900166389351,
"repo_name": "eonpatapon/neutron",
"id": "9cafb13ef0a33659694366d4c53f3efeab0cba46",
"size": "1843",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neutron/extensions/flavor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "980"
},
{
"name": "Python",
"bytes": "7388312"
},
{
"name": "Shell",
"bytes": "12912"
}
],
"symlink_target": ""
} |
import gc
import linecache
import tracemalloc
tracemalloc.start()
def display_top(snapshot, key_type="lineno", limit=10):
    """Pretty-print the top allocation sites recorded in *snapshot*.

    Adapted from:
    https://docs.python.org/3/library/tracemalloc.html#pretty-top
    """
    # Hide allocations attributed to the import machinery itself.
    filters = (
        tracemalloc.Filter(False, "<frozen importlib._bootstrap_external>"),
        tracemalloc.Filter(False, "<frozen importlib._bootstrap>"),
        tracemalloc.Filter(False, "<unknown>"),
    )
    snapshot = snapshot.filter_traces(filters)
    stats = snapshot.statistics(key_type)

    print("Top %s lines" % limit)
    for rank, stat in enumerate(stats[:limit], 1):
        frame = stat.traceback[0]
        print("#%s: %s:%s: %.1f KiB" % (rank, frame.filename, frame.lineno, stat.size / 1024))
        source_line = linecache.getline(frame.filename, frame.lineno).strip()
        if source_line:
            print("    %s" % source_line)

    remainder = stats[limit:]
    if remainder:
        remainder_size = sum(stat.size for stat in remainder)
        print("%s other: %.1f KiB" % (len(remainder), remainder_size / 1024))
    total = sum(stat.size for stat in stats)
    print("Total allocated size: %.1f KiB" % (total / 1024))
def main():
    # Imports live inside main so tracemalloc.start() at module level runs
    # before anything else gets loaded (this also keeps isort happy).
    import io
    import os
    import time
    import contextlib

    import psutil

    import capa.main

    iterations = int(os.environ.get("CAPA_PROFILE_COUNT", 1))
    print("total iterations planned: %d (set via env var CAPA_PROFILE_COUNT)." % (iterations))
    print()

    for index in range(iterations):
        print("iteration %d/%d..." % (index + 1, iterations))
        # Silence capa's own output so only the profiling summary is shown.
        with contextlib.redirect_stdout(io.StringIO()), contextlib.redirect_stderr(io.StringIO()):
            started = time.time()
            capa.main.main()
            elapsed = time.time() - started
        gc.collect()
        process = psutil.Process(os.getpid())
        print("  duration: %0.02fs" % elapsed)
        print("  rss: %.1f MiB" % (process.memory_info().rss / 1024 / 1024))
        print("  vms: %.1f MiB" % (process.memory_info().vms / 1024 / 1024))

    print("done.")
gc.collect()
# NOTE(review): the snapshot is taken and displayed before main() runs,
# so it reports the pre-run allocation baseline — confirm this ordering
# is intended.
snapshot0 = tracemalloc.take_snapshot()
display_top(snapshot0)

main()
| {
"content_hash": "302537079e341469d3dd2b90323c1afd",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 95,
"avg_line_length": 30.34246575342466,
"alnum_prop": 0.5932279909706546,
"repo_name": "mandiant/capa",
"id": "349f6ee90d0fc91042de08f8b77777cf9006a894",
"size": "2215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/profile-memory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1371"
},
{
"name": "Python",
"bytes": "889005"
},
{
"name": "Shell",
"bytes": "5346"
}
],
"symlink_target": ""
} |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from .visualize import violinplot
def add_noise(data, iteration_per_noise=100,
              noise_percentages=np.arange(0, 101, step=10), plot=True,
              violinplot_kws=None, figure_prefix='anchor_simulation'):
    """Replace a percentage of the rows of ``data`` with uniform noise.

    For each noise percentage, ``iteration_per_noise`` noisy copies of
    ``data`` are generated (only one copy for 0% noise, since it is
    deterministic). In each copy, the chosen percentage of rows is replaced
    with values drawn from Uniform(0, 1).

    Parameters
    ----------
    data : pandas.DataFrame
        (samples, features) matrix of values in [0, 1].
    iteration_per_noise : int
        Number of noisy copies to generate per noise percentage.
    noise_percentages : iterable of int
        Percentages (0-100) of rows to replace with noise.
    plot : bool
        If True, save a violinplot per noise percentage.
    violinplot_kws : dict or None
        Extra keyword arguments passed to :func:`violinplot`.
    figure_prefix : str
        Filename prefix for the saved figures.

    Returns
    -------
    pandas.DataFrame
        All noisy copies concatenated along the columns, with columns
        renamed to ``{col}_noise{percentage}_iter{iteration}``.
    """
    data_dfs = []
    violinplot_kws = {} if violinplot_kws is None else violinplot_kws
    width = len(data.columns) * 0.75
    alpha = max(0.05, 1. / iteration_per_noise)
    for noise_percentage in noise_percentages:
        if plot:
            fig, ax = plt.subplots(figsize=(width, 3))
        for iteration in range(iteration_per_noise):
            # 0% noise is deterministic, so a single iteration suffices.
            if iteration > 0 and noise_percentage == 0:
                continue
            noisy_data = data.copy()
            # Number of rows to replace with noise. BUG FIX: the original
            # passed the raw percentage as the sample size for
            # np.random.choice, which was only consistent with the
            # replacement array's shape when data had exactly 100 rows
            # (and raised for fewer rows than the percentage).
            n_noisy_rows = int(noisy_data.shape[0] * noise_percentage // 100)
            shape = (n_noisy_rows, noisy_data.shape[1])
            size = shape[0] * shape[1]
            noise_ind = np.random.choice(noisy_data.index,
                                         size=n_noisy_rows,
                                         replace=False)
            noisy_data.loc[noise_ind] = np.random.uniform(
                low=0., high=1., size=size).reshape(shape)
            renamer = dict(
                (col, '{}_noise{}_iter{}'.format(
                    col, noise_percentage, iteration))
                for col in noisy_data.columns)
            renamed = noisy_data.rename(columns=renamer)
            data_dfs.append(renamed)
            if plot:
                noisy_data_tidy = noisy_data.unstack()
                noisy_data_tidy = noisy_data_tidy.reset_index()
                noisy_data_tidy = noisy_data_tidy.rename(
                    columns={'level_0': 'Feature ID',
                             'level_1': 'Sample ID',
                             0: '$\Psi$'})
                violinplot(x='Feature ID', y='$\Psi$',
                           data=noisy_data_tidy, ax=ax,
                           **violinplot_kws)
        if plot:
            if noise_percentage > 0:
                # Fade overlapping violins from the repeated iterations.
                for c in ax.collections:
                    c.set_alpha(alpha)
            ax.set(ylim=(0, 1), title='{}% Uniform Noise'.format(
                noise_percentage), yticks=(0, 0.5, 1), ylabel='$\Psi$')
            sns.despine()
            fig.tight_layout()
            fig.savefig('{}_noise_percentage_{}.pdf'.format(figure_prefix,
                                                            noise_percentage))
    all_noisy_data = pd.concat(data_dfs, axis=1)
    return all_noisy_data
| {
"content_hash": "53049574eb078f27426d442819635f29",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 78,
"avg_line_length": 38.89393939393939,
"alnum_prop": 0.5056486170627191,
"repo_name": "olgabot/anchor",
"id": "75ee38eb472fe95316f4e0e4f1bab2f43a92e657",
"size": "2568",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "anchor/simulate.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "330"
},
{
"name": "Python",
"bytes": "49253"
}
],
"symlink_target": ""
} |
from xd.build.core.recipe_file import *
from case import *
import unittest
import io
class tests(unittest.case.TestCase):
    """Unit tests for RecipeFile name/version parsing."""

    def _assert_split(self, arg, name, version):
        # Helper: split `arg` and compare against the expected name/version.
        parsed_name, parsed_version = RecipeFile.split_name_and_version(arg)
        self.assertEqual(parsed_name, name)
        self.assertEqual(str(parsed_version), version)

    def _assert_recipe(self, path, name, version):
        # Helper: construct a RecipeFile and compare its name/version.
        recipe = RecipeFile(path)
        self.assertEqual(recipe.name, name)
        self.assertEqual(str(recipe.version), version)

    def _assert_invalid_filename(self, path):
        # Helper: the path must be rejected as an invalid recipe filename.
        with self.assertRaises(InvalidRecipeFilename):
            RecipeFile(path)

    def test_split_no_version(self):
        self._assert_split('foo', 'foo', '')

    def test_split_version_1(self):
        self._assert_split('foo_4.2', 'foo', '4.2')

    def test_split_version_2(self):
        self._assert_split('foo_4.2.xd', 'foo', '4.2')

    def test_split_bad_version_1(self):
        with self.assertRaises(InvalidRecipeName):
            RecipeFile.split_name_and_version('')

    def test_split_bad_version_2(self):
        with self.assertRaises(InvalidRecipeName):
            RecipeFile.split_name_and_version('foo_bar_4.2')

    def test_with_path_1(self):
        self._assert_recipe('/path/to/something/foo.xd', 'foo', '')

    def test_with_path_2(self):
        self._assert_recipe('/some/path/bar_31.7.xd', 'bar', '31.7')

    def test_with_odd_path_1(self):
        self._assert_invalid_filename('/some/path/.xd')

    def test_bad_filename_1(self):
        self._assert_invalid_filename('/tmp/foo.bar')

    def test_bad_filename_2(self):
        self._assert_invalid_filename('/tmp/foo')

    def test_badd_filename_3(self):
        self._assert_invalid_filename('/some/path/.xd')

    def test_badd_filename_4(self):
        self._assert_invalid_filename('/some/path/foo_bar_1.xd')

    def test_with_odd_name(self):
        self._assert_recipe('/some/path/bar.93-1_4.xd', 'bar.93-1', '4')

    def test_with_odd_version_1(self):
        self._assert_recipe('/some/path/bar_4.2.1rc3.1-1.xd',
                            'bar', '4.2.1rc3.1-1')

    def test_with_odd_version_2(self):
        self._assert_recipe('/some/path/bar_89.23~build-189.xd',
                            'bar', '89.23~build-189')

    def test_str_1(self):
        self.assertEqual(str(RecipeFile('/tmp/foo.xd')), 'foo')

    def test_str_2(self):
        self.assertEqual(str(RecipeFile('/tmp/foo_1.89.xd')), 'foo_1.89')

    def test_repr(self):
        self.assertEqual(repr(RecipeFile('/tmp/foo_1.89.xd')),
                         "RecipeFile('/tmp/foo_1.89.xd')")

    def test_eq_1(self):
        self.assertEqual(RecipeFile('/tmp/foo_1.89.xd'),
                         RecipeFile('/tmp/foo_1.89.xd'))

    def test_eq_2(self):
        self.assertNotEqual(RecipeFile('/tmp/foo_1.89.xd'),
                            RecipeFile('/tmp/foo_1.90.xd'))

    def test_eq_3(self):
        # A RecipeFile never compares equal to a plain path string.
        self.assertNotEqual(RecipeFile('/tmp/foo_1.89.xd'),
                            '/tmp/foo_1.89.xd')
class parse_tests(TestCase):
    """Tests that parse and dump a small recipe file on disk."""

    RECIPE = 'foobar.xd'

    def _write_recipe(self, text):
        # Helper: write a minimal recipe file for parsing.
        with open(self.RECIPE, 'w') as f:
            f.write(text)

    def test_parse_1(self):
        self._write_recipe('FOO="foo"\n')
        d = RecipeFile(self.RECIPE).parse()
        self.assertEqual(len(d), 1)
        self.assertEqual(d['FOO'].get(), 'foo')

    def test_dump_1(self):
        self._write_recipe('FOO="foo"\n')
        recipe_file = RecipeFile(self.RECIPE)
        recipe_file.parse()
        stream = io.StringIO()
        recipe_file.dump(stream=stream)
        self.assertRegex("FOO='foo'\n", stream.getvalue())
| {
"content_hash": "33ccbdadba7306329545da8b934f31a0",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 77,
"avg_line_length": 35.278688524590166,
"alnum_prop": 0.6194237918215614,
"repo_name": "XD-embedded/xd-build-core",
"id": "1cc803032bd71ae23a8a5845fea14a55f21678cc",
"size": "4304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/recipe_file_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "103602"
}
],
"symlink_target": ""
} |
class Directory(object):
    """A node in an in-memory directory tree.

    Holds file info dicts (each with at least a 'fileName' key) and named
    child Directory nodes.
    """

    def __init__(self, name):
        self._name = name
        self._content = []      # file info dicts for files in this directory
        self._directories = {}  # name -> child Directory

    def __len__(self):
        # Length counts child directories only, not files.
        return len(self._directories)

    def __getitem__(self, i):
        return self._directories[i]

    def get_directory(self, name):
        """Return the child directory called *name*, or None."""
        return self._directories.get(name)

    def get_directories(self):
        """Return all child directories."""
        return self._directories.values()

    def add_directory(self, name):
        """Create (or replace) an empty child directory called *name*."""
        self._directories[name] = Directory(name)

    def add_file(self, file_info):
        """Add a file info dict to this directory's content."""
        self._content.append(file_info)

    def get_file_info(self, name):
        """Return the file info whose 'fileName' equals *name*.

        Returns None when there is no match (or more than one, preserving
        the original exactly-one-match semantics).
        """
        matches = [f for f in self._content if str(f['fileName']) == name]
        if len(matches) == 1:
            return matches[0]
        return None

    def get_file_infos(self):
        return self._content

    def __repr__(self):
        return self._name

    def get_content_names(self):
        """Return the names of all children: directories first, then files.

        BUG FIX: the original returned ``map(str, ...) + files`` which
        raises TypeError on Python 3, where map() yields an iterator
        instead of a list.
        """
        files = [file_info['fileName'] for file_info in self._content]
        directories = [str(name) for name in self._directories]
        return directories + files
class DirectoryStructure(object):
    """Mirrors bucket contents as a tree of Directory nodes."""

    def __init__(self):
        self._directories = Directory("")

    def update_structure(self, file_info_list, local_directories):
        """Rebuild the tree from online file infos and local directory paths."""
        self._directories = Directory("")
        # Ensure every locally-known directory exists in the tree.
        for directory_path in local_directories:
            self._lookup(self._directories, directory_path.split("/"), True)
        # Ensure every parent directory of an online file exists as well.
        for file_info in file_info_list:
            parent_parts = file_info['fileName'].split("/")[:-1]
            self._lookup(self._directories, parent_parts, True)
        # Finally attach each file to its parent directory.
        for file_info in file_info_list:
            parent_path = "/".join(file_info['fileName'].split("/")[:-1])
            self.get_directory(parent_path).add_file(file_info)

    def _lookup(self, directory, path, update=False):
        """Walk *path* (a list of names) from *directory*.

        With update=True, missing intermediate directories are created.
        Returns the final Directory node, or None if the path is absent.
        """
        if not path:
            return directory
        head = path.pop(0)
        if update and directory.get_directory(head) is None:
            directory.add_directory(head)
        child = directory.get_directory(head)
        if child is None:
            return None
        return self._lookup(child, path, update)

    def is_directory(self, path):
        return self.get_directories(path) is not None

    def is_file(self, path):
        return self.get_file_info(path) is not None

    def get_directories(self, path):
        """Return the children of the directory at *path*, or None."""
        if not path:
            return self._directories.get_directories()
        node = self._lookup(self._directories, path.split("/"))
        return node.get_directories() if node is not None else None

    def get_directory(self, path):
        """Return the Directory node at *path* ("" means the root)."""
        if not path:
            return self._directories
        return self._lookup(self._directories, path.split("/"))

    def get_file_info(self, path):
        """Return the file info stored at *path*, or None."""
        parent = self._lookup(self._directories, path.split("/")[:-1])
        return parent.get_file_info(path) if parent is not None else None
| {
"content_hash": "4a1a1a1fc8ea663fa65ae92b434252a4",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 85,
"avg_line_length": 29.386554621848738,
"alnum_prop": 0.5756362596511295,
"repo_name": "sondree/b2_fuse",
"id": "f08da24efa0ad1c95eb3a53a25684f272165ad3b",
"size": "4648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "b2fuse/directory_structure.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44471"
},
{
"name": "Shell",
"bytes": "8829"
}
],
"symlink_target": ""
} |
import pandas as pd
import numpy as np
import math
import copy
import QSTK.qstkutil.qsdateutil as du
import datetime as dt
import QSTK.qstkutil.DataAccess as da
import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkstudy.EventProfiler as ep
import time
#Holds data related to order
#Executes order on a given timeseries
class Order:
    """A single buy/sell order that can be applied to a holdings time series.

    FIX: the original file's final line was fused with scraped metadata
    junk, leaving the module unparsable; this emits a clean definition.
    """

    TYPE_BUY = "Buy"
    TYPE_SELL = "Sell"

    def __init__(self, market, timestamp, symbol, type, quantity):
        """
        Populates Order instance with initial data

        Parameters:
        market - Market object, where related stock's prices can be retrieved
        timestamp - Date in YYYY-mm-dd when to execute the order (closing
        price at 16:00 will be used by default)
        symbol - Stock symbol (e.g. GOOG)
        type - Buy/Sell
        quantity - Number of shares to buy/sell
        """
        self.market = market
        # Orders execute at the 16:00 market close of the given day.
        timestamp += " 16:00:00"
        self.timestamp = dt.datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S")
        self.symbol = symbol
        self.type = type
        self.quantity = quantity

    def update_number_of_shares_held(self, ts):
        """
        Execute the order on a time series of shares held: from the order's
        timestamp onwards, the quantity is added (buy) or subtracted (sell).
        """
        held_at_execution = ts[self.timestamp]
        if self.type == self.TYPE_BUY:
            ts[self.timestamp:] = held_at_execution + self.quantity
        elif self.type == self.TYPE_SELL:
            ts[self.timestamp:] = held_at_execution - self.quantity
        return ts

    def to_string(self):
        """
        Return string with Order's data, useful for debugging
        """
        return self.symbol + ", " + self.type + ", " + str(self.quantity)
"content_hash": "145dd1e7d77eef1ba5a631594718d201",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 79,
"avg_line_length": 30.87037037037037,
"alnum_prop": 0.6250749850029994,
"repo_name": "GedRap/voyager",
"id": "741bc3435cfcab6f69b4b58b62a1bf5111a67607",
"size": "1667",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "backtesting/Order.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24789"
}
],
"symlink_target": ""
} |
# Celery settings for local perfrunner runs: a SQLite-backed SQLAlchemy
# broker and a database result backend, so no external message queue
# service is required.
BROKER_URL = 'sqla+sqlite:///perfrunner.db'
CELERY_RESULT_BACKEND = 'database'
CELERY_RESULT_DBURI = 'sqlite:///results.db'
| {
"content_hash": "0cf0c574e79077c48bba0544b3ccbc1e",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 44,
"avg_line_length": 41.333333333333336,
"alnum_prop": 0.7258064516129032,
"repo_name": "pavel-paulau/perfrunner",
"id": "435c01b39da33bcd9ab90bb85704ee443eb1c249",
"size": "124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "perfrunner/celerylocal.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1853"
},
{
"name": "Go",
"bytes": "37134"
},
{
"name": "Groovy",
"bytes": "6085"
},
{
"name": "Inno Setup",
"bytes": "25281"
},
{
"name": "JavaScript",
"bytes": "1869"
},
{
"name": "Makefile",
"bytes": "1665"
},
{
"name": "Python",
"bytes": "810017"
},
{
"name": "Shell",
"bytes": "2087"
}
],
"symlink_target": ""
} |
"""Main entry point into the Identity service."""
import abc
import functools
import os
import uuid
from oslo.config import cfg
from oslo.utils import importutils
import six
from keystone import clean
from keystone.common import dependency
from keystone.common import driver_hints
from keystone.common import manager
from keystone import config
from keystone import exception
from keystone.i18n import _
from keystone.identity.mapping_backends import mapping
from keystone import notifications
from keystone.openstack.common import log
CONF = config.CONF
LOG = log.getLogger(__name__)
DOMAIN_CONF_FHEAD = 'keystone.'
DOMAIN_CONF_FTAIL = '.conf'
def filter_user(user_ref):
    """Filter out private items in a user dict.

    'password', 'tenants' and 'groups' are never returned.

    :returns: a filtered shallow copy of user_ref (the caller's dict and
              its nested 'extra' dict are left unmodified), or user_ref
              unchanged if it is falsy

    """
    if user_ref:
        user_ref = user_ref.copy()
        user_ref.pop('password', None)
        user_ref.pop('tenants', None)
        user_ref.pop('groups', None)
        user_ref.pop('domains', None)
        try:
            # BUG FIX: dict.copy() above is shallow, so popping from
            # user_ref['extra'] in place (as the original did) mutated the
            # caller's nested dict. Copy 'extra' before removing secrets.
            extra = user_ref['extra'].copy()
            extra.pop('password', None)
            extra.pop('tenants', None)
            user_ref['extra'] = extra
        except KeyError:
            pass
    return user_ref
class DomainConfigs(dict):
    """Discover, store and provide access to domain specific configs.

    The setup_domain_drivers() call will be made via the wrapper from
    the first call to any driver function handled by this manager. This
    setup call it will scan the domain config directory for files of the form

    keystone.<domain_name>.conf

    For each file, the domain_name will be turned into a domain_id and then
    this class will:

    - Create a new config structure, adding in the specific additional options
      defined in this config file
    - Initialise a new instance of the required driver with this new config.
    """
    configured = False
    driver = None
    # True once any domain-specific config uses a SQL driver; used to enforce
    # that at most one SQL-backed identity driver is configured overall.
    _any_sql = False

    def _load_driver(self, domain_config, assignment_api):
        # Instantiate the driver class named in this domain's config and
        # give it access to the assignment API.
        domain_config_driver = (
            importutils.import_object(
                domain_config['cfg'].identity.driver, domain_config['cfg']))
        domain_config_driver.assignment_api = assignment_api
        return domain_config_driver

    def _load_config(self, assignment_api, file_list, domain_name):

        def assert_no_more_than_one_sql_driver(new_config, config_file):
            """Ensure there is no more than one SQL driver.

            Check to see if the addition of the driver in this new config
            would cause there to now be more than one sql driver.
            """
            if (new_config['driver'].is_sql and
                    (self.driver.is_sql or self._any_sql)):
                # The addition of this driver would cause us to have more than
                # one sql driver, so raise an exception.
                raise exception.MultipleSQLDriversInConfig(
                    config_file=config_file)
            self._any_sql = new_config['driver'].is_sql

        try:
            domain_ref = assignment_api.get_domain_by_name(domain_name)
        except exception.DomainNotFound:
            # The config file's name does not correspond to a known domain.
            LOG.warning(
                _('Invalid domain name (%s) found in config file name'),
                domain_name)
            return
        # Create a new entry in the domain config dict, which contains
        # a new instance of both the conf environment and driver using
        # options defined in this set of config files. Later, when we
        # service calls via this Manager, we'll index via this domain
        # config dict to make sure we call the right driver
        domain_config = {}
        domain_config['cfg'] = cfg.ConfigOpts()
        config.configure(conf=domain_config['cfg'])
        domain_config['cfg'](args=[], project='keystone',
                             default_config_files=file_list)
        domain_config['driver'] = self._load_driver(
            domain_config, assignment_api)
        assert_no_more_than_one_sql_driver(domain_config, file_list)
        self[domain_ref['id']] = domain_config

    def setup_domain_drivers(self, standard_driver, assignment_api):
        # This is called by the api call wrapper
        self.configured = True
        self.driver = standard_driver
        conf_dir = CONF.identity.domain_config_dir
        if not os.path.exists(conf_dir):
            LOG.warning(_('Unable to locate domain config directory: %s'),
                        conf_dir)
            return
        # Scan for files named keystone.<domain_name>.conf and load each
        # one as a domain-specific configuration.
        for r, d, f in os.walk(conf_dir):
            for fname in f:
                if (fname.startswith(DOMAIN_CONF_FHEAD) and
                        fname.endswith(DOMAIN_CONF_FTAIL)):
                    if fname.count('.') >= 2:
                        self._load_config(assignment_api,
                                          [os.path.join(r, fname)],
                                          fname[len(DOMAIN_CONF_FHEAD):
                                                -len(DOMAIN_CONF_FTAIL)])
                    else:
                        LOG.debug(('Ignoring file (%s) while scanning domain '
                                   'config directory'),
                                  fname)

    def get_domain_driver(self, domain_id):
        # Implicitly returns None for domains without a specific config.
        if domain_id in self:
            return self[domain_id]['driver']

    def get_domain_conf(self, domain_id):
        # Implicitly returns None for domains without a specific config.
        if domain_id in self:
            return self[domain_id]['cfg']

    def reload_domain_driver(self, assignment_api, domain_id):
        # Only used to support unit tests that want to set
        # new config values. This should only be called once
        # the domains have been configured, since it relies on
        # the fact that the configuration files have already been
        # read.
        if self.configured:
            if domain_id in self:
                self[domain_id]['driver'] = (
                    self._load_driver(self[domain_id], assignment_api))
            else:
                # The standard driver
                self.driver = self.driver()
                self.driver.assignment_api = assignment_api
def domains_configured(f):
    """Decorator that lazily initializes domain-specific drivers.

    The assignment manager must be initialized before this manager, yet
    this manager needs assignment calls to build the domain configs. So
    instead of doing the setup in __init__, every wrapped call checks
    whether the domain configs are loaded and performs the setup on
    first use.
    """
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        if not self.domain_configs.configured:
            if CONF.identity.domain_specific_drivers_enabled:
                self.domain_configs.setup_domain_drivers(
                    self.driver, self.assignment_api)
        return f(self, *args, **kwargs)
    return wrapper
def exception_translated(exception_type):
    """Decorator factory mapping PublicIDNotFound onto the right error.

    Depending on *exception_type* ('user', 'group' or 'assertion'), a
    PublicIDNotFound raised by the wrapped call is re-raised as the
    corresponding public-facing exception.
    """
    def _exception_translated(f):
        @functools.wraps(f)
        def wrapper(self, *args, **kwargs):
            try:
                return f(self, *args, **kwargs)
            except exception.PublicIDNotFound as e:
                if exception_type == 'user':
                    raise exception.UserNotFound(user_id=e.message)
                if exception_type == 'group':
                    raise exception.GroupNotFound(group_id=e.message)
                if exception_type == 'assertion':
                    raise AssertionError(_('Invalid user / password'))
                raise
        return wrapper
    return _exception_translated
@dependency.provider('identity_api')
@dependency.optional('revoke_api')
@dependency.requires('assignment_api', 'credential_api', 'id_mapping_api')
class Manager(manager.Manager):
    """Default pivot point for the Identity backend.

    See :mod:`keystone.common.manager.Manager` for more details on how this
    dynamically calls the backend.

    This class also handles the support of domain specific backends, by using
    the DomainConfigs class. The setup call for DomainConfigs is called
    from within the @domains_configured wrapper in a lazy loading fashion
    to get around the fact that we can't satisfy the assignment api it needs
    from within our __init__() function since the assignment driver is not
    itself yet initialized.

    Each of the identity calls are pre-processed here to choose, based on
    domain, which of the drivers should be called. The non-domain-specific
    driver is still in place, and is used if there is no specific driver for
    the domain in question (or we are not using multiple domain drivers).

    Starting with Juno, in order to be able to obtain the domain from
    just an ID being presented as part of an API call, a public ID to domain
    and local ID mapping is maintained. This mapping also allows for the local
    ID of drivers that do not provide simple UUIDs (such as LDAP) to be
    referenced via a public facing ID. The mapping itself is automatically
    generated as entities are accessed via the driver.

    This mapping is only used when:
    - the entity is being handled by anything other than the default driver,
      or
    - the entity is being handled by the default LDAP driver and backward
      compatible IDs are not required.

    This means that in the standard case of a single SQL backend or the
    default settings of a single LDAP backend (since backward compatible IDs
    is set to True by default), no mapping is used. An alternative approach
    would be to always use the mapping table, but in the cases where we don't
    need it to make the public and local IDs the same. It is felt that not
    using the mapping by default is a more prudent way to introduce this
    functionality.

    """

    # Notification resource type constants.
    _USER = 'user'
    _USER_PASSWORD = 'user_password'
    _USER_REMOVED_FROM_GROUP = 'user_removed_from_group'
    _GROUP = 'group'

    def __init__(self):
        super(Manager, self).__init__(CONF.identity.driver)
        # Holds the per-domain driver configurations; lazily initialized by
        # the @domains_configured decorator on the public methods below.
        self.domain_configs = DomainConfigs()

    # Domain ID normalization methods

    def _set_domain_id_and_mapping(self, ref, domain_id, driver,
                                   entity_type):
        """Patch the domain_id/public_id into the resulting entity(ies).

        :param ref: the entity or list of entities to post process
        :param domain_id: the domain scope used for the call
        :param driver: the driver used to execute the call
        :param entity_type: whether this is a user or group
        :returns: post processed entity or list of entities

        Called to post-process the entity being returned, using a mapping
        to substitute a public facing ID as necessary. This method must
        take into account:

        - If the driver is not domain aware, then we must set the domain
          attribute of all entities irrespective of mapping.
        - If the driver does not support UUIDs, then we always want to provide
          a mapping, except for the special case of this being the default
          driver and backward_compatible_ids is set to True. This is to ensure
          that entity IDs do not change for an existing LDAP installation
          (only single domain/driver LDAP configurations were previously
          supported).
        - If the driver does support UUIDs, then we always create a mapping
          entry, but use the local UUID as the public ID. The exception to
          this is that if we just have single driver (i.e. not using specific
          multi-domain configs), then we don't bother with the mapping at all.

        """
        conf = CONF.identity
        if not self._needs_post_processing(driver):
            # a classic case would be when running with a single SQL driver
            return ref
        LOG.debug('ID Mapping - Domain ID: %(domain)s, '
                  'Default Driver: %(driver)s, '
                  'Domains: %(aware)s, UUIDs: %(generate)s, '
                  'Compatible IDs: %(compat)s',
                  {'domain': domain_id,
                   'driver': (driver == self.driver),
                   'aware': driver.is_domain_aware(),
                   'generate': driver.generates_uuids(),
                   'compat': CONF.identity_mapping.backward_compatible_ids})
        if isinstance(ref, dict):
            return self._set_domain_id_and_mapping_for_single_ref(
                ref, domain_id, driver, entity_type, conf)
        elif isinstance(ref, list):
            # Recurse per entity so each one gets its own mapping handling.
            return [self._set_domain_id_and_mapping(
                x, domain_id, driver, entity_type) for x in ref]
        else:
            raise ValueError(_('Expected dict or list: %s') % type(ref))

    def _needs_post_processing(self, driver):
        """Returns whether entity from driver needs domain added or mapping."""
        return (driver is not self.driver or not driver.generates_uuids() or
                not driver.is_domain_aware())

    def _set_domain_id_and_mapping_for_single_ref(self, ref, domain_id,
                                                  driver, entity_type, conf):
        """Post-process a single entity ref (see _set_domain_id_and_mapping)."""
        LOG.debug('Local ID: %s', ref['id'])
        # Work on a copy so the driver's ref is never mutated.
        ref = ref.copy()
        self._insert_domain_id_if_needed(ref, driver, domain_id, conf)
        if self._is_mapping_needed(driver):
            local_entity = {'domain_id': ref['domain_id'],
                            'local_id': ref['id'],
                            'entity_type': entity_type}
            public_id = self.id_mapping_api.get_public_id(local_entity)
            if public_id:
                ref['id'] = public_id
                LOG.debug('Found existing mapping to public ID: %s',
                          ref['id'])
            else:
                # Need to create a mapping. If the driver generates UUIDs
                # then pass the local UUID in as the public ID to use.
                if driver.generates_uuids():
                    public_id = ref['id']
                ref['id'] = self.id_mapping_api.create_id_mapping(
                    local_entity, public_id)
                LOG.debug('Created new mapping to public ID: %s',
                          ref['id'])
        return ref

    def _insert_domain_id_if_needed(self, ref, driver, domain_id, conf):
        """Inserts the domain ID into the ref, if required.

        If the driver can't handle domains, then we need to insert the
        domain_id into the entity being returned. If the domain_id is
        None that means we are running in a single backend mode, so to
        remain backwardly compatible, we put in the default domain ID.
        """
        if not driver.is_domain_aware():
            if domain_id is None:
                domain_id = conf.default_domain_id
            ref['domain_id'] = domain_id

    def _is_mapping_needed(self, driver):
        """Returns whether mapping is needed.

        There are two situations where we must use the mapping:
        - this isn't the default driver (i.e. multiple backends), or
        - we have a single backend that doesn't use UUIDs

        The exception to the above is that we must honor backward
        compatibility if this is the default driver (e.g. to support
        current LDAP).
        """
        is_not_default_driver = driver is not self.driver
        return (is_not_default_driver or (
            not driver.generates_uuids() and
            not CONF.identity_mapping.backward_compatible_ids))

    def _clear_domain_id_if_domain_unaware(self, driver, ref):
        """Clear domain_id details if driver is not domain aware."""
        if not driver.is_domain_aware() and 'domain_id' in ref:
            # Copy first so the caller's dict is left untouched.
            ref = ref.copy()
            ref.pop('domain_id')
        return ref

    def _select_identity_driver(self, domain_id):
        """Choose a backend driver for the given domain_id.

        :param domain_id: The domain_id for which we want to find a driver. If
                          the domain_id is specified as None, then this means
                          we need a driver that handles multiple domains.
        :returns: chosen backend driver
        :raises: keystone.exception.DomainNotFound if multiple domains would
                 be mapped onto a domain-unaware default driver

        If there is a specific driver defined for this domain then choose it.
        If the domain is None, or no specific backend for the given domain is
        found, then we choose the default driver.
        """
        if domain_id is None:
            driver = self.driver
        else:
            driver = (self.domain_configs.get_domain_driver(domain_id) or
                      self.driver)
        # If the driver is not domain aware (e.g. LDAP) then check to
        # ensure we are not mapping multiple domains onto it - the only way
        # that would happen is that the default driver is LDAP and the
        # domain is anything other than None or the default domain.
        if (not driver.is_domain_aware() and driver == self.driver and
                domain_id != CONF.identity.default_domain_id and
                domain_id is not None):
            LOG.warning('Found multiple domains being mapped to a '
                        'driver that does not support that (e.g. '
                        'LDAP) - Domain ID: %(domain)s, '
                        'Default Driver: %(driver)s',
                        {'domain': domain_id,
                         'driver': (driver == self.driver)})
            raise exception.DomainNotFound(domain_id=domain_id)
        return driver

    def _get_domain_driver_and_entity_id(self, public_id):
        """Look up details using the public ID.

        :param public_id: the ID provided in the call
        :returns: domain_id, which can be None to indicate that the driver
                  in question supports multiple domains
                  driver selected based on this domain
                  entity_id which is understood by the driver

        Use the mapping table to look up the domain, driver and local entity
        that is represented by the provided public ID. Handle the situations
        where we do not use the mapping (e.g. single driver that understands
        UUIDs etc.)
        """
        conf = CONF.identity
        # First, since we don't know anything about the entity yet, we must
        # assume it needs mapping, so long as we are using domain specific
        # drivers.
        if conf.domain_specific_drivers_enabled:
            local_id_ref = self.id_mapping_api.get_id_mapping(public_id)
            if local_id_ref:
                return (
                    local_id_ref['domain_id'],
                    self._select_identity_driver(local_id_ref['domain_id']),
                    local_id_ref['local_id'])
        # So either we are using multiple drivers but the public ID is invalid
        # (and hence was not found in the mapping table), or the public ID is
        # being handled by the default driver. Either way, the only place left
        # to look is in that standard driver. However, we don't yet know if
        # this driver also needs mapping (e.g. LDAP in non backward
        # compatibility mode).
        driver = self.driver
        if driver.generates_uuids():
            # NOTE: is_domain_aware must be *called* here - the bound method
            # object itself is always truthy, which previously made the
            # domain-unaware branch below unreachable.
            if driver.is_domain_aware():
                # No mapping required, and the driver can handle the domain
                # information itself. The classic case of this is the
                # current SQL driver.
                return (None, driver, public_id)
            else:
                # Although we don't have any drivers of this type, i.e. that
                # understand UUIDs but not domains, conceptually you could.
                return (conf.default_domain_id, driver, public_id)
        # So the only place left to find the ID is in the default driver which
        # we now know doesn't generate UUIDs
        if not CONF.identity_mapping.backward_compatible_ids:
            # We are not running in backward compatibility mode, so we
            # must use a mapping.
            local_id_ref = self.id_mapping_api.get_id_mapping(public_id)
            if local_id_ref:
                return (
                    local_id_ref['domain_id'],
                    driver,
                    local_id_ref['local_id'])
            else:
                raise exception.PublicIDNotFound(id=public_id)
        # If we reach here, this means that the default driver
        # requires no mapping - but also doesn't understand domains
        # (e.g. the classic single LDAP driver situation). Hence we pass
        # back the public_ID unmodified and use the default domain (to
        # keep backwards compatibility with existing installations).
        #
        # It is still possible that the public ID is just invalid in
        # which case we leave this to the caller to check.
        return (conf.default_domain_id, driver, public_id)

    def _assert_user_and_group_in_same_backend(
            self, user_entity_id, user_driver, group_entity_id, group_driver):
        """Ensures that user and group IDs are backed by the same backend.

        Raise a CrossBackendNotAllowed exception if they are not from the same
        backend, otherwise return None.
        """
        if user_driver is not group_driver:
            # Determine first if either IDs don't exist by calling
            # the driver.get methods (which will raise a NotFound
            # exception).
            user_driver.get_user(user_entity_id)
            group_driver.get_group(group_entity_id)
            # If we get here, then someone is attempting to create a cross
            # backend membership, which is not allowed.
            raise exception.CrossBackendNotAllowed(group_id=group_entity_id,
                                                   user_id=user_entity_id)

    def _mark_domain_id_filter_satisfied(self, hints):
        """Remove an exact domain_id filter already satisfied by driver choice."""
        if hints:
            for filter in hints.filters:
                if (filter['name'] == 'domain_id' and
                        filter['comparator'] == 'equals'):
                    hints.filters.remove(filter)

    def _ensure_domain_id_in_hints(self, hints, domain_id):
        """Add an exact domain_id filter to hints if one is not present."""
        if (domain_id is not None and
                not hints.get_exact_filter_by_name('domain_id')):
            hints.add_filter('domain_id', domain_id)

    # The actual driver calls - these are pre/post processed here as
    # part of the Manager layer to make sure we:
    #
    # - select the right driver for this domain
    # - clear/set domain_ids for drivers that do not support domains
    # - create any ID mapping that might be required

    @notifications.emit_event('authenticate')
    @domains_configured
    @exception_translated('assertion')
    def authenticate(self, context, user_id, password):
        """Authenticate user_id/password against the owning driver."""
        domain_id, driver, entity_id = (
            self._get_domain_driver_and_entity_id(user_id))
        ref = driver.authenticate(entity_id, password)
        return self._set_domain_id_and_mapping(
            ref, domain_id, driver, mapping.EntityType.USER)

    @notifications.created(_USER, result_id_arg_attr='id')
    @domains_configured
    @exception_translated('user')
    def create_user(self, user_ref):
        """Create a new user in the driver for its domain."""
        user = user_ref.copy()
        user['name'] = clean.user_name(user['name'])
        user.setdefault('enabled', True)
        user['enabled'] = clean.user_enabled(user['enabled'])
        # For creating a user, the domain is in the object itself; validate
        # that it exists before selecting the driver.
        domain_id = user['domain_id']
        self.assignment_api.get_domain(domain_id)
        driver = self._select_identity_driver(domain_id)
        user = self._clear_domain_id_if_domain_unaware(driver, user)
        # Generate a local ID - in the future this might become a function of
        # the underlying driver so that it could conform to rules set down by
        # that particular driver type.
        user['id'] = uuid.uuid4().hex
        ref = driver.create_user(user['id'], user)
        return self._set_domain_id_and_mapping(
            ref, domain_id, driver, mapping.EntityType.USER)

    @domains_configured
    @exception_translated('user')
    def get_user(self, user_id):
        """Return the user identified by the public user_id."""
        domain_id, driver, entity_id = (
            self._get_domain_driver_and_entity_id(user_id))
        ref = driver.get_user(entity_id)
        return self._set_domain_id_and_mapping(
            ref, domain_id, driver, mapping.EntityType.USER)

    def assert_user_enabled(self, user_id, user=None):
        """Assert the user and the user's domain are enabled.

        :raise AssertionError if the user or the user's domain is disabled.
        """
        if user is None:
            user = self.get_user(user_id)
        self.assignment_api.assert_domain_enabled(user['domain_id'])
        if not user.get('enabled', True):
            raise AssertionError(_('User is disabled: %s') % user_id)

    @domains_configured
    @exception_translated('user')
    def get_user_by_name(self, user_name, domain_id):
        """Return the user with user_name within the given domain."""
        driver = self._select_identity_driver(domain_id)
        ref = driver.get_user_by_name(user_name, domain_id)
        return self._set_domain_id_and_mapping(
            ref, domain_id, driver, mapping.EntityType.USER)

    @manager.response_truncated
    @domains_configured
    @exception_translated('user')
    def list_users(self, domain_scope=None, hints=None):
        """List users, optionally scoped to a single domain."""
        driver = self._select_identity_driver(domain_scope)
        hints = hints or driver_hints.Hints()
        if driver.is_domain_aware():
            # Force the domain_scope into the hint to ensure that we only get
            # back domains for that scope.
            self._ensure_domain_id_in_hints(hints, domain_scope)
        else:
            # We are effectively satisfying any domain_id filter by the above
            # driver selection, so remove any such filter.
            self._mark_domain_id_filter_satisfied(hints)
        ref_list = driver.list_users(hints)
        return self._set_domain_id_and_mapping(
            ref_list, domain_scope, driver, mapping.EntityType.USER)

    @notifications.updated(_USER)
    @domains_configured
    @exception_translated('user')
    def update_user(self, user_id, user_ref):
        """Update an existing user, revoking tokens when required."""
        old_user_ref = self.get_user(user_id)
        user = user_ref.copy()
        if 'name' in user:
            user['name'] = clean.user_name(user['name'])
        if 'enabled' in user:
            user['enabled'] = clean.user_enabled(user['enabled'])
        if 'domain_id' in user:
            self.assignment_api.get_domain(user['domain_id'])
        if 'id' in user:
            if user_id != user['id']:
                raise exception.ValidationError(_('Cannot change user ID'))
            # Since any ID in the user dict is now irrelevant, remove it so
            # that the driver layer won't be confused by the fact that this
            # is the public ID, not the local ID.
            user.pop('id')
        domain_id, driver, entity_id = (
            self._get_domain_driver_and_entity_id(user_id))
        user = self._clear_domain_id_if_domain_unaware(driver, user)
        ref = driver.update_user(entity_id, user)
        # Disabling a user, or changing their password, invalidates any
        # tokens they currently hold.
        enabled_change = ((user.get('enabled') is False) and
                          user['enabled'] != old_user_ref.get('enabled'))
        if enabled_change or user.get('password') is not None:
            self.emit_invalidate_user_token_persistence(user_id)
        return self._set_domain_id_and_mapping(
            ref, domain_id, driver, mapping.EntityType.USER)

    @notifications.deleted(_USER)
    @domains_configured
    @exception_translated('user')
    def delete_user(self, user_id):
        """Delete a user and all of its dependent artifacts."""
        domain_id, driver, entity_id = (
            self._get_domain_driver_and_entity_id(user_id))
        driver.delete_user(entity_id)
        self.assignment_api.delete_user(user_id)
        self.credential_api.delete_credentials_for_user(user_id)
        self.id_mapping_api.delete_id_mapping(user_id)

    @notifications.created(_GROUP, result_id_arg_attr='id')
    @domains_configured
    @exception_translated('group')
    def create_group(self, group_ref):
        """Create a new group in the driver for its domain."""
        group = group_ref.copy()
        group.setdefault('description', '')
        # For creating a group, the domain is in the object itself; validate
        # that it exists before selecting the driver.
        domain_id = group['domain_id']
        self.assignment_api.get_domain(domain_id)
        driver = self._select_identity_driver(domain_id)
        group = self._clear_domain_id_if_domain_unaware(driver, group)
        # Generate a local ID - in the future this might become a function of
        # the underlying driver so that it could conform to rules set down by
        # that particular driver type.
        group['id'] = uuid.uuid4().hex
        ref = driver.create_group(group['id'], group)
        return self._set_domain_id_and_mapping(
            ref, domain_id, driver, mapping.EntityType.GROUP)

    @domains_configured
    @exception_translated('group')
    def get_group(self, group_id):
        """Return the group identified by the public group_id."""
        domain_id, driver, entity_id = (
            self._get_domain_driver_and_entity_id(group_id))
        ref = driver.get_group(entity_id)
        return self._set_domain_id_and_mapping(
            ref, domain_id, driver, mapping.EntityType.GROUP)

    @notifications.updated(_GROUP)
    @domains_configured
    @exception_translated('group')
    def update_group(self, group_id, group):
        """Update an existing group."""
        if 'domain_id' in group:
            self.assignment_api.get_domain(group['domain_id'])
        domain_id, driver, entity_id = (
            self._get_domain_driver_and_entity_id(group_id))
        group = self._clear_domain_id_if_domain_unaware(driver, group)
        ref = driver.update_group(entity_id, group)
        return self._set_domain_id_and_mapping(
            ref, domain_id, driver, mapping.EntityType.GROUP)

    @notifications.deleted(_GROUP)
    @domains_configured
    @exception_translated('group')
    def delete_group(self, group_id):
        """Delete a group and revoke tokens of its former members."""
        domain_id, driver, entity_id = (
            self._get_domain_driver_and_entity_id(group_id))
        # Capture the membership before the group is removed, since we can
        # no longer list it afterwards.
        user_ids = [u['id'] for u in self.list_users_in_group(group_id)]
        driver.delete_group(entity_id)
        self.id_mapping_api.delete_id_mapping(group_id)
        self.assignment_api.delete_group(group_id)
        for uid in user_ids:
            self.emit_invalidate_user_token_persistence(uid)

    @domains_configured
    @exception_translated('group')
    def add_user_to_group(self, user_id, group_id):
        """Add a user to a group, both of which must share a backend."""
        @exception_translated('user')
        def get_entity_info_for_user(public_id):
            return self._get_domain_driver_and_entity_id(public_id)

        _domain_id, group_driver, group_entity_id = (
            self._get_domain_driver_and_entity_id(group_id))
        # Get the same info for the user_id, taking care to map any
        # exceptions correctly
        _domain_id, user_driver, user_entity_id = (
            get_entity_info_for_user(user_id))
        self._assert_user_and_group_in_same_backend(
            user_entity_id, user_driver, group_entity_id, group_driver)
        group_driver.add_user_to_group(user_entity_id, group_entity_id)

    @domains_configured
    @exception_translated('group')
    def remove_user_from_group(self, user_id, group_id):
        """Remove a user from a group and revoke the user's tokens."""
        @exception_translated('user')
        def get_entity_info_for_user(public_id):
            return self._get_domain_driver_and_entity_id(public_id)

        _domain_id, group_driver, group_entity_id = (
            self._get_domain_driver_and_entity_id(group_id))
        # Get the same info for the user_id, taking care to map any
        # exceptions correctly
        _domain_id, user_driver, user_entity_id = (
            get_entity_info_for_user(user_id))
        self._assert_user_and_group_in_same_backend(
            user_entity_id, user_driver, group_entity_id, group_driver)
        group_driver.remove_user_from_group(user_entity_id, group_entity_id)
        self.emit_invalidate_user_token_persistence(user_id)

    @notifications.internal(notifications.INVALIDATE_USER_TOKEN_PERSISTENCE)
    def emit_invalidate_user_token_persistence(self, user_id):
        """Emit a notification to the callback system to revoke user tokens.

        This method and associated callback listener removes the need for
        making a direct call to another manager to delete and revoke tokens.

        :param user_id: user identifier
        :type user_id: string
        """
        pass

    @manager.response_truncated
    @domains_configured
    @exception_translated('user')
    def list_groups_for_user(self, user_id, hints=None):
        """List the groups the given user is a member of."""
        domain_id, driver, entity_id = (
            self._get_domain_driver_and_entity_id(user_id))
        hints = hints or driver_hints.Hints()
        if not driver.is_domain_aware():
            # We are effectively satisfying any domain_id filter by the above
            # driver selection, so remove any such filter
            self._mark_domain_id_filter_satisfied(hints)
        ref_list = driver.list_groups_for_user(entity_id, hints)
        return self._set_domain_id_and_mapping(
            ref_list, domain_id, driver, mapping.EntityType.GROUP)

    @manager.response_truncated
    @domains_configured
    @exception_translated('group')
    def list_groups(self, domain_scope=None, hints=None):
        """List groups, optionally scoped to a single domain."""
        driver = self._select_identity_driver(domain_scope)
        hints = hints or driver_hints.Hints()
        if driver.is_domain_aware():
            # Force the domain_scope into the hint to ensure that we only get
            # back domains for that scope.
            self._ensure_domain_id_in_hints(hints, domain_scope)
        else:
            # We are effectively satisfying any domain_id filter by the above
            # driver selection, so remove any such filter.
            self._mark_domain_id_filter_satisfied(hints)
        ref_list = driver.list_groups(hints)
        return self._set_domain_id_and_mapping(
            ref_list, domain_scope, driver, mapping.EntityType.GROUP)

    @manager.response_truncated
    @domains_configured
    @exception_translated('group')
    def list_users_in_group(self, group_id, hints=None):
        """List the users that are members of the given group."""
        domain_id, driver, entity_id = (
            self._get_domain_driver_and_entity_id(group_id))
        hints = hints or driver_hints.Hints()
        if not driver.is_domain_aware():
            # We are effectively satisfying any domain_id filter by the above
            # driver selection, so remove any such filter
            self._mark_domain_id_filter_satisfied(hints)
        ref_list = driver.list_users_in_group(entity_id, hints)
        return self._set_domain_id_and_mapping(
            ref_list, domain_id, driver, mapping.EntityType.USER)

    @domains_configured
    @exception_translated('group')
    def check_user_in_group(self, user_id, group_id):
        """Check whether a user is a member of a group."""
        @exception_translated('user')
        def get_entity_info_for_user(public_id):
            return self._get_domain_driver_and_entity_id(public_id)

        _domain_id, group_driver, group_entity_id = (
            self._get_domain_driver_and_entity_id(group_id))
        # Get the same info for the user_id, taking care to map any
        # exceptions correctly
        _domain_id, user_driver, user_entity_id = (
            get_entity_info_for_user(user_id))
        self._assert_user_and_group_in_same_backend(
            user_entity_id, user_driver, group_entity_id, group_driver)
        return group_driver.check_user_in_group(user_entity_id,
                                                group_entity_id)

    @domains_configured
    def change_password(self, context, user_id, original_password,
                        new_password):
        """Change a user's password after verifying the original one."""
        # authenticate() will raise an AssertionError if authentication fails
        self.authenticate(context, user_id, original_password)
        update_dict = {'password': new_password}
        self.update_user(user_id, update_dict)
@six.add_metaclass(abc.ABCMeta)
class Driver(object):
    """Interface description for an Identity driver.

    Concrete backends (e.g. SQL, LDAP) implement this interface; the
    capability methods (is_domain_aware, generates_uuids, is_sql) let the
    Manager decide whether domain insertion and ID mapping are required.
    """
    def _get_list_limit(self):
        # Per-backend list limit falls back to the global CONF.list_limit.
        return CONF.identity.list_limit or CONF.list_limit
    def is_domain_aware(self):
        """Indicates if Driver supports domains."""
        return True
    @property
    def is_sql(self):
        """Indicates if this Driver uses SQL."""
        return False
    @property
    def multiple_domains_supported(self):
        # Multiple domains work when either the driver itself understands
        # them or domain specific driver configuration is enabled.
        return (self.is_domain_aware() or
                CONF.identity.domain_specific_drivers_enabled)
    def generates_uuids(self):
        """Indicates if Driver generates UUIDs as the local entity ID."""
        return True
    @abc.abstractmethod
    def authenticate(self, user_id, password):
        """Authenticate a given user and password.

        :returns: user_ref
        :raises: AssertionError
        """
        raise exception.NotImplemented()  # pragma: no cover
    # user crud
    @abc.abstractmethod
    def create_user(self, user_id, user):
        """Creates a new user.

        :raises: keystone.exception.Conflict
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def list_users(self, hints):
        """List users in the system.

        :param hints: filter hints which the driver should
                      implement if at all possible.
        :returns: a list of user_refs or an empty list.
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def list_users_in_group(self, group_id, hints):
        """List users in a group.

        :param group_id: the group in question
        :param hints: filter hints which the driver should
                      implement if at all possible.
        :returns: a list of user_refs or an empty list.
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def get_user(self, user_id):
        """Get a user by ID.

        :returns: user_ref
        :raises: keystone.exception.UserNotFound
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def update_user(self, user_id, user):
        """Updates an existing user.

        :raises: keystone.exception.UserNotFound,
                 keystone.exception.Conflict
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def add_user_to_group(self, user_id, group_id):
        """Adds a user to a group.

        :raises: keystone.exception.UserNotFound,
                 keystone.exception.GroupNotFound
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def check_user_in_group(self, user_id, group_id):
        """Checks if a user is a member of a group.

        :raises: keystone.exception.UserNotFound,
                 keystone.exception.GroupNotFound
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def remove_user_from_group(self, user_id, group_id):
        """Removes a user from a group.

        :raises: keystone.exception.NotFound
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def delete_user(self, user_id):
        """Deletes an existing user.

        :raises: keystone.exception.UserNotFound
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def get_user_by_name(self, user_name, domain_id):
        """Get a user by name.

        :returns: user_ref
        :raises: keystone.exception.UserNotFound
        """
        raise exception.NotImplemented()  # pragma: no cover
    # group crud
    @abc.abstractmethod
    def create_group(self, group_id, group):
        """Creates a new group.

        :raises: keystone.exception.Conflict
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def list_groups(self, hints):
        """List groups in the system.

        :param hints: filter hints which the driver should
                      implement if at all possible.
        :returns: a list of group_refs or an empty list.
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def list_groups_for_user(self, user_id, hints):
        """List groups a user is in.

        :param user_id: the user in question
        :param hints: filter hints which the driver should
                      implement if at all possible.
        :returns: a list of group_refs or an empty list.
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def get_group(self, group_id):
        """Get a group by ID.

        :returns: group_ref
        :raises: keystone.exception.GroupNotFound
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def update_group(self, group_id, group):
        """Updates an existing group.

        :raises: keystone.exception.GroupNotFound,
                 keystone.exception.Conflict
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def delete_group(self, group_id):
        """Deletes an existing group.

        :raises: keystone.exception.GroupNotFound
        """
        raise exception.NotImplemented()  # pragma: no cover
    # end of identity
# end of identity
@dependency.provider('id_mapping_api')
class MappingManager(manager.Manager):
    """Default pivot point for the ID Mapping backend."""
    def __init__(self):
        # The concrete MappingDriver implementation is named by the
        # [identity_mapping] driver option.
        super(MappingManager, self).__init__(CONF.identity_mapping.driver)
@six.add_metaclass(abc.ABCMeta)
class MappingDriver(object):
    """Interface description for an ID Mapping driver.

    Stores the association between a public facing ID and the (domain,
    local ID, entity type) triple that identifies an entity in a specific
    identity backend.
    """
    @abc.abstractmethod
    def get_public_id(self, local_entity):
        """Returns the public ID for the given local entity.

        :param dict local_entity: Containing the entity domain, local ID and
                                  type ('user' or 'group').
        :returns: public ID, or None if no mapping is found.
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def get_id_mapping(self, public_id):
        """Returns the local mapping.

        :param public_id: The public ID for the mapping required.
        :returns dict: Containing the entity domain, local ID and type. If no
                       mapping is found, it returns None.
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def create_id_mapping(self, local_entity, public_id=None):
        """Create and store a mapping to a public_id.

        :param dict local_entity: Containing the entity domain, local ID and
                                  type ('user' or 'group').
        :param public_id: If specified, this will be the public ID.  If this
                          is not specified, a public ID will be generated.
        :returns: public ID
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def delete_id_mapping(self, public_id):
        """Deletes an entry for the given public_id.

        :param public_id: The public ID for the mapping to be deleted.

        The method is silent if no mapping is found.
        """
        raise exception.NotImplemented()  # pragma: no cover
    @abc.abstractmethod
    def purge_mappings(self, purge_filter):
        """Purge selected identity mappings.

        :param dict purge_filter: Containing the attributes of the filter that
                                  defines which entries to purge. An empty
                                  filter means purge all mappings.
        """
        raise exception.NotImplemented()  # pragma: no cover
| {
"content_hash": "18e8118d76e93d04dd4c3273adc67e92",
"timestamp": "",
"source": "github",
"line_count": 1110,
"max_line_length": 79,
"avg_line_length": 39.51891891891892,
"alnum_prop": 0.614553412665846,
"repo_name": "hughsaunders/keystone",
"id": "ef0b36f6fcd7e7f2c7928c14ff5d7e963f00ccf4",
"size": "44452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keystone/identity/core.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from org.o3project.odenos.core.component.network.flow.ofpflow.ofp_flow_action_set_ip_ttl\
import OFPFlowActionSetIpTtl
import unittest
class OFPFlowActionSetIpTtlTest(unittest.TestCase):
    """Unit tests for OFPFlowActionSetIpTtl."""

    def setUp(self):
        # Fresh action instance for every test case.
        self.target = OFPFlowActionSetIpTtl("OFPFlowActionSetIpTtl", 1234)

    def tearDown(self):
        self.target = None

    def test_constractor(self):
        # The constructor must record both the type and the TTL in _body.
        body = self.target._body
        self.assertEqual("OFPFlowActionSetIpTtl", body[self.target.TYPE])
        self.assertEqual(1234, body[self.target.IP_TTL])

    def test_ip_ttl(self):
        # The ip_ttl property exposes the stored TTL value.
        self.assertEqual(1234, self.target.ip_ttl)

    def test_create_from_packed(self):
        # Round-trip: a packed dict should rebuild an equivalent action.
        packed = {self.target.TYPE: "OFPFlowActionSetIpTtl",
                  self.target.IP_TTL: 4321}
        restored = OFPFlowActionSetIpTtl.create_from_packed(packed)
        self.assertEqual("OFPFlowActionSetIpTtl",
                         restored._body[self.target.TYPE])
        self.assertEqual(4321, restored._body[self.target.IP_TTL])

    def test_packed_object(self):
        # packed_object() must serialize the type and TTL of the instance.
        packed = self.target.packed_object()
        self.assertEqual("OFPFlowActionSetIpTtl", packed[self.target.TYPE])
        self.assertEqual(1234, packed[self.target.IP_TTL])
# Allow running this test module directly, outside of a test runner.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "8f5c70bab228777b60807c045e354d0d",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 89,
"avg_line_length": 32.82222222222222,
"alnum_prop": 0.5964793500338524,
"repo_name": "y-higuchi/odenos",
"id": "b97e3ca058edeec3f655c0409402289872e093de",
"size": "2503",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "src/test/python/org/o3project/odenos/core/component/network/flow/ofpflow/test_ofp_flow_action_set_ip_ttl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2905"
},
{
"name": "Java",
"bytes": "4104386"
},
{
"name": "Python",
"bytes": "1431932"
},
{
"name": "Ruby",
"bytes": "782541"
},
{
"name": "Shell",
"bytes": "215811"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Here the project root (one level up) is added so autodoc can import it.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sdopt-tearing'
copyright = u'2014-2016, Ali Baharev'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Read the Docs injects its own theme automatically, so sphinx_rtd_theme is
# only imported and configured for local builds.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
    import sphinx_rtd_theme
    html_theme = "sphinx_rtd_theme"
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sdopt-tearingdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'sdopt-tearing.tex', u'sdopt-tearing Documentation',
u'Ali Baharev', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sdopt-tearing', u'sdopt-tearing Documentation',
[u'Ali Baharev'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'sdopt-tearing', u'sdopt-tearing Documentation',
u'Ali Baharev', 'sdopt-tearing', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "d10252fcb4cadef82779efd39a9fd021",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 79,
"avg_line_length": 31.857142857142858,
"alnum_prop": 0.7040358744394619,
"repo_name": "baharev/sdopt-tearing",
"id": "dbc336277c1900b025bc2b1f682c9a0a6764cc5d",
"size": "8454",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Fortran",
"bytes": "985"
},
{
"name": "Modelica",
"bytes": "17648"
},
{
"name": "Python",
"bytes": "506293"
}
],
"symlink_target": ""
} |
import argparse
import os
import platform
import subprocess
import sys
from lib.config import get_target_arch, PLATFORM
from lib.util import get_host_arch, import_vs_env
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
def main():
os.chdir(SOURCE_ROOT)
if PLATFORM != 'win32' and platform.architecture()[0] != '64bit':
print 'Electron is required to be built on a 64bit machine'
return 1
update_external_binaries()
return update_gyp()
def parse_args():
    """Parse command-line options for this script.

    Returns an argparse.Namespace with a single attribute:
      defines -- space-separated gyp definitions (default: '').
    """
    parser = argparse.ArgumentParser(description='Update build configurations')
    # Fixed user-facing typo in the help text ('definetions' -> 'definitions').
    parser.add_argument('--defines', default='',
                        help='The definitions passed to gyp')
    return parser.parse_args()
def update_external_binaries():
    """Run script/update-external-binaries.py with the current interpreter."""
    updater = os.path.join('script', 'update-external-binaries.py')
    subprocess.check_call([sys.executable, updater])
def update_gyp():
    """Generate build files twice, once per configuration.

    gyp cannot attach per-configuration link_settings, so "Debug" and
    "Release" are produced by two separate gyp runs distinguished by the
    libchromiumcontent_component variable that is defined before gyp runs.

    Returns the first non-zero gyp exit status, or 0 on success.
    """
    arch = get_target_arch()
    return run_gyp(arch, 0) or run_gyp(arch, 1)
def run_gyp(target_arch, component):
    """Invoke gyp to generate ninja build files.

    target_arch -- e.g. 'x64' or 'ia32'.
    component   -- value for libchromiumcontent_component (0 or 1), which
                   selects the Debug- vs Release-style configuration.
    Returns gyp's exit status (0 on success).
    """
    # Update the VS build env.
    import_vs_env(target_arch)
    env = os.environ.copy()
    if PLATFORM == 'linux' and target_arch != get_host_arch():
        env['GYP_CROSSCOMPILE'] = '1'
    elif PLATFORM == 'win32':
        env['GYP_MSVS_VERSION'] = '2015'
    python = sys.executable
    if sys.platform == 'cygwin':
        # Force using win32 python on cygwin.
        python = os.path.join('vendor', 'python_26', 'python.exe')
    gyp = os.path.join('vendor', 'brightray', 'vendor', 'gyp', 'gyp_main.py')
    gyp_pylib = os.path.join(os.path.dirname(gyp), 'pylib')
    # Avoid using the old gyp lib in system.
    env['PYTHONPATH'] = os.path.pathsep.join([gyp_pylib,
                                              env.get('PYTHONPATH', '')])
    # Whether to build for Mac App Store.
    # dict.has_key() is deprecated (and removed in Python 3); the `in`
    # operator is equivalent and works on both Python 2 and 3.
    mas_build = 1 if 'MAS_BUILD' in os.environ else 0
    defines = [
        '-Dlibchromiumcontent_component={0}'.format(component),
        '-Dtarget_arch={0}'.format(target_arch),
        '-Dhost_arch={0}'.format(get_host_arch()),
        '-Dlibrary=static_library',
        '-Dmas_build={0}'.format(mas_build),
    ]
    # Add the defines passed from command line.
    args = parse_args()
    for define in [d.strip() for d in args.defines.split(' ')]:
        if define:
            defines += ['-D' + define]
    return subprocess.call([python, gyp, '-f', 'ninja', '--depth', '.',
                            'electron.gyp', '-Icommon.gypi'] + defines, env=env)
# Script entry point: propagate main()'s status code to the shell.
if __name__ == '__main__':
    sys.exit(main())
| {
"content_hash": "84432002a65f539b9f4ae06edfb7027e",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 78,
"avg_line_length": 31.92391304347826,
"alnum_prop": 0.6561116785835887,
"repo_name": "kcrt/electron",
"id": "a67a49e7ab518f1b2989b05e1acb7dae4c902b96",
"size": "2960",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "script/update.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "10344"
},
{
"name": "C++",
"bytes": "1805376"
},
{
"name": "HTML",
"bytes": "15144"
},
{
"name": "JavaScript",
"bytes": "377603"
},
{
"name": "Objective-C",
"bytes": "15969"
},
{
"name": "Objective-C++",
"bytes": "153171"
},
{
"name": "Python",
"bytes": "88773"
},
{
"name": "Shell",
"bytes": "2593"
}
],
"symlink_target": ""
} |
import psutil
# project
from checks import AgentCheck
class SystemCore(AgentCheck):
    """Agent check reporting per-core CPU time counters via psutil."""

    def check(self, instance):
        """Emit the core count as a gauge and every per-core CPU time
        component (user, system, idle, ...) as a rate scaled by 100."""
        base_tags = instance.get('tags', [])
        per_core = psutil.cpu_times(percpu=True)
        self.gauge("system.core.count", len(per_core), tags=base_tags)
        for index, times in enumerate(per_core):
            core_tags = base_tags + ["core:{0}".format(index)]
            # cpu_times returns a namedtuple; iterate its fields by name.
            for name, elapsed in times._asdict().iteritems():
                self.rate(
                    "system.core.{0}".format(name),
                    100.0 * elapsed,
                    tags=core_tags
                )
| {
"content_hash": "8e10ab3573516b7d7eccbf9cdce26863",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 75,
"avg_line_length": 29.285714285714285,
"alnum_prop": 0.5365853658536586,
"repo_name": "serverdensity/sd-agent-core-plugins",
"id": "2b1b1de1c6e2fdf465f7714940dfdb22e549cf66",
"size": "727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "system_core/check.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "10855"
},
{
"name": "Erlang",
"bytes": "15429"
},
{
"name": "Go",
"bytes": "1471"
},
{
"name": "PLSQL",
"bytes": "27516"
},
{
"name": "Perl",
"bytes": "5845"
},
{
"name": "Python",
"bytes": "1734120"
},
{
"name": "Roff",
"bytes": "488"
},
{
"name": "Ruby",
"bytes": "167975"
},
{
"name": "Shell",
"bytes": "28906"
}
],
"symlink_target": ""
} |
"""momentsinfo_convroll4_doublescale_fs5"""
import numpy as np
import theano
import theano.tensor as T
import lasagne as nn
import data
import load
import nn_plankton
import dihedral
import tmp_dnn
import tta
# Training hyperparameters for this blend configuration.
batch_size = 128
chunk_size = 32768
num_chunks_train = 240
momentum = 0.9
# Learning rate schedule; keys are chunk indices at which the rate changes
# (presumably — confirm against the training loop that consumes it).
learning_rate_schedule = {
    0: 0.001,
    100: 0.0001,
    200: 0.00001,
}
validate_every = 40
save_every = 40
# Precomputed prediction files from the doublescale_fs5_latemerge_2233 model,
# used as the inputs that this configuration blends.
train_pred_file = "/mnt/storage/users/avdnoord/git/kaggle-plankton/predictions/train--doublescale_fs5_latemerge_2233--doublescale_fs5_latemerge_2233-paard-20150128-121022--avg-probs.npy"
valid_pred_file = "/mnt/storage/users/sedielem/git/kaggle-plankton/predictions/valid--doublescale_fs5_latemerge_2233--doublescale_fs5_latemerge_2233-paard-20150128-121022--avg-probs.npy"
test_pred_file = "/mnt/storage/users/sedielem/git/kaggle-plankton/predictions/test--doublescale_fs5_latemerge_2233--doublescale_fs5_latemerge_2233-paard-20150128-121022--avg-probs.npy"
data_loader = load.PredictionsWithMomentsDataLoader(train_pred_file=train_pred_file, valid_pred_file=valid_pred_file, test_pred_file=test_pred_file,
                                                    num_chunks_train=num_chunks_train, chunk_size=chunk_size)
# Generator factories used by the training/evaluation driver.
create_train_gen = lambda: data_loader.create_random_gen()
create_eval_train_gen = lambda: data_loader.create_fixed_gen("train")
create_eval_valid_gen = lambda: data_loader.create_fixed_gen("valid")
create_eval_test_gen = lambda: data_loader.create_fixed_gen("test")
def build_model():
    """Build the blending network.

    Two inputs: per-class predictions and a 7-dim moments/size feature
    vector.  The feature vector passes through a small MLP whose output is
    added to the log of the predictions, and a softmax re-normalises the
    result.

    Returns ([l_predictions_input, l_features_input], l_output).
    """
    l_preds = nn.layers.InputLayer((batch_size, data.num_classes))
    l_feats = nn.layers.InputLayer((batch_size, 7))
    # Two ReLU hidden layers on the size/moment features.
    l_hid1 = nn.layers.DenseLayer(l_feats, num_units=80, W=nn_plankton.Orthogonal('relu'), b=nn.init.Constant(0.1))
    l_hid2 = nn.layers.DenseLayer(l_hid1, num_units=80, W=nn_plankton.Orthogonal('relu'), b=nn.init.Constant(0.1))
    # Linear output: one additive correction per class.
    l_corr = nn.layers.DenseLayer(l_hid2, num_units=data.num_classes, W=nn_plankton.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=None)
    # Work in log-probability space so the MLP output acts as an additive bias.
    l_log = nn_plankton.NonlinLayer(l_preds, T.log)
    l_sum = nn.layers.ElemwiseSumLayer([l_log, l_corr])
    lout = nn_plankton.NonlinLayer(l_sum, nonlinearity=T.nnet.softmax)
    return [l_preds, l_feats], lout
def build_objective(l_ins, l_out):
    # Log-loss with L2 weight decay on all non-bias parameters.
    # NOTE(review): l_ins is unused here; kept for the driver's call signature.
    print "regu"
    lambda_reg = 0.002
    # lambda_reg = 0.005
    params = nn.layers.get_all_non_bias_params(l_out)
    # Symbolic sum of squared weights, shared by every loss() call.
    reg_term = sum(T.sum(p**2) for p in params)
    def loss(y, t):
        return nn_plankton.log_loss(y, t) + lambda_reg * reg_term
    return nn.objectives.Objective(l_out, loss_function=loss)
# L2 0.0005 0.5646362
# L2 0.001 0.560494
# L2 0.002 0.559762
# L2 0.01 0.560949
# L2 0.05 0.563861
# 0.559762
# 1 layer 64
| {
"content_hash": "f69601bd2ff7f8de15eabf9f330c3dc4",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 186,
"avg_line_length": 32.38823529411765,
"alnum_prop": 0.7083181983290955,
"repo_name": "freakynit/kaggle-ndsb",
"id": "3974cc00a61141ff6c309cd69bde76a1326884f2",
"size": "2754",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "configurations/featmomentsinfo_doublescale_fs5_latemerge_2233.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "581526"
}
],
"symlink_target": ""
} |
import threading
import logging
class MyThreadWithArgs(threading.Thread):
    """Thread subclass that keeps args/kwargs itself so run() can use them."""

    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs=None, *, daemon=None):
        # Let Thread handle everything except args/kwargs, which we retain.
        super().__init__(group=group, target=target, name=name, daemon=daemon)
        self.kwargs = kwargs
        self.args = args

    def run(self):
        """Log the stored positional and keyword arguments."""
        logging.debug('running with %s and %s', self.args, self.kwargs)
# Configure DEBUG-level logging so the thread name shows in each message.
logging.basicConfig(
    level=logging.DEBUG,
    format='(%(threadName)-10s) %(message)s',
)
# Start five threads; each logs its own args/kwargs from run().
for i in range(5):
    t = MyThreadWithArgs(args=(i,), kwargs={'a': 'A', 'b': 'B'})
    t.start()
| {
"content_hash": "46d9409a42338cce72640e6b869ba63d",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 64,
"avg_line_length": 23.607142857142858,
"alnum_prop": 0.5642965204236006,
"repo_name": "jasonwee/asus-rt-n14uhp-mrtg",
"id": "ce9076cb4ef022c31975903b0017e19021f088dc",
"size": "661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lesson_concurrency_with_processes_threads_and_coroutines/threading_subclass_args.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "45876"
},
{
"name": "HTML",
"bytes": "107072"
},
{
"name": "JavaScript",
"bytes": "161335"
},
{
"name": "Python",
"bytes": "6923750"
},
{
"name": "Shell",
"bytes": "7616"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import json
import requests
import six
from datetime import datetime
from six.moves.urllib.parse import parse_qs
from xml.etree.ElementTree import tostring, SubElement, Element
from .exceptions import (
XeroBadRequest, XeroExceptionUnknown, XeroForbidden, XeroInternalError,
XeroNotAvailable, XeroNotFound, XeroNotImplemented, XeroRateLimitExceeded,
XeroUnauthorized
)
from .utils import singular, isplural, json_load_object_hook
class BaseManager(object):
    """Shared plumbing for Xero API endpoint managers.

    Subclasses/instances are expected to provide attributes not set here:
    ``name``, ``singular``, ``base_url``, ``extra_params``, ``credentials``
    and ``user_agent`` (NOTE(review): assigned elsewhere — confirm against
    the concrete manager classes).  The underscore-prefixed methods only
    build request descriptions; the methods named in DECORATED_METHODS are
    wrapped with the HTTP dispatch logic in _get_data.
    """
    # Methods whose '_'-prefixed implementations get wrapped by _get_data.
    DECORATED_METHODS = (
        'get',
        'save',
        'filter',
        'all',
        'put',
        'delete',
        'get_attachments',
        'get_attachment_data',
        'put_attachment_data',
    )
    # Field names treated as datetimes when filtering/serializing.
    DATETIME_FIELDS = (
        'UpdatedDateUTC',
        'Updated',
        'FullyPaidOnDate',
        'DateTimeUTC',
        'CreatedDateUTC'
    )
    # Field names treated as dates when filtering/serializing.
    DATE_FIELDS = (
        'DueDate',
        'Date',
        'PaymentDate',
        'StartDate',
        'EndDate',
        'PeriodLockDate',
        'DateOfBirth',
        'OpeningBalanceDate',
        'PaymentDueDate',
        'ReportingDate',
        'DeliveryDate',
        'ExpectedArrivalDate',
    )
    # Field names serialized as XML 'true'/'false'.
    BOOLEAN_FIELDS = (
        'IsSupplier',
        'IsCustomer',
        'IsDemoCompany',
        'PaysTax',
        'IsAuthorisedToApproveTimesheets',
        'IsAuthorisedToApproveLeave',
        'HasHELPDebt',
        'AustralianResidentForTaxPurposes',
        'TaxFreeThresholdClaimed',
        'HasSFSSDebt',
        'EligibleToReceiveLeaveLoading',
        'IsExemptFromTax',
        'IsExemptFromSuper',
        'SentToContact',
        'IsSubscriber',
        'HasAttachments',
        'ShowOnCashBasisReports',
        'IncludeInEmails',
        'SentToContact',
        'CanApplyToRevenue',
        'IsReconciled',
        'EnablePaymentsToAccount',
        'ShowInExpenseClaims'
    )
    # Field names holding decimal values.
    DECIMAL_FIELDS = (
        'Hours',
        'NumberOfUnit',
    )
    # Field names holding integer values.
    INTEGER_FIELDS = (
        'FinancialYearEndDay',
        'FinancialYearEndMonth',
    )
    # Read-only fields that Xero rejects if sent back on save/put.
    NO_SEND_FIELDS = (
        'UpdatedDateUTC',
        'HasValidationErrors',
        'IsDiscounted',
        'DateString',
        'HasErrors',
        'DueDateString',
    )
    # Django-style filter suffixes mapped to Xero query operators.
    OPERATOR_MAPPINGS = {
        'gt': '>',
        'lt': '<',
        'lte': '<=',
        'gte': '>=',
        'ne': '!='
    }
    def __init__(self):
        pass
    def dict_to_xml(self, root_elm, data):
        """Recursively render ``data`` (a dict) as children of ``root_elm``.

        Lists under a plural key become repeated singular child elements.
        Returns ``root_elm``.
        """
        for key in data.keys():
            # Xero will complain if we send back these fields.
            if key in self.NO_SEND_FIELDS:
                continue
            sub_data = data[key]
            elm = SubElement(root_elm, key)
            # Key references a dict. Unroll the dict
            # as its own XML node with subnodes
            if isinstance(sub_data, dict):
                self.dict_to_xml(elm, sub_data)
            # Key references a list/tuple
            elif isinstance(sub_data, list) or isinstance(sub_data, tuple):
                # key name is a plural. This means each item
                # in the list needs to be wrapped in an XML
                # node that is a singular version of the list name.
                if isplural(key):
                    for d in sub_data:
                        self.dict_to_xml(SubElement(elm, singular(key)), d)
                # key name isn't a plural. Just insert the content
                # as an XML node with subnodes
                else:
                    for d in sub_data:
                        self.dict_to_xml(elm, d)
            # Normal element - just insert the data.
            else:
                if key in self.BOOLEAN_FIELDS:
                    val = 'true' if sub_data else 'false'
                elif key in self.DATE_FIELDS:
                    val = sub_data.strftime('%Y-%m-%dT%H:%M:%S')
                else:
                    val = six.text_type(sub_data)
                elm.text = val
        return root_elm
    def _prepare_data_for_save(self, data):
        """Serialize ``data`` (a dict, or a list/tuple of dicts) to XML text."""
        if isinstance(data, list) or isinstance(data, tuple):
            root_elm = Element(self.name)
            for d in data:
                sub_elm = SubElement(root_elm, self.singular)
                self.dict_to_xml(sub_elm, d)
        else:
            root_elm = self.dict_to_xml(Element(self.singular), data)
        # In python3 this seems to return a bytestring
        return six.u(tostring(root_elm))
    def _parse_api_response(self, response, resource_name):
        """Decode a JSON API response and return the ``resource_name`` entry,
        falling back to the whole payload when that key is absent.

        NOTE(review): the assert is stripped under ``python -O``; confirm an
        explicit exception wouldn't be more appropriate for a non-OK status.
        """
        data = json.loads(response.text, object_hook=json_load_object_hook)
        assert data['Status'] == 'OK', "Expected the API to say OK but received %s" % data['Status']
        try:
            return data[resource_name]
        except KeyError:
            return data
    def _get_data(self, func):
        """ This is the decorator for our DECORATED_METHODS.
        Each of the decorated methods must return:
            uri, params, method, body, headers, singleobject

        The returned wrapper performs the HTTP request and translates error
        status codes into the Xero exception hierarchy.
        """
        def wrapper(*args, **kwargs):
            # 'timeout' is consumed here and passed to requests, never to func.
            timeout = kwargs.pop('timeout', None)
            uri, params, method, body, headers, singleobject = func(*args, **kwargs)
            if headers is None:
                headers = {}
            # Use the JSON API by default, but remember we might request a PDF (application/pdf)
            # so don't force the Accept header.
            if 'Accept' not in headers:
                headers['Accept'] = 'application/json'
            # Set a user-agent so Xero knows the traffic is coming from pyxero
            # or individual user/partner
            headers['User-Agent'] = self.user_agent
            response = getattr(requests, method)(
                uri, data=body, headers=headers, auth=self.credentials.oauth,
                params=params, timeout=timeout)
            if response.status_code == 200:
                # If we haven't got XML or JSON, assume we're being returned a binary file
                if not response.headers['content-type'].startswith('application/json'):
                    return response.content
                return self._parse_api_response(response, self.name)
            elif response.status_code == 204:
                # No content (e.g. a successful delete).
                return response.content
            elif response.status_code == 400:
                raise XeroBadRequest(response)
            elif response.status_code == 401:
                raise XeroUnauthorized(response)
            elif response.status_code == 403:
                raise XeroForbidden(response)
            elif response.status_code == 404:
                raise XeroNotFound(response)
            elif response.status_code == 500:
                raise XeroInternalError(response)
            elif response.status_code == 501:
                raise XeroNotImplemented(response)
            elif response.status_code == 503:
                # Two 503 responses are possible. Rate limit errors
                # return encoded content; offline errors don't.
                # If you parse the response text and there's nothing
                # encoded, it must be a not-available error.
                payload = parse_qs(response.text)
                if payload:
                    raise XeroRateLimitExceeded(response, payload)
                else:
                    raise XeroNotAvailable(response)
            else:
                raise XeroExceptionUnknown(response)
        return wrapper
    def _get(self, id, headers=None, params=None):
        """Request description for fetching a single object by id."""
        uri = '/'.join([self.base_url, self.name, id])
        uri_params = self.extra_params.copy()
        uri_params.update(params if params else {})
        return uri, uri_params, 'get', None, headers, True
    def _get_attachments(self, id):
        """Retrieve a list of attachments associated with this Xero object."""
        uri = '/'.join([self.base_url, self.name, id, 'Attachments']) + '/'
        return uri, {}, 'get', None, None, False
    def _get_attachment_data(self, id, filename):
        """
        Retrieve the contents of a specific attachment (identified by filename).
        """
        uri = '/'.join([self.base_url, self.name, id, 'Attachments', filename])
        return uri, {}, 'get', None, None, False
    def get_attachment(self, id, filename, file):
        """
        Retrieve the contents of a specific attachment (identified by filename).
        Writes data to file object, returns length of data written.
        """
        data = self.get_attachment_data(id, filename)
        file.write(data)
        return len(data)
    def save_or_put(self, data, method='post', headers=None, summarize_errors=True):
        """Request description for creating/updating objects via an XML body."""
        uri = '/'.join([self.base_url, self.name])
        body = {'xml': self._prepare_data_for_save(data)}
        params = self.extra_params.copy()
        if not summarize_errors:
            params['summarizeErrors'] = 'false'
        return uri, params, method, body, headers, False
    def _save(self, data):
        """Update existing objects (HTTP POST)."""
        return self.save_or_put(data, method='post')
    def _put(self, data, summarize_errors=True):
        """Create new objects (HTTP PUT)."""
        return self.save_or_put(data, method='put', summarize_errors=summarize_errors)
    def _delete(self, id):
        """Request description for deleting an object by id."""
        uri = '/'.join([self.base_url, self.name, id])
        return uri, {}, 'delete', None, None, False
    def _put_attachment_data(self, id, filename, data, content_type, include_online=False):
        """Upload an attachment to the Xero object."""
        uri = '/'.join([self.base_url, self.name, id, 'Attachments', filename])
        params = {'IncludeOnline': 'true'} if include_online else {}
        headers = {'Content-Type': content_type, 'Content-Length': str(len(data))}
        return uri, params, 'put', data, headers, False
    def put_attachment(self, id, filename, file, content_type, include_online=False):
        """Upload an attachment to the Xero object (from file object)."""
        self.put_attachment_data(id, filename, file.read(), content_type,
                                 include_online=include_online)
    def prepare_filtering_date(self, val):
        """Build an If-Modified-Since header value from a datetime or string."""
        if isinstance(val, datetime):
            val = val.strftime('%a, %d %b %Y %H:%M:%S GMT')
        else:
            val = '"%s"' % val
        return {'If-Modified-Since': val}
    def _filter(self, **kwargs):
        """Request description for a filtered query.

        Keyword arguments become Xero 'where' predicates (supporting
        Django-style suffixes such as __contains, __gt, __isnull); 'since'
        maps to the If-Modified-Since header, 'raw' is passed through
        verbatim, and the names in KNOWN_PARAMETERS go to the query string.
        """
        params = self.extra_params.copy()
        headers = None
        uri = '/'.join([self.base_url, self.name])
        if kwargs:
            if 'since' in kwargs:
                val = kwargs['since']
                headers = self.prepare_filtering_date(val)
                del kwargs['since']
            def get_filter_params(key, value):
                # Render one comparison value in Xero query syntax.
                last_key = key.split('_')[-1]
                if last_key.upper().endswith('ID'):
                    return 'Guid("%s")' % six.text_type(value)
                if key in self.BOOLEAN_FIELDS:
                    return 'true' if value else 'false'
                elif key in self.DATE_FIELDS:
                    return 'DateTime(%s,%s,%s)' % (value.year, value.month, value.day)
                elif key in self.DATETIME_FIELDS:
                    return value.isoformat()
                else:
                    return '"%s"' % six.text_type(value)
            def generate_param(key, value):
                # Translate one keyword argument into a 'where' clause part.
                parts = key.split("__")
                field = key.replace('_', '.')
                fmt = '%s==%s'
                if len(parts) == 2:
                    # support filters:
                    # Name__Contains=John becomes Name.Contains("John")
                    if parts[1] in ["contains", "startswith", "endswith"]:
                        field = parts[0]
                        fmt = ''.join(['%s.', parts[1], '(%s)'])
                    elif parts[1] in self.OPERATOR_MAPPINGS:
                        field = parts[0]
                        key = field
                        fmt = '%s' + self.OPERATOR_MAPPINGS[parts[1]] + '%s'
                    elif parts[1] in ["isnull"]:
                        sign = '=' if value else '!'
                        return '%s%s=null' % (parts[0], sign)
                    field = field.replace('_', '.')
                return fmt % (
                    field,
                    get_filter_params(key, value)
                )
            # Move any known parameter names to the query string
            KNOWN_PARAMETERS = ['order', 'offset', 'page', 'includeArchived']
            for param in KNOWN_PARAMETERS:
                if param in kwargs:
                    params[param] = kwargs.pop(param)
            filter_params = []
            if 'raw' in kwargs:
                raw = kwargs.pop('raw')
                filter_params.append(raw)
            # Treat any remaining arguments as filter predicates
            # Xero will break if you search without a check for null in the first position:
            # http://developer.xero.com/documentation/getting-started/http-requests-and-responses/#title3
            sortedkwargs = sorted(six.iteritems(kwargs),
                                  key=lambda item: -1 if 'isnull' in item[0] else 0)
            for key, value in sortedkwargs:
                filter_params.append(generate_param(key, value))
            if filter_params:
                params['where'] = '&&'.join(filter_params)
        return uri, params, 'get', None, headers, False
    def _all(self):
        """Request description for listing every object of this type."""
        uri = '/'.join([self.base_url, self.name])
        return uri, {}, 'get', None, None, False
| {
"content_hash": "d005dc5c805474ec5660e033c2ecfe72",
"timestamp": "",
"source": "github",
"line_count": 373,
"max_line_length": 105,
"avg_line_length": 36.06970509383378,
"alnum_prop": 0.5398394529507953,
"repo_name": "wegotpop/pyxero",
"id": "57234f4bc7d44174316ff8a6d006e8b78c3a64a6",
"size": "13454",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xero/basemanager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "89041"
}
],
"symlink_target": ""
} |
"""Leetcode 958. Check Completeness of a Binary Tree
Medium
URL: https://leetcode.com/problems/check-completeness-of-a-binary-tree/
Given a binary tree, determine if it is a complete binary tree.
Definition of a complete binary tree from Wikipedia:
In a complete binary tree every level, except possibly the last, is completely
filled, and all nodes in the last level are as far left as possible.
It can have between 1 and 2h nodes inclusive at the last level h.
Example 1:
Input: [1,2,3,4,5,6]
Output: true
Explanation: Every level before the last is full (ie. levels with node-values {1} and {2, 3}), and all nodes in the last level ({4, 5, 6}) are as far left as possible.
Example 2:
Input: [1,2,3,4,5,null,7]
Output: false
Explanation: The node with value 7 isn't as far left as possible.
Note:
The tree will have between 1 and 100 nodes.
"""
# Definition for a binary tree node.
class TreeNode(object):
    """Binary tree node: a value plus optional left/right children."""

    def __init__(self, val):
        self.val = val
        self.right = None
        self.left = None
class SolutionLevelorderOneByOneIter(object):
    def isCompleteTree(self, root):
        """Check completeness via a BFS that also enqueues missing children.

        :type root: TreeNode
        :rtype: bool

        A tree is complete iff, in level order, no real node appears after
        the first missing (None) child.

        Time complexity: O(n).
        Space complexity: O(logn) for complete tree, O(n) for singly linked list.
        """
        from collections import deque
        nodes = deque([root])
        # Walk in level order until the first gap (None) reaches the front.
        while nodes and nodes[0] is not None:
            node = nodes.popleft()
            nodes.append(node.left)
            nodes.append(node.right)
        # Discard the run of gaps; any real node left means incompleteness.
        while nodes and nodes[0] is None:
            nodes.popleft()
        return not nodes
def main():
    """Ad-hoc driver exercising both examples from the problem statement."""
    # Input: [1,2,3,4,5,6]
    # Output: true
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    root.left.left = TreeNode(4)
    root.left.right = TreeNode(5)
    root.right.left = TreeNode(6)
    print SolutionLevelorderOneByOneIter().isCompleteTree(root)
    # Input: [1,2,3,4,5,null,7]
    # Output: false
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    root.left.left = TreeNode(4)
    root.left.right = TreeNode(5)
    root.right.right = TreeNode(7)
    print SolutionLevelorderOneByOneIter().isCompleteTree(root)


if __name__ == '__main__':
    main()
| {
"content_hash": "517473b841e5b416c9f4fd0d0d5ab0c8",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 167,
"avg_line_length": 28.681818181818183,
"alnum_prop": 0.6446117274167987,
"repo_name": "bowen0701/algorithms_data_structures",
"id": "95b0395e5072c0805496d8f0dcd1ea3aea89a229",
"size": "2524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lc0958_check_completeness_of_a_binary_tree.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "108750"
}
],
"symlink_target": ""
} |
"""Generate an Abstract Syntax Tree (AST) for C++."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
# TODO:
# * Tokens should never be exported, need to convert to Nodes
# (return types, parameters, etc.)
# * Handle static class data for templatized classes
# * Handle casts (both C++ and C-style)
# * Handle conditions and loops (if/else, switch, for, while/do)
#
# TODO much, much later:
# * Handle #define
# * exceptions
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
import sys
import traceback
from cpp import keywords
from cpp import tokenize
from cpp import utils
if not hasattr(builtins, 'reversed'):
    # Support Python 2.3 and earlier.
    def reversed(seq):
        # Walk the indices from the last element down to 0.
        for i in range(len(seq)-1, -1, -1):
            yield seq[i]


if not hasattr(builtins, 'next'):
    # Support Python 2.5 and earlier.
    def next(obj):
        # Delegate to the old-style iterator protocol method.
        return obj.next()
VISIBILITY_PUBLIC, VISIBILITY_PROTECTED, VISIBILITY_PRIVATE = range(3)
FUNCTION_NONE = 0x00
FUNCTION_CONST = 0x01
FUNCTION_VIRTUAL = 0x02
FUNCTION_PURE_VIRTUAL = 0x04
FUNCTION_CTOR = 0x08
FUNCTION_DTOR = 0x10
FUNCTION_ATTRIBUTE = 0x20
FUNCTION_UNKNOWN_ANNOTATION = 0x40
FUNCTION_THROW = 0x80
FUNCTION_OVERRIDE = 0x100
"""
These are currently unused. Should really handle these properly at some point.
TYPE_MODIFIER_INLINE = 0x010000
TYPE_MODIFIER_EXTERN = 0x020000
TYPE_MODIFIER_STATIC = 0x040000
TYPE_MODIFIER_CONST = 0x080000
TYPE_MODIFIER_REGISTER = 0x100000
TYPE_MODIFIER_VOLATILE = 0x200000
TYPE_MODIFIER_MUTABLE = 0x400000
TYPE_MODIFIER_MAP = {
'inline': TYPE_MODIFIER_INLINE,
'extern': TYPE_MODIFIER_EXTERN,
'static': TYPE_MODIFIER_STATIC,
'const': TYPE_MODIFIER_CONST,
'register': TYPE_MODIFIER_REGISTER,
'volatile': TYPE_MODIFIER_VOLATILE,
'mutable': TYPE_MODIFIER_MUTABLE,
}
"""
_INTERNAL_TOKEN = 'internal'
_NAMESPACE_POP = 'ns-pop'
# TODO(nnorwitz): use this as a singleton for templated_types, etc
# where we don't want to create a new empty dict each time. It is also const.
class _NullDict(object):
__contains__ = lambda self: False
keys = values = items = iterkeys = itervalues = iteritems = lambda self: ()
# TODO(nnorwitz): move AST nodes into a separate module.
class Node(object):
    """Base AST node; records the token span [start, end)."""

    def __init__(self, start, end):
        self.start = start
        self.end = end

    def IsDeclaration(self):
        """Returns bool if this node is a declaration."""
        return False

    def IsDefinition(self):
        """Returns bool if this node is a definition."""
        return False

    def IsExportable(self):
        """Returns bool if this node exportable from a header file."""
        return False

    def Requires(self, node):
        """Does this AST node require the definition of the node passed in?"""
        return False

    def XXX__str__(self):
        # Deliberately disabled __str__ (renamed); kept around for debugging.
        return self._StringHelper(self.__class__.__name__, '')

    def _StringHelper(self, name, suffix):
        # Verbose form (including the token span) only when utils.DEBUG is on.
        if utils.DEBUG:
            return '%s(%d, %d, %s)' % (name, self.start, self.end, suffix)
        return '%s(%s)' % (name, suffix)

    def __repr__(self):
        return str(self)
class Define(Node):
    """A '#define NAME definition' directive."""

    def __init__(self, start, end, name, definition):
        Node.__init__(self, start, end)
        self.name = name
        self.definition = definition

    def __str__(self):
        return self._StringHelper(self.__class__.__name__,
                                  '%s %s' % (self.name, self.definition))
class Include(Node):
    """An '#include' directive; ``system`` selects <...> over "..." quoting."""

    def __init__(self, start, end, filename, system):
        Node.__init__(self, start, end)
        self.filename = filename
        self.system = system

    def __str__(self):
        fmt = '<%s>' if self.system else '"%s"'
        return self._StringHelper(self.__class__.__name__, fmt % self.filename)
class Goto(Node):
    """A goto statement; `label` is the jump target."""

    def __init__(self, start, end, label):
        Node.__init__(self, start, end)
        self.label = label

    def __str__(self):
        return self._StringHelper(self.__class__.__name__, '%s' % (self.label,))
class Expr(Node):
    """A generic expression statement holding its raw token sequence."""

    def __init__(self, start, end, expr):
        Node.__init__(self, start, end)
        self.expr = expr

    def Requires(self, node):
        # TODO(nnorwitz): impl.
        return False

    def __str__(self):
        return self._StringHelper(self.__class__.__name__, '%s' % (self.expr,))
class Return(Expr):
    """A return statement; the returned expression tokens are in `expr`."""
    pass

class Delete(Expr):
    """A delete expression; the operand tokens are in `expr`."""
    pass
class Friend(Expr):
    """A friend declaration, recorded with the namespace it appeared in."""

    def __init__(self, start, end, expr, namespace):
        Expr.__init__(self, start, end, expr)
        # Copy so later mutations of the shared namespace stack don't
        # retroactively change this node.
        self.namespace = list(namespace)
class Using(Node):
    """A using declaration/directive; `names` holds its tokens."""

    def __init__(self, start, end, names):
        Node.__init__(self, start, end)
        self.names = names

    def __str__(self):
        return self._StringHelper(self.__class__.__name__, '%s' % (self.names,))
class Parameter(Node):
    """A single function parameter: name, Type, and optional default tokens."""

    def __init__(self, start, end, name, parameter_type, default):
        Node.__init__(self, start, end)
        self.name = name
        self.type = parameter_type
        self.default = default

    def Requires(self, node):
        # TODO(nnorwitz): handle namespaces, etc.
        return self.type.name == node.name

    def __str__(self):
        suffix = '%s %s' % (self.type, self.name)
        if self.default:
            default_text = ''.join(d.name for d in self.default)
            suffix += ' = ' + default_text
        return self._StringHelper(self.__class__.__name__, suffix)
class _GenericDeclaration(Node):
    """Common base for declarations that carry a name and a namespace path."""

    def __init__(self, start, end, name, namespace):
        Node.__init__(self, start, end)
        self.name = name
        # Copy: the caller's namespace stack keeps mutating during parsing.
        self.namespace = list(namespace)

    def FullName(self):
        # Anonymous namespaces (falsy last component) contribute no prefix.
        if self.namespace and self.namespace[-1]:
            return '::'.join(self.namespace) + '::' + self.name
        return self.name

    def _TypeStringHelper(self, suffix):
        if self.namespace:
            names = [n or '<anonymous>' for n in self.namespace]
            suffix += ' in ' + '::'.join(names)
        return self._StringHelper(self.__class__.__name__, suffix)
# TODO(nnorwitz): merge with Parameter in some way?
class VariableDeclaration(_GenericDeclaration):
    """Declaration of a single variable: type, name, optional initializer."""

    def __init__(self, start, end, name, var_type, initial_value, namespace):
        _GenericDeclaration.__init__(self, start, end, name, namespace)
        self.type = var_type
        self.initial_value = initial_value

    def Requires(self, node):
        # TODO(nnorwitz): handle namespaces, etc.
        return self.type.name == node.name

    def ToString(self):
        """Return a string that tries to reconstitute the variable decl."""
        text = '%s %s' % (self.type, self.name)
        if self.initial_value:
            text += ' = ' + self.initial_value
        return text

    def __str__(self):
        return self._StringHelper(self.__class__.__name__, self.ToString())
class Typedef(_GenericDeclaration):
    """A typedef introducing `name` for the aliased type in `alias`."""

    def __init__(self, start, end, name, alias, namespace):
        _GenericDeclaration.__init__(self, start, end, name, namespace)
        self.alias = alias

    def IsDefinition(self):
        return True

    def IsExportable(self):
        return True

    def Requires(self, node):
        # TODO(nnorwitz): handle namespaces, etc.
        return any(token is not None and token.name == node.name
                   for token in self.alias)

    def __str__(self):
        return self._TypeStringHelper('%s, %s' % (self.name, self.alias))
class _NestedType(_GenericDeclaration):
    """Base for field-bearing nested types (unions and enums)."""

    def __init__(self, start, end, name, fields, namespace):
        _GenericDeclaration.__init__(self, start, end, name, namespace)
        self.fields = fields

    def IsDefinition(self):
        return True

    def IsExportable(self):
        return True

    def __str__(self):
        return self._TypeStringHelper('%s, {%s}' % (self.name, self.fields))
class Union(_NestedType):
    """A union type; member tokens are in `fields`."""
    pass

class Enum(_NestedType):
    """An enum type; enumerator tokens are in `fields`."""
    pass
class Class(_GenericDeclaration):
    """A class declaration or definition with bases, template info, and body."""

    def __init__(self, start, end, name, bases, templated_types, body,
                 namespace):
        _GenericDeclaration.__init__(self, start, end, name, namespace)
        self.bases = bases
        self.body = body
        self.templated_types = templated_types

    def IsDeclaration(self):
        # A pure forward declaration has neither bases nor a body.
        return self.bases is None and self.body is None

    def IsDefinition(self):
        return not self.IsDeclaration()

    def IsExportable(self):
        return not self.IsDeclaration()

    def Requires(self, node):
        # TODO(nnorwitz): handle namespaces, etc.
        if self.bases:
            # TODO(nnorwitz): bases are tokens, do name comparision.
            for base_tokens in self.bases:
                if any(t.name == node.name for t in base_tokens):
                    return True
        # TODO(nnorwitz): search in body too.
        return False

    def __str__(self):
        name = self.name
        if self.templated_types:
            name += '<%s>' % self.templated_types
        return self._TypeStringHelper(
            '%s, %s, %s' % (name, self.bases, self.body))
class Struct(Class):
    """A struct, represented identically to a Class node."""
    pass
class Function(_GenericDeclaration):
    """A free function: return type, parameters, modifier flags, optional body."""

    def __init__(self, start, end, name, return_type, parameters,
                 modifiers, templated_types, body, namespace):
        _GenericDeclaration.__init__(self, start, end, name, namespace)
        # Convert the raw token sequences into structured nodes up front.
        converter = TypeConverter(namespace)
        self.return_type = converter.CreateReturnType(return_type)
        self.parameters = converter.ToParameters(parameters)
        self.modifiers = modifiers
        self.body = body
        self.templated_types = templated_types

    def IsDeclaration(self):
        return self.body is None

    def IsDefinition(self):
        return self.body is not None

    def IsExportable(self):
        # `static` functions and members of an anonymous namespace
        # (recorded as None in the namespace list) are not exportable.
        if self.return_type and 'static' in self.return_type.modifiers:
            return False
        return None not in self.namespace

    def Requires(self, node):
        # TODO(nnorwitz): parameters are tokens, do name comparision.
        if self.parameters and any(p.name == node.name
                                   for p in self.parameters):
            return True
        # TODO(nnorwitz): search in body too.
        return False

    def __str__(self):
        # TODO(nnorwitz): add templated_types.
        suffix = ('%s %s(%s), 0x%02x, %s' %
                  (self.return_type, self.name, self.parameters,
                   self.modifiers, self.body))
        return self._TypeStringHelper(suffix)
class Method(Function):
    """A Function that belongs to a class; `in_class` names that class."""

    def __init__(self, start, end, name, in_class, return_type, parameters,
                 modifiers, templated_types, body, namespace):
        Function.__init__(self, start, end, name, return_type, parameters,
                          modifiers, templated_types, body, namespace)
        # TODO(nnorwitz): in_class could also be a namespace which can
        # mess up finding functions properly.
        self.in_class = in_class
class Type(_GenericDeclaration):
    """Type used for any variable (eg class, primitive, struct, etc)."""

    def __init__(self, start, end, name, templated_types, modifiers,
                 reference, pointer, array):
        """
        Args:
          name: str name of main type
          templated_types: [Class (Type?)] template type info between <>
          modifiers: [str] type modifiers (keywords) eg, const, mutable, etc.
          reference, pointer, array: bools
        """
        _GenericDeclaration.__init__(self, start, end, name, [])
        self.templated_types = templated_types
        if not name and modifiers:
            # No primary name: promote the last modifier (e.g. a bare
            # `unsigned`).  Note this deliberately mutates `modifiers`.
            self.name = modifiers.pop()
        self.modifiers = modifiers
        self.reference = reference
        self.pointer = pointer
        self.array = array

    def __str__(self):
        pieces = []
        if self.modifiers:
            pieces.append(' '.join(self.modifiers))
            pieces.append(' ')
        pieces.append(str(self.name))
        if self.templated_types:
            pieces.append('<%s>' % self.templated_types)
        if self.reference:
            pieces.append('&')
        if self.pointer:
            pieces.append('*')
        if self.array:
            pieces.append('[]')
        return self._TypeStringHelper(''.join(pieces))

    # By definition, Is* are always False. A Type can only exist in
    # some sort of variable declaration, parameter, or return value.
    def IsDeclaration(self):
        return False

    def IsDefinition(self):
        return False

    def IsExportable(self):
        return False
class TypeConverter(object):
    """Converts raw token sequences into Type/Parameter AST nodes."""

    def __init__(self, namespace_stack):
        self.namespace_stack = namespace_stack

    def _GetTemplateEnd(self, tokens, start):
        # `start` must point just past the opening '<'.  Returns the tokens
        # inside the angle brackets (nesting handled by depth counting) and
        # the index just past the matching '>'.
        count = 1
        end = start
        while 1:
            token = tokens[end]
            end += 1
            if token.name == '<':
                count += 1
            elif token.name == '>':
                count -= 1
                if count == 0:
                    break
        # Exclude the final '>' from the returned slice.
        return tokens[start:end-1], end

    def ToType(self, tokens):
        """Convert [Token,...] to [Class(...), ] useful for base classes.
        For example, code like class Foo : public Bar<x, y> { ... };
        the "Bar<x, y>" portion gets converted to an AST.

        Returns:
          [Class(...), ...]
        """
        result = []
        name_tokens = []
        reference = pointer = array = False

        def AddType(templated_types):
            # Partition tokens into name and modifier tokens.
            names = []
            modifiers = []
            for t in name_tokens:
                if keywords.IsKeyword(t.name):
                    modifiers.append(t.name)
                else:
                    names.append(t.name)
            name = ''.join(names)
            if name_tokens:
                result.append(Type(name_tokens[0].start, name_tokens[-1].end,
                                   name, templated_types, modifiers,
                                   reference, pointer, array))
            del name_tokens[:]

        i = 0
        end = len(tokens)
        while i < end:
            token = tokens[i]
            if token.name == '<':
                # Recurse to convert the template arguments themselves.
                new_tokens, new_end = self._GetTemplateEnd(tokens, i+1)
                AddType(self.ToType(new_tokens))
                # If there is a comma after the template, we need to consume
                # that here otherwise it becomes part of the name.
                i = new_end
                reference = pointer = array = False
            elif token.name == ',':
                AddType([])
                reference = pointer = array = False
            elif token.name == '*':
                pointer = True
            elif token.name == '&':
                reference = True
            elif token.name == '[':
                # NOTE(review): '[' sets pointer, not array — presumably
                # deliberate (arrays decay to pointers); confirm before changing.
                pointer = True
            elif token.name == ']':
                pass
            else:
                name_tokens.append(token)
            i += 1
        if name_tokens:
            # No '<' in the tokens, just a simple name and no template.
            AddType([])
        return result

    def DeclarationToParts(self, parts, needs_name_removed):
        """Split declaration tokens into components.

        Returns a 6-tuple: (name, type_name, templated_types, modifiers,
        default, other_tokens).
        """
        name = None
        default = []
        if needs_name_removed:
            # Handle default (initial) values properly.
            for i, t in enumerate(parts):
                if t.name == '=':
                    default = parts[i+1:]
                    name = parts[i-1].name
                    if name == ']' and parts[i-2].name == '[':
                        # Array declarator: the real name precedes '[]'.
                        name = parts[i-3].name
                        i -= 1
                    parts = parts[:i-1]
                    break
            else:
                if parts[-1].token_type == tokenize.NAME:
                    name = parts.pop().name
                else:
                    # TODO(nnorwitz): this is a hack that happens for code like
                    # Register(Foo<T>); where it thinks this is a function call
                    # but it's actually a declaration.
                    name = '???'
        modifiers = []
        type_name = []
        other_tokens = []
        templated_types = []
        i = 0
        end = len(parts)
        while i < end:
            p = parts[i]
            if keywords.IsKeyword(p.name):
                modifiers.append(p.name)
            elif p.name == '<':
                templated_tokens, new_end = self._GetTemplateEnd(parts, i+1)
                templated_types = self.ToType(templated_tokens)
                i = new_end - 1
                # Don't add a spurious :: to data members being initialized.
                next_index = i + 1
                if next_index < end and parts[next_index].name == '::':
                    i += 1
            elif p.name in ('[', ']', '='):
                # These are handled elsewhere.
                other_tokens.append(p)
            elif p.name not in ('*', '&', '>'):
                # Ensure that names have a space between them.
                if (type_name and type_name[-1].token_type == tokenize.NAME and
                        p.token_type == tokenize.NAME):
                    type_name.append(tokenize.Token(tokenize.SYNTAX, ' ', 0, 0))
                type_name.append(p)
            else:
                other_tokens.append(p)
            i += 1
        type_name = ''.join([t.name for t in type_name])
        return name, type_name, templated_types, modifiers, default, other_tokens

    def ToParameters(self, tokens):
        """Convert a parameter-list token sequence into [Parameter, ...]."""
        if not tokens:
            return []

        result = []
        name = type_name = ''
        type_modifiers = []
        pointer = reference = array = False
        first_token = None
        default = []

        def AddParameter(end):
            if default:
                del default[0]  # Remove flag.
            parts = self.DeclarationToParts(type_modifiers, True)
            (name, type_name, templated_types, modifiers,
             unused_default, unused_other_tokens) = parts
            parameter_type = Type(first_token.start, first_token.end,
                                  type_name, templated_types, modifiers,
                                  reference, pointer, array)
            p = Parameter(first_token.start, end, name,
                          parameter_type, default)
            result.append(p)

        template_count = 0
        for s in tokens:
            if not first_token:
                first_token = s
            if s.name == '<':
                template_count += 1
            elif s.name == '>':
                template_count -= 1
            if template_count > 0:
                # Inside <...>: commas belong to the template arguments,
                # not the parameter list.
                type_modifiers.append(s)
                continue

            if s.name == ',':
                AddParameter(s.start)
                name = type_name = ''
                type_modifiers = []
                pointer = reference = array = False
                first_token = None
                default = []
            elif s.name == '*':
                pointer = True
            elif s.name == '&':
                reference = True
            elif s.name == '[':
                array = True
            elif s.name == ']':
                pass  # Just don't add to type_modifiers.
            elif s.name == '=':
                # Got a default value.  Add any value (None) as a flag.
                default.append(None)
            elif default:
                default.append(s)
            else:
                type_modifiers.append(s)
        AddParameter(tokens[-1].end)
        return result

    def CreateReturnType(self, return_type_seq):
        """Build a Type from return-type tokens; None for an empty sequence."""
        if not return_type_seq:
            return None
        start = return_type_seq[0].start
        end = return_type_seq[-1].end
        _, name, templated_types, modifiers, default, other_tokens = \
            self.DeclarationToParts(return_type_seq, False)
        names = [n.name for n in other_tokens]
        reference = '&' in names
        pointer = '*' in names
        array = '[' in names
        return Type(start, end, name, templated_types, modifiers,
                    reference, pointer, array)

    def GetTemplateIndices(self, names):
        """Return (start, end) slice indices covering the '<...>' section."""
        # names is a list of strings.
        start = names.index('<')
        end = len(names) - 1
        while end > 0:
            if names[end] == '>':
                break
            end -= 1
        return start, end+1
class AstBuilder(object):
def __init__(self, token_stream, filename, in_class='', visibility=None,
namespace_stack=[]):
self.tokens = token_stream
self.filename = filename
# TODO(nnorwitz): use a better data structure (deque) for the queue.
# Switching directions of the "queue" improved perf by about 25%.
# Using a deque should be even better since we access from both sides.
self.token_queue = []
self.namespace_stack = namespace_stack[:]
self.in_class = in_class
if in_class is None:
self.in_class_name_only = None
else:
self.in_class_name_only = in_class.split('::')[-1]
self.visibility = visibility
self.in_function = False
self.current_token = None
# Keep the state whether we are currently handling a typedef or not.
self._handling_typedef = False
self.converter = TypeConverter(self.namespace_stack)
def HandleError(self, msg, token):
printable_queue = list(reversed(self.token_queue[-20:]))
sys.stderr.write('Got %s in %s @ %s %s\n' %
(msg, self.filename, token, printable_queue))
    def Generate(self):
        """Yield top-level AST nodes until the token stream is exhausted.

        Internal namespace-pop marker tokens only adjust the namespace
        stack and produce no node.
        """
        while 1:
            token = self._GetNextToken()
            if not token:
                break

            # Get the next token.
            self.current_token = token

            # Dispatch on the next token type.
            if token.token_type == _INTERNAL_TOKEN:
                if token.name == _NAMESPACE_POP:
                    self.namespace_stack.pop()
                continue

            try:
                result = self._GenerateOne(token)
                if result is not None:
                    yield result
            except:
                # Attach file/token/queue context before re-raising.
                self.HandleError('exception', token)
                raise
    def _CreateVariable(self, pos_token, name, type_name, type_modifiers,
                        ref_pointer_name_seq, templated_types, value=None):
        """Build a VariableDeclaration node positioned at pos_token."""
        # Reference/pointer/array-ness is inferred from the raw declarator
        # token names.
        reference = '&' in ref_pointer_name_seq
        pointer = '*' in ref_pointer_name_seq
        array = '[' in ref_pointer_name_seq
        var_type = Type(pos_token.start, pos_token.end, type_name,
                        templated_types, type_modifiers,
                        reference, pointer, array)
        return VariableDeclaration(pos_token.start, pos_token.end,
                                   name, var_type, value, self.namespace_stack)
    def _GenerateOne(self, token):
        """Parse one top-level construct beginning at `token`.

        Returns an AST node, or None when the construct produces nothing
        (unhandled syntax, skipped preprocessor blocks, etc.).
        """
        if token.token_type == tokenize.NAME:
            if (keywords.IsKeyword(token.name) and
                    not keywords.IsBuiltinType(token.name)):
                # Non-builtin keywords dispatch to the handle_* methods.
                method = getattr(self, 'handle_' + token.name)
                return method()
            elif token.name == self.in_class_name_only:
                # The token name is the same as the class, must be a ctor if
                # there is a paren. Otherwise, it's the return type.
                # Peek ahead to get the next token to figure out which.
                next = self._GetNextToken()
                self._AddBackToken(next)
                if next.token_type == tokenize.SYNTAX and next.name == '(':
                    return self._GetMethod([token], FUNCTION_CTOR, None, True)
                # Fall through--handle like any other method.

            # Handle data or function declaration/definition.
            syntax = tokenize.SYNTAX
            temp_tokens, last_token = \
                self._GetVarTokensUpTo(syntax, '(', ';', '{', '[')
            temp_tokens.insert(0, token)
            if last_token.name == '(':
                # If there is an assignment before the paren,
                # this is an expression, not a method.
                expr = bool([e for e in temp_tokens if e.name == '='])
                if expr:
                    new_temp = self._GetTokensUpTo(tokenize.SYNTAX, ';')
                    temp_tokens.append(last_token)
                    temp_tokens.extend(new_temp)
                    last_token = tokenize.Token(tokenize.SYNTAX, ';', 0, 0)

            if last_token.name == '[':
                # Handle array, this isn't a method, unless it's an operator.
                # TODO(nnorwitz): keep the size somewhere.
                # unused_size = self._GetTokensUpTo(tokenize.SYNTAX, ']')
                temp_tokens.append(last_token)
                if temp_tokens[-2].name == 'operator':
                    temp_tokens.append(self._GetNextToken())
                else:
                    temp_tokens2, last_token = \
                        self._GetVarTokensUpTo(tokenize.SYNTAX, ';')
                    temp_tokens.extend(temp_tokens2)

            if last_token.name == ';':
                # Handle data, this isn't a method.
                parts = self.converter.DeclarationToParts(temp_tokens, True)
                (name, type_name, templated_types, modifiers, default,
                 unused_other_tokens) = parts

                t0 = temp_tokens[0]
                names = [t.name for t in temp_tokens]
                if templated_types:
                    # Strip the <...> section out of the declarator names.
                    start, end = self.converter.GetTemplateIndices(names)
                    names = names[:start] + names[end:]
                default = ''.join([t.name for t in default])
                return self._CreateVariable(t0, name, type_name, modifiers,
                                            names, templated_types, default)
            if last_token.name == '{':
                # A '{' means a definition; push everything back and
                # re-dispatch on the first token's handler if one exists.
                self._AddBackTokens(temp_tokens[1:])
                self._AddBackToken(last_token)
                method_name = temp_tokens[0].name
                method = getattr(self, 'handle_' + method_name, None)
                if not method:
                    # Must be declaring a variable.
                    # TODO(nnorwitz): handle the declaration.
                    return None
                return method()
            return self._GetMethod(temp_tokens, 0, None, False)
        elif token.token_type == tokenize.SYNTAX:
            if token.name == '~' and self.in_class:
                # Must be a dtor (probably not in method body).
                token = self._GetNextToken()
                # self.in_class can contain A::Name, but the dtor will only
                # be Name.  Make sure to compare against the right value.
                if (token.token_type == tokenize.NAME and
                        token.name == self.in_class_name_only):
                    return self._GetMethod([token], FUNCTION_DTOR, None, True)
            # TODO(nnorwitz): handle a lot more syntax.
        elif token.token_type == tokenize.PREPROCESSOR:
            # TODO(nnorwitz): handle more preprocessor directives.
            # token starts with a #, so remove it and strip whitespace.
            name = token.name[1:].lstrip()
            if name.startswith('include'):
                # Remove "include".
                name = name[7:].strip()
                assert name
                # Handle #include \<newline> "header-on-second-line.h".
                if name.startswith('\\'):
                    name = name[1:].strip()
                assert name[0] in '<"', token
                assert name[-1] in '>"', token
                system = name[0] == '<'
                filename = name[1:-1]
                return Include(token.start, token.end, filename, system)
            if name.startswith('define'):
                # Remove "define".
                name = name[6:].strip()
                assert name
                value = ''
                for i, c in enumerate(name):
                    if c.isspace():
                        # Split "#define NAME value" at the first space.
                        value = name[i:].lstrip()
                        name = name[:i]
                        break
                return Define(token.start, token.end, name, value)
            if name.startswith('if') and name[2:3].isspace():
                condition = name[3:].strip()
                if condition.startswith('0') or condition.startswith('(0)'):
                    # Skip code compiled out via "#if 0".
                    self._SkipIf0Blocks()
        return None
def _GetTokensUpTo(self, expected_token_type, expected_token):
return self._GetVarTokensUpTo(expected_token_type, expected_token)[0]
def _GetVarTokensUpTo(self, expected_token_type, *expected_tokens):
last_token = self._GetNextToken()
tokens = []
while (last_token.token_type != expected_token_type or
last_token.name not in expected_tokens):
tokens.append(last_token)
last_token = self._GetNextToken()
return tokens, last_token
# TODO(nnorwitz): remove _IgnoreUpTo() it shouldn't be necesary.
def _IgnoreUpTo(self, token_type, token):
unused_tokens = self._GetTokensUpTo(token_type, token)
def _SkipIf0Blocks(self):
count = 1
while 1:
token = self._GetNextToken()
if token.token_type != tokenize.PREPROCESSOR:
continue
name = token.name[1:].lstrip()
if name.startswith('endif'):
count -= 1
if count == 0:
break
elif name.startswith('if'):
count += 1
def _GetMatchingChar(self, open_paren, close_paren, GetNextToken=None):
if GetNextToken is None:
GetNextToken = self._GetNextToken
# Assumes the current token is open_paren and we will consume
# and return up to the close_paren.
count = 1
token = GetNextToken()
while 1:
if token.token_type == tokenize.SYNTAX:
if token.name == open_paren:
count += 1
elif token.name == close_paren:
count -= 1
if count == 0:
break
yield token
token = GetNextToken()
yield token
    def _GetParameters(self):
        """Yield parameter-list tokens, up to and including the ')'."""
        return self._GetMatchingChar('(', ')')

    def GetScope(self):
        """Yield brace-scope tokens, up to and including the '}'."""
        return self._GetMatchingChar('{', '}')
def _GetNextToken(self):
if self.token_queue:
return self.token_queue.pop()
return next(self.tokens)
def _AddBackToken(self, token):
if token.whence == tokenize.WHENCE_STREAM:
token.whence = tokenize.WHENCE_QUEUE
self.token_queue.insert(0, token)
else:
assert token.whence == tokenize.WHENCE_QUEUE, token
self.token_queue.append(token)
def _AddBackTokens(self, tokens):
if tokens:
if tokens[-1].whence == tokenize.WHENCE_STREAM:
for token in tokens:
token.whence = tokenize.WHENCE_QUEUE
self.token_queue[:0] = reversed(tokens)
else:
assert tokens[-1].whence == tokenize.WHENCE_QUEUE, tokens
self.token_queue.extend(reversed(tokens))
    def GetName(self, seq=None):
        """Returns ([tokens], next_token_info).

        Collects a (possibly qualified, possibly templated) identifier,
        either from the live token stream or from `seq` if given.
        """
        GetNextToken = self._GetNextToken
        if seq is not None:
            # Read from the provided sequence instead of the live stream.
            it = iter(seq)
            GetNextToken = lambda: next(it)
        next_token = GetNextToken()
        tokens = []
        last_token_was_name = False
        while (next_token.token_type == tokenize.NAME or
               (next_token.token_type == tokenize.SYNTAX and
                next_token.name in ('::', '<'))):
            # Two NAMEs in a row means the identifier should terminate.
            # It's probably some sort of variable declaration.
            if last_token_was_name and next_token.token_type == tokenize.NAME:
                break
            last_token_was_name = next_token.token_type == tokenize.NAME
            tokens.append(next_token)
            # Handle templated names.
            if next_token.name == '<':
                tokens.extend(self._GetMatchingChar('<', '>', GetNextToken))
                # A closed template still counts as a single name unit.
                last_token_was_name = True
            next_token = GetNextToken()
        return tokens, next_token
def GetMethod(self, modifiers, templated_types):
return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(')
assert len(return_type_and_name) >= 1
return self._GetMethod(return_type_and_name, modifiers, templated_types,
False)
    def _GetMethod(self, return_type_and_name, modifiers, templated_types,
                   get_paren):
        """Parse a function/method declaration or definition.

        `return_type_and_name` holds the tokens read so far (return type
        plus name); `modifiers` is a FUNCTION_* bitmask accumulated by the
        caller; `get_paren` means the '(' has not been consumed yet.
        Returns a Function or Method node, or — for function-pointer data
        members that merely look like methods — a VariableDeclaration.
        """
        template_portion = None
        if get_paren:
            token = self._GetNextToken()
            assert token.token_type == tokenize.SYNTAX, token
            if token.name == '<':
                # Handle templatized dtors.
                template_portion = [token]
                template_portion.extend(self._GetMatchingChar('<', '>'))
                token = self._GetNextToken()
            assert token.token_type == tokenize.SYNTAX, token
            assert token.name == '(', token

        name = return_type_and_name.pop()
        # Handle templatized ctors.
        if name.name == '>':
            index = 1
            while return_type_and_name[index].name != '<':
                index += 1
            template_portion = return_type_and_name[index:] + [name]
            del return_type_and_name[index:]
            name = return_type_and_name.pop()
        elif name.name == ']':
            # operator[] arrives as separate 'operator', '[', ']' tokens;
            # fuse them into a single synthetic NAME token.
            rt = return_type_and_name
            assert rt[-1].name == '[', return_type_and_name
            assert rt[-2].name == 'operator', return_type_and_name
            name_seq = return_type_and_name[-2:]
            del return_type_and_name[-2:]
            name = tokenize.Token(tokenize.NAME, 'operator[]',
                                  name_seq[0].start, name.end)
            # Get the open paren so _GetParameters() below works.
            unused_open_paren = self._GetNextToken()

        # TODO(nnorwitz): store template_portion.
        return_type = return_type_and_name
        indices = name
        if return_type:
            indices = return_type[0]

        # Force ctor for templatized ctors.
        if name.name == self.in_class and not modifiers:
            modifiers |= FUNCTION_CTOR
        parameters = list(self._GetParameters())
        del parameters[-1]  # Remove trailing ')'.

        # Handling operator() is especially weird.
        if name.name == 'operator' and not parameters:
            token = self._GetNextToken()
            assert token.name == '(', token
            parameters = list(self._GetParameters())
            del parameters[-1]  # Remove trailing ')'.

        # Consume trailing modifiers (const, throw(...), override, ...).
        token = self._GetNextToken()
        while token.token_type == tokenize.NAME:
            modifier_token = token
            token = self._GetNextToken()
            if modifier_token.name == 'const':
                modifiers |= FUNCTION_CONST
            elif modifier_token.name == '__attribute__':
                # TODO(nnorwitz): handle more __attribute__ details.
                modifiers |= FUNCTION_ATTRIBUTE
                assert token.name == '(', token
                # Consume everything between the (parens).
                unused_tokens = list(self._GetMatchingChar('(', ')'))
                token = self._GetNextToken()
            elif modifier_token.name == 'throw':
                modifiers |= FUNCTION_THROW
                assert token.name == '(', token
                # Consume everything between the (parens).
                unused_tokens = list(self._GetMatchingChar('(', ')'))
                token = self._GetNextToken()
            elif modifier_token.name == 'override':
                modifiers |= FUNCTION_OVERRIDE
            elif modifier_token.name == modifier_token.name.upper():
                # HACK(nnorwitz): assume that all upper-case names
                # are some macro we aren't expanding.
                modifiers |= FUNCTION_UNKNOWN_ANNOTATION
            else:
                self.HandleError('unexpected token', modifier_token)

        assert token.token_type == tokenize.SYNTAX, token
        # Handle ctor initializers.
        if token.name == ':':
            # TODO(nnorwitz): anything else to handle for initializer list?
            while token.name != ';' and token.name != '{':
                token = self._GetNextToken()

        # Handle pointer to functions that are really data but look
        # like method declarations.
        if token.name == '(':
            if parameters[0].name == '*':
                # name contains the return type.
                name = parameters.pop()
                # parameters contains the name of the data.
                modifiers = [p.name for p in parameters]
                # Already at the ( to open the parameter list.
                function_parameters = list(self._GetMatchingChar('(', ')'))
                del function_parameters[-1]  # Remove trailing ')'.
                # TODO(nnorwitz): store the function_parameters.
                token = self._GetNextToken()
                assert token.token_type == tokenize.SYNTAX, token
                assert token.name == ';', token
                return self._CreateVariable(indices, name.name, indices.name,
                                            modifiers, '', None)
            # At this point, we got something like:
            #  return_type (type::*name_)(params);
            # This is a data member called name_ that is a function pointer.
            # With this code: void (sq_type::*field_)(string&);
            # We get: name=void return_type=[] parameters=sq_type ... field_
            # TODO(nnorwitz): is return_type always empty?
            # TODO(nnorwitz): this isn't even close to being correct.
            # Just put in something so we don't crash and can move on.
            real_name = parameters[-1]
            modifiers = [p.name for p in self._GetParameters()]
            del modifiers[-1]  # Remove trailing ')'.
            return self._CreateVariable(indices, real_name.name, indices.name,
                                        modifiers, '', None)

        if token.name == '{':
            body = list(self.GetScope())
            del body[-1]  # Remove trailing '}'.
        else:
            body = None

            if token.name == '=':
                token = self._GetNextToken()

                if token.name == 'default' or token.name == 'delete':
                    # Ignore explicitly defaulted and deleted special members
                    # in C++11.
                    token = self._GetNextToken()
                else:
                    # Handle pure-virtual declarations.
                    assert token.token_type == tokenize.CONSTANT, token
                    assert token.name == '0', token
                    modifiers |= FUNCTION_PURE_VIRTUAL
                    token = self._GetNextToken()

            if token.name == '[':
                # TODO(nnorwitz): store tokens and improve parsing.
                # template <typename T, size_t N> char (&ASH(T (&seq)[N]))[N];
                tokens = list(self._GetMatchingChar('[', ']'))
                token = self._GetNextToken()

            assert token.name == ';', (token, return_type_and_name, parameters)

        # Looks like we got a method, not a function.
        if len(return_type) > 2 and return_type[-1].name == '::':
            return_type, in_class = \
                self._GetReturnTypeAndClassName(return_type)
            return Method(indices.start, indices.end, name.name, in_class,
                          return_type, parameters, modifiers, templated_types,
                          body, self.namespace_stack)
        return Function(indices.start, indices.end, name.name, return_type,
                        parameters, modifiers, templated_types, body,
                        self.namespace_stack)
    def _GetReturnTypeAndClassName(self, token_seq):
        """Split method tokens into (return-type tokens, class-name tokens)."""
        # Splitting the return type from the class name in a method
        # can be tricky.  For example, Return::Type::Is::Hard::To::Find().
        # Where is the return type and where is the class name?
        # The heuristic used is to pull the last name as the class name.
        # This includes all the templated type info.
        # TODO(nnorwitz): if there is only One name like in the
        # example above, punt and assume the last bit is the class name.

        # Ignore a :: prefix, if exists so we can find the first real name.
        i = 0
        if token_seq[0].name == '::':
            i = 1
        # Ignore a :: suffix, if exists.
        end = len(token_seq) - 1
        if token_seq[end-1].name == '::':
            end -= 1

        # Make a copy of the sequence so we can append a sentinel value.
        # This is required because GetName has to have some terminating
        # condition beyond the last name.
        seq_copy = token_seq[i:end]
        seq_copy.append(tokenize.Token(tokenize.SYNTAX, '', 0, 0))
        names = []
        while i < end:
            # Iterate through the sequence parsing out each name.
            new_name, next = self.GetName(seq_copy[i:])
            assert new_name, 'Got empty new_name, next=%s' % next
            # We got a pointer or ref.  Add it to the name.
            if next and next.token_type == tokenize.SYNTAX:
                new_name.append(next)
            names.append(new_name)
            i += len(new_name)

        # Now that we have the names, it's time to undo what we did.

        # Remove the sentinel value.
        names[-1].pop()
        # Flatten the token sequence for the return type.
        return_type = [e for seq in names[:-1] for e in seq]
        # The class name is the last name.
        class_name = names[-1]
        return return_type, class_name
    # Builtin type keywords need no dedicated handling here; they are
    # consumed as part of the declarations that contain them.
    def handle_bool(self):
        pass

    def handle_char(self):
        pass

    def handle_int(self):
        pass

    def handle_long(self):
        pass

    def handle_short(self):
        pass

    def handle_double(self):
        pass

    def handle_float(self):
        pass

    def handle_void(self):
        pass

    def handle_wchar_t(self):
        pass

    def handle_unsigned(self):
        pass

    def handle_signed(self):
        pass
    def _GetNestedType(self, ctor):
        """Parse a nested type (union/enum) and build it with `ctor`.

        For the anonymous-type-with-variable form, the returned node is a
        VariableDeclaration wrapping the newly built type.
        """
        name = None
        name_tokens, token = self.GetName()
        if name_tokens:
            name = ''.join([t.name for t in name_tokens])

        # Handle forward declarations.
        if token.token_type == tokenize.SYNTAX and token.name == ';':
            return ctor(token.start, token.end, name, None,
                        self.namespace_stack)

        if token.token_type == tokenize.NAME and self._handling_typedef:
            self._AddBackToken(token)
            return ctor(token.start, token.end, name, None,
                        self.namespace_stack)

        # Must be the type declaration.
        fields = list(self._GetMatchingChar('{', '}'))
        del fields[-1]  # Remove trailing '}'.
        if token.token_type == tokenize.SYNTAX and token.name == '{':
            next = self._GetNextToken()
            new_type = ctor(token.start, token.end, name, fields,
                            self.namespace_stack)
            # A name means this is an anonymous type and the name
            # is the variable declaration.
            if next.token_type != tokenize.NAME:
                return new_type
            name = new_type
            token = next

        # Must be variable declaration using the type prefixed with keyword.
        assert token.token_type == tokenize.NAME, token
        return self._CreateVariable(token, token.name, name, [], '', None)
    def handle_struct(self):
        """Parse `struct ...`; yields a Struct, a variable, or a method."""
        # Special case the handling typedef/aliasing of structs here.
        # It would be a pain to handle in the class code.
        name_tokens, var_token = self.GetName()
        if name_tokens:
            next_token = self._GetNextToken()
            is_syntax = (var_token.token_type == tokenize.SYNTAX and
                         var_token.name[0] in '*&')
            is_variable = (var_token.token_type == tokenize.NAME and
                           next_token.name == ';')
            variable = var_token
            if is_syntax and not is_variable:
                variable = next_token
                temp = self._GetNextToken()
                if temp.token_type == tokenize.SYNTAX and temp.name == '(':
                    # Handle methods declared to return a struct.
                    t0 = name_tokens[0]
                    # Synthesize the consumed 'struct' keyword token; the
                    # positions are approximated just before the name.
                    struct = tokenize.Token(tokenize.NAME, 'struct',
                                            t0.start-7, t0.start-2)
                    type_and_name = [struct]
                    type_and_name.extend(name_tokens)
                    type_and_name.extend((var_token, next_token))
                    return self._GetMethod(type_and_name, 0, None, False)
                assert temp.name == ';', (temp, name_tokens, var_token)
            if is_syntax or (is_variable and not self._handling_typedef):
                modifiers = ['struct']
                type_name = ''.join([t.name for t in name_tokens])
                position = name_tokens[0]
                return self._CreateVariable(position, variable.name, type_name,
                                            modifiers, var_token.name, None)
            name_tokens.extend((var_token, next_token))
            self._AddBackTokens(name_tokens)
        else:
            self._AddBackToken(var_token)
        return self._GetClass(Struct, VISIBILITY_PUBLIC, None)
    def handle_union(self):
        """Parse a union as a nested type."""
        return self._GetNestedType(Union)

    def handle_enum(self):
        """Parse an enum as a nested type."""
        return self._GetNestedType(Enum)
    def handle_auto(self):
        # TODO(nnorwitz): warn about using auto?  Probably not since it
        # will be reclaimed and useful for C++0x.
        pass

    # Storage-class and qualifier keywords carry no top-level meaning for
    # this parser; they are picked up while parsing the declarations that
    # contain them.
    def handle_register(self):
        pass

    def handle_const(self):
        pass

    def handle_inline(self):
        pass

    def handle_extern(self):
        pass

    def handle_static(self):
        pass
    def handle_virtual(self):
        """Parse a virtual method declaration/definition."""
        # What follows must be a method.
        token = token2 = self._GetNextToken()
        if token.name == 'inline':
            # HACK(nnorwitz): handle inline dtors by ignoring 'inline'.
            token2 = self._GetNextToken()
        if token2.token_type == tokenize.SYNTAX and token2.name == '~':
            return self.GetMethod(FUNCTION_VIRTUAL + FUNCTION_DTOR, None)
        assert token.token_type == tokenize.NAME or token.name == '::', token
        # The ')' in this comment balances the '(' for brace-matching editors.
        return_type_and_name = self._GetTokensUpTo(tokenize.SYNTAX, '(')  # )
        return_type_and_name.insert(0, token)
        if token2 is not token:
            # Re-insert the token read past 'inline'.
            return_type_and_name.insert(1, token2)
        return self._GetMethod(return_type_and_name, FUNCTION_VIRTUAL,
                               None, False)
  def handle_volatile(self):
    # Qualifier: carries no AST information, ignored.
    pass
  def handle_mutable(self):
    # Qualifier: carries no AST information, ignored.
    pass
  def handle_public(self):
    """Switch the current access level to public (class scope only)."""
    assert self.in_class
    self.visibility = VISIBILITY_PUBLIC
  def handle_protected(self):
    """Switch the current access level to protected (class scope only)."""
    assert self.in_class
    self.visibility = VISIBILITY_PROTECTED
  def handle_private(self):
    """Switch the current access level to private (class scope only)."""
    assert self.in_class
    self.visibility = VISIBILITY_PRIVATE
def handle_friend(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
t0 = tokens[0]
return Friend(t0.start, t0.end, tokens, self.namespace_stack)
  def handle_static_cast(self):
    # Cast expressions produce no AST nodes; ignored.
    pass
  def handle_const_cast(self):
    # Cast expressions produce no AST nodes; ignored.
    pass
  def handle_dynamic_cast(self):
    # Cast expressions produce no AST nodes; ignored.
    pass
  def handle_reinterpret_cast(self):
    # Cast expressions produce no AST nodes; ignored.
    pass
  def handle_new(self):
    # Expression-level keyword; nothing to record in the AST.
    pass
def handle_delete(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
return Delete(tokens[0].start, tokens[0].end, tokens)
  def handle_typedef(self):
    """Handle 'typedef ...;' and return a Typedef node for the alias."""
    token = self._GetNextToken()
    if (token.token_type == tokenize.NAME and
        keywords.IsKeyword(token.name)):
      # Token must be struct/enum/union/class.
      method = getattr(self, 'handle_' + token.name)
      self._handling_typedef = True
      tokens = [method()]
      self._handling_typedef = False
    else:
      tokens = [token]
    # Get the remainder of the typedef up to the semi-colon.
    tokens.extend(self._GetTokensUpTo(tokenize.SYNTAX, ';'))
    # TODO(nnorwitz): clean all this up.
    assert tokens
    # The last token before ';' is normally the new type name.
    name = tokens.pop()
    indices = name
    if tokens:
      indices = tokens[0]
    if not indices:
      indices = token
    if name.name == ')':
      # HACK(nnorwitz): Handle pointers to functions "properly".
      if (len(tokens) >= 4 and
          tokens[1].name == '(' and tokens[2].name == '*'):
        tokens.append(name)
        name = tokens[3]
    elif name.name == ']':
      # HACK(nnorwitz): Handle arrays properly.
      if len(tokens) >= 2:
        tokens.append(name)
        name = tokens[1]
    new_type = tokens
    if tokens and isinstance(tokens[0], tokenize.Token):
      # Raw tokens: convert them into a real Type node.
      new_type = self.converter.ToType(tokens)[0]
    return Typedef(indices.start, indices.end, name.name,
                   new_type, self.namespace_stack)
  def handle_typeid(self):
    pass  # Not needed yet.
  def handle_typename(self):
    pass  # Not needed yet.
  def _GetTemplatedTypes(self):
    """Parse the '<...>' template parameter list.

    Returns a dict mapping parameter name -> (type_name or None,
    default or None).
    """
    result = {}
    tokens = list(self._GetMatchingChar('<', '>'))
    len_tokens = len(tokens) - 1    # Ignore trailing '>'.
    i = 0
    while i < len_tokens:
      key = tokens[i].name
      i += 1
      if keywords.IsKeyword(key) or key == ',':
        continue
      type_name = default = None
      if i < len_tokens:
        i += 1
        if tokens[i-1].name == '=':
          # 'name = default': read the default value expression.
          assert i < len_tokens, '%s %s' % (i, tokens)
          default, unused_next_token = self.GetName(tokens[i:])
          i += len(default)
        else:
          if tokens[i-1].name != ',':
            # We got something like: Type variable.
            # Re-adjust the key (variable) and type_name (Type).
            key = tokens[i-1].name
            type_name = tokens[i-2]
      result[key] = (type_name, default)
    return result
  def handle_template(self):
    """Handle 'template<...>' and dispatch to the templated declaration."""
    token = self._GetNextToken()
    assert token.token_type == tokenize.SYNTAX, token
    assert token.name == '<', token
    templated_types = self._GetTemplatedTypes()
    # TODO(nnorwitz): for now, just ignore the template params.
    token = self._GetNextToken()
    if token.token_type == tokenize.NAME:
      if token.name == 'class':
        return self._GetClass(Class, VISIBILITY_PRIVATE, templated_types)
      elif token.name == 'struct':
        return self._GetClass(Struct, VISIBILITY_PUBLIC, templated_types)
      elif token.name == 'friend':
        return self.handle_friend()
    # Not a templated class/struct/friend: push back and look for a method
    # or variable declaration.
    self._AddBackToken(token)
    tokens, last = self._GetVarTokensUpTo(tokenize.SYNTAX, '(', ';')
    tokens.append(last)
    self._AddBackTokens(tokens)
    if last.name == '(':
      return self.GetMethod(FUNCTION_NONE, templated_types)
    # Must be a variable definition.
    return None
  def handle_true(self):
    pass  # Nothing to do.
  def handle_false(self):
    pass  # Nothing to do.
  def handle_asm(self):
    pass  # Not needed yet.
  def handle_class(self):
    """Handle the 'class' keyword: parse a class declaration/definition."""
    return self._GetClass(Class, VISIBILITY_PRIVATE, None)
  def _GetBases(self):
    """Parse a base-class list after ':'.

    Returns (bases, token) where bases is a list of Type nodes and token
    is the '{' that opens the class body.
    """
    # Get base classes.
    bases = []
    while 1:
      token = self._GetNextToken()
      assert token.token_type == tokenize.NAME, token
      # TODO(nnorwitz): store kind of inheritance...maybe.
      if token.name not in ('public', 'protected', 'private'):
        # If inheritance type is not specified, it is private.
        # Just put the token back so we can form a name.
        # TODO(nnorwitz): it would be good to warn about this.
        self._AddBackToken(token)
      else:
        # Check for virtual inheritance.
        token = self._GetNextToken()
        if token.name != 'virtual':
          self._AddBackToken(token)
        else:
          # TODO(nnorwitz): store that we got virtual for this base.
          pass
      base, next_token = self.GetName()
      bases_ast = self.converter.ToType(base)
      assert len(bases_ast) == 1, bases_ast
      bases.append(bases_ast[0])
      assert next_token.token_type == tokenize.SYNTAX, next_token
      if next_token.name == '{':
        token = next_token
        break
      # Support multiple inheritance.
      assert next_token.name == ',', next_token
    return bases, token
  def _GetClass(self, class_type, visibility, templated_types):
    """Parse a class/struct declaration or definition.

    Returns a class_type node (Class or Struct), or a VariableDeclaration /
    method node for inline declarations like 'class X* y;'.
    """
    class_name = None
    class_token = self._GetNextToken()
    if class_token.token_type != tokenize.NAME:
      # Anonymous class/struct: the next token is already syntax ('{').
      assert class_token.token_type == tokenize.SYNTAX, class_token
      token = class_token
    else:
      # Skip any macro (e.g. storage class specifiers) after the
      # 'class' keyword.
      next_token = self._GetNextToken()
      if next_token.token_type == tokenize.NAME:
        self._AddBackToken(next_token)
      else:
        self._AddBackTokens([class_token, next_token])
      name_tokens, token = self.GetName()
      class_name = ''.join([t.name for t in name_tokens])
    bases = None
    if token.token_type == tokenize.SYNTAX:
      if token.name == ';':
        # Forward declaration.
        return class_type(class_token.start, class_token.end,
                          class_name, None, templated_types, None,
                          self.namespace_stack)
      if token.name in '*&':
        # Inline forward declaration. Could be method or data.
        name_token = self._GetNextToken()
        next_token = self._GetNextToken()
        if next_token.name == ';':
          # Handle data
          modifiers = ['class']
          return self._CreateVariable(class_token, name_token.name,
                                      class_name,
                                      modifiers, token.name, None)
        else:
          # Assume this is a method.
          tokens = (class_token, token, name_token, next_token)
          self._AddBackTokens(tokens)
          return self.GetMethod(FUNCTION_NONE, None)
      if token.name == ':':
        bases, token = self._GetBases()
    body = None
    if token.token_type == tokenize.SYNTAX and token.name == '{':
      assert token.token_type == tokenize.SYNTAX, token
      assert token.name == '{', token
      # Recursively build the class body with a nested AstBuilder.
      ast = AstBuilder(self.GetScope(), self.filename, class_name,
                       visibility, self.namespace_stack)
      body = list(ast.Generate())
      if not self._handling_typedef:
        token = self._GetNextToken()
        if token.token_type != tokenize.NAME:
          assert token.token_type == tokenize.SYNTAX, token
          assert token.name == ';', token
        else:
          # 'class X {...} var;' declares a variable of the new class type.
          new_class = class_type(class_token.start, class_token.end,
                                 class_name, bases, None,
                                 body, self.namespace_stack)
          modifiers = []
          return self._CreateVariable(class_token,
                                      token.name, new_class,
                                      modifiers, token.name, None)
    else:
      if not self._handling_typedef:
        self.HandleError('non-typedef token', token)
      self._AddBackToken(token)
    return class_type(class_token.start, class_token.end, class_name,
                      bases, templated_types, body, self.namespace_stack)
  def handle_namespace(self):
    """Handle 'namespace [name] {...}' (or a namespace alias)."""
    token = self._GetNextToken()
    # Support anonymous namespaces.
    name = None
    if token.token_type == tokenize.NAME:
      name = token.name
      token = self._GetNextToken()
    self.namespace_stack.append(name)
    assert token.token_type == tokenize.SYNTAX, token
    # Create an internal token that denotes when the namespace is complete.
    internal_token = tokenize.Token(_INTERNAL_TOKEN, _NAMESPACE_POP,
                                    None, None)
    internal_token.whence = token.whence
    if token.name == '=':
      # TODO(nnorwitz): handle aliasing namespaces.
      name, next_token = self.GetName()
      assert next_token.name == ';', next_token
      self._AddBackToken(internal_token)
    else:
      assert token.name == '{', token
      tokens = list(self.GetScope())
      # Replace the trailing } with the internal namespace pop token.
      tokens[-1] = internal_token
      # Handle namespace with nothing in it.
      self._AddBackTokens(tokens)
    return None
def handle_using(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert tokens
return Using(tokens[0].start, tokens[0].end, tokens)
  def handle_explicit(self):
    """Handle 'explicit': the following declaration must be a constructor."""
    assert self.in_class
    # Nothing much to do.
    # TODO(nnorwitz): maybe verify the method name == class name.
    # This must be a ctor.
    return self.GetMethod(FUNCTION_CTOR, None)
  def handle_this(self):
    pass  # Nothing to do.
  def handle_operator(self):
    # Pull off the next token(s?) and make that part of the method name.
    pass
  def handle_sizeof(self):
    # Expression-level keyword; nothing to record.
    pass
  def handle_case(self):
    # Statement-level keyword; nothing to record.
    pass
  def handle_switch(self):
    # Statement-level keyword; nothing to record.
    pass
  def handle_default(self):
    """Consume the ':' that must follow 'default' in a switch."""
    token = self._GetNextToken()
    assert token.token_type == tokenize.SYNTAX
    assert token.name == ':'
  def handle_if(self):
    pass
  def handle_else(self):
    pass
def handle_return(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
if not tokens:
return Return(self.current_token.start, self.current_token.end, None)
return Return(tokens[0].start, tokens[0].end, tokens)
def handle_goto(self):
tokens = self._GetTokensUpTo(tokenize.SYNTAX, ';')
assert len(tokens) == 1, str(tokens)
return Goto(tokens[0].start, tokens[0].end, tokens[0].name)
  def handle_try(self):
    pass  # Not needed yet.
  def handle_catch(self):
    pass  # Not needed yet.
  def handle_throw(self):
    pass  # Not needed yet.
  def handle_while(self):
    pass
  def handle_do(self):
    pass
  def handle_for(self):
    pass
  def handle_break(self):
    # Discard the rest of the statement.
    self._IgnoreUpTo(tokenize.SYNTAX, ';')
  def handle_continue(self):
    # Discard the rest of the statement.
    self._IgnoreUpTo(tokenize.SYNTAX, ';')
def BuilderFromSource(source, filename):
  """Utility method that returns an AstBuilder from source code.

  Args:
    source: 'C++ source code'
    filename: 'file1'

  Returns:
    AstBuilder
  """
  return AstBuilder(tokenize.GetTokens(source), filename)
def PrintIndentifiers(filename, should_print):
  """Prints all identifiers for a C++ source file.

  Parsing is best-effort: parse failures are silently ignored so a bad
  file does not abort a batch run.

  Args:
    filename: 'file1'
    should_print: predicate with signature: bool Function(token)
  """
  source = utils.ReadFile(filename, False)
  if source is None:
    sys.stderr.write('Unable to find: %s\n' % filename)
    return

  #print('Processing %s' % actual_filename)
  builder = BuilderFromSource(source, filename)
  try:
    for node in builder.Generate():
      if should_print(node):
        print(node.name)
  except KeyboardInterrupt:
    return
  except Exception:
    # Was a bare 'except:', which also swallowed SystemExit and other
    # non-Exception signals; keep the best-effort behavior but only for
    # ordinary exceptions (e.g. parse errors).
    pass
def PrintAllIndentifiers(filenames, should_print):
  """Prints all identifiers for each C++ source file in filenames.

  Args:
    filenames: ['file1', 'file2', ...]
    should_print: predicate with signature: bool Function(token)
  """
  for source_path in filenames:
    PrintIndentifiers(source_path, should_print)
def main(argv):
  """Parse each C++ file named in argv and, when DEBUG, print its AST."""
  for filename in argv[1:]:
    source = utils.ReadFile(filename)
    if source is None:
      continue
    print('Processing %s' % filename)
    builder = BuilderFromSource(source, filename)
    try:
      entire_ast = filter(None, builder.Generate())
    except KeyboardInterrupt:
      return
    except:
      # Already printed a warning, print the traceback and continue.
      traceback.print_exc()
    else:
      if utils.DEBUG:
        for ast in entire_ast:
          print(ast)
# Script entry point: parse every file named on the command line.
if __name__ == '__main__':
  main(sys.argv)
| {
"content_hash": "31102fcd0600ce7b4edf4c468404371e",
"timestamp": "",
"source": "github",
"line_count": 1716,
"max_line_length": 82,
"avg_line_length": 36.20804195804196,
"alnum_prop": 0.539278644198735,
"repo_name": "NervanaSystems/aeon",
"id": "4e0aca423144de214cb7b9e538318c8aee5c0ec0",
"size": "62772",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gtest/googlemock/scripts/generator/cpp/ast.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "17477"
},
{
"name": "C++",
"bytes": "4814939"
},
{
"name": "CMake",
"bytes": "57609"
},
{
"name": "M4",
"bytes": "25387"
},
{
"name": "Makefile",
"bytes": "25787"
},
{
"name": "Python",
"bytes": "462948"
},
{
"name": "Shell",
"bytes": "26792"
}
],
"symlink_target": ""
} |
def extractAviantlWordpressCom(item):
    """Parser for 'aviantl.wordpress.com'."""
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    if "preview" in title.lower() or not (chp or vol):
        return None
    known_tags = (
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    )
    for tag, series_name, tl_type in known_tags:
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, series_name, vol, chp,
                                               frag=frag, postfix=postfix,
                                               tl_type=tl_type)
    return False
| {
"content_hash": "00187a772eeef7acc2a39f39f78cc65c",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 26.333333333333332,
"alnum_prop": 0.6311030741410488,
"repo_name": "fake-name/ReadableWebProxy",
"id": "2bb285743b98dfc3f842e7d7f2566376b4c2fbb1",
"size": "554",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractAviantlWordpressCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
import warnings
import sys
import argparse
from etk.extractors.cve_extractor import CVEExtractor
# Module-level extractor instance shared by run() below.
cve_extractor = CVEExtractor()
def add_arguments(parser):
    """Configure CLI arguments for the CVE extractor command.

    Args:
        parser (argparse.ArgumentParser): parser populated in place.
    """
    # Fixed: the examples previously referenced 'bitcoin_address_extractor',
    # a copy-paste leftover from another etk CLI module.
    parser.description = 'Examples:\n' \
                         'python -m etk cve_extractor /tmp/input.txt\n' \
                         'cat /tmp/input.txt | python -m etk cve_extractor'
    # Read from the given file, or stdin when no file is supplied.
    parser.add_argument('input_file', nargs='?', type=argparse.FileType('r'),
                        default=sys.stdin)
def run(args):
    """Extract CVE mentions from each input line and print their values.

    Args:
        args (argparse.Namespace)
    """
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        for raw_line in args.input_file:
            for extraction in cve_extractor.extract(raw_line):
                print(extraction.value)
| {
"content_hash": "12df8e537b3005a362f5349ad7964dcc",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 96,
"avg_line_length": 26.303030303030305,
"alnum_prop": 0.6105990783410138,
"repo_name": "usc-isi-i2/etk",
"id": "e2200b514d7a06fe8b8a6d55ac816a0561e2e136",
"size": "868",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "etk/cli/cve_extractor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "4590"
},
{
"name": "HTML",
"bytes": "1048891"
},
{
"name": "Julia",
"bytes": "874347"
},
{
"name": "Jupyter Notebook",
"bytes": "123779"
},
{
"name": "Makefile",
"bytes": "601"
},
{
"name": "Python",
"bytes": "807682"
},
{
"name": "Shell",
"bytes": "259"
}
],
"symlink_target": ""
} |
import sahara.exceptions as e
from sahara.i18n import _
from sahara.service.edp import api
from sahara.utils import edp
# JSON schema used to validate EDP job-creation requests.
JOB_SCHEMA = {
    "type": "object",
    "properties": {
        "name": {
            "type": "string",
            "minLength": 1,
            "maxLength": 50,
            "format": "valid_name"
        },
        "description": {
            "type": "string"
        },
        # Job type must be one of the supported EDP job types.
        "type": {
            "type": "string",
            "enum": edp.JOB_TYPES_ALL,
        },
        # Lists of job-binary ids; duplicates are rejected.
        "mains": {
            "type": "array",
            "uniqueItems": True,
            "items": {
                "type": "string",
                "minLength": 1,
            }
        },
        "libs": {
            "type": "array",
            "uniqueItems": True,
            "items": {
                "type": "string",
                "minLength": 1,
            }
        },
        "streaming": {
            "type": "boolean"
        }
    },
    "additionalProperties": False,
    "required": [
        "name",
        "type",
    ]
}
def _check_binaries(values):
    """Raise NotFoundException for any referenced job binary that is missing."""
    for binary_id in values:
        if api.get_job_binary(binary_id):
            continue
        raise e.NotFoundException(binary_id,
                                  _("Job binary '%s' does not exist"))
def check_mains_libs(data, **kwargs):
    """Validate the 'mains' and 'libs' lists of a job-creation request.

    Raises InvalidDataException when the combination of mains/libs is not
    valid for the requested job type, and NotFoundException when a
    referenced job binary does not exist.
    """
    mains = data.get("mains", [])
    libs = data.get("libs", [])
    job_type, subtype = edp.split_job_type(data.get("type"))
    streaming = (job_type == edp.JOB_TYPE_MAPREDUCE and
                 subtype == edp.JOB_SUBTYPE_STREAMING)
    # These types must have a value in mains and may also use libs
    if job_type in [edp.JOB_TYPE_PIG, edp.JOB_TYPE_HIVE,
                    edp.JOB_TYPE_SHELL, edp.JOB_TYPE_SPARK]:
        if not mains:
            if job_type == edp.JOB_TYPE_SPARK:
                msg = _(
                    "%s job requires main application jar") % data.get("type")
            else:
                msg = _("%s flow requires main script") % data.get("type")
            raise e.InvalidDataException(msg)
        # Check for overlap
        if set(mains).intersection(set(libs)):
            raise e.InvalidDataException(_("'mains' and 'libs' overlap"))
    else:
        # Java and MapReduce require libs, but MapReduce.Streaming does not
        if not streaming and not libs:
            raise e.InvalidDataException(_("%s flow requires libs") %
                                         data.get("type"))
        if mains:
            raise e.InvalidDataException(_("%s flow does not use mains") %
                                         data.get("type"))
    # Make sure that all referenced binaries exist
    _check_binaries(mains)
    _check_binaries(libs)
| {
"content_hash": "c306af4f04d252e0c8a08fcf78f876c1",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 78,
"avg_line_length": 30.49438202247191,
"alnum_prop": 0.4848931466470155,
"repo_name": "redhat-openstack/sahara",
"id": "3d51c9673e33ebce349ef74a162383e4977acbb5",
"size": "3297",
"binary": false,
"copies": "4",
"ref": "refs/heads/master-patches",
"path": "sahara/service/validations/edp/job.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "3609"
},
{
"name": "Mako",
"bytes": "1528"
},
{
"name": "PigLatin",
"bytes": "792"
},
{
"name": "Python",
"bytes": "2771576"
},
{
"name": "Shell",
"bytes": "42673"
}
],
"symlink_target": ""
} |
from django.shortcuts import render, get_object_or_404
from django.template import loader
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.views import generic
from django.utils import timezone
# Create your views here.
from .models import Question, Choice
class IndexView(generic.ListView):
    """List view showing the most recent published questions."""
    template_name = 'polls/index.html'
    context_object_name = 'latest_question_list'
    def get_queryset(self):
        """
        Return the last five published questions (not including those set to be
        published in the future).
        """
        return Question.objects.filter(
            pub_date__lte=timezone.now()
        ).order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
    """Detail view for a single question's voting form."""
    model = Question
    template_name = 'polls/detail.html'
    def get_queryset(self):
        """
        Excludes any questions that aren't published yet.
        """
        return Question.objects.filter(pub_date__lte=timezone.now())
class ResultsView(generic.DetailView):
    """Detail view showing the vote tallies for a question."""
    model = Question
    template_name = 'polls/results.html'
def vote(request, question_id):
    """Record a vote for the choice selected in the POSTed form.

    Re-renders the detail page with an error message when no choice was
    selected; otherwise increments the vote count and redirects to results.
    """
    question = get_object_or_404(Question, pk=question_id)
    try:
        selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Redisplay the question voting form.
        return render(request, 'polls/detail.html', {
            'question': question,
            'error_message': "You didn't select a choice.",
        })
    else:
        # NOTE(review): read-modify-write on votes is not atomic under
        # concurrent requests — consider an F() expression; confirm intent.
        selected_choice.votes += 1
        selected_choice.save()
        # Always return an HttpResponseRedirect after successfully dealing
        # with POST data. This prevents data from being posted twice if a
        # user hits the Back button.
        return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
"content_hash": "e76962821410aab2e6ce9e37cbe34227",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 82,
"avg_line_length": 33.29824561403509,
"alnum_prop": 0.6743940990516333,
"repo_name": "kapucko/fit2gether",
"id": "b707c2c5aa54020b9d1d57bdde4e1bc381bdbad3",
"size": "1898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polls/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45817"
},
{
"name": "HTML",
"bytes": "25976"
},
{
"name": "JavaScript",
"bytes": "90276"
},
{
"name": "Python",
"bytes": "50624"
}
],
"symlink_target": ""
} |
import rospy
from monitored_navigation.monitor_state import MonitorState
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from std_msgs.msg import Bool
class MonitorStuckOnCarpet(MonitorState):
    """Detects the robot being stuck (e.g. on carpet): rotation is being
    commanded on /cmd_vel but odometry reports no angular motion for many
    consecutive commands. Publishes the stuck flag on
    /monitored_navigation/stuck_on_carpet.
    """
    def __init__(self):
        self.goal_z = 0        # last commanded angular velocity (/cmd_vel)
        self.current_z = 0     # last measured angular velocity (/odom)
        self.n_fails = 0       # consecutive commands with no measured rotation
        self.MAX_FAILS = 100   # threshold before declaring "stuck"
        rospy.Subscriber("/cmd_vel", Twist, self.vel_callback)
        rospy.Subscriber("/odom", Odometry, self.odom_callback)
        self.pub = rospy.Publisher('/monitored_navigation/stuck_on_carpet', Bool, queue_size=1)
        self.pub_msg = Bool(False)
        MonitorState.__init__(self, "/monitored_navigation/stuck_on_carpet", Bool, self.monitor_cb, input_keys=['n_fails'], output_keys=['n_fails'])
    def vel_callback(self, msg):
        """Track commanded rotation; publish the stuck flag when rotation is
        commanded but not observed for more than MAX_FAILS messages."""
        self.goal_z = msg.angular.z
        if self.goal_z != 0 and self.current_z == 0:
            self.n_fails = self.n_fails + 1
        else:
            self.n_fails = 0
        if self.n_fails > self.MAX_FAILS:
            self.pub_msg.data = True
        else:
            self.pub_msg.data = False
        self.pub.publish(self.pub_msg)
    def odom_callback(self, msg):
        """Record the measured angular velocity from odometry."""
        self.current_z = msg.twist.twist.angular.z
    def monitor_cb(self, ud, msg):
        """Test the message and decide exit or not.

        Note: this docstring was previously a stray class-level string
        statement; it documents the monitor callback.
        """
        if msg.data:
            ud.n_fails += 1
        return not msg.data
| {
"content_hash": "2a5b7a7abc3df7de6b119b89c24033b8",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 147,
"avg_line_length": 25.8,
"alnum_prop": 0.5983086680761099,
"repo_name": "strands-project/strands_recovery_behaviours",
"id": "fe09b64fb2cf01267fb540d32d761d52464efc07",
"size": "1419",
"binary": false,
"copies": "1",
"ref": "refs/heads/hydro-devel",
"path": "strands_monitored_nav_states/src/strands_monitored_nav_states/monitor_stuck_on_carpet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "8990"
},
{
"name": "CMake",
"bytes": "19939"
},
{
"name": "Python",
"bytes": "65303"
}
],
"symlink_target": ""
} |
import matplotlib.pylab as plt
import numpy as np
from renderer import PlottingFun as pf
# Demonstrate PlottingFun.AxisFormat on a simple identity-line plot.
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
pf.AxisFormat()
ax.plot(np.arange(0,100), np.arange(0,100), 'b-', linewidth =2)
plt.show()
"content_hash": "ee45cc6a0d6366e8d3f24e00b0045113",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 65,
"avg_line_length": 20.833333333333332,
"alnum_prop": 0.692,
"repo_name": "bps10/emmetrop",
"id": "7380c83ae34bf7ea596ecd9e9640b80b0e9d15c5",
"size": "250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/source/pyplots/AxisFormatDemo2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8170"
},
{
"name": "DOT",
"bytes": "4917"
},
{
"name": "Python",
"bytes": "165616"
},
{
"name": "Shell",
"bytes": "5109"
}
],
"symlink_target": ""
} |
"""
Bayesian Estimation of MSMs
< http://msmbuilder.org/latest/examples/bayesian-msm.html>
"""
import numpy as np
from matplotlib import pyplot as plt
plt.style.use("ggplot")
from mdtraj.utils import timing
from msmbuilder.example_datasets import load_doublewell
from msmbuilder.cluster import NDGrid
from msmbuilder.msm import BayesianMarkovStateModel, MarkovStateModel
# Load the double-well example trajectories and plot a log-scaled histogram
# of all samples pooled together.
trjs = load_doublewell(random_state=0)['trajectories']
plt.hist(np.concatenate(trjs), bins=50, log=True)
plt.ylabel('Frequency')
plt.show()
| {
"content_hash": "81f69f58b4465adb21a516831e7c62ee",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 69,
"avg_line_length": 25.8,
"alnum_prop": 0.7926356589147286,
"repo_name": "jeiros/Scripts",
"id": "dfc34539ce9de5d350acd9e8400063b383caa58c",
"size": "538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MSManalysis/examples/bayesian_estimation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "385"
},
{
"name": "DIGITAL Command Language",
"bytes": "1886"
},
{
"name": "Python",
"bytes": "130032"
},
{
"name": "R",
"bytes": "38505"
},
{
"name": "Shell",
"bytes": "55655"
},
{
"name": "Vim script",
"bytes": "96"
}
],
"symlink_target": ""
} |
'''
Created on Apr 21, 2014
@author: ronald
'''
import math
from counter import *
LOG = 'logaritmic'
SQRT = 'sqrt'
TRUNC = 'truncation'
def makeJSON(NG):
    """Build a JSON-array string of word-cloud entries.

    Args:
        NG: dict mapping an n-gram (tuple of str) to a numeric score.
    Returns:
        str: '[{"name":"...","size":N}, ...]' with scores rounded to ints.
    """
    json = []
    # .items() instead of the Python-2-only .iteritems(): iteration is
    # identical in Python 2 and it also works under Python 3.
    for (k, v) in NG.items():
        req = ' '.join(k)
        json.append('{"name":"%s","size":%d}' % (req, round(v)))
    json = "[" + ',\n'.join(json) + "]"
    return json
def scale(NG,out_range=[0,75], scales=[], inflectionTH=20, width=0.1):
    """Scale n-gram weights into a plottable range.

    Applies optional logarithmic / sqrt scaling, maps the scores linearly
    onto out_range, and optionally flattens the tail below inflectionTH
    (TRUNC). Uses .items() instead of the Python-2-only .iteritems() so the
    function also works under Python 3 (iteration order/semantics are
    unchanged in Python 2).

    Args:
        NG: dict mapping n-gram tuple -> weight (e.g. TF-IDF).
        out_range: [lo, hi] output interval for the scaled weights.
        scales: subset of {LOG, SQRT, TRUNC} to apply.
            NOTE(review): default args are mutable but never mutated here.
        inflectionTH: threshold below which TRUNC flattening kicks in.
        width: smoothing width for the TRUNC flattening curve.
    Returns:
        dict built via counter.insert, mapping n-gram tuple -> scaled weight.
    """
    temp = []
    for (k, v) in NG.items():
        temp.append((v, k))
    NG = temp
    NG.sort(reverse=True)
    # Logarithmic / square-root pre-scaling.
    if LOG in scales:
        NG = [(math.log(k[0]), k[1]) for k in NG]
    if SQRT in scales:
        NG = [(math.sqrt(k[0]), k[1]) for k in NG]
    # Linear map of weights -> out_range (default [0, 75]).
    scaled = []
    # NG is sorted descending; iterate ascending for the mapping.
    mn = NG[-1][0]
    mx = NG[0][0]
    domain = [mn, mx]
    for ngram in reversed(NG):
        w = ngram[0]
        newW = 1.0*(out_range[1]-out_range[0])*(w-domain[0])/float(domain[1]-domain[0]) + out_range[0]
        scaled.append(newW)
    if TRUNC in scales:
        # Find the last index (from the top) whose scaled value drops below
        # the inflection threshold, then smoothly flatten everything below it.
        inflection_point = -1
        for i in range(len(scaled)-1, 0, -1):
            if scaled[i] < inflectionTH:  # scale inflection threshold
                inflection_point = i
                break
        if inflection_point != -1:
            fixed = []
            for i in range(inflection_point+1):
                fixed.append(width*scaled[inflection_point] *
                             ((scaled[i] - scaled[0])/(scaled[inflection_point]-scaled[0]))**3 +
                             (1 - width)*scaled[inflection_point])
            for i in range(inflection_point+1):
                scaled[i] = fixed[i]
    scaled = list(reversed(scaled))
    res = {}
    for (i, v) in enumerate(NG):
        insert(res, tuple(v[1]), scaled[i])
    return res
def makeText(NG, separator=':'):
    """Build 'ngram<separator>score' lines for online word-cloud tools.

    Args:
        NG: dict mapping an n-gram (tuple of str) to a float score.
        separator: string placed between the n-gram text and the score.
    Returns:
        str: one line per n-gram, sorted by descending score.
    """
    res = []
    # .items() instead of the Python-2-only .iteritems(): identical
    # iteration semantics in Python 2, and compatible with Python 3.
    temp = [(v, k) for (k, v) in NG.items()]
    temp.sort(reverse=True)
    for t in temp:
        k = t[1]
        v = t[0]
        req = ' '.join(k)
        res.append('%s%s%.6f' % (req, separator, v))
    res = '\n'.join(res)
    return res
def writeOutput(source_dir,results_dir,scaling=False,scales=[],normalize=False,handfiltered=True,score=FREQ_DOC, join=False,text=True,json=True):
    """Read n-gram score lists and write word-cloud output files.

    Args:
        source_dir: absolute path of the input .csv lists.
        results_dir: absolute path where results are written.
        scaling: apply scale() to the score lists.
        scales: identifiers of the scalings to apply (LOG/SQRT/TRUNC).
        normalize: normalize list scores (used with TF-IDF).
        handfiltered: use hand-filtered lists (True) or originals (False).
        score: which score the original lists carry (FREQ_* or TFIDF).
        join: also write a merged list of all n-grams.
        text: write .txt files formatted for online word-cloud tools.
        json: write .json files.

    NOTE(review): with join=False, `joined` stays an empty list, but
    makeJSON/makeText/scale expect a dict — the json/text branches would
    then fail. Callers in __main__ always pass join=True; confirm before
    relying on join=False together with json/text.
    NOTE(review): `scales=[]` is a mutable default argument (not mutated
    here, but fragile if that changes).
    """
    unigrams = []
    bigrams = []
    trigrams = []
    if not handfiltered:
        # Using the original (unfiltered) lists.
        if score == FREQ_DOC or score == FREQ_TOTAL:
            unigrams = readLatin(os.path.join(source_dir,'freq_unigrams.csv')).split('\n')
            bigrams = readLatin(os.path.join(source_dir,'freq_bigrams.csv' )).split('\n')
            trigrams = readLatin(os.path.join(source_dir,'freq_trigrams.csv')).split('\n')
        else:
            # score = TFIDF
            unigrams = readLatin(os.path.join(source_dir,'tfidf_unigrams.csv')).split('\n')
            bigrams = readLatin(os.path.join(source_dir,'tfidf_bigrams.csv' )).split('\n')
            trigrams = readLatin(os.path.join(source_dir,'tfidf_trigrams.csv')).split('\n')
        unigrams = readList(unigrams,header=True)
        bigrams = readList(bigrams,header=True)
        trigrams = readList(trigrams,header=True)
    else:
        # Using the hand-filtered lists.
        unigrams = readLatin(os.path.join(source_dir,'ug_handfiltered.csv')).split('\n')
        bigrams = readLatin(os.path.join(source_dir,'bg_handfiltered.csv')).split('\n')
        trigrams = readLatin(os.path.join(source_dir,'tg_handfiltered.csv')).split('\n')
        unigrams = readList(unigrams)
        bigrams = readList(bigrams)
        trigrams = readList(trigrams)
    joined = []
    if normalize:
        unigrams = normalizeFeature(unigrams)
        bigrams = normalizeFeature(bigrams)
        trigrams = normalizeFeature(trigrams)
    if join:
        joined = joinFeatures(unigrams, bigrams, trigrams)
    if scaling:
        unigrams = scale(unigrams,scales=scales)
        bigrams = scale(bigrams ,scales=scales)
        trigrams = scale(trigrams,scales=scales)
        joined = scale(joined,scales=scales)
    ####################################################################################
    if json:
        jsonUG = makeJSON(unigrams)
        jsonBG = makeJSON(bigrams)
        jsonTG = makeJSON(trigrams)
        jsonJ = makeJSON(joined)
        open(os.path.join(results_dir ,'unigram.json'),'w').write(jsonUG)
        open(os.path.join(results_dir ,'bigram.json' ),'w').write(jsonBG)
        open(os.path.join(results_dir ,'trigram.json'),'w').write(jsonTG)
        # NOTE(review): inner 'if json:' is always true here; likely meant
        # 'if join:'. Confirm before changing behavior.
        if json:
            open(os.path.join(results_dir ,'joined.json' ),'w').write(jsonJ)
    if text:
        textUG = makeText(unigrams)
        textBG = makeText(bigrams)
        textTG = makeText(trigrams)
        textJ = makeText(joined)
        open(os.path.join(results_dir ,'unigram.txt'),'w').write(textUG)
        open(os.path.join(results_dir ,'bigram.txt' ),'w').write(textBG)
        open(os.path.join(results_dir ,'trigram.txt'),'w').write(textTG)
        # NOTE(review): likely meant 'if join:' — see note above.
        if json:
            open(os.path.join(results_dir ,'joined.txt' ),'w').write(textJ)
if __name__=="__main__":
    # results & source dirs
    source_mit = os.path.join(UTIL_DIR,'syllabus/results/MIT')
    source_uni = os.path.join(UTIL_DIR,'syllabus/results/UNI')
    """ Cambia handfiltered = True cuando tengas la lista filtrada a mano
    *formato nombre de lista filtrada a mano:
    "<ug,bg,tg>_handfiltered.csv"
    """
    # Write TF-IDF based word-cloud outputs for the MIT and UNI syllabi.
    writeOutput(source_mit,source_mit,handfiltered=False,score=TFIDF,join=True,json=True)
    writeOutput(source_uni,source_uni,handfiltered=False,score=TFIDF,join=True,json=True)
    # Same for the job-market ("mercado") corpus.
    results_mercado = os.path.join(UTIL_DIR,'syllabus/mercado')
    writeOutput(results_mercado,results_mercado,handfiltered=False,score=TFIDF, join=True,json=True)
    """
    # solo para joined _ raw data
    OUTPUT_DIR = ''
    open(OUTPUT_DIR + 'joined_rawdata.txt' ,'w').write(textJ)
    """
"content_hash": "06438d8ef6a5d9076da09ceb34c592c6",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 145,
"avg_line_length": 35.83173076923077,
"alnum_prop": 0.5727894807460083,
"repo_name": "ronaldahmed/labor-market-demand-analysis",
"id": "26b7a08358499df54245e3e05fb466d034fed28d",
"size": "7453",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rule based major_extractor/libraries/tuple2json.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "125767"
},
{
"name": "CSS",
"bytes": "32608"
},
{
"name": "HTML",
"bytes": "235912337"
},
{
"name": "JavaScript",
"bytes": "23952"
},
{
"name": "Makefile",
"bytes": "1369"
},
{
"name": "Python",
"bytes": "1028412"
},
{
"name": "R",
"bytes": "38334"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Renames Exercise.type_of_game to type_of_exercise and adds an
    optional name field to Plan."""
    dependencies = [
        ('exercise', '0001_initial'),
    ]
    operations = [
        migrations.RenameField(
            model_name='exercise',
            old_name='type_of_game',
            new_name='type_of_exercise',
        ),
        migrations.AddField(
            model_name='plan',
            name='name',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
    ]
| {
"content_hash": "a183eb01321444057f5da07d3c471c29",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 74,
"avg_line_length": 23.565217391304348,
"alnum_prop": 0.559040590405904,
"repo_name": "DADco/convalesense-web",
"id": "6011273fc3ccbc7e5071781e93deed54bd81e0b5",
"size": "615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "convalesense/exercise/migrations/0002_auto_20170126_1124.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5264"
},
{
"name": "HTML",
"bytes": "30851"
},
{
"name": "JavaScript",
"bytes": "3917"
},
{
"name": "Python",
"bytes": "80988"
},
{
"name": "Shell",
"bytes": "4200"
}
],
"symlink_target": ""
} |
"""Tests for f_net.input_pipeline."""
import functools
from typing import Sequence
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import tensorflow_datasets as tfds
from f_net import input_pipeline
import sentencepiece as spm
class MockTokenizer(spm.SentencePieceProcessor):
  """Mock tokenizer returning pre-specified tokens."""
  def EncodeAsIds(self, text):
    del text  # Ignore input and return dummy output
    # Three random token ids in [5, 20).
    return np.random.randint(5, 20, size=3)
  def pad_id(self):
    # Fixed id for the padding token.
    return 1
  def eos_id(self):
    # Fixed id for the end-of-sequence token.
    return 2
  def bos_id(self):
    # Fixed id for the beginning-of-sequence token.
    return 3
  def PieceToId(self, text):
    del text  # Ignore input and return dummy output
    return np.random.randint(5, 20)
  def GetPieceSize(self):
    # Dummy vocabulary size.
    return 20
class InputPipelineTest(parameterized.TestCase):
  """Checks batch keys and shapes produced by glue_inputs for each GLUE task."""
  @parameterized.parameters("glue/cola", "glue/sst2", "glue/mrpc", "glue/qqp",
                            "glue/stsb", "glue/mnli", "glue/qnli", "glue/rte",
                            "glue/wnli")
  def test_glue_inputs(self, dataset_name):
    batch_size = 2
    max_seq_length = 4
    glue_pipeline = functools.partial(
        input_pipeline.glue_inputs,
        split=tfds.Split.TRAIN,
        batch_size=batch_size,
        tokenizer=MockTokenizer(),
        max_seq_length=max_seq_length)
    # Mocked TFDS data avoids downloading real datasets; check one batch.
    with tfds.testing.mock_data(num_examples=10):
      for batch, _ in zip(glue_pipeline(dataset_name=dataset_name), range(1)):
        self.assertSetEqual(
            set(batch.keys()), {"input_ids", "type_ids", "idx", "label"})
        self.assertTupleEqual(batch["input_ids"].shape,
                              (batch_size, max_seq_length))
        self.assertTupleEqual(batch["type_ids"].shape,
                              (batch_size, max_seq_length))
        self.assertTupleEqual(batch["idx"].shape, (batch_size,))
        self.assertTupleEqual(batch["label"].shape, (batch_size,))
  # TODO(b/181607810): Modify C4 pipeline to load smaller batches of text
  # so that we can test it.
if __name__ == "__main__":
absltest.main()
| {
"content_hash": "021082b88c4d3416122a7ee723236e87",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 78,
"avg_line_length": 29.52857142857143,
"alnum_prop": 0.6415094339622641,
"repo_name": "google-research/google-research",
"id": "04529373f2c02e8ea41d2e1b942d935b654b077f",
"size": "2675",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "f_net/input_pipeline_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
import json
import gconf
import MySQLdb
from dbutils import execute_sql
def get_users():
    """Return all rows of the user table, each as a dict keyed by column name."""
    query = 'select * from user'
    field_names = ("id", "username", "password", "age")
    _, rows = execute_sql(query)
    # Pair each row's values with the known column order.
    return [dict(zip(field_names, row)) for row in rows]
def validate_login(username, password):
    """Return True when a user row matches the username and md5'd password."""
    query = 'select * from user where username=%s and password=md5(%s)'
    matches, _ = execute_sql(query, (username, password))
    return matches != 0
def validate_user(username):
    """Return True when the username is NOT yet taken (safe to create)."""
    query = 'select * from user where username=%s'
    hits, _ = execute_sql(query, username)
    return hits == 0
def get_user(username):
    """Return {'username', 'password', 'age'} for the given user.

    Returns an empty dict when no matching user exists.  Usernames are
    unique (user_add refuses duplicates via validate_user), so the scan
    stops at the first match instead of walking the whole result set.
    """
    user_dict = {}
    for user in get_users():
        if user.get('username') == username:
            user_dict['username'] = username
            user_dict['password'] = user['password']
            user_dict['age'] = user['age']
            break  # Fix: original kept scanning (and overwriting) after a match.
    return user_dict
def user_add(username, password, age):
    """Insert a new user; return False if the name is taken or the insert fails."""
    # Guard clause: refuse duplicate usernames up front.
    if not validate_user(username):
        return False
    query = 'insert into user(username, password, age) values(%s, md5(%s), %s)'
    inserted, _ = execute_sql(query, (username, password, age), False)
    return inserted != 0
def user_del(username):
    """Delete the user with the given username; return True when a row was removed."""
    # The nested subquery works around MySQL's ban on selecting from the
    # table being deleted from in the same statement.
    query = 'delete from user where id = (select id from (select id from user where username = %s) temp)'
    removed, _ = execute_sql(query, username, False)
    return removed != 0
def user_edit(username, password, age):
    """Update the password (md5'd) and age of a user; return True on change."""
    # Nested subquery avoids MySQL's restriction on updating a table while
    # selecting from it in the same statement.
    query = 'update user set password=md5(%s), age=%s where id = (select id from (select id from user where username = %s) temp)'
    changed, _ = execute_sql(query, (password, age, username), False)
    return changed != 0
if __name__ == '__main__':
    # Ad-hoc smoke test (Python 2 print statement; this module targets Python 2).
    print user_edit(username='hjun', password='222', age=666)
"content_hash": "30112fd472a7015e04a9e9c75ad62309",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 124,
"avg_line_length": 28.548387096774192,
"alnum_prop": 0.672316384180791,
"repo_name": "51reboot/actual_09_homework",
"id": "2bd6fd9b280abcbbfadd28a9a32f8a61a41376a0",
"size": "1788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "06/hjun/app1/user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4623850"
},
{
"name": "HTML",
"bytes": "90670692"
},
{
"name": "JavaScript",
"bytes": "31827839"
},
{
"name": "Nginx",
"bytes": "1073"
},
{
"name": "PHP",
"bytes": "349512"
},
{
"name": "Python",
"bytes": "1705997"
},
{
"name": "Shell",
"bytes": "10001"
},
{
"name": "Smarty",
"bytes": "342164"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.