text stringlengths 4 1.02M | meta dict |
|---|---|
# Codewars "Twisted Sum" kata test cases; `test` is the Codewars-provided
# assertion helper and `compute_sum` is the solution under test.
# The expected values are consistent with compute_sum(n) returning the sum of
# the digit sums of the integers 1..n (e.g. n=10: 1+2+...+9 + (1+0) = 46) —
# inferred from the values below, not from a visible implementation.
test.assert_equals(compute_sum(1), 1)
test.assert_equals(compute_sum(2), 3)
test.assert_equals(compute_sum(3), 6)
test.assert_equals(compute_sum(4), 10)
test.assert_equals(compute_sum(10), 46)
test.assert_equals(compute_sum(12), 51)
| {
"content_hash": "1e7f250b3904d7e84fe64eb9de1dc7a2",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 39,
"avg_line_length": 38.833333333333336,
"alnum_prop": 0.7424892703862661,
"repo_name": "RevansChen/online-judge",
"id": "70c78848ce2f93e2cf1b501fa870a7d6e19e2c96",
"size": "251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Codewars/6kyu/twisted-sum/Python/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Brainfuck",
"bytes": "102"
},
{
"name": "C",
"bytes": "6829"
},
{
"name": "C#",
"bytes": "19758"
},
{
"name": "C++",
"bytes": "9439"
},
{
"name": "Clojure",
"bytes": "75"
},
{
"name": "CoffeeScript",
"bytes": "903"
},
{
"name": "Crystal",
"bytes": "52"
},
{
"name": "Dart",
"bytes": "182"
},
{
"name": "Elixir",
"bytes": "1027"
},
{
"name": "Erlang",
"bytes": "132"
},
{
"name": "F#",
"bytes": "40"
},
{
"name": "Go",
"bytes": "83"
},
{
"name": "Haskell",
"bytes": "102"
},
{
"name": "Java",
"bytes": "11057"
},
{
"name": "JavaScript",
"bytes": "44773"
},
{
"name": "Kotlin",
"bytes": "82"
},
{
"name": "Lua",
"bytes": "93"
},
{
"name": "PHP",
"bytes": "2875"
},
{
"name": "Python",
"bytes": "563400"
},
{
"name": "R",
"bytes": "265"
},
{
"name": "Ruby",
"bytes": "7171"
},
{
"name": "Rust",
"bytes": "74"
},
{
"name": "Scala",
"bytes": "84"
},
{
"name": "Shell",
"bytes": "438"
},
{
"name": "Swift",
"bytes": "6597"
},
{
"name": "TSQL",
"bytes": "3531"
},
{
"name": "TypeScript",
"bytes": "5744"
}
],
"symlink_target": ""
} |
from django.contrib.auth.views import login, logout
from django.core.urlresolvers import resolve, reverse
from django.test import TestCase
from accounts.forms import UserForm, UserEditFrom, UserResisterFrom
from accounts.models import User
from accounts.views import register_user, edit_user, view_user_data
class UrlResolveTestsUser(TestCase):
    """Check that each accounts URL path resolves to the expected view callable."""

    def _assert_resolves(self, path, view):
        # Shared helper: resolve the path and compare the bound view function.
        self.assertEqual(resolve(path).func, view)

    def test_url_resolves_to_login(self):
        self._assert_resolves('/login/', login)

    def test_url_resolves_to_logout(self):
        self._assert_resolves('/logout/', logout)

    def test_url_resolves_to_register(self):
        self._assert_resolves('/register/', register_user)

    def test_url_resolves_to_edit(self):
        self._assert_resolves('/register/1/', edit_user)

    def test_url_resolves_to_user_data(self):
        self._assert_resolves('/user/', view_user_data)
class UserModelsTest(TestCase):
    """Exercise creation, counting and password hashing on the User model."""

    def create_user(self, username='', password=''):
        # Persist a user whose password is stored hashed via set_password().
        new_user = User(username=username)
        new_user.set_password(password)
        new_user.save()

    def test_user_is_empty(self):
        # A fresh test database contains no users.
        self.assertEqual(User.objects.all().count(), 0)

    def test_user_is_not_empty(self):
        self.create_user('test_user', 'test_password')
        self.assertEqual(User.objects.all().count(), 1)

    def test_user_saving_and_retrieving(self):
        username = 'test_user'
        password = 'test_password'
        self.create_user(username=username, password=password)
        stored = User.objects.all()[0]
        self.assertEqual(stored.username, username)
        # check_password must verify against the hashed value.
        self.assertIs(stored.check_password(password), True)
class UserFormTest(TestCase):
    """Validation behaviour of UserForm."""

    def test_valid(self):
        # A fully populated form bound to a fresh User instance validates.
        data = {'username': 'test_user', 'password': 'test_password',
                'first_name': 'test_first', 'last_name': 'test_last'}
        form = UserForm(data, instance=User())
        self.assertTrue(form.is_valid())

    def test_invalid(self):
        # An empty payload must fail validation.
        form = UserForm({}, instance=User())
        self.assertFalse(form.is_valid())
class UserEditFormTest(TestCase):
    """Validation behaviour of UserEditFrom (name as declared in accounts.forms)."""

    def test_valid(self):
        data = {'username': 'test_user',
                'first_name': 'test_first',
                'last_name': 'test_last'}
        self.assertTrue(UserEditFrom(data).is_valid())

    def test_invalid(self):
        # No data at all must fail validation.
        self.assertFalse(UserEditFrom({}).is_valid())
class UserResisterFromTest(TestCase):
    """Validation behaviour of the registration form, including password match."""

    def test_valid(self):
        data = {'username': 'test_user',
                'first_name': 'test_first',
                'last_name': 'test_last',
                'password1': 'test_password',
                'password2': 'test_password'}
        self.assertTrue(UserResisterFrom(data).is_valid())

    def test_invalid(self):
        # An empty payload must fail validation.
        self.assertFalse(UserResisterFrom({}).is_valid())

    def test_invalid_of_password(self):
        # Mismatched password1/password2 must be rejected.
        data = {'username': 'test_user',
                'first_name': 'test_first',
                'last_name': 'test_last',
                'password1': 'test_password',
                'password2': 'test_password_mistakes'}
        self.assertFalse(UserResisterFrom(data).is_valid())
class ViewTest(TestCase):
    """Smoke-test that account views respond with HTTP 200."""

    def test_status_in_register_view(self):
        resp = self.client.get(reverse('register'))
        self.assertEqual(resp.status_code, 200)

    def test_status_in_edit_view(self):
        # follow=True so a redirect (e.g. to login) still ends in a 200 page.
        resp = self.client.get(reverse('edit', kwargs={'user_id': 1}),
                               follow=True)
        self.assertEqual(resp.status_code, 200)
| {
"content_hash": "1c316c263445aeb7fa1958a998c23c34",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 91,
"avg_line_length": 34.0990990990991,
"alnum_prop": 0.6457067371202113,
"repo_name": "hayashizakitakaaki/Introduction_mysite",
"id": "0c84b26974637fb983a39326ed8c86b33afb3406",
"size": "3809",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "accounts/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1764"
},
{
"name": "HTML",
"bytes": "32756"
},
{
"name": "JavaScript",
"bytes": "160"
},
{
"name": "Python",
"bytes": "96749"
}
],
"symlink_target": ""
} |
"""CoreML frontend."""
from __future__ import absolute_import as _abs
import numpy as np
import tvm
from .. import symbol as _sym
from .._base import string_types
from .common import SymbolTable
__all__ = ['from_coreml']
def NeuralNetworkImageScaler(op, insym, symtab):
    """CoreML image scaling preprocessing: out = channelScale * in + bias.

    The per-channel biases are stored in blue/green/red order and reshaped to
    (3, 1, 1) so they broadcast over height and width.
    """
    # this changes the symbol
    chan_bias = symtab.new_const(
        np.array([op.blueBias, op.greenBias, op.redBias]).reshape([3, 1, 1]))
    scaled = _sym.__mul_scalar__(insym, scalar=op.channelScale)
    return _sym.broadcast_add(scaled, chan_bias)
def NeuralNetworkMeanImage(op, insym, symtab):
    # Subtract the CoreML preprocessing mean image from the input.
    # this changes the symbol
    # NOTE(review): `elemwise_sub` normally takes two symbol operands;
    # passing `scalar=op.meanImage` (a mean-image value from the proto)
    # looks suspicious — confirm against the nnvm symbol API before relying
    # on this path.
    ret = _sym.elemwise_sub(insym, scalar=op.meanImage)
    return ret
def ConvolutionLayerParams(op, insym, symtab):
    """Convert a CoreML convolution/deconvolution layer to an nnvm conv op.

    Parameters
    ----------
    op : CoreML ConvolutionLayerParams protobuf message
    insym : nnvm.symbol.Symbol
        Input symbol.
    symtab : SymbolTable
        Symbol table used to register constants and carry pending padding
        recorded by a preceding PaddingLayerParams.

    Returns
    -------
    nnvm.symbol.Symbol
    """
    # CoreML stores weights as a flat float list; reshape to OIHW.
    weights = symtab.new_const(np.array(list(op.weights.floatValue)).reshape(
        tuple([op.outputChannels, op.kernelChannels] + list(op.kernelSize))))
    if op.hasBias:
        biases = symtab.new_const(list(op.bias.floatValue))
    dilation = list(op.dilationFactor)
    if not dilation:
        dilation = [1, 1]
    params = {'channels': op.outputChannels,
              'kernel_size': list(op.kernelSize),
              'strides': list(op.stride),
              'dilation': dilation,
              'use_bias': op.hasBias,
              'groups': op.nGroups}
    if op.WhichOneof('ConvolutionPaddingType') == 'valid':
        valid = op.valid
        padding = [b.startEdgeSize for b in valid.paddingAmounts.borderAmounts]
        padding2 = [b.endEdgeSize for b in valid.paddingAmounts.borderAmounts]
        for i, j in zip(padding, padding2):
            assert i == j, "Asymmetry padding not supported"
        if padding:
            params['padding'] = padding
    elif op.WhichOneof('ConvolutionPaddingType') == 'same':
        # Derive symmetric 'same' padding from the kernel size.
        kernel = params['kernel_size']
        pad_h = kernel[0] - 1
        pad_w = kernel[1] - 1
        pad_t = pad_h // 2
        pad_l = pad_w // 2
        pad_b = pad_h - pad_t
        pad_r = pad_w - pad_l
        assert pad_t == pad_r and pad_l == pad_b, "Asymmetry padding not supported"
        params['padding'] = [pad_t, pad_l]
    else:
        # BUG FIX: message previously read "Valid/Same convolution padding
        # implemented", which stated the opposite of the failure; wording now
        # matches the pooling converter.
        raise NotImplementedError("Other convolution padding not implemented")
    # BUG FIX: consume a preceding padding layer BEFORE building the conv
    # symbol; previously params['padding'] was updated after conv2d had
    # already been created, so the recorded padding silently had no effect.
    if symtab.in_padding:
        params['padding'] = [sum(x) for x in zip(params.get('padding', [0, 0]),
                                                 symtab.paddings)]
        symtab.clear_padding()
    if op.hasBias:
        pos = [insym, weights, biases]
    else:
        pos = [insym, weights]
    if op.isDeconvolution:
        ret = _sym.conv2d_transpose(*pos, **params)
    else:
        ret = _sym.conv2d(*pos, **params)
    return ret
def BatchnormLayerParams(op, insym, symtab):
    """Convert a CoreML batchnorm layer to an nnvm batch_norm symbol."""
    # this changes the symbol
    if op.instanceNormalization:
        raise NotImplementedError("instance normalization not implemented")
    # Register each learned tensor as a constant in the symbol table.
    return _sym.batch_norm(data=insym,
                           gamma=symtab.new_const(list(op.gamma.floatValue)),
                           beta=symtab.new_const(list(op.beta.floatValue)),
                           moving_mean=symtab.new_const(list(op.mean.floatValue)),
                           moving_var=symtab.new_const(list(op.variance.floatValue)),
                           epsilon=op.epsilon)
def ActivationParams(op, insym, symtab):
    """Convert a CoreML activation layer to the equivalent nnvm expression.

    Parameters
    ----------
    op : CoreML ActivationParams protobuf message
    insym : nnvm.symbol.Symbol
        Input symbol.
    symtab : SymbolTable
        Symbol table used to register constant tensors.

    Returns
    -------
    nnvm.symbol.Symbol

    Raises
    ------
    NotImplementedError
        For activation kinds with no nnvm mapping.
    """
    whichActivation = op.WhichOneof('NonlinearityType')
    par = getattr(op, whichActivation)
    if whichActivation == 'linear':
        # alpha * x + beta
        return _sym.__add_scalar__(_sym.__mul_scalar__(insym, scalar=par.alpha), scalar=par.beta)
    elif whichActivation == 'ReLU':
        return _sym.relu(insym)
    elif whichActivation == 'leakyReLU':
        return _sym.leaky_relu(insym, alpha=par.alpha)
    elif whichActivation == 'thresholdedReLU':
        # x * (x > alpha): zeroes values at or below the threshold.
        alpha_tensor = _sym.full_like(insym, fill_value=float(par.alpha))
        return _sym.elemwise_mul(insym, _sym.greater(insym, alpha_tensor))
    elif whichActivation == 'PReLU':
        return _sym.prelu(insym, alpha=par.alpha)
    elif whichActivation == 'tanh':
        return _sym.tanh(insym)
    elif whichActivation == 'scaledTanh':
        # alpha * tanh(beta * x)
        return _sym.__mul_scalar__(_sym.tanh(_sym.__mul_scalar__(
            insym, scalar=par.beta)), scalar=par.alpha)
    elif whichActivation == 'sigmoid':
        return _sym.sigmoid(insym)
    elif whichActivation == 'sigmoidHard':
        # clip(alpha * x + beta, 0, 1)
        transformX = (par.alpha * insym) + par.beta
        return _sym.clip(transformX, a_min=0, a_max=1)
    elif whichActivation == 'ELU':
        # NOTE(review): this computes alpha * (exp(x) - 1) for ALL x; a
        # standard ELU leaves x unchanged for x > 0 — confirm against the
        # CoreML spec before relying on this path.
        return _sym.__mul_scalar__(_sym.__add_scalar__(
            _sym.exp(insym), scalar=-1), scalar=par.alpha)
    elif whichActivation == 'softsign':
        # x / (1 + |x|), with |x| built as relu(x) + relu(-x)
        return insym / (1 + (_sym.relu(insym) + _sym.relu(_sym.negative(insym))))
    elif whichActivation == 'softplus':
        return _sym.log(_sym.__add_scalar__(_sym.exp(insym), scalar=1))
    elif whichActivation == 'parametricSoftplus':
        # alpha * log(exp(x) + beta), optionally per-channel.
        alpha = list(par.alpha.floatValue)
        # BUG FIX: beta was previously copied from par.alpha.floatValue,
        # making beta always equal to alpha.
        beta = list(par.beta.floatValue)
        if len(alpha) == 1:
            return _sym.__mul_scalar__(_sym.log(_sym.__add_scalar__(
                _sym.exp(insym), scalar=beta[0])), scalar=alpha[0])
        alpha = np.array(alpha).reshape((len(alpha), 1, 1))
        beta = np.array(beta).reshape((len(beta), 1, 1))
        alphasym = symtab.new_const(alpha)
        betasym = symtab.new_const(beta)
        return _sym.broadcast_mul(_sym.log(_sym.broadcast_add(
            _sym.exp(insym), betasym)), alphasym)
    else:
        raise NotImplementedError('%s not implemented' % whichActivation)
def ScaleLayerParams(op, insym, symtab):
    """Per-channel scale with optional bias: out = in * scale [+ bias]."""
    # Shapes get trailing [1, 1] so constants broadcast over H and W.
    scale_sym = symtab.new_const(np.array(list(op.scale.floatValue)).reshape(
        tuple(list(op.shapeScale) + [1, 1])))
    out = _sym.broadcast_mul(insym, scale_sym)
    if not op.hasBias:
        return out
    bias_sym = symtab.new_const(np.array(list(op.bias.floatValue)).reshape(
        tuple(list(op.shapeBias) + [1, 1])))
    return _sym.broadcast_add(out, bias_sym)
def PoolingLayerParams(op, insym, symtab):
    """Convert a CoreML pooling layer (max/average, global or windowed)."""
    if op.globalPooling:
        # Global pooling needs no window/stride parameters.
        if op.type == 0:
            return _sym.global_max_pool2d(insym)
        if op.type == 1:
            return _sym.global_avg_pool2d(insym)
        raise NotImplementedError("Only max and average pooling implemented")
    params = {'pool_size': list(op.kernelSize),
              'strides': list(op.stride)}
    pad_kind = op.WhichOneof('PoolingPaddingType')
    if pad_kind == 'valid':
        valid = op.valid
        begins = [b.startEdgeSize for b in valid.paddingAmounts.borderAmounts]
        ends = [b.endEdgeSize for b in valid.paddingAmounts.borderAmounts]
        for i, j in zip(begins, ends):
            assert i == j
        params['padding'] = begins
    elif pad_kind == 'includeLastPixel':
        # I don't know if this is correct
        params['padding'] = list(op.includeLastPixel.paddingAmounts)
        params['ceil_mode'] = True
    else:
        raise NotImplementedError("Other convolution padding not implemented")
    # consume padding layer
    if symtab.in_padding:
        params['padding'] = [sum(x) for x in zip(
            params.get('padding', [0, 0]), symtab.paddings)]
        symtab.clear_padding()
    if op.type == 0:
        return _sym.max_pool2d(insym, **params)
    if op.type == 1:
        return _sym.avg_pool2d(insym, **params)
    raise NotImplementedError("Only max and average pooling implemented")
def SoftmaxLayerParams(op, insym, symtab):
    """Flatten the input to 2-D, then apply softmax."""
    flattened = _sym.flatten(insym)
    return _sym.softmax(flattened)
def InnerProductLayerParams(op, insym, symtab):
    """Convert a CoreML inner-product (fully connected) layer to nnvm dense."""
    # Weights reshape to (outputChannels, inputChannels).
    w = symtab.new_const(np.array(op.weights.floatValue).reshape(
        (op.outputChannels, op.inputChannels)))
    kwargs = {'weight': w, 'use_bias': False, 'units': op.outputChannels}
    if op.hasBias:
        kwargs['bias'] = symtab.new_const(np.array(op.bias.floatValue))
        kwargs['use_bias'] = True
    return _sym.dense(data=insym, **kwargs)
def AddLayerParams(op, insyms, symtab):
    """Elementwise sum of all inputs, plus an optional scalar offset `alpha`."""
    if not isinstance(insyms, list):
        insyms = [insyms]
    total = insyms[0]
    for extra in insyms[1:]:
        total = _sym.elemwise_add(total, extra)
    if op.alpha > 0:
        total = _sym.__add_scalar__(total, scalar=op.alpha)
    return total
def MultiplyLayerParams(op, insyms, symtab):
    """Elementwise product of all inputs, times an optional scalar `alpha`."""
    if not isinstance(insyms, list):
        insyms = [insyms]
    product = insyms[0]
    for extra in insyms[1:]:
        product = _sym.elemwise_mul(product, extra)
    if op.alpha != 1:
        product = _sym.__mul_scalar__(product, scalar=op.alpha)
    return product
def ConcatLayerParams(op, insyms, symtab):
    """Concatenate all inputs along the channel axis (axis=1)."""
    if not isinstance(insyms, list):
        insyms = [insyms]
    if op.sequenceConcat:
        raise NotImplementedError("Sequence Concat not supported")
    return _sym.concatenate(*insyms, axis=1)
def FlattenLayerParams(op, insym, symtab):
    """Flatten the input; mode 1 first reorders to channel-last via transpose."""
    data = insym
    if op.mode == 1:
        reshaped = _sym.reshape(data, shape=(0, 0, -1))
        data = _sym.transpose(reshaped, axes=(0, 2, 1))
    return _sym.flatten(data)
def PaddingLayerParams(op, insym, symtab):
    """Record constant zero padding so the next conv/pool layer can absorb it.

    The input symbol is returned unchanged; the padding amounts are stashed
    on the symbol table.
    """
    if op.WhichOneof('PaddingType') != 'constant':
        raise NotImplementedError("Only constant padding is supported now.")
    constant = op.constant
    if constant.value != 0:
        raise NotImplementedError("Padding value {} not supported.".format(constant.value))
    begins = [b.startEdgeSize for b in op.paddingAmounts.borderAmounts]
    ends = [b.endEdgeSize for b in op.paddingAmounts.borderAmounts]
    # Only symmetric padding can be expressed via conv/pool padding params.
    for i, j in zip(begins, ends):
        assert i == j
    symtab.set_padding(begins)
    return insym
def PermuteLayerParams(op, insym, symtab):
    """Transpose the input according to the CoreML axis permutation."""
    return _sym.transpose(insym, axes=tuple(op.axis))
def UpsampleLayerParams(op, insym, symtab):
    """Convert a CoreML upsample layer to nnvm upsampling.

    Parameters
    ----------
    op : CoreML UpsampleLayerParams protobuf message
    insym : nnvm.symbol.Symbol
        Input symbol.
    symtab : SymbolTable
        Unused; kept for converter-signature uniformity.

    Returns
    -------
    nnvm.symbol.Symbol

    Raises
    ------
    NotImplementedError
        If the height and width scaling factors differ.
    """
    if op.scalingFactor[0] != op.scalingFactor[1]:
        # BUG FIX: the message was a backslash-continued string literal, which
        # embedded the source indentation (a long run of spaces) in the text.
        raise NotImplementedError(
            "Upsampling only supported with same height and width scaling factor.")
    # CoreML mode 0 is nearest-neighbour; anything else maps to bilinear.
    interpolationMode = 'NEAREST_NEIGHBOR' if op.mode == 0 else 'BILINEAR'
    return _sym.upsampling(insym, scale=op.scalingFactor[0], method=interpolationMode)
def L2NormalizeLayerParams(op, insym, symtab):
    """L2-normalize across the channel axis using the layer's epsilon."""
    return _sym.l2_normalize(insym, axis=1, eps=op.epsilon)
def LRNLayerParams(op, insym, symtab):
    """Local response normalization across channels (axis=1, NCHW layout)."""
    return _sym.lrn(data=insym,
                    size=op.localSize,
                    bias=op.k,
                    alpha=op.alpha,
                    beta=op.beta,
                    axis=1)  # default layout is nchw
def AverageLayerParams(op, insyms, symtab):
    """Elementwise average of two or more input symbols."""
    if not isinstance(insyms, list) or len(insyms) < 2:
        raise ValueError("Expect minimum 2 inputs")
    acc = insyms[0]
    for sym in insyms[1:]:
        acc = _sym.broadcast_add(acc, sym)
    return acc / len(insyms)
def MaxLayerParams(op, insyms, symtab):
    """Elementwise maximum over two or more input symbols."""
    if not isinstance(insyms, list) or len(insyms) < 2:
        raise ValueError("Expect minimum 2 inputs")
    acc = insyms[0]
    for sym in insyms[1:]:
        acc = _sym.broadcast_max(acc, sym)
    return acc
def MinLayerParams(op, insyms, symtab):
    """Elementwise minimum over two or more input symbols."""
    if not isinstance(insyms, list) or len(insyms) < 2:
        raise ValueError("Expect minimum 2 inputs")
    acc = insyms[0]
    for sym in insyms[1:]:
        acc = _sym.broadcast_min(acc, sym)
    return acc
# Dispatch table: CoreML layer-param protobuf class name -> converter function.
_convert_map = {
    'NeuralNetworkMeanImage': NeuralNetworkMeanImage,
    'NeuralNetworkImageScaler': NeuralNetworkImageScaler,
    'ConvolutionLayerParams':ConvolutionLayerParams,
    'BatchnormLayerParams':BatchnormLayerParams,
    'ActivationParams':ActivationParams,
    'ScaleLayerParams':ScaleLayerParams,
    'PoolingLayerParams':PoolingLayerParams,
    'SoftmaxLayerParams':SoftmaxLayerParams,
    'InnerProductLayerParams':InnerProductLayerParams,
    'AddLayerParams':AddLayerParams,
    'MultiplyLayerParams':MultiplyLayerParams,
    'FlattenLayerParams':FlattenLayerParams,
    'ConcatLayerParams':ConcatLayerParams,
    'PaddingLayerParams':PaddingLayerParams,
    'PermuteLayerParams':PermuteLayerParams,
    'UpsampleLayerParams':UpsampleLayerParams,
    'L2NormalizeLayerParams':L2NormalizeLayerParams,
    'LRNLayerParams':LRNLayerParams,
    'AverageLayerParams':AverageLayerParams,
    'MaxLayerParams':MaxLayerParams,
    'MinLayerParams':MinLayerParams,
}
def coreml_op_to_nnvm(op, inname, outname, symtab):
    """Convert a single CoreML layer to an nnvm symbol and register it.

    Parameters
    ----------
    op : protobuf message
        The CoreML layer (or preprocessing) parameters.
    inname : str or list of str
        Name(s) of the layer's input symbol(s) in the symbol table.
    outname : str
        Name under which the converted symbol is stored in the symbol table.
    symtab : SymbolTable
        Symbol table mapping names to nnvm symbols.
    """
    classname = type(op).__name__
    if classname not in _convert_map:
        raise NotImplementedError("%s is not supported" % (classname))
    if isinstance(inname, string_types):
        insym = symtab.get_var(inname)
    else:
        # Multi-input layer: look up each input symbol.
        insym = [symtab.get_var(i) for i in inname]
    ret = _convert_map[classname](op, insym, symtab)
    if outname:
        symtab.set_var(outname, ret)
    # A padding layer's recorded padding must be absorbed by the very next
    # conv/pool layer; any other layer type finding pending padding is an error.
    if classname != 'PaddingLayerParams':
        assert not symtab.in_padding, "Previous padding not consumed by conv/pool"
def from_coreml(model):
    """Convert from coreml model into NNVM format.

    Parameters
    ----------
    model:
        coremltools.models.MLModel of a NeuralNetworkClassifier

    Returns
    -------
    sym : nnvm.Symbol
        Compatible nnvm symbol
    params : dict of str to tvm.NDArray
        The parameter dict to be used by nnvm
    """
    try:
        import coremltools as cm
    except ImportError:
        raise ImportError('The coremltools package must be installed')
    assert isinstance(model, cm.models.MLModel)
    spec = model.get_spec()
    modeltype = spec.WhichOneof('Type')
    assert modeltype in ['neuralNetworkClassifier', 'neuralNetwork', 'neuralNetworkRegressor']
    cc = getattr(spec, modeltype)
    symtab = SymbolTable()
    # Register each model input as a variable in the symbol table.
    for i in spec.description.input:
        symtab.get_var(i.name, must_contain=False)
    # Apply preprocessing steps in-place on the input symbols.
    for pp in cc.preprocessing:
        whichpp = pp.WhichOneof('preprocessor')
        ppmethod = getattr(pp, whichpp)
        # the NeuralNetworkImageScalar doesn't seem to have a featureName?
        if whichpp == 'scaler':
            for i in spec.description.input:
                coreml_op_to_nnvm(ppmethod, i.name, i.name, symtab)
        else:
            coreml_op_to_nnvm(ppmethod, pp.featureName, pp.featureName, symtab)
    # Convert each layer in order; layers are single-output.
    for l in cc.layers:
        layertype = l.WhichOneof('layer')
        layerop = getattr(l, layertype)
        assert len(l.output) == 1
        if len(l.input) == 1:
            coreml_op_to_nnvm(layerop, l.input[0], l.output[0], symtab)
        else:
            coreml_op_to_nnvm(layerop, list(l.input), l.output[0], symtab)
    returns = [symtab.get_var(i.name, must_contain=False) for i in spec.description.output]
    # Materialize registered constants as tvm NDArrays (float32).
    tvmparams = {k:tvm.nd.array(np.array(v, dtype=np.float32)) for k, v in symtab.params.items()}
    # for now return first output
    return returns[0], tvmparams
| {
"content_hash": "353974f9af8d97c51c50f22484467d80",
"timestamp": "",
"source": "github",
"line_count": 408,
"max_line_length": 97,
"avg_line_length": 37.799019607843135,
"alnum_prop": 0.6348722604072105,
"repo_name": "mlperf/training_results_v0.6",
"id": "7dfd54317b5542af9f15db32cda7dd9386e4ab63",
"size": "15470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/nnvm/python/nnvm/frontend/coreml.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13941"
},
{
"name": "C",
"bytes": "208630"
},
{
"name": "C++",
"bytes": "10999411"
},
{
"name": "CMake",
"bytes": "129712"
},
{
"name": "CSS",
"bytes": "64767"
},
{
"name": "Clojure",
"bytes": "396764"
},
{
"name": "Cuda",
"bytes": "2272433"
},
{
"name": "Dockerfile",
"bytes": "67820"
},
{
"name": "Groovy",
"bytes": "62557"
},
{
"name": "HTML",
"bytes": "19753082"
},
{
"name": "Java",
"bytes": "166294"
},
{
"name": "JavaScript",
"bytes": "71846"
},
{
"name": "Julia",
"bytes": "408765"
},
{
"name": "Jupyter Notebook",
"bytes": "2713169"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "115694"
},
{
"name": "Perl",
"bytes": "1535873"
},
{
"name": "Perl 6",
"bytes": "7280"
},
{
"name": "PowerShell",
"bytes": "6150"
},
{
"name": "Python",
"bytes": "24905683"
},
{
"name": "R",
"bytes": "351865"
},
{
"name": "Roff",
"bytes": "293052"
},
{
"name": "Scala",
"bytes": "1189019"
},
{
"name": "Shell",
"bytes": "794096"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "TypeScript",
"bytes": "361164"
}
],
"symlink_target": ""
} |
import django.contrib.postgres.indexes
from django.db import migrations
import saleor.account.models
class Migration(migrations.Migration):
    # Replaces the old email-only GIN index on User with combined trigram GIN
    # indexes for user and address search, and adds a db index to Address.phone.

    dependencies = [
        ("account", "0052_customerevent_app"),
    ]

    operations = [
        # Drop the previous email-only GIN index (superseded by
        # user_search_gin below).
        migrations.RemoveIndex(
            model_name="user",
            name="account_use_email_d707ff_gin",
        ),
        # Re-declare Address.phone with db_index=True.
        migrations.AlterField(
            model_name="address",
            name="phone",
            field=saleor.account.models.PossiblePhoneNumberField(
                blank=True, db_index=True, default="", max_length=128, region=None
            ),
        ),
        # Trigram GIN index over address name/city/country fields — one
        # gin_trgm_ops opclass entry per indexed column.
        migrations.AddIndex(
            model_name="address",
            index=django.contrib.postgres.indexes.GinIndex(
                fields=["first_name", "last_name", "city", "country"],
                name="address_search_gin",
                opclasses=[
                    "gin_trgm_ops",
                    "gin_trgm_ops",
                    "gin_trgm_ops",
                    "gin_trgm_ops",
                ],
            ),
        ),
        # Trigram GIN index over user email/name fields.
        migrations.AddIndex(
            model_name="user",
            index=django.contrib.postgres.indexes.GinIndex(
                fields=["email", "first_name", "last_name"],
                name="user_search_gin",
                opclasses=["gin_trgm_ops", "gin_trgm_ops", "gin_trgm_ops"],
            ),
        ),
    ]
| {
"content_hash": "f903dca02a90dda231cbd5322ffd0b7f",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 82,
"avg_line_length": 30.652173913043477,
"alnum_prop": 0.5014184397163121,
"repo_name": "mociepka/saleor",
"id": "9714086a8c6d57b4d30306a958f536a7ebc03bcb",
"size": "1459",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "saleor/account/migrations/0053_auto_20210719_1048.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "2228"
},
{
"name": "HTML",
"bytes": "249248"
},
{
"name": "Procfile",
"bytes": "290"
},
{
"name": "Python",
"bytes": "12686831"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
} |
"""
An API to insert and retrieve metadata on cloud artifacts.
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1alpha1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ApiArtifact(object):
    """Artifact model generated from the Grafeas Swagger definition.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # swagger_types: attribute name -> swagger type.
    swagger_types = {
        'name': 'str',
        'checksum': 'str',
        'id': 'str',
        'names': 'list[str]'
    }

    # attribute_map: attribute name -> JSON key in the API definition.
    attribute_map = {
        'name': 'name',
        'checksum': 'checksum',
        'id': 'id',
        'names': 'names'
    }

    def __init__(self, name=None, checksum=None, id=None, names=None):  # noqa: E501
        """ApiArtifact - a model defined in Swagger; all fields optional."""  # noqa: E501
        self._name = None
        self._checksum = None
        self._id = None
        self._names = None
        self.discriminator = None
        # Assign only the fields that were explicitly provided.
        if name is not None:
            self.name = name
        if checksum is not None:
            self.checksum = checksum
        if id is not None:
            self.id = id
        if names is not None:
            self.names = names

    @property
    def name(self):
        """str: Deprecated single artifact name; superseded by `names`."""
        return self._name

    @name.setter
    def name(self, name):
        """Set the (deprecated) single artifact name."""
        self._name = name

    @property
    def checksum(self):
        """str: Hash/checksum of a binary, or Docker Registry 2.0 digest."""
        return self._checksum

    @checksum.setter
    def checksum(self, checksum):
        """Set the checksum of this ApiArtifact."""
        self._checksum = checksum

    @property
    def id(self):
        """str: The artifact id."""
        return self._id

    @id.setter
    def id(self, id):
        """Set the id of this ApiArtifact."""
        self._id = id

    @property
    def names(self):
        """list[str]: Related artifact names (e.g. multiple image tags)."""
        return self._names

    @names.setter
    def names(self, names):
        """Set the related artifact names."""
        self._names = names

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def _convert(value):
            # Recursively serialize nested models, lists and dicts.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        result = {attr: _convert(getattr(self, attr))
                  for attr in self.swagger_types}
        # Generated-code convention: merge dict items if the model subclasses dict.
        if issubclass(ApiArtifact, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return a pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two artifacts are equal iff both are ApiArtifact with equal state."""
        if not isinstance(other, ApiArtifact):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| {
"content_hash": "76a1ab0979ea5b9c8dcaf23f6a9133ff",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 481,
"avg_line_length": 33.964467005076145,
"alnum_prop": 0.5957256015543267,
"repo_name": "grafeas/client-python",
"id": "f70019c95c7be20517b76954cfb0e4af80ac4c4a",
"size": "6708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grafeas/models/api_artifact.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "558375"
},
{
"name": "Shell",
"bytes": "2063"
}
],
"symlink_target": ""
} |
""" Message Parsing
Template-specific Message Parsers are defined here.
@copyright: 2013-2020 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3Parser"]
from gluon import current
from s3.s3parser import S3Parsing
# =============================================================================
class S3Parser(object):
"""
Message Parsing Template.
"""
# -------------------------------------------------------------------------
@staticmethod
def parse_rss(message):
"""
Parse Feeds into the CMS Module
"""
db = current.db
s3db = current.s3db
table = s3db.msg_rss
record = db(table.message_id == message.message_id).select(table.channel_id,
table.title,
table.from_address,
table.body,
table.date,
table.location_id,
table.tags,
table.author,
limitby=(0, 1)
).first()
if not record:
return
post_table = s3db.cms_post
# Is this an Update or a Create?
body = record.body or record.title
url = record.from_address
if url:
doc_table = s3db.doc_document
exists = db(doc_table.url == url).select(doc_table.doc_id,
limitby=(0, 1)
).first()
if exists:
exists = db(post_table.doc_id == exists.doc_id).select(post_table.id,
limitby=(0, 1)
).first()
else:
# Use Body
exists = db(post_table.body == body).select(post_table.id,
limitby=(0, 1)
).first()
channel_id = record.channel_id
tags = record.tags
author = record.author
if author:
ptable = s3db.pr_person
# https://code.google.com/p/python-nameparser/
from nameparser import HumanName
name = HumanName(author)
first_name = name.first
middle_name = name.middle
last_name = name.last
query = (ptable.first_name == first_name) & \
(ptable.middle_name == middle_name) & \
(ptable.last_name == last_name)
pexists = db(query).select(ptable.id,
limitby=(0, 1)
).first()
if pexists:
person_id = pexists.id
else:
person_id = ptable.insert(first_name = first_name,
middle_name = middle_name,
last_name = last_name)
s3db.update_super(ptable, dict(id=person_id))
else:
person_id = None
if exists:
post_id = exists.id
db(post_table.id == post_id).update(title = record.title,
body = body,
date = record.date,
location_id = record.location_id,
person_id = person_id,
)
# Read existing Tags (which came from remote)
ttable = db.cms_tag
ltable = db.cms_tag_post
query = (ltable.post_id == post_id) & \
(ltable.mci == 1) & \
(ltable.tag_id == ttable.id)
rows = db(query).select(ttable.name)
# Compare these to tags in current version of post
old_tags = [r.name for r in rows]
new_tags = []
delete_tags = []
for tag in tags:
if tag not in old_tags:
new_tags.append(tag)
for tag in old_tags:
if tag not in tags:
delete_tags.append(tag)
if new_tags or delete_tags:
lookup_tags = []
lookup_tags.extend(new_tags)
lookup_tags.extend(delete_tags)
_tags = db(ttable.name.belongs(lookup_tags)).select(ttable.id,
ttable.name,
).as_dict(key="name")
for t in new_tags:
tag = _tags.get(t, None)
if tag:
tag_id = tag["id"]
else:
tag_id = ttable.insert(name = t)
ltable.insert(post_id = post_id,
tag_id = tag_id,
mci = 1, # This is an imported record, not added natively
)
for t in delete_tags:
tag = _tags.get(t, None)
if tag:
query = (ltable.post_id == post_id) & \
(ltable.tag_id == tag["id"]) & \
(ltable.mci == 1) & \
(ltable.deleted == False)
db(query).delete()
else:
# Default to 'News' series
table = db.cms_series
series_id = db(table.name == "News").select(table.id,
cache=s3db.cache,
limitby=(0, 1)
).first().id
post_id = post_table.insert(title = record.title,
body = body,
date = record.date,
location_id = record.location_id,
person_id = person_id,
series_id = series_id,
mci = 1, # This is an imported record, not added natively
)
record = dict(id=post_id)
s3db.update_super(post_table, record)
# Source link
if url:
doc_table.insert(doc_id = record["doc_id"],
url = url,
)
# Is this feed associated with an Org/Network?
        def lookup_pe(channel_id):
            # Resolve the pentity (Org/Network) linked to this RSS channel.
            # A channel is "associated" when a pr_contact row of method RSS
            # exists whose value equals the channel's URL.
            # Returns (instance_type, record id) or (None, None) if no match.
            ctable = s3db.msg_rss_channel
            channel_url = db(ctable.channel_id == channel_id).select(ctable.url,
                                                                     limitby=(0, 1)
                                                                     ).first().url
            # Find a pr_contact of method RSS pointing at the channel URL,
            # joined to its pentity
            ctable = s3db.pr_contact
            ptable = s3db.pr_pentity
            query = (ctable.contact_method == "RSS") & \
                    (ctable.value == channel_url) & \
                    (ctable.pe_id == ptable.pe_id)
            pe = db(query).select(ptable.pe_id,
                                  ptable.instance_type,
                                  limitby=(0, 1)
                                  ).first()
            if pe:
                # Dereference the instance table (e.g. org_organisation,
                # org_group) to get the concrete record id
                pe_type = pe.instance_type
                otable = s3db[pe_type]
                org_id = db(otable.pe_id == pe.pe_id).select(otable.id,
                                                             limitby=(0, 1),
                                                             ).first().id
                return pe_type, org_id
            else:
                return None, None
pe_type, org_id = current.cache.ram("pe_channel_%s" % channel_id,
lambda: lookup_pe(channel_id),
time_expire=120
)
if pe_type == "org_organisation":
s3db.cms_post_organisation.insert(post_id=post_id,
organisation_id=org_id,
)
elif pe_type == "org_group":
s3db.cms_post_organisation_group.insert(post_id=post_id,
group_id=org_id,
)
if tags:
ttable = db.cms_tag
ltable = db.cms_tag_post
_tags = db(ttable.name.belongs(tags)).select(ttable.id,
ttable.name,
).as_dict(key="name")
for t in tags:
tag = _tags.get(t, None)
if tag:
tag_id = tag["id"]
else:
tag_id = ttable.insert(name = t)
ltable.insert(post_id = post_id,
tag_id = tag_id,
mci = 1, # This is an imported record, not added natively
)
# No Reply
return
# END =========================================================================
| {
"content_hash": "fe291a14066a5148b80dd9d4a44c9af6",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 97,
"avg_line_length": 44.93172690763052,
"alnum_prop": 0.38130139435109045,
"repo_name": "flavour/eden",
"id": "a8befa9be09358a3199e88378bf04ae0a9da34b9",
"size": "11257",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "modules/templates/historic/MAVC/parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "727"
},
{
"name": "CSS",
"bytes": "3351335"
},
{
"name": "HTML",
"bytes": "1367727"
},
{
"name": "JavaScript",
"bytes": "20109418"
},
{
"name": "NSIS",
"bytes": "3934"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Python",
"bytes": "31407527"
},
{
"name": "Ruby",
"bytes": "8291"
},
{
"name": "Shell",
"bytes": "5059"
},
{
"name": "XSLT",
"bytes": "3274119"
}
],
"symlink_target": ""
} |
"""
tests.test_file - nose tests for ende.File module
project : Ende
version : 0.1.0
status : development
modifydate : 2015-05-06 19:30:00 -0700
createdate : 2015-05-05 05:36:00 -0700
website : https://github.com/tmthydvnprt/ende
author : tmthydvnprt
email : tmthydvnprt@users.noreply.github.com
maintainer : tmthydvnprt
license : MIT
copyright : Copyright 2015, project
credits :
"""
# test dependencies
import os
import unittest
import random
import numpy as np
from filecmp import dircmp
from difflib import ndiff, get_close_matches
from os import path
from shutil import rmtree
# testing dependencies
from ende.Util import open_tar
from ende.File import encrypt_folders, decrypt_folders
# test constants
IGNORES = ['.DS_Store', '.localized']
ALPHABET = np.array(list('`1234567890-=~!@#$%^&*()_+qwertyuiop[]\\QWERTYUIOP{}|adfghjkl;"ASDFGHJKL:\'zxcvbnm,./'))
BASE64_ALPHABET = np.array(list('0123456789qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM-_'))
TEST_COUNT = 1
# test helper functions
def list_diff(list1, list2):
    """Return the items of *list1* that do not appear in *list2*.

    Order and duplicates of *list1* are preserved. The exclusion set is
    built once up front; the original rebuilt ``set(list2)`` on every
    iteration of the comprehension, making the function O(n*m).
    """
    excluded = set(list2)
    return [x for x in list1 if x not in excluded]
def get_file_diffs(file1, file2):
    """Character-level ndiff comparison of two text files.

    Returns a list of ``(index, diff_line)`` tuples for every ndiff output
    entry whose first character is not a space (i.e. insertions, deletions
    and intraline hints). Note ndiff is fed the raw file contents as
    strings, so it diffs character by character, not line by line.

    Files are opened via context managers so the handles are closed
    promptly (the original left both file objects open).
    """
    with open(file1, 'r') as left, open(file2, 'r') as right:
        left_text = left.read()
        right_text = right.read()
    return [(i, s) for i, s in enumerate(ndiff(left_text, right_text)) if s[0] != ' ']
def get_dir_diffs(dcmp):
    """Recursively compare a ``filecmp.dircmp`` tree, returning a list of diff
    records. Each record is a dict with keys ``name``, ``left``, ``right``
    and ``diffs`` (the per-file ndiff result, empty for unmatched entries).
    Entries present on only one side are fuzzy-matched by name so that
    renamed-but-similar files/directories are still content-compared.
    """
    diffs = []
    # compare files present on both sides with differing content
    for name in dcmp.diff_files:
        diffs.append({
            'name' : name,
            'left' : dcmp.left,
            'right': dcmp.right,
            'diffs': get_file_diffs(dcmp.left+'/'+name, dcmp.right+'/'+name)
        })
    # recurse into common subdirectories
    for sub_dcmp in dcmp.subdirs.values():
        diffs.extend(get_dir_diffs(sub_dcmp))
    # check for close (fuzzy name) file and subdirectory matches between
    # the one-side-only entries
    close_dirs = []
    close_files = []
    for left in dcmp.left_only:
        match = get_close_matches(left, dcmp.right_only, 1)
        if match:
            close_paths = (dcmp.left+'/'+left, dcmp.right+'/'+match[0])
            if all([path.isdir(x) for x in close_paths]):
                close_dirs.append(close_paths)
            else:
                close_files.append(close_paths)
    # recurse into close subdirectory matches
    for left_dir, right_dir in close_dirs:
        diffs.extend(get_dir_diffs(dircmp(left_dir, right_dir, IGNORES)))
    # content-compare close file matches; 'name' becomes a (left, right) pair
    for left_file, right_file in close_files:
        diffs.append({
            'name' : (path.basename(left_file), path.basename(right_file)),
            'left' : path.dirname(left_file),
            'right': path.dirname(right_file),
            'diffs': get_file_diffs(left_file, right_file)
        })
    # anything that had no close match at all is recorded with an empty
    # 'diffs' list and a blank path on the missing side
    for no_match in list_diff(dcmp.left_only, [path.basename(x[0]) for x in close_files+close_dirs]):
        diffs.append({
            'name' : no_match,
            'left' : dcmp.left,
            'right': '',
            'diffs': []
        })
    for no_match in list_diff(dcmp.right_only, [path.basename(x[1]) for x in close_files+close_dirs]):
        diffs.append({
            'name' : no_match,
            'left' : '',
            'right': dcmp.right,
            'diffs': []
        })
    return diffs
def compare_files_and_folders(dir1, dir2):
    """Deep-compare two directory trees and return the list of differences.

    Entries listed in IGNORES are skipped; an empty result means the trees
    are identical (as far as the comparison looks).
    """
    comparison = dircmp(dir1, dir2, IGNORES)
    return get_dir_diffs(comparison)
# test cases
class EncryptFolderTests(unittest.TestCase):
    """Round-trip tests for encrypt_folders() and decrypt_folders() of ende.File."""
    def setUp(self):
        """Unpack the fixture tree and perform one encrypt/decrypt round-trip."""
        # fixture tarball lives inside tests/, so chdir there to unpack it
        os.chdir('tests')
        open_tar('test_dir_data.tar')
        os.chdir('..')
        self.test_dir = 'tests/test_dir'
        # random 8-24 char password drawn from the printable alphabet
        self.password = ''.join(np.random.choice(ALPHABET, random.randint(8, 24)))
        self.en_dir = encrypt_folders(self.test_dir, self.password, self.test_dir+'_encrypted')
        self.de_dir = decrypt_folders(self.en_dir, self.password, self.test_dir+'_decrypted')
    def tearDown(self):
        """Remove every directory the tests may have produced."""
        temp_files = ['test_dir', 'test_dir_encrypted', 'test_dir_decrypted', 'test_dir_copy', 'test_dir2']
        os.chdir('tests')
        for temp_file in temp_files:
            if path.isdir(temp_file):
                rmtree(temp_file)
        os.chdir('..')
    def test_a0(self):
        """Sanity: the directory comparison reports no diffs for identical trees."""
        cmpr = compare_files_and_folders(self.test_dir, self.test_dir+'_copy')
        self.assertEqual(len(cmpr), 0)
    def test_a01(self):
        """Sanity: the directory comparison reports diffs for different trees."""
        cmpr = compare_files_and_folders(self.test_dir, self.test_dir+'2')
        self.assertNotEqual(len(cmpr), 0)
    def test_a1(self):
        """Encrypting the same directory with the same password twice never
        produces identical ciphertext (encryption must be randomized)."""
        for _ in range(TEST_COUNT):
            en_dir = encrypt_folders(self.test_dir, self.password, self.test_dir+'_encrypted_2')
            cmpr = compare_files_and_folders(self.en_dir, en_dir)
            rmtree(en_dir)
            self.assertNotEqual(len(cmpr), 0)
    def test_a2(self):
        """Decrypting independently-encrypted copies always yields the same plaintext."""
        for _ in range(TEST_COUNT):
            en_dir = encrypt_folders(self.test_dir, self.password, self.test_dir+'_encrypted_2')
            de_dir = decrypt_folders(en_dir, self.password, self.test_dir+'_decrypted_2')
            cmpr = compare_files_and_folders(self.de_dir, de_dir)
            rmtree(en_dir)
            rmtree(de_dir)
            self.assertEqual(len(cmpr), 0)
    def test_a3(self):
        """Full round-trip: decrypt(encrypt(dir)) reproduces the original tree."""
        for _ in range(TEST_COUNT):
            en_dir = encrypt_folders(self.test_dir, self.password, self.test_dir+'_encrypted_2')
            de_dir = decrypt_folders(en_dir, self.password, self.test_dir+'_decrypted_2')
            cmpr = compare_files_and_folders(self.test_dir, de_dir)
            rmtree(en_dir)
            rmtree(de_dir)
            self.assertEqual(len(cmpr), 0)
    def test_a4(self):
        """Decrypting with the wrong password must not reproduce the original.
        NOTE(review): password1 and password2 are drawn independently and
        could, very rarely, coincide — this test is theoretically flaky."""
        for _ in range(TEST_COUNT):
            password1 = ''.join(np.random.choice(ALPHABET, random.randint(2, 24)))
            password2 = ''.join(np.random.choice(ALPHABET, random.randint(2, 24)))
            en_dir = encrypt_folders(self.test_dir, password1, self.test_dir+'_encrypted_2')
            de_dir = decrypt_folders(en_dir, password2, self.test_dir+'_decrypted_2')
            cmpr = compare_files_and_folders(self.test_dir, de_dir)
            rmtree(en_dir)
            rmtree(de_dir)
            self.assertNotEqual(len(cmpr), 0)
| {
"content_hash": "d8e705e62ed4631ca6c4edd83b5198c8",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 114,
"avg_line_length": 34.83076923076923,
"alnum_prop": 0.6074793875147232,
"repo_name": "tmthydvnprt/ende",
"id": "f50874d241d8f43bd9404a042e8aabd451afe215",
"size": "6792",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_file.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51253"
},
{
"name": "Shell",
"bytes": "1472"
}
],
"symlink_target": ""
} |
'''OpenGL extension EXT.swap_control_tear
This module customises the behaviour of the
OpenGL.raw.GLX.EXT.swap_control_tear to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/swap_control_tear.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLX import _types, _glgets
from OpenGL.raw.GLX.EXT.swap_control_tear import *
from OpenGL.raw.GLX.EXT.swap_control_tear import _EXTENSION_NAME
def glInitSwapControlTearEXT():
    """Report whether the GLX_EXT_swap_control_tear extension is available."""
    # Imported locally, mirroring the autogenerated wrapper convention.
    from OpenGL import extensions as _extensions
    is_available = _extensions.hasGLExtension(_EXTENSION_NAME)
    return is_available
### END AUTOGENERATED SECTION | {
"content_hash": "da57505556c5c6d523f9ada53cf28687",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 71,
"avg_line_length": 33.78260869565217,
"alnum_prop": 0.7902187902187903,
"repo_name": "alexus37/AugmentedRealityChess",
"id": "127710ad9314a8fa0ce945acd8f3525c53c33a19",
"size": "777",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/GLX/EXT/swap_control_tear.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "158062"
},
{
"name": "C++",
"bytes": "267993"
},
{
"name": "CMake",
"bytes": "11319"
},
{
"name": "Fortran",
"bytes": "3707"
},
{
"name": "Makefile",
"bytes": "14618"
},
{
"name": "Python",
"bytes": "12813086"
},
{
"name": "Roff",
"bytes": "3310"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
} |
"""
Views which allow users to create and activate accounts.
"""
from django.conf import settings
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, get_object_or_404, render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from django.middleware.csrf import get_token
from signbank.registration.forms import RegistrationForm, EmailAuthenticationForm
from signbank.registration.models import RegistrationProfile
from signbank.dictionary.models import Dataset
from django.contrib import messages
from django.template.loader import render_to_string
from datetime import date
import json
def activate(request, activation_key, template_name='registration/activate.html'):
    """
    Activate a ``User``'s account if the given key is valid and has not
    expired.

    By default, uses the template ``registration/activate.html``; to
    change this, pass the name of a template as the keyword argument
    ``template_name``.

    Context:
        account
            The ``User`` object corresponding to the account if the
            activation succeeded, ``False`` otherwise.
        expiration_days
            Number of days activation keys remain valid after
            registration.
    """
    # Keys are matched lower-case, so normalise before the lookup.
    key = activation_key.lower()
    account = RegistrationProfile.objects.activate_user(key)
    context = {
        'account': account,
        'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,
    }
    return render(request, template_name, context)
def register(request, success_url=settings.URL + settings.PREFIX_URL + '/accounts/register/complete/',
             form_class=RegistrationForm, profile_callback=None,
             template_name='registration/registration_form.html'):
    """
    Allows a new user to register an account.
    Following successful registration, redirects to either
    ``/accounts/register/complete/`` or, if supplied, the URL
    specified in the keyword argument ``success_url``.
    By default, ``registration.forms.RegistrationForm`` will be used
    as the registration form; to change this, pass a different form
    class as the ``form_class`` keyword argument. The form class you
    specify must have a method ``save`` which will create and return
    the new ``User``, and that method must accept the keyword argument
    ``profile_callback`` (see below).
    To enable creation of a site-specific user profile object for the
    new user, pass a function which will create the profile object as
    the keyword argument ``profile_callback``. See
    ``RegistrationManager.create_inactive_user`` in the file
    ``models.py`` for details on how to write this function.
    By default, uses the template
    ``registration/registration_form.html``; to change this, pass the
    name of a template as the keyword argument ``template_name``.
    Note: the default ``success_url`` is computed once at import time
    from ``settings``.
    Context:
        form
            The registration form.
    Template:
        registration/registration_form.html or ``template_name``
        keyword argument.
    """
    if request.method == 'POST':
        form = form_class(request.POST)
        if form.is_valid():
            new_user = form.save(profile_callback=profile_callback)
            # Cache the new user's identity in the session for the
            # registration-complete page
            request.session['username'] = new_user.username
            request.session['first_name'] = new_user.first_name
            request.session['last_name'] = new_user.last_name
            request.session['email'] = new_user.email
            groups_of_user = [ g.name.replace('_',' ') for g in new_user.groups.all() ]
            request.session['groups'] = groups_of_user
            if hasattr(settings, 'SHOW_DATASET_INTERFACE_OPTIONS') and settings.SHOW_DATASET_INTERFACE_OPTIONS:
                # Optional multi-select of datasets the user wants access to
                list_of_datasets = request.POST.getlist('dataset[]')
                if '' in list_of_datasets:
                    list_of_datasets.remove('')
                from django.contrib.auth.models import Group, User
                group_manager = Group.objects.get(name='Dataset_Manager')
                motivation = request.POST.get('motivation_for_use', '') # motivation is a required field in the form
                # send email to each of the dataset owners
                for dataset_name in list_of_datasets:
                    # the datasets are selected via a pulldown list, they should exist
                    dataset_obj = Dataset.objects.get(name=dataset_name)
                    owners_of_dataset = dataset_obj.owners.all()
                    for owner in owners_of_dataset:
                        groups_of_user = owner.groups.all()
                        if not group_manager in groups_of_user:
                            # this owner can't manage users, skip notifying them
                            continue
                        from django.core.mail import send_mail
                        current_site = Site.objects.get_current()
                        subject = render_to_string('registration/dataset_access_email_subject.txt',
                                                   context={'dataset': dataset_name,
                                                            'site': current_site})
                        # Email subject *must not* contain newlines
                        subject = ''.join(subject.splitlines())
                        message = render_to_string('registration/dataset_access_email.txt',
                                                   context={'dataset': dataset_name,
                                                            'new_user_username': new_user.username,
                                                            'new_user_firstname': new_user.first_name,
                                                            'new_user_lastname': new_user.last_name,
                                                            'new_user_email': new_user.email,
                                                            'motivation': motivation,
                                                            'site': current_site})
                        # for debug purposes on local machine
                        # print('owner of dataset: ', owner.username, ' with email: ', owner.email)
                        # print('message: ', message)
                        send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [owner.email])
                request.session['requested_datasets'] = list_of_datasets
            return HttpResponseRedirect(success_url)
        else:
            # Invalid form: flash a generic error; the bound form is
            # re-rendered below with its field errors
            messages.add_message(request, messages.ERROR, ('Error processing your request.'))
            # for ff in form.visible_fields():
            # if ff.errors:
            # print('form error in field ', ff.name, ': ', ff.errors)
            # messages.add_message(request, messages.ERROR, ff.errors)
            # create a new empty form, this deletes the erroneous fields
            # form = form_class()
    else:
        form = form_class()
    return render(request,template_name,{ 'form': form })
# a copy of the login view since we need to change the form to allow longer
# userids (> 30 chars) since we're using email addresses
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.views.decorators.cache import never_cache
from django.contrib.sites.models import Site
from django.contrib.sites.requests import RequestSite
def mylogin(request, template_name='registration/login.html', redirect_field_name='/signs/recently_added/'):
    """Display the login form and handle the login action.

    Local copy of django.contrib.auth.views.login using
    EmailAuthenticationForm so user ids longer than 30 characters
    (email addresses) are accepted. Also supports JSON responses for
    API clients via the ``api=yes`` query parameter.
    """
    redirect_to = request.GET[REDIRECT_FIELD_NAME] if REDIRECT_FIELD_NAME in request.GET else ''
    error_message = ''
    if request.method == "POST":
        if REDIRECT_FIELD_NAME in request.POST:
            redirect_to = request.POST[REDIRECT_FIELD_NAME]
        form = EmailAuthenticationForm(data=request.POST)
        if form.is_valid():
            # Count the number of logins
            profile = form.get_user().user_profile_user
            profile.number_of_logins += 1
            profile.save()
            # Expiry date cannot be in the past
            if profile.expiry_date != None and date.today() > profile.expiry_date:
                # NOTE(review): an expired account falls through to the final
                # render with a fresh unbound form and this error message
                form = EmailAuthenticationForm(request)
                error_message = _('This account has expired. Please contact o.crasborn@let.ru.nl.')
            else:
                # Light security check -- make sure redirect_to isn't garbage.
                if not redirect_to or '//' in redirect_to or ' ' in redirect_to:
                    redirect_to = settings.LOGIN_REDIRECT_URL
                from django.contrib.auth import login
                login(request, form.get_user())
                if request.session.test_cookie_worked():
                    request.session.delete_test_cookie()
                # For logging in API clients
                if "api" in request.GET and request.GET['api'] == 'yes':
                    return HttpResponse(json.dumps({'success': 'true'}), content_type='application/json')
                return HttpResponseRedirect(redirect_to)
        else:
            if "api" in request.GET and request.GET['api'] == 'yes':
                return HttpResponse(json.dumps({'success': 'false'}), content_type='application/json')
            error_message = _('The username or password is incorrect.')
    else:
        form = EmailAuthenticationForm(request)
        request.session.set_test_cookie()
    if Site._meta.installed:
        current_site = Site.objects.get_current()
    else:
        current_site = RequestSite(request)
    # For logging in API clients: a GET with api=yes returns only a CSRF token
    if request.method == "GET" and "api" in request.GET and request.GET['api'] == 'yes':
        token = get_token(request)
        return HttpResponse(json.dumps({'csrfmiddlewaretoken': token}), content_type='application/json')
    return render(request,template_name, {
        'form': form,
        REDIRECT_FIELD_NAME: settings.URL+redirect_to,
        'site': current_site,
        'site_name': current_site.name,
        'allow_registration': settings.ALLOW_REGISTRATION,
        'error_message': error_message})
mylogin = never_cache(mylogin)
| {
"content_hash": "77ddf2cd9db73dcbf7ca55614ba23b30",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 117,
"avg_line_length": 43.8,
"alnum_prop": 0.5972222222222222,
"repo_name": "Woseseltops/signbank",
"id": "0c1269a920cbd8904bd460e086d7d6dc988da430",
"size": "10512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "signbank/registration/views.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "439986"
},
{
"name": "HTML",
"bytes": "187182"
},
{
"name": "JavaScript",
"bytes": "709951"
},
{
"name": "PHP",
"bytes": "1052"
},
{
"name": "Python",
"bytes": "513314"
}
],
"symlink_target": ""
} |
import nibabel as nib
import numpy as np
from nilabels.tools.aux_methods.utils_rotations import get_small_orthogonal_rotation
from nilabels.tools.aux_methods.utils_path import get_pfi_in_pfi_out, connect_path_tail_head
from nilabels.tools.aux_methods.utils_nib import modify_image_data_type, \
modify_affine_transformation, replace_translational_part
class HeaderController(object):
    """
    Facade over the header/affine manipulation tools, working with paths to
    images rather than with in-memory data. Each method loads one or more
    inputs, manipulates the header/affine according to some rule and saves
    the output in the output data folder or at the specified paths.
    """
    def __init__(self, input_data_folder=None, output_data_folder=None):
        # Default folders used to resolve relative filenames passed to the methods.
        self.pfo_in = input_data_folder
        self.pfo_out = output_data_folder

    def modify_image_type(self, filename_in, filename_out, new_dtype, update_description=None, verbose=1):
        """
        Change data type and optionally update the nifti field descriptor.
        :param filename_in: path to filename input
        :param filename_out: path to filename output
        :param new_dtype: numpy data type compatible input
        :param update_description: string with the new 'descrip' nifti header value.
        :param verbose: verbosity level forwarded to the underlying utility.
        :return: None; saves the image with new dtype and descriptor updated.
        """
        pfi_in, pfi_out = get_pfi_in_pfi_out(filename_in, filename_out, self.pfo_in, self.pfo_out)
        im = nib.load(pfi_in)
        new_im = modify_image_data_type(im, new_dtype=new_dtype, update_descrip_field_header=update_description, verbose=verbose)
        nib.save(new_im, pfi_out)

    def modify_affine(self, filename_in, affine_in, filename_out, q_form=True, s_form=True,
                      multiplication_side='left'):
        """
        Modify the affine transformation by substitution or by left or right multiplication.
        :param filename_in: path to filename input
        :param affine_in: path to affine matrix input (.txt or .npy), or nd.array
        :param filename_out: path to filename output
        :param q_form: affect the q_form (True)
        :param s_form: affect the s_form (True)
        :param multiplication_side: can be 'left', 'right', or 'replace'.
        :return: None; saves the image with the updated affine transformation.
        NOTE: please see the documentation http://nipy.org/nibabel/nifti_images.html#choosing-image-affine for more on the
        relationships between s_form affine, q_form affine and fall-back header affine.
        """
        pfi_in, pfi_out = get_pfi_in_pfi_out(filename_in, filename_out, self.pfo_in, self.pfo_out)
        if isinstance(affine_in, str):
            # Load the matrix from file; .txt via loadtxt, anything else via np.load
            if affine_in.endswith('.txt'):
                aff = np.loadtxt(connect_path_tail_head(self.pfo_in, affine_in))
            else:
                aff = np.load(connect_path_tail_head(self.pfo_in, affine_in))
        elif isinstance(affine_in, np.ndarray):
            aff = affine_in
        else:
            raise IOError('parameter affine_in can be path to an affine matrix .txt or .npy or the numpy array'
                          'corresponding to the affine transformation.')
        im = nib.load(pfi_in)
        new_im = modify_affine_transformation(im, aff, q_form=q_form, s_form=s_form,
                                              multiplication_side=multiplication_side)
        nib.save(new_im, pfi_out)

    def apply_small_rotation(self, filename_in, filename_out, angle=np.pi/6, principal_axis='pitch',
                             respect_to_centre=True):
        """
        Apply a small rotation to the image affine (header only, no resampling).
        :param filename_in: path to filename input
        :param filename_out: path to filename output
        :param angle: rotation angle in radiants (or a list, paired with a list of axes)
        :param principal_axis: 'yaw', 'pitch' or 'roll' (or a list of these)
        :param respect_to_centre: rotate about the field-of-view centre (True) or the origin (False).
        :return: None; saves the rotated image.
        """
        if isinstance(angle, list):
            # One rotation per (axis, angle) pair, composed in order.
            assert isinstance(principal_axis, list)
            assert len(principal_axis) == len(angle)
            rot = np.identity(4)
            for pa, an in zip(principal_axis, angle):
                aff = get_small_orthogonal_rotation(theta=an, principal_axis=pa)
                rot = rot.dot(aff)
        else:
            rot = get_small_orthogonal_rotation(theta=angle, principal_axis=principal_axis)
        pfi_in, pfi_out = get_pfi_in_pfi_out(filename_in, filename_out, self.pfo_in, self.pfo_out)
        im = nib.load(pfi_in)
        if respect_to_centre:
            # Conjugate the rotation with a translation to/from the FOV centre.
            fov_centre = im.affine.dot(np.array(list(np.array(im.shape[:3]) / float(2)) + [1]))
            transl = np.eye(4)
            transl[:3, 3] = fov_centre[:3]
            transl_inv = np.eye(4)
            transl_inv[:3, 3] = -1 * fov_centre[:3]
            rt = transl.dot(rot.dot(transl_inv))
            new_aff = rt.dot(im.affine)
        else:
            # Explicit copy: the previous `im.affine[:]` returned a numpy view,
            # so the in-place assignment below mutated im.affine itself.
            new_aff = np.copy(im.affine)
            new_aff[:3, :3] = rot[:3, :3].dot(new_aff[:3, :3])
        new_im = modify_affine_transformation(im_input=im, new_aff=new_aff, q_form=True, s_form=True,
                                              multiplication_side='replace')
        nib.save(new_im, pfi_out)

    def modify_translational_part(self, filename_in, filename_out, new_translation):
        """
        Replace the translational part of the image affine.
        :param filename_in: path to filename input
        :param filename_out: path to filename output
        :param new_translation: translation (path to .txt/.npy, nd.array or list) replacing the existing one.
        :return: None; saves the image with the new translational part.
        """
        pfi_in, pfi_out = get_pfi_in_pfi_out(filename_in, filename_out, self.pfo_in, self.pfo_out)
        im = nib.load(pfi_in)
        if isinstance(new_translation, str):
            if new_translation.endswith('.txt'):
                tr = np.loadtxt(connect_path_tail_head(self.pfo_in, new_translation))
            else:
                tr = np.load(connect_path_tail_head(self.pfo_in, new_translation))
        elif isinstance(new_translation, np.ndarray):
            tr = new_translation
        elif isinstance(new_translation, list):
            tr = np.array(new_translation)
        else:
            raise IOError('parameter new_translation can be path to an affine matrix .txt or .npy or the numpy array'
                          'corresponding to the new intended translational part.')
        new_im = replace_translational_part(im, tr)
        nib.save(new_im, pfi_out)
| {
"content_hash": "43bf022c5ac432b025ea0fd78d365036",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 129,
"avg_line_length": 44.397260273972606,
"alnum_prop": 0.6201789571120024,
"repo_name": "SebastianoF/LabelsManager",
"id": "cf92bec6d20ec163dab02a89a48253852700dd43",
"size": "6482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nilabels/agents/header_controller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "255204"
}
],
"symlink_target": ""
} |
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.http import urlencode
from django.utils.translation import ugettext_lazy as _
from keystoneclient import exceptions
from horizon import messages
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.domains import constants
LOG = logging.getLogger(__name__)
class ViewGroupsLink(tables.LinkAction):
    """Row action opening the domain-update modal on its group-members step."""
    name = "groups"
    verbose_name = _("Modify Groups")
    url = "horizon:admin:domains:update"
    classes = ("ajax-modal",)
    icon = "pencil"
    def get_link_url(self, domain):
        # Jump straight to the group-membership step of the update workflow.
        base_url = reverse(self.url, args=[domain.id])
        query = urlencode({"step": 'update_group_members'})
        return "{0}?{1}".format(base_url, query)
class CreateDomainLink(tables.LinkAction):
    """Table-level action opening the 'Create Domain' modal."""
    name = "create"
    verbose_name = _("Create Domain")
    url = constants.DOMAINS_CREATE_URL
    classes = ("ajax-modal",)
    icon = "plus"
    policy_rules = (('identity', 'identity:create_domain'),)
    def allowed(self, request, domain):
        # Only shown when the keystone backend allows domain edits.
        return api.keystone.keystone_can_edit_domain()
class EditDomainLink(tables.LinkAction):
    """Row action opening the domain-update modal."""
    name = "edit"
    verbose_name = _("Edit")
    url = constants.DOMAINS_UPDATE_URL
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (('identity', 'identity:update_domain'),)
    def allowed(self, request, domain):
        # Only shown when the keystone backend allows domain edits.
        return api.keystone.keystone_can_edit_domain()
class DeleteDomainsAction(tables.DeleteAction):
    """Delete action for domains; refuses to delete enabled domains."""
    name = "delete"
    data_type_singular = _("Domain")
    data_type_plural = _("Domains")
    policy_rules = (('identity', 'identity:delete_domain'),)
    def allowed(self, request, datum):
        # Only available when the keystone backend allows domain edits.
        return api.keystone.keystone_can_edit_domain()
    def delete(self, request, obj_id):
        domain = self.table.get_object_by_id(obj_id)
        if domain.enabled:
            # Enabled domains cannot be deleted: flash the error and raise
            # so the table machinery records the failure (409 Conflict).
            msg = _('Domain "%s" must be disabled before it can be deleted.') \
                % domain.name
            messages.error(request, msg)
            raise exceptions.ClientException(409, msg)
        else:
            LOG.info('Deleting domain "%s".' % obj_id)
            api.keystone.domain_delete(request, obj_id)
class DomainFilterAction(tables.FilterAction):
    """Client-side name filter for the domains table."""
    def allowed(self, request, datum):
        # Only offered when multi-domain support is switched on.
        return getattr(settings,
                       'OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT',
                       False)
    def filter(self, table, domains, filter_string):
        """Naive case-insensitive substring search on the domain name."""
        needle = filter_string.lower()
        return filter(lambda domain: needle in domain.name.lower(), domains)
class SetDomainContext(tables.Action):
    """Row action that scopes the admin session to the selected domain."""
    name = "set_domain_context"
    verbose_name = _("Set Domain Context")
    url = constants.DOMAINS_INDEX_URL
    preempt = True
    policy_rules = (('identity', 'admin_required'),)
    def allowed(self, request, datum):
        # Hidden unless multi-domain support is enabled and this row's
        # domain is not already the active context.
        multidomain_support = getattr(settings,
                                      'OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT',
                                      False)
        if not multidomain_support:
            return False
        ctx = request.session.get("domain_context", None)
        if ctx and datum.id == ctx:
            return False
        return True
    def single(self, table, request, obj_id):
        # Store the chosen domain id/name in the session so later views can
        # scope their queries to it; no-op if it is already the context.
        if ('domain_context' not in request.session or
                request.session['domain_context'] != obj_id):
            try:
                domain = api.keystone.domain_get(request, obj_id)
                request.session['domain_context'] = obj_id
                request.session['domain_context_name'] = domain.name
                messages.success(request,
                                 _('Domain Context updated to Domain %s.') %
                                 domain.name)
            except Exception:
                messages.error(request,
                               _('Unable to set Domain Context.'))
class UnsetDomainContext(tables.Action):
    """Table action that clears the active domain context from the session."""
    name = "clear_domain_context"
    verbose_name = _("Clear Domain Context")
    url = constants.DOMAINS_INDEX_URL
    preempt = True
    requires_input = False
    policy_rules = (('identity', 'admin_required'),)
    def allowed(self, request, datum):
        # Only offered while a domain context is actually set.
        return request.session.get("domain_context", None) is not None
    def single(self, table, request, obj_id):
        if 'domain_context' not in request.session:
            return
        request.session.pop("domain_context")
        request.session.pop("domain_context_name")
        messages.success(request, _('Domain Context cleared.'))
class DomainsTable(tables.DataTable):
    """Admin table listing keystone domains with their row/table actions."""
    name = tables.Column('name', verbose_name=_('Name'))
    # description may be absent on the API object, hence the getattr lookup
    description = tables.Column(lambda obj: getattr(obj, 'description', None),
                                verbose_name=_('Description'))
    id = tables.Column('id', verbose_name=_('Domain ID'))
    enabled = tables.Column('enabled', verbose_name=_('Enabled'), status=True)
    class Meta:
        name = "domains"
        verbose_name = _("Domains")
        row_actions = (SetDomainContext, ViewGroupsLink, EditDomainLink,
                       DeleteDomainsAction)
        table_actions = (DomainFilterAction, CreateDomainLink,
                         DeleteDomainsAction, UnsetDomainContext)
| {
"content_hash": "b4299881f5eb28723f1b902a89a646dd",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 79,
"avg_line_length": 33.49090909090909,
"alnum_prop": 0.6071299312341658,
"repo_name": "JioCloud/horizon",
"id": "60ce476df17ae128e41389061202adf84dc58b4c",
"size": "6160",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/admin/domains/tables.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "296932"
},
{
"name": "JavaScript",
"bytes": "713370"
},
{
"name": "Python",
"bytes": "3614755"
},
{
"name": "Shell",
"bytes": "15387"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, print_function
from jsontowidget import widget_from_json
class Survey(object):
    """In-memory representation of a JSON-defined survey.

    Wraps the raw ``json_survey`` dict (expected to contain a top-level
    ``'survey'`` mapping of questionnaire names to definitions), builds a
    ``Questionnaire`` per entry and tracks a stack of previously visited
    questionnaires for backwards navigation.
    """
    def __init__(self, json_survey, **kwargs):
        super(Survey, self).__init__(**kwargs)
        self.survey_file = json_survey    # raw parsed JSON document
        self.questionnaires = {}          # name -> Questionnaire
        self.prev_questionnaires = []     # navigation history (stack of names)
        self.load_questionnaires()

    def load_questionnaires(self):
        """Build a Questionnaire object for every entry of the 'survey' mapping."""
        json_data = self.survey_file['survey']
        qs = self.questionnaires
        for each in json_data:
            qs[each] = Questionnaire(each, self)

    def get_header_definitions(self, questionnaire):
        """Return the header definitions of the named questionnaire."""
        return self.questionnaires[questionnaire].headers

    def get_subject_fields(self, questionnaire):
        """Return the subject field definitions of the named questionnaire."""
        return self.questionnaires[questionnaire].subject_fields

    def get_next_page(self, questionnaire, current_page):
        """Return the page after current_page, the first page when
        current_page is None, or None past the last page."""
        page_order = self.questionnaires[questionnaire].page_order
        if current_page is None:
            return page_order[0]
        index = page_order.index(current_page)
        if index + 1 < len(page_order):
            return page_order[index + 1]
        return None

    def get_prev_page(self, questionnaire, current_page):
        """Return the page before current_page, the last page when
        current_page is None, or None before the first page."""
        page_order = self.questionnaires[questionnaire].page_order
        if current_page is None:
            return page_order[-1]
        index = page_order.index(current_page)
        if index - 1 >= 0:
            return page_order[index - 1]
        return None

    def get_next_questionnaire(self, current_questionnaire):
        """Return the questionnaire configured to follow the current one (may be None)."""
        return self.questionnaires[current_questionnaire].next_questionnaire

    def get_allow_forward(self, current_questionnaire):
        """Return whether forward navigation is allowed from the current questionnaire."""
        return self.questionnaires[current_questionnaire].allow_forward

    def store_current_questionnaire(self, current_questionnaire):
        """Push the current questionnaire name on the navigation history."""
        self.prev_questionnaires.append(current_questionnaire)

    def get_previous_questionnaire(self):
        """Peek at the most recently stored questionnaire, or None if the history is empty."""
        try:
            return self.prev_questionnaires[-1]
        except IndexError:  # was a bare except: — only an empty history is expected
            return None

    def pop_previous_questionnaire(self):
        """Pop and return the most recently stored questionnaire, or None if the history is empty."""
        try:
            return self.prev_questionnaires.pop()
        except IndexError:  # was a bare except: — only an empty history is expected
            return None

    def get_allow_add_subjects(self, questionnaire):
        """Return whether the named questionnaire allows adding subjects (False if unknown)."""
        try:
            return self.questionnaires[questionnaire].add_subjects
        except (KeyError, AttributeError):  # unknown name / attribute not set
            return False
class Questionnaire(object):
    """One questionnaire of a Survey, built from its JSON definition."""
    def __init__(self, name, survey, **kwargs):
        super(Questionnaire, self).__init__(**kwargs)
        self.survey = survey
        self.page_order = []
        self.headers = []
        self.name = name
        json_data = survey.survey_file['survey'][name]
        self.load_pages(name, survey)
        self.load_headers(name, survey)
        self.load_subject_fields(name, survey)
        # Optional settings, with defaults when absent from the JSON.
        self.next_questionnaire = json_data.get('next_questionnaire', None)
        self.add_subjects = json_data.get('add_subjects', False)
        self.allow_forward = json_data.get('allow_forward', False)
        # These two attributes exist only when present in the JSON definition.
        if 'demographic' in json_data:
            self.demographic = json_data['demographic']
        if 'demographic_restrictions' in json_data:
            self.demographic_restrictions = json_data['demographic_restrictions']

    def load_subject_fields(self, name, survey):
        """Copy the subject field definitions from the survey JSON."""
        self.subject_fields = survey.survey_file['survey'][name]['subject_fields']

    def load_headers(self, name, survey):
        """Copy the header definitions from the survey JSON."""
        self.headers = survey.survey_file['survey'][name]['headers']

    def load_pages(self, name, survey):
        """Instantiate a Page for every page listed in the JSON definition."""
        json_data = survey.survey_file['survey'][name]
        self.pages = {}
        self.page_order = json_data['page_order']
        for page_name in json_data['pages']:
            self.pages[page_name] = Page(page_name, name, survey)
class Page(object):
    """A single page of a questionnaire: its questions and disable bindings."""
    def __init__(self, name, questionnaire_name, survey, **kwargs):
        super(Page, self).__init__(**kwargs)
        self.q_name = questionnaire_name
        self.survey = survey
        self.name = name
        self.question_order = []
        self.load_questions(name, questionnaire_name, survey)
    def load_questions(self, name, q_name, survey):
        """Create Question objects and wire mutual disable bindings."""
        page_json = survey.survey_file['survey'][q_name]['pages'][name]
        question_defs = page_json['questions']
        self.questions = {}
        self.question_order = page_json['question_order']
        self.disable_binds = page_json.get('disable_binds', [])
        for key in question_defs:
            self.questions[key] = Question(key, question_defs[key])
        # Each bound pair disables its partner's widget whenever the
        # partner's answer validates.
        for first, second in self.disable_binds:
            widget_a = self.questions[first].widget
            widget_b = self.questions[second].widget
            widget_a.bind(answer=self.questions[second].call_disable_bind)
            widget_b.bind(answer=self.questions[first].call_disable_bind)
class Question(object):
    """Wraps the widget generated from one JSON question definition."""
    def __init__(self, question_name, question_json, **kwargs):
        super(Question, self).__init__(**kwargs)
        self.widget = widget_from_json(question_json)
        self.widget.question_name = question_name
    def call_disable_bind(self, instance, value):
        """Disable our widget exactly when *instance* validates its answer."""
        self.widget.disabled = bool(instance.validate_question())
| {
"content_hash": "75508b2c581e9133ce747f363fdbb966",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 81,
"avg_line_length": 33.81609195402299,
"alnum_prop": 0.5943235893949694,
"repo_name": "Kovak/KivySurvey",
"id": "a926f9e948cb38f504a0eddcfaffb7526ddd704c",
"size": "5884",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kivy_survey/survey.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "79134"
}
],
"symlink_target": ""
} |
from wdl.types import *
class CoercionException(Exception): pass  # value cannot be coerced to the requested WDL type
class EvalException(Exception): pass  # operator applied to incompatible WDL value(s)
# NOTE: despite the name this does not assert -- it returns True when the
# value's WDL type class is one of *types* (callers use it as a condition).
def assert_type(value, types): return value.type.__class__ in types
def coerce(value, wdl_type):
    """Coerce *value* (a WdlValue or a raw Python value) into *wdl_type*.

    The isinstance chain is order sensitive (WdlArrayType is tested first,
    then the primitives).  Raises CoercionException when no rule matches.
    """
    if isinstance(wdl_type, WdlArrayType):
        return WdlArray.coerce(value, wdl_type.subtype)
    if isinstance(wdl_type, WdlStringType): return WdlString.coerce(value)
    if isinstance(wdl_type, WdlIntegerType): return WdlInteger.coerce(value)
    if isinstance(wdl_type, WdlFloatType): return WdlFloat.coerce(value)
    if isinstance(wdl_type, WdlBooleanType): return WdlBoolean.coerce(value)
    if isinstance(wdl_type, WdlFileType): return WdlFile.coerce(value)
    raise CoercionException("Could not coerce {} into a WDL {}".format(value, wdl_type.wdl_string()))
class WdlValue(object):
    """Base class for all WDL runtime values.

    Subclasses set a class-level ``type`` (a WdlType instance) and define
    ``check_compatible``.  Every operator defaults to raising EvalException;
    subclasses override the combinations they support.
    """
    def __init__(self, value):
        self.value = value
        self.check_compatible(value)
    def as_string(self): return str(self.value)
    # NOTE: the original defined __str__ twice; the earlier definition was
    # dead code (immediately overridden) and has been removed.
    def __str__(self): return '[Wdl{}: {}]'.format(self.type.wdl_string(), self.as_string())
    def __eq__(self, rhs): return (self.__class__, self.value) == (rhs.__class__, rhs.value)
    def __hash__(self): return hash((self.__class__, self.value))
    def __invalid(self, symbol, rhs):
        raise EvalException('Cannot perform operation: {} {} {}'.format(self.type.wdl_string(), symbol, rhs.type.wdl_string()))
    def __invalid_unary(self, symbol):
        raise EvalException('Cannot perform operation: {} {}'.format(symbol, self.type.wdl_string()))
    # Every operation is invalid unless a subclass overrides it.  The
    # derived operators (!=, >=, <=) are built from the primitive ones.
    def add(self, rhs): return self.__invalid('+', rhs)
    def subtract(self, rhs): return self.__invalid('-', rhs)
    def multiply(self, rhs): return self.__invalid('*', rhs)
    def divide(self, rhs): return self.__invalid('/', rhs)
    def mod(self, rhs): return self.__invalid('%', rhs)
    def equal(self, rhs): return self.__invalid('==', rhs)
    def not_equal(self, rhs): return self.equal(rhs).logical_not()
    def greater_than(self, rhs): return self.__invalid('>', rhs)
    def greater_than_or_equal(self, rhs): return self.greater_than(rhs).logical_or(self.equal(rhs))
    def less_than(self, rhs): return self.__invalid('<', rhs)
    def less_than_or_equal(self, rhs): return self.less_than(rhs).logical_or(self.equal(rhs))
    def logical_or(self, rhs): return self.__invalid('||', rhs)
    def logical_and(self, rhs): return self.__invalid('&&', rhs)
    def logical_not(self): return self.__invalid_unary('!')
    def unary_plus(self): return self.__invalid_unary('+')
    def unary_negation(self): return self.__invalid_unary('-')
class WdlUndefined(WdlValue):
    """Sentinel for a value that has not been defined; carries no type."""
    def __init__(self):
        # Deliberately skips WdlValue.__init__: there is no payload to check.
        self.type = None
    def __str__(self):
        return repr(self)
class WdlString(WdlValue):
    """WDL String value wrapping a Python str."""
    type = WdlStringType()
    def check_compatible(self, value):
        """Accept str (or bytes/unicode on py2); reject anything else."""
        try:
            if not isinstance(value, str): value = value.encode('utf-8')
        except AttributeError:
            pass
        try:
            if not isinstance(value, str): value = value.decode('utf-8')
        except AttributeError:
            pass
        if not isinstance(value, str):
            raise EvalException("WdlString must hold a python 'str': {} ({})".format(value, value.__class__))
    @staticmethod
    def coerce(value):
        """Coerce WDL primitives or raw str/int/float into WdlString."""
        if isinstance(value, WdlString): return value
        if value.__class__ in [WdlString, WdlInteger, WdlFloat, WdlFile]: return WdlString(str(value.value))
        if value.__class__ in [str, int, float]: return WdlString(str(value))
        raise CoercionException('Could not coerce {} into a WDL String'.format(value))
    def add(self, rhs):
        """String concatenation with String/File/Int/Float right-hand sides."""
        if assert_type(rhs, [WdlIntegerType, WdlFloatType, WdlStringType, WdlFileType]):
            return WdlString(self.value + str(rhs.value))
        super(WdlString, self).add(rhs)
    def equal(self, rhs):
        if assert_type(rhs, [WdlStringType]):
            return WdlBoolean(self.value == rhs.value)
        super(WdlString, self).equal(rhs)
    def greater_than(self, rhs):
        if assert_type(rhs, [WdlStringType]):
            return WdlBoolean(self.value > rhs.value)
        # BUG FIX: the fallback previously called super().equal(), which
        # raised a misleading 'String == X' message for a failed '>'.
        super(WdlString, self).greater_than(rhs)
    def less_than(self, rhs):
        if assert_type(rhs, [WdlStringType]):
            return WdlBoolean(self.value < rhs.value)
        # BUG FIX: was super().equal() -- see greater_than.
        super(WdlString, self).less_than(rhs)
class WdlInteger(WdlValue):
    """WDL Integer value wrapping a Python int."""
    type = WdlIntegerType()
    def check_compatible(self, value):
        """Reject any payload that is not a Python int."""
        if not isinstance(value, int):
            raise EvalException("WdlInteger must hold a python 'int'")
    @staticmethod
    def coerce(value):
        """Coerce WdlInteger/WdlString/int/str into WdlInteger."""
        if isinstance(value, WdlString): value = value.value
        if isinstance(value, WdlInteger): return value
        if isinstance(value, int): return WdlInteger(value)
        if isinstance(value, str):
            try: return WdlInteger(int(value))
            except ValueError: raise CoercionException('Could not coerce string {} into a WDL Integer'.format(value))
        raise CoercionException('Could not coerce {} into a WDL Integer'.format(value))
    def add(self, rhs):
        # Int + Int -> Int, Int + Float -> Float, Int + String -> String.
        if assert_type(rhs, [WdlIntegerType]):
            return WdlInteger(self.value + rhs.value)
        if assert_type(rhs, [WdlFloatType]):
            return WdlFloat(self.value + rhs.value)
        if assert_type(rhs, [WdlStringType]):
            return WdlString(str(self.value) + rhs.value)
        super(WdlInteger, self).add(rhs)
    def subtract(self, rhs):
        if assert_type(rhs, [WdlIntegerType]):
            return WdlInteger(self.value - rhs.value)
        if assert_type(rhs, [WdlFloatType]):
            return WdlFloat(self.value - rhs.value)
        super(WdlInteger, self).subtract(rhs)
    def multiply(self, rhs):
        if assert_type(rhs, [WdlIntegerType]):
            return WdlInteger(self.value * rhs.value)
        if assert_type(rhs, [WdlFloatType]):
            return WdlFloat(self.value * rhs.value)
        super(WdlInteger, self).multiply(rhs)
    def divide(self, rhs):
        # Int / Int truncates toward zero via int(); Int / Float -> Float.
        if assert_type(rhs, [WdlIntegerType]):
            return WdlInteger(int(self.value / rhs.value))
        if assert_type(rhs, [WdlFloatType]):
            return WdlFloat(self.value / rhs.value)
        super(WdlInteger, self).divide(rhs)
    def mod(self, rhs):
        # NOTE(review): WdlBooleanType is accepted here (int % bool), unlike
        # the other arithmetic operators -- looks unintentional; confirm.
        if assert_type(rhs, [WdlIntegerType, WdlBooleanType]):
            return WdlInteger(self.value % rhs.value)
        if assert_type(rhs, [WdlFloatType]):
            return WdlFloat(self.value % rhs.value)
        super(WdlInteger, self).mod(rhs)
    def equal(self, rhs):
        if assert_type(rhs, [WdlIntegerType, WdlFloatType]):
            return WdlBoolean(self.value == rhs.value)
        super(WdlInteger, self).equal(rhs)
    def greater_than(self, rhs):
        if assert_type(rhs, [WdlIntegerType, WdlFloatType]):
            return WdlBoolean(self.value > rhs.value)
        super(WdlInteger, self).greater_than(rhs)
    def less_than(self, rhs):
        if assert_type(rhs, [WdlIntegerType, WdlFloatType]):
            return WdlBoolean(self.value < rhs.value)
        super(WdlInteger, self).less_than(rhs)
    def unary_negation(self):
        return WdlInteger(-self.value)
    def unary_plus(self):
        return WdlInteger(+self.value)
class WdlBoolean(WdlValue):
    """WDL Boolean value wrapping a Python bool."""
    type = WdlBooleanType()
    def check_compatible(self, value):
        """Reject any payload that is not a Python bool."""
        if not isinstance(value, bool):
            raise EvalException("WdlBoolean must hold a python 'bool'")
    @staticmethod
    def coerce(value):
        """Coerce WdlBoolean/bool/'true'/'false' (case-insensitive) to WdlBoolean."""
        if isinstance(value, WdlBoolean): return value
        # NOTE(review): because 1 == True and 0 == False, coerce(1) matches
        # this membership test and then fails check_compatible with an
        # EvalException rather than a CoercionException -- confirm intended.
        if value in [True, False]: return WdlBoolean(value)
        if isinstance(value, str) and value.lower() in ['true', 'false']:
            return WdlBoolean(value.lower() == 'true')
        raise CoercionException('Could not coerce {} into a WDL Boolean'.format(value))
    def greater_than(self, rhs):
        # Uses Python's bool ordering: True > False.
        if assert_type(rhs, [WdlBooleanType]):
            return WdlBoolean(self.value > rhs.value)
        super(WdlBoolean, self).greater_than(rhs)
    def less_than(self, rhs):
        if assert_type(rhs, [WdlBooleanType]):
            return WdlBoolean(self.value < rhs.value)
        super(WdlBoolean, self).less_than(rhs)
    def equal(self, rhs):
        if assert_type(rhs, [WdlBooleanType]):
            return WdlBoolean(self.value == rhs.value)
        super(WdlBoolean, self).equal(rhs)
    def logical_or(self, rhs):
        if assert_type(rhs, [WdlBooleanType]):
            return WdlBoolean(self.value or rhs.value)
        super(WdlBoolean, self).logical_or(rhs)
    def logical_and(self, rhs):
        if assert_type(rhs, [WdlBooleanType]):
            return WdlBoolean(self.value and rhs.value)
        super(WdlBoolean, self).logical_and(rhs)
    def logical_not(self):
        return WdlBoolean(not self.value)
class WdlFloat(WdlValue):
    """WDL Float value wrapping a Python float."""
    type = WdlFloatType()
    def check_compatible(self, value):
        """Reject any payload that is not a Python float."""
        if not isinstance(value, float):
            raise EvalException("WdlFloat must hold a python 'float'")
    @staticmethod
    def coerce(value):
        """Coerce WdlFloat/WdlString/float/int/str into WdlFloat."""
        if isinstance(value, WdlString): value = value.value
        if isinstance(value, WdlFloat): return value
        if isinstance(value, float): return WdlFloat(value)
        if isinstance(value, int): return WdlFloat(float(value))
        if isinstance(value, str):
            try: return WdlFloat(float(value))
            except ValueError: raise CoercionException('Could not coerce string {} into a WDL Float'.format(value))
        raise CoercionException('Could not coerce {} into a WDL Float'.format(value))
    def add(self, rhs):
        # Float + {Int,Float} -> Float; Float + String -> String.
        if assert_type(rhs, [WdlIntegerType, WdlFloatType]):
            return WdlFloat(self.value + rhs.value)
        if assert_type(rhs, [WdlStringType]):
            return WdlString(str(self.value) + rhs.value)
        super(WdlFloat, self).add(rhs)
    def subtract(self, rhs):
        if assert_type(rhs, [WdlIntegerType, WdlFloatType]):
            return WdlFloat(self.value - rhs.value)
        super(WdlFloat, self).subtract(rhs)
    def multiply(self, rhs):
        if assert_type(rhs, [WdlIntegerType, WdlFloatType]):
            return WdlFloat(self.value * rhs.value)
        super(WdlFloat, self).multiply(rhs)
    def divide(self, rhs):
        if assert_type(rhs, [WdlIntegerType, WdlFloatType]):
            return WdlFloat(self.value / rhs.value)
        super(WdlFloat, self).divide(rhs)
    def mod(self, rhs):
        if assert_type(rhs, [WdlIntegerType, WdlFloatType]):
            return WdlFloat(self.value % rhs.value)
        super(WdlFloat, self).mod(rhs)
    def equal(self, rhs):
        if assert_type(rhs, [WdlIntegerType, WdlFloatType]):
            return WdlBoolean(self.value == rhs.value)
        # BUG FIX: the fallback previously called super().greater_than(),
        # raising a misleading 'Float > X' message for a failed '=='.
        super(WdlFloat, self).equal(rhs)
    def greater_than(self, rhs):
        if assert_type(rhs, [WdlIntegerType, WdlFloatType]):
            return WdlBoolean(self.value > rhs.value)
        super(WdlFloat, self).greater_than(rhs)
    def less_than(self, rhs):
        if assert_type(rhs, [WdlIntegerType, WdlFloatType]):
            return WdlBoolean(self.value < rhs.value)
        super(WdlFloat, self).less_than(rhs)
    def unary_negation(self):
        return WdlFloat(-self.value)
    def unary_plus(self):
        return WdlFloat(+self.value)
class WdlFile(WdlString):
    """WDL File value: a file path, behaving mostly like a String."""
    type = WdlFileType()
    def check_compatible(self, value):
        """Accept str (or bytes/unicode on py2); reject anything else."""
        try:
            if not isinstance(value, str): value = value.encode('utf-8')
        except AttributeError:
            pass
        try:
            if not isinstance(value, str): value = value.decode('utf-8')
        except AttributeError:
            pass
        if not isinstance(value, str):
            raise EvalException("WdlString must hold a python 'str': {} ({})".format(value, value.__class__))
    @staticmethod
    def coerce(value):
        """Coerce WdlFile/WdlString/str into WdlFile."""
        if isinstance(value, WdlFile): return value
        if isinstance(value, WdlString): return WdlFile(value.value)
        if isinstance(value, str): return WdlFile(value)
        raise CoercionException('Could not coerce {} into a WDL File'.format(value))
    def add(self, rhs):
        """File + {File,String} concatenates the paths into a new File."""
        if assert_type(rhs, [WdlFileType, WdlStringType]):
            return WdlFile(self.value + str(rhs.value))
        super(WdlFile, self).add(rhs)
    def equal(self, rhs):
        if assert_type(rhs, [WdlFileType, WdlStringType]):
            return WdlBoolean(self.value == rhs.value)
        super(WdlFile, self).equal(rhs)
    def greater_than(self, rhs):
        if assert_type(rhs, [WdlFileType]):
            return WdlBoolean(self.value > rhs.value)
        # BUG FIX: the fallback previously called super().equal(), raising
        # a misleading 'File == X' message for a failed '>'.
        super(WdlFile, self).greater_than(rhs)
    def less_than(self, rhs):
        if assert_type(rhs, [WdlFileType]):
            return WdlBoolean(self.value < rhs.value)
        # BUG FIX: was super().equal() -- see greater_than.
        super(WdlFile, self).less_than(rhs)
class WdlArray(WdlValue):
    """WDL Array value: a homogeneous list of WdlValues of *subtype*."""
    def __init__(self, subtype, value):
        if not isinstance(value, list):
            raise EvalException("WdlArray must be a Python 'list'")
        # Exact type match on purpose: subclass element types are rejected.
        if not all(type(x.type) == type(subtype) for x in value):
            raise EvalException("WdlArray must contain elements of the same type: {}".format(value))
        self.type = WdlArrayType(subtype)
        self.subtype = subtype
        self.value = value
    @staticmethod
    def coerce(value, subtype):
        """Coerce a list (or WdlArray) to Array[subtype], coercing each element."""
        if isinstance(value, WdlArray): value = value.value
        if not isinstance(value, list):
            raise CoercionException('Only lists can be coerced into a WDL Arrays (got {})'.format(value))
        try:
            # 'coerce' here is the module-level function, not this staticmethod.
            return WdlArray(subtype, [coerce(x, subtype) for x in value])
        except CoercionException as e:
            raise CoercionException('Could not coerce {} into a WDL Array[{}]. {}'.format(value, subtype.wdl_string(), e))
        # BUG FIX: a duplicate 'raise CoercionException(...)' that followed
        # the try/except was unreachable dead code and has been removed.
    def __str__(self):
        return '[{}: {}]'.format(self.type.wdl_string(), ', '.join([str(x) for x in self.value]))
class WdlMap(WdlValue):
    """WDL Map value: a dict of WdlValue keys to WdlValue values with
    uniform primitive key and value types."""
    def __init__(self, key_type, value_type, value):
        if not isinstance(value, dict):
            raise EvalException("WdlMap must be a Python 'dict'")
        if not isinstance(key_type, WdlPrimitiveType):
            raise EvalException("WdlMap must contain WdlPrimitive keys")
        if not isinstance(value_type, WdlPrimitiveType):
            raise EvalException("WdlMap must contain WdlPrimitive values")
        if not all(isinstance(k.type, key_type.__class__) for k in value.keys()):
            raise EvalException("WdlMap must contain keys of the same type: {}".format(value))
        if not all(isinstance(v.type, value_type.__class__) for v in value.values()):
            raise EvalException("WdlMap must contain values of the same type: {}".format(value))
        # BUG FIX: removed '(k, v) = list(value.items())[0]' -- the unpacked
        # names were never used and the lookup raised IndexError on an
        # empty map.
        self.type = WdlMapType(key_type, value_type)
        self.key_type = key_type
        self.value_type = value_type
        self.value = value
class WdlObject(WdlValue):
    """WDL Object: a bag of named attributes stored on the instance dict."""
    def __init__(self, dictionary):
        # Deliberately does not call WdlValue.__init__: an object has no
        # single wrapped value, only attributes.
        for key in dictionary:
            self.set(key, dictionary[key])
    def set(self, key, value):
        """Set attribute *key* to *value*."""
        self.__dict__[key] = value
    def get(self, key):
        """Return attribute *key*; raises KeyError when missing."""
        return self.__dict__[key]
    def __str__(self):
        return '[WdlObject: {}]'.format(str(self.__dict__))
| {
"content_hash": "dd34d2b2b2e89ad1850ceada6d9f1f87",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 127,
"avg_line_length": 45.03235294117647,
"alnum_prop": 0.6330089478152962,
"repo_name": "broadinstitute/pywdl",
"id": "82a7b71aa1c3101813d14072d800eb52121cd580",
"size": "15311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wdl/values.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "263742"
},
{
"name": "Shell",
"bytes": "112"
}
],
"symlink_target": ""
} |
"""
Run the maze game
"""
import argparse
import sys
from mazegame import *
def main():
    """Parse command-line options and launch the selected maze game."""
    parser = argparse.ArgumentParser(description='Play different types of maze games')
    parser.add_argument('-r', '--random', help='Play the random game', action='store_true')
    parser.add_argument('-f', '--file', help='Play the file-based game', action='store_true')
    parser.add_argument('-i', '--interactive', help='Play the interactive game', action='store_true')
    parser.add_argument('-c', '--concurrent', help='Concurrently solve using multiple processes', action='store_true')
    parser.add_argument('-b', '--best', help='In concurrent mode, choose best result', action='store_true')
    parser.add_argument('-d', '--dimension', help='Matrix dimension (required for random games)', type=int, default=10)
    args = parser.parse_args()

    # Pick the game implementation from the mutually exclusive mode flags;
    # no flag means nothing to run (the __main__ guard forces -h then).
    game = None
    if args.random:
        game = RandomMazeGame2(int(args.dimension))
    elif args.file:
        game = FilebasedMazeGame()
    elif args.interactive:
        game = InteractiveMazeGame()
    if game is not None:
        game.runGame(args.concurrent, args.best)
# Entry point: with no arguments, force argparse's help screen instead of
# silently doing nothing.
if __name__ == "__main__":
    if len(sys.argv)==1:
        sys.argv.append('-h')
    main()
| {
"content_hash": "42d8c59a118f63e3aa00c9e96aeb9ba9",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 116,
"avg_line_length": 32.41463414634146,
"alnum_prop": 0.6403310759969902,
"repo_name": "pythonhacker/pyconindia2017concurrency",
"id": "cd8494440521ffded0c40d316cce18f783ead855",
"size": "1329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maze/run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14268"
},
{
"name": "HTML",
"bytes": "58875"
},
{
"name": "JavaScript",
"bytes": "17599"
},
{
"name": "Python",
"bytes": "102373"
}
],
"symlink_target": ""
} |
from twisted.python import log
from tastypie import authorization
from django.conf.urls import *
import json, os
from ....libs import LisaFactorySingleton, LisaProtocolSingleton
from ....ConfigManager import ConfigManagerSingleton
from tastypie import resources as tastyresources
from tastypie.utils import trailing_slash
from wit import Wit
configuration = ConfigManagerSingleton.get().getConfiguration()
dir_path = ConfigManagerSingleton.get().getPath()
from .mixins import CustomApiKeyAuthentication
from tastypie.authentication import MultiAuthentication, SessionAuthentication
class Lisa(object):
    """Empty placeholder object exposed through the tastypie resource."""
    def __init__(self):
        # Nothing to initialise; the resource only needs an instance to list.
        return None
class LisaResource(tastyresources.Resource):
    """Tastypie REST resource exposing LISA server control endpoints.

    All endpoints are non-model, list-level actions declared in
    Meta.extra_actions (for the API self-documentation) and routed in
    prepend_urls(): configuration/version queries, engine and scheduler
    reload, wit.ai intent listing, client messaging (speak) and two
    text-to-speech backends (Google Translate TTS and pico2wave).

    NOTE(review): several constructs (HttpResponse 'mimetype' kwarg,
    str-join of response bytes) look Python-2 / pre-1.7-Django era --
    confirm the pinned framework versions before modernising.
    """
    class Meta:
        resource_name = 'lisa'
        allowed_methods = ()
        authorization = authorization.Authorization()
        object_class = Lisa
        authentication = MultiAuthentication(CustomApiKeyAuthentication())
        # Endpoint metadata consumed by the API documentation; the actual
        # URL routing happens in prepend_urls() below.
        extra_actions = [
            {
                'name': 'configuration',
                'http_method': 'GET',
                'resource_type': 'list',
                'fields': {}
            },
            {
                'name': 'version',
                'http_method': 'GET',
                'resource_type': 'list',
                'fields': {}
            },
            {
                'name': 'engine/reload',
                'http_method': 'GET',
                'resource_type': 'list',
                'fields': {}
            },
            {
                'name': 'scheduler/reload',
                'http_method': 'GET',
                'resource_type': 'list',
                'fields': {}
            },
            {
                'name': 'witintents',
                'http_method': 'GET',
                'resource_type': 'list',
                'fields': {}
            },
            {
                'name': 'speak',
                'http_method': 'POST',
                'resource_type': 'list',
                'fields': {
                    'message': {
                        'type': 'string',
                        'required': True,
                        'description': 'The message to transmit to client(s)',
                        'paramType': 'body'
                    },
                    'clients_zone': {
                        'type': 'list',
                        'required': True,
                        'description': "Provide a list of zones : ['all','WebSocket','Bedroom'] ...",
                        'paramType': 'body'
                    }
                }
            },
            {
                'name': 'tts-google',
                'http_method': 'POST',
                'resource_type': 'list',
                'fields': {
                    'message': {
                        'type': 'string',
                        'required': True,
                        'description': 'The message to vocalize',
                        'paramType': 'body'
                    },
                    'lang': {
                        'type': 'string',
                        'required': True,
                        'description': 'Lang of the message',
                        'paramType': 'body'
                    }
                }
            },
            {
                'name': 'tts-pico',
                'http_method': 'POST',
                'resource_type': 'list',
                'fields': {
                    'message': {
                        'type': 'string',
                        'required': True,
                        'description': 'The message to vocalize',
                        'paramType': 'body'
                    },
                    'lang': {
                        'type': 'string',
                        'required': True,
                        'description': 'Lang of the message',
                        'paramType': 'body'
                    }
                }
            }
        ]
    def prepend_urls(self):
        """Route each custom action URL to its handler method."""
        return [
            url(r"^(?P<resource_name>%s)/configuration%s$" % (self._meta.resource_name, trailing_slash()),
                self.wrap_view('configuration'), name="api_lisa_configuration"),
            url(r"^(?P<resource_name>%s)/version%s$" % (self._meta.resource_name, trailing_slash()),
                self.wrap_view('version'), name="api_lisa_version"),
            url(r"^(?P<resource_name>%s)/engine/reload%s$" % (self._meta.resource_name, trailing_slash()),
                self.wrap_view('engine_reload'), name="api_lisa_engine_reload"),
            url(r"^(?P<resource_name>%s)/scheduler/reload%s" % (self._meta.resource_name, trailing_slash()),
                self.wrap_view('scheduler_reload'), name="api_lisa_scheduler_reload"),
            url(r"^(?P<resource_name>%s)/tts-google%s" % (self._meta.resource_name, trailing_slash()),
                self.wrap_view('tts_google'), name="api_lisa_tts_google"),
            url(r"^(?P<resource_name>%s)/tts-pico%s" % (self._meta.resource_name, trailing_slash()),
                self.wrap_view('tts_pico'), name="api_lisa_tts_pico"),
            url(r"^(?P<resource_name>%s)/speak%s" % (self._meta.resource_name, trailing_slash()),
                self.wrap_view('speak'), name="api_lisa_speak"),
            url(r"^(?P<resource_name>%s)/witintents%s" % (self._meta.resource_name, trailing_slash()),
                self.wrap_view('witintents'), name="api_lisa_witintents"),
        ]
    def speak(self, request, **kwargs):
        """Forward a chat message to the client(s) in the given zones."""
        self.method_check(request, allowed=['post', 'get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        from tastypie.http import HttpAccepted, HttpNotModified
        data = self.deserialize(request, request.body, format=request.META.get('CONTENT_TYPE', 'application/json'))
        message = data.get('message', '')
        clients_zone = data.get('clients_zone', '')
        """
        print request.body
        if request.method == 'POST':
            message = request.POST.get("message")
            clients_zone = request.POST.getlist("clients_zone")
        else:
            message = request.GET.get("message")
            clients_zone = request.GET.getlist("clients_zone")
        """
        jsondata = json.dumps({
            'body': message,
            'clients_zone': clients_zone,
            'from': "API",
            'type': "chat"
        })
        LisaProtocolSingleton.get().answerToClient(jsondata=jsondata)
        self.log_throttled_access(request)
        return self.create_response(request, {'status': 'success', 'log': "Message sent"}, HttpAccepted)
    def tts_google(self, request, **kwargs):
        """Vocalize *message* through Google Translate TTS.

        Splits the text into <100-char chunks at sentence/space boundaries
        (the service limit), fetches each chunk, and returns the
        concatenated MP3 stream.
        """
        self.method_check(request, allowed=['post'])
        self.is_authenticated(request)
        self.throttle_check(request)
        from tastypie.http import HttpAccepted, HttpNotModified
        import re
        import requests
        from django.http import HttpResponse
        combined_sound = []
        try:
            if request.method == 'POST':
                message = request.POST.get("message")
                lang = request.POST.get("lang")
                if not message:
                    # In case there isn't form data, let's check the body
                    post = json.loads(request.body)
                    message = post['message']
                    lang = post['lang']
            #process text into chunks
            text = message.replace('\n', '')
            text_list = re.split('(\.)', text)
            combined_text = []
            for idx, val in enumerate(text_list):
                if idx % 2 == 0:
                    combined_text.append(val)
                else:
                    joined_text = ''.join((combined_text.pop(), val))
                    if len(joined_text) < 100:
                        combined_text.append(joined_text)
                    else:
                        subparts = re.split('( )', joined_text)
                        temp_string = ""
                        temp_array = []
                        for part in subparts:
                            temp_string += part
                            if len(temp_string) > 80:
                                temp_array.append(temp_string)
                                temp_string = ""
                        #append final part
                        temp_array.append(temp_string)
                        combined_text.extend(temp_array)
            #download chunks and write them to the output file
            for idx, val in enumerate(combined_text):
                headers = {"Host": "translate.google.com",
                           "Referer": "https://translate.google.com/",
                           "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.63 Safari/537.36"}
                r = requests.get("https://translate.google.com/translate_tts?ie=UTF-8&tl=%s&q=%s&total=%s&idx=%s&client=t&prev=input" % (
                    lang, val, len(combined_text), idx), headers=headers)
                combined_sound.append(r.content)
        # NOTE(review): bare except hides programming errors too (NameError,
        # KeyError in the body above); narrowing it is worth considering.
        except:
            log.err()
            return self.create_response(request, {'status': 'failure'}, HttpNotModified)
        self.log_throttled_access(request)
        return HttpResponse(''.join(combined_sound), content_type="audio/mpeg", mimetype="audio/mpeg")
    def tts_pico(self, request, **kwargs):
        """Vocalize *message* locally through the pico2wave binary and
        return the generated audio; the temp file is removed afterwards."""
        import uuid
        self.method_check(request, allowed=['post', 'get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        message = request.POST.get("message")
        lang = request.POST.getlist("lang")
        from tastypie.http import HttpAccepted, HttpNotModified
        from django.http import HttpResponse
        from subprocess import call, Popen
        combined_sound = []
        # Unique temp name so concurrent requests don't collide.
        temp = dir_path + "/tmp/" + str(uuid.uuid4()) + ".wav"
        language = str(lang[0])+'-'+str(lang[0]).upper()
        command = ['pico2wave', '-w', temp, '-l', language, '--', message]
        try:
            call(command)
            #combined_sound.append(content)
        except OSError:
            log.err()
            return self.create_response(request, { 'status' : 'failure' }, HttpNotModified)
        f = open(temp,"rb")
        combined_sound.append(f.read())
        f.close()
        os.remove(temp)
        self.log_throttled_access(request)
        return HttpResponse(''.join(combined_sound), content_type="audio/mpeg", mimetype="audio/mpeg")
    def engine_reload(self, request, **kwargs):
        """Ask the LISA factory to reload the engine."""
        self.method_check(request, allowed=['get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        from tastypie.http import HttpAccepted, HttpNotModified
        try:
            LisaFactorySingleton.get().LisaReload()
        # NOTE(review): bare except -- any failure is reported identically.
        except:
            log.err()
            return self.create_response(request, { 'status' : 'failure' }, HttpNotModified)
        self.log_throttled_access(request)
        return self.create_response(request, { 'status': 'success', 'log': "L.I.S.A Engine reloaded"}, HttpAccepted)
    def witintents(self, request, **kwargs):
        """List the intents configured on the wit.ai account."""
        self.method_check(request, allowed=['get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        self.wit = Wit(configuration['wit_server_token'])
        from tastypie.http import HttpAccepted, HttpNotModified
        try:
            intents = self.wit.get_intents()
        except:
            log.err()
            return self.create_response(request, { 'status' : 'failure' }, HttpNotModified)
        self.log_throttled_access(request)
        return self.create_response(request, { 'status': 'success', 'intents': intents}, HttpAccepted)
    def scheduler_reload(self, request, **kwargs):
        """Ask the LISA factory to reload the task scheduler."""
        self.method_check(request, allowed=['get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        from tastypie.http import HttpAccepted, HttpNotModified
        try:
            LisaFactorySingleton.get().SchedReload()
        except:
            log.err()
            return self.create_response(request, { 'status' : 'failure' }, HttpNotModified)
        self.log_throttled_access(request)
        return self.create_response(request, {'status': 'success', 'log': 'L.I.S.A Task Scheduler reloaded'},
                                    HttpAccepted)
    def configuration(self, request, **kwargs):
        """Return the server configuration with the database section blanked."""
        self.method_check(request, allowed=['get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        from tastypie.http import HttpAccepted, HttpNotModified
        self.log_throttled_access(request)
        # NOTE(review): this is an alias, not a copy -- the assignment below
        # clobbers the module-level configuration['database'] for the rest
        # of the process, and the response then returns the mutated global.
        # Probably intended to be a (deep) copy; confirm before changing.
        copyconfiguration = configuration
        copyconfiguration['database'] = None
        return self.create_response(request, {'configuration': configuration}, HttpAccepted)
    def version(self, request, **kwargs):
        """Compare the installed lisa-server version against PyPI and report
        whether an upgrade is available."""
        from tastypie.http import HttpAccepted, HttpNotModified
        from pkg_resources import get_distribution
        import requests
        self.method_check(request, allowed=['get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        self.log_throttled_access(request)
        local_version = get_distribution('lisa-server').version
        should_upgrade = False
        r = requests.get('https://pypi.python.org/pypi/lisa-server/json')
        if r.status_code == requests.codes.ok:
            remote_version = r.json()['info']['version']
        else:
            return self.create_response(request, {'status': 'fail', 'log': 'Problem contacting pypi.python.org'}, HttpAccepted)
        # NOTE(review): plain string comparison -- e.g. '0.10.0' > '0.9.0'
        # is False; a parsed version compare would be safer. Confirm the
        # project's version scheme.
        if remote_version > local_version:
            should_upgrade = True
        response = {
            'local_version': get_distribution('lisa-server').version,
            'remote_version': remote_version,
            'should_upgrade': should_upgrade
        }
        return self.create_response(request, response, HttpAccepted)
def get_object_list(self, request):
return [Lisa()] | {
"content_hash": "204a0526a7753b4b3bd0b953abb9cb3a",
"timestamp": "",
"source": "github",
"line_count": 348,
"max_line_length": 148,
"avg_line_length": 40.69827586206897,
"alnum_prop": 0.5190990609334181,
"repo_name": "Seraf/LISA",
"id": "24de8f980fee24cb928afd03ecf4c108f4735de8",
"size": "14163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lisa/server/web/weblisa/api/apilisa.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "222369"
},
{
"name": "HTML",
"bytes": "9114"
},
{
"name": "JavaScript",
"bytes": "161574"
},
{
"name": "Python",
"bytes": "121284"
},
{
"name": "Shell",
"bytes": "275"
},
{
"name": "Smarty",
"bytes": "3304"
}
],
"symlink_target": ""
} |
"""
Test signal reporting when debugging with linux core files.
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class LinuxCoreThreadsTestCase(TestBase):
    """Checks per-thread stop reasons reported for Linux core files.

    Loads prebuilt i386/x86_64 core dumps and verifies that exactly the
    crashing thread reports a signal stop reason (SIGILL).
    """
    NO_DEBUG_INFO_TESTCASE = True
    mydir = TestBase.compute_mydir(__file__)
    # Process ids baked into the prebuilt core files.
    _i386_pid = 5193
    _x86_64_pid = 5222
    # Thread id for the failing thread.
    _i386_tid = 5195
    _x86_64_tid = 5250
    @skipIf(oslist=['windows'])
    @skipIf(triple='^mips')
    def test_i386(self):
        """Test that lldb can read the process information from an i386 linux core file."""
        self.do_test("linux-i386", self._i386_pid, self._i386_tid)
    @skipIf(oslist=['windows'])
    @skipIf(triple='^mips')
    def test_x86_64(self):
        """Test that lldb can read the process information from an x86_64 linux core file."""
        self.do_test("linux-x86_64", self._x86_64_pid, self._x86_64_tid)
    def do_test(self, filename, pid, tid):
        """Load <filename>.core and verify thread count, pid, per-thread
        signal stop reasons and zero-filled PT_LOAD reads."""
        target = self.dbg.CreateTarget("")
        process = target.LoadCore(filename + ".core")
        self.assertTrue(process, PROCESS_IS_VALID)
        self.assertEqual(process.GetNumThreads(), 3)
        self.assertEqual(process.GetProcessID(), pid)
        for thread in process:
            # Verify that if we try to read memory from a PT_LOAD that has
            # p_filesz of zero that we don't get bytes from the next section
            # that actually did have bytes. The addresses below were found by
            # dumping the program headers of linux-i386.core and
            # linux-x86_64.core and verifying that they had a p_filesz of zero.
            mem_err = lldb.SBError()
            if process.GetAddressByteSize() == 4:
                bytes_read = process.ReadMemory(0x8048000, 4, mem_err)
            else:
                bytes_read = process.ReadMemory(0x400000, 4, mem_err)
            self.assertEqual(bytes_read, None)
            reason = thread.GetStopReason()
            if( thread.GetThreadID() == tid ):
                self.assertEqual(reason, lldb.eStopReasonSignal)
                signal = thread.GetStopReasonDataAtIndex(1)
                # Check we got signal 4 (SIGILL)
                self.assertEqual(signal, 4)
            else:
                signal = thread.GetStopReasonDataAtIndex(1)
                # Check we got no signal on the other threads
                self.assertEqual(signal, 0)
        self.dbg.DeleteTarget(target)
| {
"content_hash": "4019bf71336b1170a990c27949dc2e8d",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 93,
"avg_line_length": 37.04477611940298,
"alnum_prop": 0.6188557614826753,
"repo_name": "endlessm/chromium-browser",
"id": "6e34c914ae289c40028922ad696a60aa938062b3",
"size": "2482",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "third_party/llvm/lldb/test/API/functionalities/postmortem/elf-core/thread_crash/TestLinuxCoreThreads.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
'''
Created on 16/03/2011
@author: team A
'''
import time
from sys import stdout
import os.path
import csv
import os
class StreamWatcher:
def __init__(self, _sched ,
_downloader,
_storagewrapper ,
_config,_downmeasure):
self.sched = _sched #rawserver tasks queue Instance
self.downloader = _downloader #downloader Instance
self.storagewrapper = _storagewrapper #StorageWrapper Instance
self.config = _config #Config Dictionary (Olds Argv)
self.downmeasure = _downmeasure
self.init() #Initialize self Instance
def init (self):
self.rate = int(self.config['rate'])
self.delay = int(self.config['delay'])
self.prefetchT = int(self.config['prefetchT'])
self.total_dfs = 0 #Total No. of bytes taken from server
self.p2p = 0 #Total No. of bytes downloaded so far
self.piece_size = self.storagewrapper.piece_size #"this" torrent piece size in Bytes
self.total = self.storagewrapper.total_length #"this" torrent total size in Bytes
self.numOfPieces = len(self.storagewrapper.have) #"this" torrent number of pieces
self.numOfFullPiecesFromServer = 0
self.numOfDirtyPiecesFromServer = 0
self.numOFFaildHashChecksFromServer = 0
self.startTime = time.time() #StreamWatcher Start Time
self.gap=int(self.config['gap'])
self.init_csv(self.config['out_dir']+'statistics-order-'+self.config['order']+'-gap-'+str(self.gap)+'.csv')
self.prefetch = int(((float(self.prefetchT) / 100)*self.toKbytes(self.total))/self.rate)
def init_csv (self, csv):
self.csvFile = csv
try:
if (os.path.exists(self.csvFile)):
os.remove(self.csvFile)
except ValueError:
return
def verify_vod_rate(self):
((self.prefetchT / 100)*self.total)/self.rate
t = int(time.time() - self.startTime)
Orig = int(((t - self.delay) * self.rate) / self.toKbytes(self.piece_size))
Dest = int(((t - self.delay + self.prefetch ) * self.rate) / self.toKbytes(self.piece_size))
#Orig & Dest are pieces indexes
#(Calculation is done in KB to match self.rate which is given in KB)
if Dest>self.numOfPieces-1:
Dest = self.numOfPieces - 1
if Orig > Dest:
Orig = Dest
if (not self.storagewrapper.am_I_complete()):
#Loop over the gap [Orig,Dest] to check this peer 'have' list:
for i in range(Orig,Dest+1):
#Case 1 : piece wasn't downloaded at all till now and no pending request for it also:
if (self.storagewrapper.is_unstarted(i)):
self.total_dfs += self.piece_size
self.numOfFullPiecesFromServer +=1
#Case 2 : piece is in the middle of a download from some peer\seed:
else:
if ((not self.storagewrapper.do_I_have(i)) and self.storagewrapper.dirty.has_key(i)):
holes = self.get_dirty_holes (self.storagewrapper.dirty[i])
if (holes):
self.cancel_piece_download(i)
j = iter(holes)
counter = len(holes)
while(counter>1):
chunk = j.next()
self.total_dfs += chunk[1]
counter-=1
chunk = j.next()
self.total_dfs += chunk[1]
self.numOfDirtyPiecesFromServer +=1
dfs = (self.total_dfs*100)/self.total
self.stats2csv(dfs, self.p2p)
print'ZZZ Dest = ',Dest,'Orig = ',Orig,'self.numOfPieces=',self.numOfPieces
if(Dest == (self.numOfPieces-1)):
order = int(self.config['group_size']) - int(self.config['order'])
while(order>0):
gap = self.gap
while(gap>0):
self.stats2csv(dfs, self.p2p)
gap = gap-1
order = order-1
if self.config['verbose']:
os.system("./run_all.sh stop")
else:
self.sched(self.verify_vod_rate, self.prefetch)
def get_dirty_holes(self,dirty):
if (not dirty):
return None
try:
holes = []
chunk_size = dirty[0][1]
j=0
while (j!=self.piece_size):
h = (j,chunk_size)
try:
dirty.index(h)
except ValueError:
holes.append(h)
j+=chunk_size
return holes
except ValueError:
return None
def cancel_piece_download(self,index):
pieceToCnacel = []
pieceToCnacel.append(index)
#Cancel all pending downloading request for this piece:
self.downloader.cancel_piece_download(pieceToCnacel)
def display(self) :
t = int(time.time() - self.startTime)
cur_piece = int(((t - self.delay) * self.rate) / self.toKbytes(self.piece_size))
if self.config['verbose']:
print '--------------------------------StreamWatcher-------------------------------------\r'
print 'Csv stats: ', self.csvFile,'\r'
print 'DFS is: ', self.total_dfs ,'bytes\r'
print 'DFS/Total is: ', (self.total_dfs*100)/self.total ,'%\r'
print 'FullPieces: ', self.numOfFullPiecesFromServer ,'/',self.numOfPieces ,'\r'
print 'DirtyPieces: ', self.numOfDirtyPiecesFromServer ,'/',self.numOfPieces,'\r'
print 'FaildHashChecks: ', self.numOFFaildHashChecksFromServer,'\r'
print 'Prefetching ', self.config['prefetchT'],'%\r'
if cur_piece < 0:
cur_piece = 0
print 'Playing point: ', cur_piece,'/',self.numOfPieces,'(',int(((cur_piece*100)/self.numOfPieces)),'%)\r'
stdout.flush()
def stats2csv(self,dfs,p2p):
try:
if (not os.path.exists(self.csvFile)):
FcsvWriter = csv.writer(open(self.csvFile, 'wb'))
FcsvWriter.writerow(['alg','dfs','p2p'])
no_data=0
order = int(self.config['order'])-1
while(order>0):
gap=self.gap
while(gap>0):
FcsvWriter.writerow([self.config['alg'],no_data,no_data])
gap = gap-1
order = order-1
FcsvWriter.writerow([self.config['alg'],dfs,p2p])
else:
FcsvWriter = csv.writer(open(self.csvFile, 'a'))
FcsvWriter.writerow([self.config['alg'],dfs,p2p])
except (IOError, OSError), e:
print "IO Error:" + str(e)
def toBytes (self,x):
return int(x*1024)
def toKbytes (self,x):
return int(x/1024)
| {
"content_hash": "c5acb74c409669538575a403905e7419",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 119,
"avg_line_length": 43.941520467836256,
"alnum_prop": 0.49653979238754326,
"repo_name": "ArthurWulfWhite/team-Mea-peer2peer",
"id": "25c3f7793826cd4169e129954d877cda34445b6f",
"size": "7536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BitTornado/StreamWatcher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "667211"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from decimal import Decimal
import random
from .. import BaseProvider
from .. import date_time
# NOTE(review): flag presumably read by the faker factory to indicate that
# locale-specific subclasses of this provider exist -- confirm upstream.
localized = True
class Provider(BaseProvider):
    """Base address provider.

    Generates cities, street addresses, postcodes, countries and
    geographic coordinates from the format patterns declared as class
    attributes (subclasses may override the patterns).
    """
    city_suffixes = ['Ville', ]
    street_suffixes = ['Street', ]
    city_formats = ('{{first_name}} {{city_suffix}}', )
    street_name_formats = ('{{last_name}} {{street_suffix}}', )
    street_address_formats = ('{{building_number}} {{street_name}}', )
    address_formats = ('{{street_address}} {{postcode}} {{city}}', )
    building_number_formats = ('##', )
    postcode_formats = ('#####', )
    countries = [tz['name'] for tz in date_time.Provider.countries]

    @classmethod
    def city_suffix(cls):
        """Pick a random city suffix.

        :example 'town'
        """
        return cls.random_element(cls.city_suffixes)

    @classmethod
    def street_suffix(cls):
        """Pick a random street suffix.

        :example 'Avenue'
        """
        return cls.random_element(cls.street_suffixes)

    @classmethod
    def building_number(cls):
        """Produce a random building number.

        :example '791'
        """
        chosen = cls.random_element(cls.building_number_formats)
        return cls.numerify(chosen)

    def city(self):
        """Produce a random city name.

        :example 'Sashabury'
        """
        return self.generator.parse(self.random_element(self.city_formats))

    def street_name(self):
        """Produce a random street name.

        :example 'Crist Parks'
        """
        return self.generator.parse(
            self.random_element(self.street_name_formats))

    def street_address(self):
        """Produce a random street address.

        :example '791 Crist Parks'
        """
        return self.generator.parse(
            self.random_element(self.street_address_formats))

    @classmethod
    def postcode(cls):
        """Produce a random postcode.

        :example 86039-9874
        """
        chosen = cls.random_element(cls.postcode_formats)
        return cls.bothify(chosen).upper()

    def address(self):
        """Produce a full random address.

        :example '791 Crist Parks, Sashabury, IL 86039-9874'
        """
        return self.generator.parse(self.random_element(self.address_formats))

    @classmethod
    def country(cls):
        """Pick a random country name."""
        return cls.random_element(cls.countries)

    @classmethod
    def geo_coordinate(cls, center=None, radius=0.001):
        """
        Optionally center the coord and pick a point within radius.
        """
        # NOTE: any falsy center (None or 0) yields a world-wide pick,
        # matching the original truthiness check.
        if not center:
            degrees = random.randint(-180000000, 180000000) / 1000000.0
            return Decimal(str(degrees)).quantize(Decimal('.000001'))
        return Decimal(str(random.uniform(center - radius, center + radius)))

    @classmethod
    def latitude(cls):
        # Latitude has a range of -90 to 90, so divide by two.
        return cls.geo_coordinate() / 2

    @classmethod
    def longitude(cls):
        """Longitude spans the full -180..180 range."""
        return cls.geo_coordinate()
| {
"content_hash": "1f8d87efa5287f2ad36524a79f678315",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 111,
"avg_line_length": 28.07,
"alnum_prop": 0.5910224438902744,
"repo_name": "jaredculp/faker",
"id": "614a6f57a9438b0237a442993b2ca646459cbc2e",
"size": "2822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "faker/providers/address/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1269641"
}
],
"symlink_target": ""
} |
"""
http://www.spoj.com/problems/ONP/
4. Transform the Expression
Problem code: ONP
Transform the algebraic expression with brackets into
RPN form (Reverse Polish Notation). Two-argument operators:
+, -, *, /, ^ (priority from the lowest to the highest), brackets ( ).
Operands: only letters: a,b,...,z.
Assume that there is only one RPN form (no expressions like a*b*c).
Input
t [the number of expressions <= 100]
expression [length <= 400]
[other expressions]
Text grouped in [ ] does not appear in the input file.
Output
The expressions in RPN form, one per line.
Example
Input:
3
(a+(b*c))
((a+b)*(z+x))
((a+t)*((b+(a+c))^(c+d)))
Output:
abc*+
ab+zx+*
at+bac++cd+^*
"""
class Stack:
    """A minimal LIFO stack backed by a Python list."""

    def __init__(self):
        self.items = []

    def isEmpty(self):
        """Return True when the stack holds no elements."""
        return not self.items

    def push(self, item):
        """Place *item* on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the top element."""
        return self.items.pop()

    def peek(self):
        """Return the top element without removing it."""
        return self.items[-1]

    def size(self):
        """Return the number of stored elements."""
        return len(self.items)
# Operator precedence (higher binds tighter); '(' gets the lowest value so
# it never pops other operators while sitting on the stack.
precedence = {"^": 4, "*": 3, "/": 3, "+": 2, "-": 2, "(": 1}
# Characters recognised as operands.
alphabet = "abcdefghijklmnopqrstuvwxyz"
numbers = "0123456789"
def infixToRPN(expression):
    """Convert infix notation to reverse polish notation.

    Works both on space-separated expressions ("( a + b )") and compact
    ones ("(a+(b*c))"), and returns the RPN string in the same spacing
    style as the input (shunting-yard algorithm).
    """
    stack = Stack()
    RPNList = []
    tokens = expression.split()
    spaces = True
    # If no spaces in expression then tokenize character by character.
    if len(tokens) == 1:
        spaces = False
        tokens = list(expression)
    for token in tokens:
        # BUGFIX/generalisation: the old substring test
        # ("token in alphabet or token in numbers") accidentally accepted
        # multi-letter runs like "bc" but rejected numbers such as "90"
        # that are not a substring of "0123456789".  isalnum() correctly
        # accepts any alphanumeric operand token.
        if token.isalnum():
            RPNList.append(token)
        elif token == '(':
            stack.push(token)
        elif token == ')':
            # Pop every operator back to the matching '('.
            top = stack.pop()
            while top != '(':
                RPNList.append(top)
                top = stack.pop()
        else:
            # Operator: first pop operators of higher-or-equal precedence.
            while (not stack.isEmpty()) and (precedence[stack.peek()] >= precedence[token]):
                RPNList.append(stack.pop())
            stack.push(token)
    # Flush any operators still pending on the stack.
    while not stack.isEmpty():
        RPNList.append(stack.pop())
    return (" " if spaces else "").join(RPNList)
# Driver (Python 2): read the number of expressions, then convert and
# print each one on its own line.
t = int(raw_input())
while t > 0:
    print infixToRPN(raw_input())
    t -= 1
| {
"content_hash": "5340cd44f68ff2c31c7850d29b230c9b",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 86,
"avg_line_length": 20.92,
"alnum_prop": 0.624282982791587,
"repo_name": "CreaturePhil/Faith",
"id": "5d63d8a9f9eaae9bf754a247bf13ac3e9900c905",
"size": "2092",
"binary": false,
"copies": "1",
"ref": "refs/heads/gh-pages",
"path": "spoj/python/04-ONP.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "13631"
},
{
"name": "CSS",
"bytes": "12066"
},
{
"name": "HTML",
"bytes": "4533"
},
{
"name": "Haskell",
"bytes": "505"
},
{
"name": "JavaScript",
"bytes": "474"
},
{
"name": "Python",
"bytes": "5082"
},
{
"name": "Rust",
"bytes": "844"
}
],
"symlink_target": ""
} |
from books.tests.pagination import PaginationTestCase as pagination
from books.tests.meta import MetaTestCase as meta
from books.tests.json_builder import JSONBuilderTestCase as builder
from books.tests.api import ApiTestCase as api
from books.tests.book_store import BookStoreTestCase
from books.tests.resource_methods import ResourceHttpMethodsTestCase as httpmethods
from books.tests.forms import FormTestCase as forms
from books.tests.mobile_driven_resource import MobileDrivenResourceTestCase as mobile
from books.tests.params import *
| {
"content_hash": "eedec6465f070b4c462994cf65eb426c",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 85,
"avg_line_length": 60.111111111111114,
"alnum_prop": 0.8632162661737524,
"repo_name": "laginha/yard",
"id": "f44675f94ac51ea5f16714d77fcc5f92dcb5263d",
"size": "585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/store/books/tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45582"
},
{
"name": "HTML",
"bytes": "4657"
},
{
"name": "JavaScript",
"bytes": "329249"
},
{
"name": "Python",
"bytes": "155154"
}
],
"symlink_target": ""
} |
from cointk.backtest import backtest
from cointk.strategies import NaiveStrategy
import random
# Seed the RNG so backtest runs are reproducible.
random.seed(1)
# NOTE(review): n_prices/threshold semantics are defined by NaiveStrategy
# (presumably a lookback window and a decision threshold) -- confirm in
# the cointk strategy documentation.
strategy = NaiveStrategy(n_prices=1000, threshold=0.8)
backtest(strategy)
| {
"content_hash": "3f225c7c34bffb20c9e6b6620ba137f0",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 54,
"avg_line_length": 23.25,
"alnum_prop": 0.8225806451612904,
"repo_name": "CoinTK/CoinTK",
"id": "671dc4dbfd1f4b9f0957950766d1c6d10faf910a",
"size": "186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example_backtests/naive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40451"
}
],
"symlink_target": ""
} |
"""
Challenge 12:
- Write an application that will create a route in mailgun so that when an
email is sent to <YourSSO>@apichallenges.mailgun.org it calls your
Challenge 1 script that builds 3 servers.
- Assumptions:
Assume that challenge 1 can be kicked off by accessing
http://cldsrvr.com/challenge1
- We have an internal mailgun account for this challenge.
"""
import os
import sys
import string
import pyrax
import pyrax.exceptions as exc
import time
# Challenge left unimplemented: print a notice and exit successfully.
print("Ah, no time...")
sys.exit(0)
| {
"content_hash": "145c4c5aaf390dbc2e00d340928fbf27",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 75,
"avg_line_length": 25.3,
"alnum_prop": 0.7529644268774703,
"repo_name": "theneykov/rax-api-scripts",
"id": "74aeb3ec84f198596de5005140977c6d5a0a2c5a",
"size": "1122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "challenge12.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "49748"
}
],
"symlink_target": ""
} |
"""
A connection to a hypervisor through libvirt.
Supports KVM, LXC, QEMU, UML, and XEN.
"""
import contextlib
import errno
import functools
import glob
import mmap
import os
import random
import shutil
import socket
import sys
import tempfile
import threading
import time
import uuid
from xml.dom import minidom
import eventlet
from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
from eventlet import tpool
from eventlet import util as eventlet_util
from lxml import etree
from oslo.concurrency import processutils
from oslo.config import cfg
from oslo.serialization import jsonutils
from oslo.utils import encodeutils
from oslo.utils import excutils
from oslo.utils import importutils
from oslo.utils import strutils
from oslo.utils import timeutils
from oslo.utils import units
import six
from nova.api.metadata import base as instance_metadata
from nova import block_device
from nova.compute import arch
from nova.compute import flavors
from nova.compute import hvtype
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_mode
from nova.console import serial as serial_console
from nova.console import type as ctype
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import image
from nova.network import model as network_model
from nova import objects
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.pci import manager as pci_manager
from nova.pci import utils as pci_utils
from nova.pci import whitelist as pci_whitelist
from nova import rpc
from nova import utils
from nova import version
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt import diagnostics
from nova.virt.disk import api as disk
from nova.virt.disk.vfs import guestfs
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import firewall
from nova.virt import hardware
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import dmcrypt
from nova.virt.libvirt import firewall as libvirt_firewall
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import lvm
from nova.virt.libvirt import rbd_utils
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt import vif as libvirt_vif
from nova.virt import netutils
from nova.virt import watchdog_actions
from nova import volume
from nova.volume import encryptors
# Grab the un-monkey-patched threading/Queue modules: the libvirt event
# loop must run in a real native thread, not an eventlet greenthread.
native_threading = patcher.original("threading")
native_Queue = patcher.original("Queue")
# The libvirt module is imported lazily (see LibvirtDriver.__init__).
libvirt = None
LOG = logging.getLogger(__name__)
# Configuration options for the libvirt driver, registered below under the
# [libvirt] group.  Defaults and help strings are user-facing.
libvirt_opts = [
    cfg.StrOpt('rescue_image_id',
               help='Rescue ami image. This will not be used if an image id '
                    'is provided by the user.'),
    cfg.StrOpt('rescue_kernel_id',
               help='Rescue aki image'),
    cfg.StrOpt('rescue_ramdisk_id',
               help='Rescue ari image'),
    cfg.StrOpt('virt_type',
               default='kvm',
               help='Libvirt domain type (valid options are: '
                    'kvm, lxc, qemu, uml, xen)'),
    cfg.StrOpt('connection_uri',
               default='',
               help='Override the default libvirt URI '
                    '(which is dependent on virt_type)'),
    cfg.BoolOpt('inject_password',
                default=False,
                help='Inject the admin password at boot time, '
                     'without an agent.'),
    cfg.BoolOpt('inject_key',
                default=False,
                help='Inject the ssh public key at boot time'),
    cfg.IntOpt('inject_partition',
               default=-2,
               help='The partition to inject to : '
                    '-2 => disable, -1 => inspect (libguestfs only), '
                    '0 => not partitioned, >0 => partition number'),
    cfg.BoolOpt('use_usb_tablet',
                default=True,
                help='Sync virtual and real mouse cursors in Windows VMs'),
    cfg.StrOpt('live_migration_uri',
               default="qemu+tcp://%s/system",
               help='Migration target URI '
                    '(any included "%s" is replaced with '
                    'the migration target hostname)'),
    cfg.StrOpt('live_migration_flag',
               default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
                       'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED',
               help='Migration flags to be set for live migration'),
    cfg.StrOpt('block_migration_flag',
               default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
                       'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, '
                       'VIR_MIGRATE_NON_SHARED_INC',
               help='Migration flags to be set for block migration'),
    cfg.IntOpt('live_migration_bandwidth',
               default=0,
               help='Maximum bandwidth to be used during migration, in Mbps'),
    cfg.StrOpt('snapshot_image_format',
               help='Snapshot image format (valid options are : '
                    'raw, qcow2, vmdk, vdi). '
                    'Defaults to same as source image'),
    cfg.ListOpt('volume_drivers',
                default=[
                    'iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver',
                    'iser=nova.virt.libvirt.volume.LibvirtISERVolumeDriver',
                    'local=nova.virt.libvirt.volume.LibvirtVolumeDriver',
                    'fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver',
                    'rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
                    'sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
                    'nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver',
                    'aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver',
                    'glusterfs='
                    'nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver',
                    'fibre_channel=nova.virt.libvirt.volume.'
                    'LibvirtFibreChannelVolumeDriver',
                    'scality='
                    'nova.virt.libvirt.volume.LibvirtScalityVolumeDriver',
                ],
                help='DEPRECATED. Libvirt handlers for remote volumes. '
                     'This option is deprecated and will be removed in the '
                     'Kilo release.'),
    cfg.StrOpt('disk_prefix',
               help='Override the default disk prefix for the devices attached'
                    ' to a server, which is dependent on virt_type. '
                    '(valid options are: sd, xvd, uvd, vd)'),
    cfg.IntOpt('wait_soft_reboot_seconds',
               default=120,
               help='Number of seconds to wait for instance to shut down after'
                    ' soft reboot request is made. We fall back to hard reboot'
                    ' if instance does not shutdown within this window.'),
    cfg.StrOpt('cpu_mode',
               help='Set to "host-model" to clone the host CPU feature flags; '
                    'to "host-passthrough" to use the host CPU model exactly; '
                    'to "custom" to use a named CPU model; '
                    'to "none" to not set any CPU model. '
                    'If virt_type="kvm|qemu", it will default to '
                    '"host-model", otherwise it will default to "none"'),
    cfg.StrOpt('cpu_model',
               help='Set to a named libvirt CPU model (see names listed '
                    'in /usr/share/libvirt/cpu_map.xml). Only has effect if '
                    'cpu_mode="custom" and virt_type="kvm|qemu"'),
    cfg.StrOpt('snapshots_directory',
               default='$instances_path/snapshots',
               help='Location where libvirt driver will store snapshots '
                    'before uploading them to image service'),
    cfg.StrOpt('xen_hvmloader_path',
               default='/usr/lib/xen/boot/hvmloader',
               help='Location where the Xen hvmloader is kept'),
    cfg.ListOpt('disk_cachemodes',
                default=[],
                help='Specific cachemodes to use for different disk types '
                     'e.g: file=directsync,block=none'),
    cfg.StrOpt('rng_dev_path',
               help='A path to a device that will be used as source of '
                    'entropy on the host. Permitted options are: '
                    '/dev/random or /dev/hwrng'),
    cfg.ListOpt('hw_machine_type',
                help='For qemu or KVM guests, set this option to specify '
                     'a default machine type per host architecture. '
                     'You can find a list of supported machine types '
                     'in your environment by checking the output of '
                     'the "virsh capabilities"command. The format of the '
                     'value for this config option is host-arch=machine-type. '
                     'For example: x86_64=machinetype1,armv7l=machinetype2'),
    cfg.StrOpt('sysinfo_serial',
               default='auto',
               help='The data source used to the populate the host "serial" '
                    'UUID exposed to guest in the virtual BIOS. Permitted '
                    'options are "hardware", "os", "none" or "auto" '
                    '(default).'),
    cfg.IntOpt('mem_stats_period_seconds',
               default=10,
               help='A number of seconds to memory usage statistics period. '
                    'Zero or negative value mean to disable memory usage '
                    'statistics.'),
    cfg.ListOpt('uid_maps',
                default=[],
                help='List of uid targets and ranges.'
                     'Syntax is guest-uid:host-uid:count'
                     'Maximum of 5 allowed.'),
    cfg.ListOpt('gid_maps',
                default=[],
                help='List of guid targets and ranges.'
                     'Syntax is guest-gid:host-gid:count'
                     'Maximum of 5 allowed.')
]
CONF = cfg.CONF
CONF.register_opts(libvirt_opts, 'libvirt')
# Options defined by other nova modules that this driver reads at runtime.
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('enabled', 'nova.compute.api',
                group='ephemeral_storage_encryption')
CONF.import_opt('cipher', 'nova.compute.api',
                group='ephemeral_storage_encryption')
CONF.import_opt('key_size', 'nova.compute.api',
                group='ephemeral_storage_encryption')
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
CONF.import_opt('server_proxyclient_address', 'nova.spice', group='spice')
CONF.import_opt('vcpu_pin_set', 'nova.virt.hardware')
CONF.import_opt('vif_plugging_is_fatal', 'nova.virt.driver')
CONF.import_opt('vif_plugging_timeout', 'nova.virt.driver')
CONF.import_opt('enabled', 'nova.console.serial', group='serial_console')
CONF.import_opt('proxyclient_address', 'nova.console.serial',
                group='serial_console')
CONF.import_opt('hw_disk_discard', 'nova.virt.libvirt.imagebackend',
                group='libvirt')
# Fully qualified class path of the default firewall driver.
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
    libvirt_firewall.__name__,
    libvirt_firewall.IptablesFirewallDriver.__name__)
# Upper bound on the amount of console log data returned (100 KiB).
MAX_CONSOLE_BYTES = 100 * units.Ki
# The libvirt driver will prefix any disable reason codes with this string.
DISABLE_PREFIX = 'AUTO: '
# Disable reason for the service which was enabled or disabled without reason
DISABLE_REASON_UNDEFINED = 'None'
# Guest config console string
CONSOLE = "console=tty0 console=ttyS0"
def patch_tpool_proxy():
    """Work around eventlet.tpool.Proxy failing on old-style classes.

    tpool.Proxy does not forward __str__()/__repr__() correctly for
    old-style classes (see bug #962840), so monkey-patch both methods
    to delegate straight to the wrapped object.
    """
    def _proxy_str(self):
        return str(self._obj)

    def _proxy_repr(self):
        return repr(self._obj)

    tpool.Proxy.__str__ = _proxy_str
    tpool.Proxy.__repr__ = _proxy_repr


patch_tpool_proxy()
# Local mirror of libvirt's virDomainState enum values, so the mapping
# below can be defined without touching the lazily-imported libvirt module.
VIR_DOMAIN_NOSTATE = 0
VIR_DOMAIN_RUNNING = 1
VIR_DOMAIN_BLOCKED = 2
VIR_DOMAIN_PAUSED = 3
VIR_DOMAIN_SHUTDOWN = 4
VIR_DOMAIN_SHUTOFF = 5
VIR_DOMAIN_CRASHED = 6
VIR_DOMAIN_PMSUSPENDED = 7
# Translation from libvirt domain state to nova power_state.
LIBVIRT_POWER_STATE = {
    VIR_DOMAIN_NOSTATE: power_state.NOSTATE,
    VIR_DOMAIN_RUNNING: power_state.RUNNING,
    # NOTE(maoy): The DOMAIN_BLOCKED state is only valid in Xen.
    # It means that the VM is running and the vCPU is idle. So,
    # we map it to RUNNING
    VIR_DOMAIN_BLOCKED: power_state.RUNNING,
    VIR_DOMAIN_PAUSED: power_state.PAUSED,
    # NOTE(maoy): The libvirt API doc says that DOMAIN_SHUTDOWN
    # means the domain is being shut down. So technically the domain
    # is still running. SHUTOFF is the real powered off state.
    # But we will map both to SHUTDOWN anyway.
    # http://libvirt.org/html/libvirt-libvirt.html
    VIR_DOMAIN_SHUTDOWN: power_state.SHUTDOWN,
    VIR_DOMAIN_SHUTOFF: power_state.SHUTDOWN,
    VIR_DOMAIN_CRASHED: power_state.CRASHED,
    VIR_DOMAIN_PMSUSPENDED: power_state.SUSPENDED,
}
# Minimum libvirt version the driver supports at all.
MIN_LIBVIRT_VERSION = (0, 9, 11)
# When the above version matches/exceeds this version
# delete it & corresponding code using it
MIN_LIBVIRT_DEVICE_CALLBACK_VERSION = (1, 1, 1)
# Live snapshot requirements
REQ_HYPERVISOR_LIVESNAPSHOT = "QEMU"
# TODO(sdague): this should be 1.0.0, but hacked to set 1.3.0 until
# https://bugs.launchpad.net/nova/+bug/1334398
# can be diagnosed & resolved
MIN_LIBVIRT_LIVESNAPSHOT_VERSION = (1, 3, 0)
MIN_QEMU_LIVESNAPSHOT_VERSION = (1, 3, 0)
# block size tuning requirements
MIN_LIBVIRT_BLOCKIO_VERSION = (0, 10, 2)
# BlockJobInfo management requirement
MIN_LIBVIRT_BLOCKJOBINFO_VERSION = (1, 1, 1)
# Relative block commit (feature is detected,
# this version is only used for messaging)
MIN_LIBVIRT_BLOCKCOMMIT_RELATIVE_VERSION = (1, 2, 7)
# libvirt discard feature
MIN_LIBVIRT_DISCARD_VERSION = (1, 0, 6)
MIN_QEMU_DISCARD_VERSION = (1, 6, 0)
REQ_HYPERVISOR_DISCARD = "QEMU"
# libvirt numa topology support
MIN_LIBVIRT_NUMA_TOPOLOGY_VERSION = (1, 0, 4)
def libvirt_error_handler(context, err):
    """Swallow libvirt error reports.

    NOTE(review): presumably registered as libvirt's global error
    callback elsewhere in this module -- confirm at the registration
    site.
    """
    # Just ignore instead of default outputting to stderr.
    pass
class LibvirtDriver(driver.ComputeDriver):
capabilities = {
"has_imagecache": True,
"supports_recreate": True,
}
    def __init__(self, virtapi, read_only=False):
        """Initialize driver state; the libvirt connection itself is
        established lazily (self._wrapped_conn starts as None).
        """
        super(LibvirtDriver, self).__init__(virtapi)
        global libvirt
        if libvirt is None:
            # Imported lazily so the module can be loaded on hosts that
            # do not have the libvirt python bindings installed.
            libvirt = importutils.import_module('libvirt')
        self._skip_list_all_domains = False
        # iSCSI initiator name and fibre-channel WWNN/WWPN caches.
        self._initiator = None
        self._fc_wwnns = None
        self._fc_wwpns = None
        # Lazily-opened libvirt connection, guarded by its lock.
        self._wrapped_conn = None
        self._wrapped_conn_lock = threading.Lock()
        self._caps = None
        self._vcpu_total = 0
        self.read_only = read_only
        self.firewall_driver = firewall.load_driver(
            DEFAULT_FIREWALL_DRIVER,
            self.virtapi,
            get_connection=self._get_connection)
        self.vif_driver = libvirt_vif.LibvirtGenericVIFDriver(
            self._get_connection)
        self.volume_drivers = driver.driver_dict_from_config(
            CONF.libvirt.volume_drivers, self)
        self.dev_filter = pci_whitelist.get_pci_devices_filter()
        self._event_queue = None
        self._disk_cachemode = None
        self.image_cache_manager = imagecache.ImageCacheManager()
        self.image_backend = imagebackend.Backend(CONF.use_cow_images)
        # Per-disk-type cache modes parsed below from config.
        self.disk_cachemodes = {}
        self.valid_cachemodes = ["default",
                                 "none",
                                 "writethrough",
                                 "writeback",
                                 "directsync",
                                 "unsafe",
                                 ]
        self._conn_supports_start_paused = CONF.libvirt.virt_type in ('kvm',
                                                                      'qemu')
        # Parse "disk_type=cache_mode" entries; invalid modes are only
        # warned about and skipped.
        for mode_str in CONF.libvirt.disk_cachemodes:
            disk_type, sep, cache_mode = mode_str.partition('=')
            if cache_mode not in self.valid_cachemodes:
                LOG.warn(_LW('Invalid cachemode %(cache_mode)s specified '
                             'for disk type %(disk_type)s.'),
                         {'cache_mode': cache_mode, 'disk_type': disk_type})
                continue
            self.disk_cachemodes[disk_type] = cache_mode
        self._volume_api = volume.API()
        self._image_api = image.API()
        self._events_delayed = {}
        # Note(toabctl): During a reboot of a Xen domain, STOPPED and
        #                STARTED events are sent. To prevent shutting
        #                down the domain during a reboot, delay the
        #                STOPPED lifecycle event some seconds.
        if CONF.libvirt.virt_type == "xen":
            self._lifecycle_delay = 15
        else:
            self._lifecycle_delay = 0
        # Map the sysinfo_serial config value to the function producing
        # the host "serial" UUID; reject unknown settings early.
        sysinfo_serial_funcs = {
            'none': lambda: None,
            'hardware': self._get_host_sysinfo_serial_hardware,
            'os': self._get_host_sysinfo_serial_os,
            'auto': self._get_host_sysinfo_serial_auto,
        }
        self._sysinfo_serial_func = sysinfo_serial_funcs.get(
            CONF.libvirt.sysinfo_serial)
        if not self._sysinfo_serial_func:
            raise exception.NovaException(
                _("Unexpected sysinfo_serial setting '%(actual)s'. "
                  "Permitted values are %(expect)s'") %
                {'actual': CONF.libvirt.sysinfo_serial,
                 'expect': ', '.join("'%s'" % k for k in
                                     sysinfo_serial_funcs.keys())})
@property
def disk_cachemode(self):
if self._disk_cachemode is None:
# We prefer 'none' for consistent performance, host crash
# safety & migration correctness by avoiding host page cache.
# Some filesystems (eg GlusterFS via FUSE) don't support
# O_DIRECT though. For those we fallback to 'writethrough'
# which gives host crash safety, and is safe for migration
# provided the filesystem is cache coherent (cluster filesystems
# typically are, but things like NFS are not).
self._disk_cachemode = "none"
if not self._supports_direct_io(CONF.instances_path):
self._disk_cachemode = "writethrough"
return self._disk_cachemode
def _set_cache_mode(self, conf):
"""Set cache mode on LibvirtConfigGuestDisk object."""
try:
source_type = conf.source_type
driver_cache = conf.driver_cache
except AttributeError:
return
cache_mode = self.disk_cachemodes.get(source_type,
driver_cache)
conf.driver_cache = cache_mode
@staticmethod
def _conn_has_min_version(conn, lv_ver=None, hv_ver=None, hv_type=None):
try:
if lv_ver is not None:
libvirt_version = conn.getLibVersion()
if libvirt_version < utils.convert_version_to_int(lv_ver):
return False
if hv_ver is not None:
hypervisor_version = conn.getVersion()
if hypervisor_version < utils.convert_version_to_int(hv_ver):
return False
if hv_type is not None:
hypervisor_type = conn.getType()
if hypervisor_type != hv_type:
return False
return True
except Exception:
return False
def _has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._conn_has_min_version(self._conn, lv_ver, hv_ver, hv_type)
    def _native_thread(self):
        """Receives async events coming in from libvirtd.
        This is a native thread which runs the default
        libvirt event loop implementation. This processes
        any incoming async events from libvirtd and queues
        them for later dispatch. This thread is only
        permitted to use libvirt python APIs, and the
        driver.queue_event method. In particular any use
        of logging is forbidden, since it will confuse
        eventlet's greenthread integration
        """
        while True:
            # Blocks inside libvirt until an event arrives, then invokes
            # the registered callbacks (which queue the event for the
            # green dispatch thread).
            libvirt.virEventRunDefaultImpl()
    def _dispatch_thread(self):
        """Dispatches async events coming in from libvirtd.
        This is a green thread which waits for events to
        arrive from the libvirt event loop thread. This
        then dispatches the events to the compute manager.
        """
        while True:
            # Each call blocks until the native thread signals via the
            # notification pipe, then drains the queue.
            self._dispatch_events()
@staticmethod
def _event_lifecycle_callback(conn, dom, event, detail, opaque):
"""Receives lifecycle events from libvirt.
NB: this method is executing in a native thread, not
an eventlet coroutine. It can only invoke other libvirt
APIs, or use self.queue_event(). Any use of logging APIs
in particular is forbidden.
"""
self = opaque
uuid = dom.UUIDString()
transition = None
if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
transition = virtevent.EVENT_LIFECYCLE_STOPPED
elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
transition = virtevent.EVENT_LIFECYCLE_STARTED
elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
transition = virtevent.EVENT_LIFECYCLE_PAUSED
elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
transition = virtevent.EVENT_LIFECYCLE_RESUMED
if transition is not None:
self._queue_event(virtevent.LifecycleEvent(uuid, transition))
def _queue_event(self, event):
"""Puts an event on the queue for dispatch.
This method is called by the native event thread to
put events on the queue for later dispatch by the
green thread. Any use of logging APIs is forbidden.
"""
if self._event_queue is None:
return
# Queue the event...
self._event_queue.put(event)
# ...then wakeup the green thread to dispatch it
c = ' '.encode()
self._event_notify_send.write(c)
self._event_notify_send.flush()
    def _dispatch_events(self):
        """Wait for & dispatch events from native thread

        Blocks until native thread indicates some events
        are ready. Then dispatches all queued events.
        """
        # Wait to be notified that there are some
        # events pending
        try:
            _c = self._event_notify_recv.read(1)
            assert _c
        except ValueError:
            return  # will be raised when pipe is closed
        # Process as many events as possible without
        # blocking
        last_close_event = None
        while not self._event_queue.empty():
            try:
                event = self._event_queue.get(block=False)
                if isinstance(event, virtevent.LifecycleEvent):
                    # call possibly with delay
                    self._event_delayed_cleanup(event)
                    self._event_emit_delayed(event)
                elif 'conn' in event and 'reason' in event:
                    # Connection-closed dict queued by _close_callback;
                    # only the most recent one matters.
                    last_close_event = event
            except native_Queue.Empty:
                pass
        if last_close_event is None:
            return
        conn = last_close_event['conn']
        # get_new_connection may already have disabled the host,
        # in which case _wrapped_conn is None.
        with self._wrapped_conn_lock:
            if conn == self._wrapped_conn:
                reason = last_close_event['reason']
                _error = _("Connection to libvirt lost: %s") % reason
                LOG.warn(_error)
                self._wrapped_conn = None
                # Disable compute service to avoid
                # new instances of being scheduled on this host.
                self._set_host_enabled(False, disable_reason=_error)
def _event_delayed_cleanup(self, event):
"""Cleanup possible delayed stop events."""
if (event.transition == virtevent.EVENT_LIFECYCLE_STARTED or
event.transition == virtevent.EVENT_LIFECYCLE_RESUMED):
if event.uuid in self._events_delayed.keys():
self._events_delayed[event.uuid].cancel()
self._events_delayed.pop(event.uuid, None)
LOG.debug("Removed pending event for %s due to "
"lifecycle event", event.uuid)
def _event_emit_delayed(self, event):
"""Emit events - possibly delayed."""
def event_cleanup(gt, *args, **kwargs):
"""Callback function for greenthread. Called
to cleanup the _events_delayed dictionary when a event
was called.
"""
event = args[0]
self._events_delayed.pop(event.uuid, None)
if self._lifecycle_delay > 0:
if event.uuid not in self._events_delayed.keys():
id_ = greenthread.spawn_after(self._lifecycle_delay,
self.emit_event, event)
self._events_delayed[event.uuid] = id_
# add callback to cleanup self._events_delayed dict after
# event was called
id_.link(event_cleanup, event)
else:
self.emit_event(event)
    def _init_events_pipe(self):
        """Create a self-pipe for the native thread to synchronize on.

        This code is taken from the eventlet tpool module, under terms
        of the Apache License v2.0.
        """
        # Queue filled by the native thread, drained by _dispatch_events.
        self._event_queue = native_Queue.Queue()
        try:
            rpipe, wpipe = os.pipe()
            self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0)
            self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0)
        except (ImportError, NotImplementedError):
            # This is Windows compatibility -- use a socket instead
            # of a pipe because pipes don't really exist on Windows.
            sock = eventlet_util.__original_socket__(socket.AF_INET,
                                                     socket.SOCK_STREAM)
            sock.bind(('localhost', 0))
            sock.listen(50)
            csock = eventlet_util.__original_socket__(socket.AF_INET,
                                                      socket.SOCK_STREAM)
            csock.connect(('localhost', sock.getsockname()[1]))
            nsock, addr = sock.accept()
            self._event_notify_send = nsock.makefile('wb', 0)
            gsock = greenio.GreenSocket(csock)
            self._event_notify_recv = gsock.makefile('rb', 0)
def _init_events(self):
"""Initializes the libvirt events subsystem.
This requires running a native thread to provide the
libvirt event loop integration. This forwards events
to a green thread which does the actual dispatching.
"""
self._init_events_pipe()
LOG.debug("Starting native event thread")
event_thread = native_threading.Thread(target=self._native_thread)
event_thread.setDaemon(True)
event_thread.start()
LOG.debug("Starting green dispatch thread")
eventlet.spawn(self._dispatch_thread)
def _do_quality_warnings(self):
"""Warn about untested driver configurations.
This will log a warning message about untested driver or host arch
configurations to indicate to administrators that the quality is
unknown. Currently, only qemu or kvm on intel 32- or 64-bit systems
is tested upstream.
"""
caps = self._get_host_capabilities()
hostarch = caps.host.cpu.arch
if (CONF.libvirt.virt_type not in ('qemu', 'kvm') or
hostarch not in (arch.I686, arch.X86_64)):
LOG.warn(_LW('The libvirt driver is not tested on '
'%(type)s/%(arch)s by the OpenStack project and '
'thus its quality can not be ensured. For more '
'information, see: https://wiki.openstack.org/wiki/'
'HypervisorSupportMatrix'),
{'type': CONF.libvirt.virt_type, 'arch': hostarch})
    def init_host(self, host):
        """Initialise the driver at compute service start-up.

        Registers libvirt error/event handlers, emits configuration
        quality warnings, enforces the minimum libvirt version and
        starts the event dispatch threads. Ordering matters: the error
        handler must be registered before any libvirt connection is made.
        """
        # NOTE(dkliban): Error handler needs to be registered before libvirt
        # connection is used for the first time. Otherwise, the
        # handler does not get registered.
        libvirt.registerErrorHandler(libvirt_error_handler, None)
        libvirt.virEventRegisterDefaultImpl()
        self._do_quality_warnings()
        if (CONF.libvirt.virt_type == 'lxc' and
                not (CONF.libvirt.uid_maps and CONF.libvirt.gid_maps)):
            LOG.warn(_LW("Running libvirt-lxc without user namespaces is "
                         "dangerous. Containers spawned by Nova will be run "
                         "as the host's root user. It is highly suggested "
                         "that user namespaces be used in a public or "
                         "multi-tenant environment."))
        # Stop libguestfs using KVM unless we're also configured
        # to use this. This solves problem where people need to
        # stop Nova use of KVM because nested-virt is broken
        if CONF.libvirt.virt_type != "kvm":
            guestfs.force_tcg()
        if not self._has_min_version(MIN_LIBVIRT_VERSION):
            major = MIN_LIBVIRT_VERSION[0]
            minor = MIN_LIBVIRT_VERSION[1]
            micro = MIN_LIBVIRT_VERSION[2]
            raise exception.NovaException(
                _('Nova requires libvirt version '
                  '%(major)i.%(minor)i.%(micro)i or greater.') %
                {'major': major, 'minor': minor, 'micro': micro})
        self._init_events()
    def _get_new_connection(self):
        """Open a new libvirt connection and register event callbacks.

        Must be called with _wrapped_conn_lock held. Enables or
        disables the compute service depending on whether the
        connection attempt succeeded. Event registration failures are
        logged but non-fatal.
        """
        # call with _wrapped_conn_lock held
        LOG.debug('Connecting to libvirt: %s', self.uri())
        wrapped_conn = None
        try:
            wrapped_conn = self._connect(self.uri(), self.read_only)
        finally:
            # Enabling the compute service, in case it was disabled
            # since the connection was successful.
            disable_reason = DISABLE_REASON_UNDEFINED
            if not wrapped_conn:
                disable_reason = 'Failed to connect to libvirt'
            self._set_host_enabled(bool(wrapped_conn), disable_reason)
        self._wrapped_conn = wrapped_conn
        self._skip_list_all_domains = False
        try:
            LOG.debug("Registering for lifecycle events %s", self)
            wrapped_conn.domainEventRegisterAny(
                None,
                libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
                self._event_lifecycle_callback,
                self)
        except Exception as e:
            LOG.warn(_LW("URI %(uri)s does not support events: %(error)s"),
                     {'uri': self.uri(), 'error': e})
        try:
            LOG.debug("Registering for connection events: %s", str(self))
            wrapped_conn.registerCloseCallback(self._close_callback, None)
        except (TypeError, AttributeError) as e:
            # NOTE: The registerCloseCallback of python-libvirt 1.0.1+
            # is defined with 3 arguments, and the above registerClose-
            # Callback succeeds. However, the one of python-libvirt 1.0.0
            # is defined with 4 arguments and TypeError happens here.
            # Then python-libvirt 0.9 does not define a method register-
            # CloseCallback.
            LOG.debug("The version of python-libvirt does not support "
                      "registerCloseCallback or is too old: %s", e)
        except libvirt.libvirtError as e:
            LOG.warn(_LW("URI %(uri)s does not support connection"
                         " events: %(error)s"),
                     {'uri': self.uri(), 'error': e})
        return wrapped_conn
    def _get_connection(self):
        """Return the cached libvirt connection, creating or replacing
        it under _wrapped_conn_lock when absent or no longer alive.
        """
        # multiple concurrent connections are protected by _wrapped_conn_lock
        with self._wrapped_conn_lock:
            wrapped_conn = self._wrapped_conn
            if not wrapped_conn or not self._test_connection(wrapped_conn):
                wrapped_conn = self._get_new_connection()
        return wrapped_conn
    # Accessing self._conn transparently (re)establishes the connection.
    _conn = property(_get_connection)
def _close_callback(self, conn, reason, opaque):
close_info = {'conn': conn, 'reason': reason}
self._queue_event(close_info)
@staticmethod
def _test_connection(conn):
try:
conn.getLibVersion()
return True
except libvirt.libvirtError as e:
if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
libvirt.VIR_ERR_INTERNAL_ERROR) and
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
libvirt.VIR_FROM_RPC)):
LOG.debug('Connection to libvirt broke')
return False
raise
@staticmethod
def uri():
if CONF.libvirt.virt_type == 'uml':
uri = CONF.libvirt.connection_uri or 'uml:///system'
elif CONF.libvirt.virt_type == 'xen':
uri = CONF.libvirt.connection_uri or 'xen:///'
elif CONF.libvirt.virt_type == 'lxc':
uri = CONF.libvirt.connection_uri or 'lxc:///'
else:
uri = CONF.libvirt.connection_uri or 'qemu:///system'
return uri
@staticmethod
def _connect_auth_cb(creds, opaque):
if len(creds) == 0:
return 0
raise exception.NovaException(
_("Can not handle authentication request for %d credentials")
% len(creds))
    @staticmethod
    def _connect(uri, read_only):
        """Open a libvirt connection to *uri* via openAuth.

        Raises HypervisorUnavailable (after emitting an error
        notification) when the connection fails.
        """
        # Accept every credential type libvirt may offer; the callback
        # rejects any request that actually asks for credentials.
        auth = [[libvirt.VIR_CRED_AUTHNAME,
                 libvirt.VIR_CRED_ECHOPROMPT,
                 libvirt.VIR_CRED_REALM,
                 libvirt.VIR_CRED_PASSPHRASE,
                 libvirt.VIR_CRED_NOECHOPROMPT,
                 libvirt.VIR_CRED_EXTERNAL],
                LibvirtDriver._connect_auth_cb,
                None]
        try:
            flags = 0
            if read_only:
                flags = libvirt.VIR_CONNECT_RO
            # tpool.proxy_call creates a native thread. Due to limitations
            # with eventlet locking we cannot use the logging API inside
            # the called function.
            return tpool.proxy_call(
                (libvirt.virDomain, libvirt.virConnect),
                libvirt.openAuth, uri, auth, flags)
        except libvirt.libvirtError as ex:
            LOG.exception(_LE("Connection to libvirt failed: %s"), ex)
            payload = dict(ip=LibvirtDriver.get_host_ip_addr(),
                           method='_connect',
                           reason=ex)
            rpc.get_notifier('compute').error(nova_context.get_admin_context(),
                                              'compute.libvirt.error',
                                              payload)
            raise exception.HypervisorUnavailable(host=CONF.host)
def instance_exists(self, instance):
"""Efficient override of base instance_exists method."""
try:
self._lookup_by_name(instance.name)
return True
except exception.NovaException:
return False
def _list_instance_domains_fast(self, only_running=True):
# The modern (>= 0.9.13) fast way - 1 single API call for all domains
flags = libvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE
if not only_running:
flags = flags | libvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE
return self._conn.listAllDomains(flags)
    def _list_instance_domains_slow(self, only_running=True):
        """Legacy (libvirt < 0.9.13) domain listing.

        Makes O(n) API calls: one lookup per active domain id, plus one
        per defined (inactive) domain name when only_running is False.
        """
        uuids = []
        doms = []
        # Redundant numOfDomains check is for libvirt bz #836647
        if self._conn.numOfDomains() > 0:
            for id in self._conn.listDomainsID():
                try:
                    dom = self._lookup_by_id(id)
                    doms.append(dom)
                    uuids.append(dom.UUIDString())
                except exception.InstanceNotFound:
                    # Domain vanished between listing and lookup; skip it.
                    continue
        if only_running:
            return doms
        for name in self._conn.listDefinedDomains():
            try:
                dom = self._lookup_by_name(name)
                # Avoid duplicating domains already found via the
                # active-id listing above.
                if dom.UUIDString() not in uuids:
                    doms.append(dom)
            except exception.InstanceNotFound:
                continue
        return doms
    def _list_instance_domains(self, only_running=True, only_guests=True):
        """Get a list of libvirt.Domain objects for nova instances

        :param only_running: True to only return running instances
        :param only_guests: True to filter out any host domain (eg Dom-0)

        Query libvirt to a get a list of all libvirt.Domain objects
        that correspond to nova instances. If the only_running parameter
        is true this list will only include active domains, otherwise
        inactive domains will be included too. If the only_guests parameter
        is true the list will have any "host" domain (aka Xen Domain-0)
        filtered out.

        :returns: list of libvirt.Domain objects
        """
        if not self._skip_list_all_domains:
            try:
                alldoms = self._list_instance_domains_fast(only_running)
            except (libvirt.libvirtError, AttributeError) as ex:
                LOG.info(_LI("Unable to use bulk domain list APIs, "
                             "falling back to slow code path: %(ex)s"),
                         {'ex': ex})
                # Remember the failure so subsequent calls go straight
                # to the slow path.
                self._skip_list_all_domains = True
        if self._skip_list_all_domains:
            # Old libvirt, or a libvirt driver which doesn't
            # implement the new API
            alldoms = self._list_instance_domains_slow(only_running)
        doms = []
        for dom in alldoms:
            # Domain id 0 is the host domain (eg Xen Dom-0).
            if only_guests and dom.ID() == 0:
                continue
            doms.append(dom)
        return doms
def list_instances(self):
names = []
for dom in self._list_instance_domains(only_running=False):
names.append(dom.name())
return names
def list_instance_uuids(self):
uuids = []
for dom in self._list_instance_domains(only_running=False):
uuids.append(dom.UUIDString())
return uuids
    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks.

        :param instance: the instance the VIFs belong to
        :param network_info: iterable of VIFs to plug
        """
        for vif in network_info:
            self.vif_driver.plug(instance, vif)
def _unplug_vifs(self, instance, network_info, ignore_errors):
"""Unplug VIFs from networks."""
for vif in network_info:
try:
self.vif_driver.unplug(instance, vif)
except exception.NovaException:
if not ignore_errors:
raise
    def unplug_vifs(self, instance, network_info):
        """Unplug VIFs from networks, propagating any unplug errors."""
        self._unplug_vifs(instance, network_info, False)
def _teardown_container(self, instance):
inst_path = libvirt_utils.get_instance_path(instance)
container_dir = os.path.join(inst_path, 'rootfs')
rootfs_dev = instance.system_metadata.get('rootfs_device_name')
disk.teardown_container(container_dir, rootfs_dev)
    def _destroy(self, instance):
        """Forcibly stop the instance's libvirt domain via
        virDomain.destroy(), then wait until the guest reports
        SHUTDOWN; if the domain was booted again (new domain id) while
        we waited, destroy it again.
        """
        try:
            virt_dom = self._lookup_by_name(instance['name'])
        except exception.InstanceNotFound:
            virt_dom = None
        # If the instance is already terminated, we're still happy
        # Otherwise, destroy it
        old_domid = -1
        if virt_dom is not None:
            try:
                old_domid = virt_dom.ID()
                virt_dom.destroy()
            except libvirt.libvirtError as e:
                is_okay = False
                errcode = e.get_error_code()
                if errcode == libvirt.VIR_ERR_NO_DOMAIN:
                    # Domain already gone. This can safely be ignored.
                    is_okay = True
                elif errcode == libvirt.VIR_ERR_OPERATION_INVALID:
                    # If the instance is already shut off, we get this:
                    # Code=55 Error=Requested operation is not valid:
                    # domain is not running
                    state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
                    if state == power_state.SHUTDOWN:
                        is_okay = True
                elif errcode == libvirt.VIR_ERR_OPERATION_TIMEOUT:
                    LOG.warn(_LW("Cannot destroy instance, operation time "
                                 "out"),
                             instance=instance)
                    reason = _("operation time out")
                    raise exception.InstancePowerOffFailure(reason=reason)
                if not is_okay:
                    with excutils.save_and_reraise_exception():
                        LOG.error(_LE('Error from libvirt during destroy. '
                                      'Code=%(errcode)s Error=%(e)s'),
                                  {'errcode': errcode, 'e': e},
                                  instance=instance)

        def _wait_for_destroy(expected_domid):
            """Called at an interval until the VM is gone."""
            # NOTE(vish): If the instance disappears during the destroy
            # we ignore it so the cleanup can still be
            # attempted because we would prefer destroy to
            # never fail.
            try:
                dom_info = self.get_info(instance)
                state = dom_info.state
                new_domid = dom_info.id
            except exception.InstanceNotFound:
                LOG.warning(_LW("During wait destroy, instance disappeared."),
                            instance=instance)
                raise loopingcall.LoopingCallDone()
            if state == power_state.SHUTDOWN:
                LOG.info(_LI("Instance destroyed successfully."),
                         instance=instance)
                raise loopingcall.LoopingCallDone()
            # NOTE(wangpan): If the instance was booted again after destroy,
            # this may be a endless loop, so check the id of
            # domain here, if it changed and the instance is
            # still running, we should destroy it again.
            # see https://bugs.launchpad.net/nova/+bug/1111213 for more details
            if new_domid != expected_domid:
                LOG.info(_LI("Instance may be started again."),
                         instance=instance)
                # Communicated back to the outer scope via the shared
                # kwargs dict (closure).
                kwargs['is_running'] = True
                raise loopingcall.LoopingCallDone()
        kwargs = {'is_running': False}
        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_destroy,
                                                     old_domid)
        timer.start(interval=0.5).wait()
        if kwargs['is_running']:
            LOG.info(_LI("Going to destroy instance again."),
                     instance=instance)
            self._destroy(instance)
        else:
            # NOTE(GuanQiang): teardown container to avoid resource leak
            if CONF.libvirt.virt_type == 'lxc':
                self._teardown_container(instance)
    def destroy(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None):
        """Destroy the instance's domain, then clean up its resources
        (VIFs, volumes, disks, firewall rules) via cleanup().
        """
        self._destroy(instance)
        self.cleanup(context, instance, network_info, block_device_info,
                     destroy_disks, migrate_data)
    def _undefine_domain(self, instance):
        """Remove the libvirt domain definition for *instance*, coping
        with older libvirt versions and managed save images. A missing
        domain is silently ignored.
        """
        try:
            virt_dom = self._lookup_by_name(instance['name'])
        except exception.InstanceNotFound:
            virt_dom = None
        if virt_dom:
            try:
                try:
                    virt_dom.undefineFlags(
                        libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)
                except libvirt.libvirtError:
                    LOG.debug("Error from libvirt during undefineFlags."
                              " Retrying with undefine", instance=instance)
                    virt_dom.undefine()
            except AttributeError:
                # NOTE(vish): Older versions of libvirt don't support
                # undefine flags, so attempt to do the
                # right thing.
                try:
                    if virt_dom.hasManagedSaveImage(0):
                        virt_dom.managedSaveRemove(0)
                except AttributeError:
                    # Even hasManagedSaveImage is missing on very old
                    # libvirt; plain undefine is all we can do.
                    pass
                virt_dom.undefine()
            except libvirt.libvirtError as e:
                with excutils.save_and_reraise_exception():
                    errcode = e.get_error_code()
                    LOG.error(_LE('Error from libvirt during undefine. '
                                  'Code=%(errcode)s Error=%(e)s'),
                              {'errcode': errcode, 'e': e}, instance=instance)
    def cleanup(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None, destroy_vifs=True):
        """Tear down resources left behind by a destroyed instance:
        VIFs, firewall rules, attached volumes (and their encryptors),
        LVM/RBD disks, instance files, serial console ports, and
        finally the libvirt domain definition.
        """
        if destroy_vifs:
            self._unplug_vifs(instance, network_info, True)
        retry = True
        while retry:
            try:
                self.firewall_driver.unfilter_instance(instance,
                                                       network_info=network_info)
            except libvirt.libvirtError as e:
                try:
                    state = self.get_info(instance).state
                except exception.InstanceNotFound:
                    state = power_state.SHUTDOWN
                if state != power_state.SHUTDOWN:
                    # Unfiltering can fail while the guest still runs;
                    # destroy it and retry the unfilter.
                    LOG.warn(_LW("Instance may be still running, destroy "
                                 "it again."), instance=instance)
                    self._destroy(instance)
                else:
                    retry = False
                    errcode = e.get_error_code()
                    LOG.exception(_LE('Error from libvirt during unfilter. '
                                      'Code=%(errcode)s Error=%(e)s'),
                                  {'errcode': errcode, 'e': e},
                                  instance=instance)
                    reason = "Error unfiltering instance."
                    raise exception.InstanceTerminationFailure(reason=reason)
            except Exception:
                retry = False
                raise
            else:
                retry = False
        # FIXME(wangpan): if the instance is booted again here, such as the
        # the soft reboot operation boot it here, it will
        # become "running deleted", should we check and destroy
        # it at the end of this method?
        # NOTE(vish): we disconnect from volumes regardless
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)
        for vol in block_device_mapping:
            connection_info = vol['connection_info']
            disk_dev = vol['mount_device']
            if disk_dev is not None:
                disk_dev = disk_dev.rpartition("/")[2]
            if ('data' in connection_info and
                    'volume_id' in connection_info['data']):
                volume_id = connection_info['data']['volume_id']
                encryption = encryptors.get_encryption_metadata(
                    context, self._volume_api, volume_id, connection_info)
                if encryption:
                    # The volume must be detached from the VM before
                    # disconnecting it from its encryptor. Otherwise, the
                    # encryptor may report that the volume is still in use.
                    encryptor = self._get_volume_encryptor(connection_info,
                                                           encryption)
                    encryptor.detach_volume(**encryption)
            try:
                self._disconnect_volume(connection_info, disk_dev)
            except Exception as exc:
                with excutils.save_and_reraise_exception() as ctxt:
                    if destroy_disks:
                        # Don't block on Volume errors if we're trying to
                        # delete the instance as we may be partially created
                        # or deleted
                        ctxt.reraise = False
                        LOG.warn(_LW("Ignoring Volume Error on vol %(vol_id)s "
                                     "during delete %(exc)s"),
                                 {'vol_id': vol.get('volume_id'), 'exc': exc},
                                 instance=instance)
        if destroy_disks:
            # NOTE(haomai): destroy volumes if needed
            if CONF.libvirt.images_type == 'lvm':
                self._cleanup_lvm(instance)
            if CONF.libvirt.images_type == 'rbd':
                self._cleanup_rbd(instance)
        if destroy_disks or (
                migrate_data and migrate_data.get('is_shared_block_storage',
                                                  False)):
            self._delete_instance_files(instance)
        if CONF.serial_console.enabled:
            for host, port in self._get_serial_ports_from_instance(instance):
                serial_console.release_port(host=host, port=port)
        self._undefine_domain(instance)
def _detach_encrypted_volumes(self, instance):
"""Detaches encrypted volumes attached to instance."""
disks = jsonutils.loads(self.get_instance_disk_info(instance['name']))
encrypted_volumes = filter(dmcrypt.is_encrypted,
[disk['path'] for disk in disks])
for path in encrypted_volumes:
dmcrypt.delete_volume(path)
def _get_serial_ports_from_instance(self, instance, mode=None):
"""Returns an iterator over serial port(s) configured on instance.
:param mode: Should be a value in (None, bind, connect)
"""
virt_dom = self._lookup_by_name(instance['name'])
xml = virt_dom.XMLDesc(0)
tree = etree.fromstring(xml)
for serial in tree.findall("./devices/serial"):
if serial.get("type") == "tcp":
source = serial.find("./source")
if source is not None:
if mode and source.get("mode") != mode:
continue
yield (source.get("host"), int(source.get("service")))
    @staticmethod
    def _get_rbd_driver():
        # Build an RBDDriver from the configured ceph pool/conf/user.
        return rbd_utils.RBDDriver(
            pool=CONF.libvirt.images_rbd_pool,
            ceph_conf=CONF.libvirt.images_rbd_ceph_conf,
            rbd_user=CONF.libvirt.rbd_user)
    def _cleanup_rbd(self, instance):
        """Delete all RBD volumes belonging to *instance*."""
        LibvirtDriver._get_rbd_driver().cleanup_volumes(instance)
    def _cleanup_lvm(self, instance):
        """Delete all LVM disks for given instance object."""
        if instance.get('ephemeral_key_uuid') is not None:
            # Remove dm-crypt mappings first so the backing LVs can be
            # deleted below (see _detach_encrypted_volumes).
            self._detach_encrypted_volumes(instance)
        disks = self._lvm_disks(instance)
        if disks:
            lvm.remove_volumes(disks)
def _lvm_disks(self, instance):
"""Returns all LVM disks for given instance object."""
if CONF.libvirt.images_volume_group:
vg = os.path.join('/dev', CONF.libvirt.images_volume_group)
if not os.path.exists(vg):
return []
pattern = '%s_' % instance['uuid']
def belongs_to_instance(disk):
return disk.startswith(pattern)
def fullpath(name):
return os.path.join(vg, name)
logical_volumes = lvm.list_volumes(vg)
disk_names = filter(belongs_to_instance, logical_volumes)
disks = map(fullpath, disk_names)
return disks
return []
def get_volume_connector(self, instance):
if not self._initiator:
self._initiator = libvirt_utils.get_iscsi_initiator()
if not self._initiator:
LOG.debug('Could not determine iscsi initiator name',
instance=instance)
if not self._fc_wwnns:
self._fc_wwnns = libvirt_utils.get_fc_wwnns()
if not self._fc_wwnns or len(self._fc_wwnns) == 0:
LOG.debug('Could not determine fibre channel '
'world wide node names',
instance=instance)
if not self._fc_wwpns:
self._fc_wwpns = libvirt_utils.get_fc_wwpns()
if not self._fc_wwpns or len(self._fc_wwpns) == 0:
LOG.debug('Could not determine fibre channel '
'world wide port names',
instance=instance)
connector = {'ip': CONF.my_ip,
'host': CONF.host}
if self._initiator:
connector['initiator'] = self._initiator
if self._fc_wwnns and self._fc_wwpns:
connector["wwnns"] = self._fc_wwnns
connector["wwpns"] = self._fc_wwpns
return connector
    def _cleanup_resize(self, instance, network_info):
        """Delete the '_resize' backup directory left by a resize or
        migration; when the instance no longer lives on this host, also
        undefine the domain, unplug its VIFs and remove firewall rules.
        """
        # NOTE(wangpan): we get the pre-grizzly instance path firstly,
        # so the backup dir of pre-grizzly instance can
        # be deleted correctly with grizzly or later nova.
        pre_grizzly_name = libvirt_utils.get_instance_path(instance,
                                                           forceold=True)
        target = pre_grizzly_name + '_resize'
        if not os.path.exists(target):
            target = libvirt_utils.get_instance_path(instance) + '_resize'
        if os.path.exists(target):
            # Deletion can fail over NFS, so retry the deletion as required.
            # Set maximum attempt as 5, most test can remove the directory
            # for the second time.
            utils.execute('rm', '-rf', target, delay_on_retry=True,
                          attempts=5)
        if instance['host'] != CONF.host:
            self._undefine_domain(instance)
            self.unplug_vifs(instance, network_info)
            self.firewall_driver.unfilter_instance(instance, network_info)
def _connect_volume(self, connection_info, disk_info):
driver_type = connection_info.get('driver_volume_type')
if driver_type not in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
driver = self.volume_drivers[driver_type]
driver.connect_volume(connection_info, disk_info)
def _disconnect_volume(self, connection_info, disk_dev):
driver_type = connection_info.get('driver_volume_type')
if driver_type not in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
driver = self.volume_drivers[driver_type]
return driver.disconnect_volume(connection_info, disk_dev)
def _get_volume_config(self, connection_info, disk_info):
driver_type = connection_info.get('driver_volume_type')
if driver_type not in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
driver = self.volume_drivers[driver_type]
return driver.get_config(connection_info, disk_info)
def _get_volume_encryptor(self, connection_info, encryption):
encryptor = encryptors.get_volume_encryptor(connection_info,
**encryption)
return encryptor
    def attach_volume(self, context, connection_info, instance, mountpoint,
                      disk_bus=None, device_type=None, encryption=None):
        """Attach a cinder volume to *instance* at *mountpoint*.

        Connects the volume on the host, builds the disk config and
        hot-plugs it into the domain (config always; live only when the
        domain is running or paused). The host-side connection is
        rolled back on failure.
        """
        instance_name = instance.name
        virt_dom = self._lookup_by_name(instance_name)
        disk_dev = mountpoint.rpartition("/")[2]
        bdm = {
            'device_name': disk_dev,
            'disk_bus': disk_bus,
            'device_type': device_type}
        # Note(cfb): If the volume has a custom block size, check that
        # that we are using QEMU/KVM and libvirt >= 0.10.2. The
        # presence of a block size is considered mandatory by
        # cinder so we fail if we can't honor the request.
        data = {}
        if ('data' in connection_info):
            data = connection_info['data']
        if ('logical_block_size' in data or 'physical_block_size' in data):
            if ((CONF.libvirt.virt_type != "kvm" and
                 CONF.libvirt.virt_type != "qemu")):
                msg = _("Volume sets block size, but the current "
                        "libvirt hypervisor '%s' does not support custom "
                        "block size") % CONF.libvirt.virt_type
                raise exception.InvalidHypervisorType(msg)
            if not self._has_min_version(MIN_LIBVIRT_BLOCKIO_VERSION):
                ver = ".".join([str(x) for x in MIN_LIBVIRT_BLOCKIO_VERSION])
                msg = _("Volume sets block size, but libvirt '%s' or later is "
                        "required.") % ver
                raise exception.Invalid(msg)
        disk_info = blockinfo.get_info_from_bdm(CONF.libvirt.virt_type, bdm)
        self._connect_volume(connection_info, disk_info)
        conf = self._get_volume_config(connection_info, disk_info)
        self._set_cache_mode(conf)
        try:
            # NOTE(vish): We can always affect config because our
            # domains are persistent, but we should only
            # affect live if the domain is running.
            flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
            state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
            if state in (power_state.RUNNING, power_state.PAUSED):
                flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
            if encryption:
                # Attach the encryptor before the device is exposed to
                # the guest.
                encryptor = self._get_volume_encryptor(connection_info,
                                                       encryption)
                encryptor.attach_volume(context, **encryption)
            virt_dom.attachDeviceFlags(conf.to_xml(), flags)
        except Exception as ex:
            LOG.exception(_('Failed to attach volume at mountpoint: %s'),
                          mountpoint, instance=instance)
            if isinstance(ex, libvirt.libvirtError):
                errcode = ex.get_error_code()
                if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
                    # Undo the host-side connection before reporting busy.
                    self._disconnect_volume(connection_info, disk_dev)
                    raise exception.DeviceIsBusy(device=disk_dev)
            with excutils.save_and_reraise_exception():
                self._disconnect_volume(connection_info, disk_dev)
    def _swap_volume(self, domain, disk_path, new_path, resize_to):
        """Swap existing disk with a new block device.

        Mirrors the disk onto new_path via blockRebase(COPY|REUSE_EXT),
        pivots to it, and optionally resizes it (resize_to is in GB).
        The domain is temporarily undefined during the copy and always
        re-defined from the saved XML afterwards.
        """
        # Save a copy of the domain's persistent XML file
        xml = domain.XMLDesc(
            libvirt.VIR_DOMAIN_XML_INACTIVE |
            libvirt.VIR_DOMAIN_XML_SECURE)
        # Abort is an idempotent operation, so make sure any block
        # jobs which may have failed are ended.
        try:
            domain.blockJobAbort(disk_path, 0)
        except Exception:
            pass
        try:
            # NOTE (rmk): blockRebase cannot be executed on persistent
            # domains, so we need to temporarily undefine it.
            # If any part of this block fails, the domain is
            # re-defined regardless.
            if domain.isPersistent():
                domain.undefine()
            # Start copy with VIR_DOMAIN_REBASE_REUSE_EXT flag to
            # allow writing to existing external volume file
            domain.blockRebase(disk_path, new_path, 0,
                               libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
                               libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT)
            while self._wait_for_block_job(domain, disk_path):
                time.sleep(0.5)
            domain.blockJobAbort(disk_path,
                                 libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT)
            if resize_to:
                # NOTE(alex_xu): domain.blockJobAbort isn't sync call. This
                # is bug in libvirt. So we need waiting for the pivot is
                # finished. libvirt bug #1119173
                while self._wait_for_block_job(domain, disk_path,
                                               wait_for_job_clean=True):
                    time.sleep(0.5)
                # blockResize takes the new size in KiB.
                domain.blockResize(disk_path, resize_to * units.Gi / units.Ki)
        finally:
            self._conn.defineXML(xml)
    def swap_volume(self, old_connection_info,
                    new_connection_info, instance, mountpoint, resize_to):
        """Replace the volume attached at *mountpoint* with a new one.

        Connects the new volume, block-rebases the domain's disk onto
        it via _swap_volume, then disconnects the old volume. Only
        volumes exposed as host devices (source_path set) are supported.
        """
        instance_name = instance['name']
        virt_dom = self._lookup_by_name(instance_name)
        disk_dev = mountpoint.rpartition("/")[2]
        xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev)
        if not xml:
            raise exception.DiskNotFound(location=disk_dev)
        disk_info = {
            'dev': disk_dev,
            'bus': blockinfo.get_disk_bus_for_disk_dev(
                CONF.libvirt.virt_type, disk_dev),
            'type': 'disk',
            }
        self._connect_volume(new_connection_info, disk_info)
        conf = self._get_volume_config(new_connection_info, disk_info)
        if not conf.source_path:
            # Roll back the just-made connection before bailing out.
            self._disconnect_volume(new_connection_info, disk_dev)
            raise NotImplementedError(_("Swap only supports host devices"))
        self._swap_volume(virt_dom, disk_dev, conf.source_path, resize_to)
        self._disconnect_volume(old_connection_info, disk_dev)
@staticmethod
def _get_disk_xml(xml, device):
"""Returns the xml for the disk mounted at device."""
try:
doc = etree.fromstring(xml)
except Exception:
return None
ret = doc.findall('./devices/disk')
for node in ret:
for child in node.getchildren():
if child.tag == 'target':
if child.get('dev') == device:
return etree.tostring(node)
    def _get_existing_domain_xml(self, instance, network_info,
                                 block_device_info=None):
        """Return the domain XML for *instance*, regenerating it from
        instance, network and block-device data when the domain is not
        defined in libvirt.
        """
        try:
            virt_dom = self._lookup_by_name(instance['name'])
            xml = virt_dom.XMLDesc(0)
        except exception.InstanceNotFound:
            disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                instance,
                                                block_device_info)
            xml = self._get_guest_xml(nova_context.get_admin_context(),
                                      instance, network_info, disk_info,
                                      block_device_info=block_device_info)
        return xml
    def detach_volume(self, connection_info, instance, mountpoint,
                      encryption=None):
        """Detach the volume at *mountpoint* from *instance*.

        Detaches the device from the domain (config always; live when
        running/paused) and from its encryptor when present, then
        always disconnects the volume on the host — even when the
        domain has already disappeared.
        """
        instance_name = instance.name
        disk_dev = mountpoint.rpartition("/")[2]
        try:
            virt_dom = self._lookup_by_name(instance_name)
            xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev)
            if not xml:
                raise exception.DiskNotFound(location=disk_dev)
            else:
                # NOTE(vish): We can always affect config because our
                # domains are persistent, but we should only
                # affect live if the domain is running.
                flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
                state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
                if state in (power_state.RUNNING, power_state.PAUSED):
                    flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
                virt_dom.detachDeviceFlags(xml, flags)
                if encryption:
                    # The volume must be detached from the VM before
                    # disconnecting it from its encryptor. Otherwise, the
                    # encryptor may report that the volume is still in use.
                    encryptor = self._get_volume_encryptor(connection_info,
                                                           encryption)
                    encryptor.detach_volume(**encryption)
        except exception.InstanceNotFound:
            # NOTE(zhaoqin): If the instance does not exist, _lookup_by_name()
            # will throw InstanceNotFound exception. Need to
            # disconnect volume under this circumstance.
            LOG.warn(_LW("During detach_volume, instance disappeared."))
        except libvirt.libvirtError as ex:
            # NOTE(vish): This is called to cleanup volumes after live
            # migration, so we should still disconnect even if
            # the instance doesn't exist here anymore.
            error_code = ex.get_error_code()
            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                # NOTE(vish):
                LOG.warn(_LW("During detach_volume, instance disappeared."))
            else:
                raise
        self._disconnect_volume(connection_info, disk_dev)
    def attach_interface(self, instance, image_meta, vif):
        """Hot-plug a network interface into *instance*.

        Plugs the VIF on the host, sets up basic firewall filtering and
        attaches the generated device config to the domain (config
        always; live only when running/paused). The VIF is unplugged
        again if the libvirt attach fails.
        """
        virt_dom = self._lookup_by_name(instance['name'])
        flavor = objects.Flavor.get_by_id(
            nova_context.get_admin_context(read_deleted='yes'),
            instance['instance_type_id'])
        self.vif_driver.plug(instance, vif)
        self.firewall_driver.setup_basic_filtering(instance, [vif])
        cfg = self.vif_driver.get_config(instance, vif, image_meta,
                                         flavor, CONF.libvirt.virt_type)
        try:
            flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
            state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
            if state == power_state.RUNNING or state == power_state.PAUSED:
                flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
            virt_dom.attachDeviceFlags(cfg.to_xml(), flags)
        except libvirt.libvirtError:
            LOG.error(_LE('attaching network adapter failed.'),
                      instance=instance)
            self.vif_driver.unplug(instance, vif)
            raise exception.InterfaceAttachFailed(
                instance_uuid=instance['uuid'])
def detach_interface(self, instance, vif):
virt_dom = self._lookup_by_name(instance['name'])
flavor = objects.Flavor.get_by_id(
nova_context.get_admin_context(read_deleted='yes'),
instance['instance_type_id'])
cfg = self.vif_driver.get_config(instance, vif, None, flavor,
CONF.libvirt.virt_type)
try:
self.vif_driver.unplug(instance, vif)
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
if state == power_state.RUNNING or state == power_state.PAUSED:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.detachDeviceFlags(cfg.to_xml(), flags)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warn(_LW("During detach_interface, "
"instance disappeared."),
instance=instance)
else:
LOG.error(_LE('detaching network adapter failed.'),
instance=instance, exc_info=True)
raise exception.InterfaceDetachFailed(
instance_uuid=instance['uuid'])
def _create_snapshot_metadata(self, base, instance, img_fmt, snp_name):
metadata = {'is_public': False,
'status': 'active',
'name': snp_name,
'properties': {
'kernel_id': instance['kernel_id'],
'image_location': 'snapshot',
'image_state': 'available',
'owner_id': instance['project_id'],
'ramdisk_id': instance['ramdisk_id'],
}
}
if instance['os_type']:
metadata['properties']['os_type'] = instance['os_type']
# NOTE(vish): glance forces ami disk format to be ami
if base.get('disk_format') == 'ami':
metadata['disk_format'] = 'ami'
else:
metadata['disk_format'] = img_fmt
metadata['container_format'] = base.get('container_format', 'bare')
return metadata
    def snapshot(self, context, instance, image_id, update_task_state):
        """Create snapshot from a running VM instance.

        This command only works with qemu 0.14+

        The snapshot is taken live (no downtime) when the hypervisor
        stack is new enough and the storage backend allows it;
        otherwise the guest is stopped via managedSave, the disk is
        snapshotted cold, and the guest is started again. The result
        is uploaded to the image service as image ``image_id``.

        :param context: security context
        :param instance: instance to snapshot
        :param image_id: id of the (pre-created) image to upload into
        :param update_task_state: callback used to report progress
            (IMAGE_PENDING_UPLOAD, then IMAGE_UPLOADING)
        """
        try:
            virt_dom = self._lookup_by_name(instance['name'])
        except exception.InstanceNotFound:
            raise exception.InstanceNotRunning(instance_id=instance['uuid'])
        base_image_ref = instance['image_ref']
        base = compute_utils.get_image_metadata(
            context, self._image_api, base_image_ref, instance)
        snapshot = self._image_api.get(context, image_id)
        disk_path = libvirt_utils.find_disk(virt_dom)
        source_format = libvirt_utils.get_disk_type(disk_path)
        # Explicit config wins; otherwise keep the source disk's format.
        image_format = CONF.libvirt.snapshot_image_format or source_format
        # NOTE(bfilippov): save lvm and rbd as raw
        if image_format == 'lvm' or image_format == 'rbd':
            image_format = 'raw'
        metadata = self._create_snapshot_metadata(base,
                                                  instance,
                                                  image_format,
                                                  snapshot['name'])
        snapshot_name = uuid.uuid4().hex
        state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
        # NOTE(rmk): Live snapshots require QEMU 1.3 and Libvirt 1.0.0.
        # These restrictions can be relaxed as other configurations
        # can be validated.
        # NOTE(dgenin): Instances with LVM encrypted ephemeral storage require
        # cold snapshots. Currently, checking for encryption is
        # redundant because LVM supports only cold snapshots.
        # It is necessary in case this situation changes in the
        # future.
        if (self._has_min_version(MIN_LIBVIRT_LIVESNAPSHOT_VERSION,
                                  MIN_QEMU_LIVESNAPSHOT_VERSION,
                                  REQ_HYPERVISOR_LIVESNAPSHOT)
                and source_format not in ('lvm', 'rbd')
                and not CONF.ephemeral_storage_encryption.enabled):
            live_snapshot = True
            # Abort is an idempotent operation, so make sure any block
            # jobs which may have failed are ended. This operation also
            # confirms the running instance, as opposed to the system as a
            # whole, has a new enough version of the hypervisor (bug 1193146).
            try:
                virt_dom.blockJobAbort(disk_path, 0)
            except libvirt.libvirtError as ex:
                error_code = ex.get_error_code()
                if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED:
                    live_snapshot = False
                else:
                    # NOTE(review): other abort failures are deliberately
                    # ignored here; presumably any real problem resurfaces
                    # when the live snapshot itself runs — confirm.
                    pass
        else:
            live_snapshot = False
        # NOTE(rmk): We cannot perform live snapshots when a managedSave
        # file is present, so we will use the cold/legacy method
        # for instances which are shutdown.
        if state == power_state.SHUTDOWN:
            live_snapshot = False
        # NOTE(dkang): managedSave does not work for LXC
        if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:
            if state == power_state.RUNNING or state == power_state.PAUSED:
                # Cold snapshot: PCI/SR-IOV devices cannot survive a
                # managed save, so detach them before stopping the guest.
                self._detach_pci_devices(virt_dom,
                    pci_manager.get_instance_pci_devs(instance))
                self._detach_sriov_ports(instance, virt_dom)
                virt_dom.managedSave(0)
        snapshot_backend = self.image_backend.snapshot(instance,
                disk_path,
                image_type=source_format)
        if live_snapshot:
            LOG.info(_LI("Beginning live snapshot process"),
                     instance=instance)
        else:
            LOG.info(_LI("Beginning cold snapshot process"),
                     instance=instance)
        update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
        snapshot_directory = CONF.libvirt.snapshots_directory
        fileutils.ensure_tree(snapshot_directory)
        with utils.tempdir(dir=snapshot_directory) as tmpdir:
            try:
                out_path = os.path.join(tmpdir, snapshot_name)
                if live_snapshot:
                    # NOTE(xqueralt): libvirt needs o+x in the temp directory
                    os.chmod(tmpdir, 0o701)
                    self._live_snapshot(virt_dom, disk_path, out_path,
                                        image_format)
                else:
                    snapshot_backend.snapshot_extract(out_path, image_format)
            finally:
                new_dom = None
                # NOTE(dkang): because previous managedSave is not called
                # for LXC, _create_domain must not be called.
                if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:
                    # Restart the guest in the state it was in before the
                    # managed save, and reattach the detached devices.
                    if state == power_state.RUNNING:
                        new_dom = self._create_domain(domain=virt_dom)
                    elif state == power_state.PAUSED:
                        new_dom = self._create_domain(domain=virt_dom,
                            launch_flags=libvirt.VIR_DOMAIN_START_PAUSED)
                    if new_dom is not None:
                        self._attach_pci_devices(new_dom,
                            pci_manager.get_instance_pci_devs(instance))
                        self._attach_sriov_ports(context, instance, new_dom)
            LOG.info(_LI("Snapshot extracted, beginning image upload"),
                     instance=instance)
            # Upload that image to the image service
            update_task_state(task_state=task_states.IMAGE_UPLOADING,
                              expected_state=task_states.IMAGE_PENDING_UPLOAD)
            with libvirt_utils.file_open(out_path) as image_file:
                self._image_api.update(context,
                                       image_id,
                                       metadata,
                                       image_file)
        LOG.info(_LI("Snapshot image upload complete"),
                 instance=instance)
@staticmethod
def _wait_for_block_job(domain, disk_path, abort_on_error=False,
wait_for_job_clean=False):
"""Wait for libvirt block job to complete.
Libvirt may return either cur==end or an empty dict when
the job is complete, depending on whether the job has been
cleaned up by libvirt yet, or not.
:returns: True if still in progress
False if completed
"""
status = domain.blockJobInfo(disk_path, 0)
if status == -1 and abort_on_error:
msg = _('libvirt error while requesting blockjob info.')
raise exception.NovaException(msg)
try:
cur = status.get('cur', 0)
end = status.get('end', 0)
except Exception:
return False
if wait_for_job_clean:
job_ended = not status
else:
job_ended = cur == end
return not job_ended
    def _live_snapshot(self, domain, disk_path, out_path, image_format):
        """Snapshot an instance without downtime.

        Mirrors the root disk into a CoW delta via blockRebase, then
        flattens the delta into ``out_path``. The domain is temporarily
        undefined while the block job runs and re-defined afterwards.

        :param domain: libvirt domain to snapshot
        :param disk_path: path of the guest's root disk
        :param out_path: destination path for the flattened snapshot
        :param image_format: output image format for the snapshot
        """
        # Save a copy of the domain's persistent XML file
        xml = domain.XMLDesc(
            libvirt.VIR_DOMAIN_XML_INACTIVE |
            libvirt.VIR_DOMAIN_XML_SECURE)
        # Abort is an idempotent operation, so make sure any block
        # jobs which may have failed are ended.
        try:
            domain.blockJobAbort(disk_path, 0)
        except Exception:
            pass
        # NOTE (rmk): We are using shallow rebases as a workaround to a bug
        # in QEMU 1.3. In order to do this, we need to create
        # a destination image with the original backing file
        # and matching size of the instance root disk.
        src_disk_size = libvirt_utils.get_disk_size(disk_path)
        src_back_path = libvirt_utils.get_disk_backing_file(disk_path,
                                                            basename=False)
        disk_delta = out_path + '.delta'
        libvirt_utils.create_cow_image(src_back_path, disk_delta,
                                       src_disk_size)
        try:
            # NOTE (rmk): blockRebase cannot be executed on persistent
            # domains, so we need to temporarily undefine it.
            # If any part of this block fails, the domain is
            # re-defined regardless.
            if domain.isPersistent():
                domain.undefine()
            # NOTE (rmk): Establish a temporary mirror of our root disk and
            # issue an abort once we have a complete copy.
            domain.blockRebase(disk_path, disk_delta, 0,
                               libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
                               libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
                               libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)
            while self._wait_for_block_job(domain, disk_path):
                time.sleep(0.5)
            domain.blockJobAbort(disk_path, 0)
            # Make the delta readable by the current (nova) user.
            libvirt_utils.chown(disk_delta, os.getuid())
        finally:
            # Always restore the persistent domain definition.
            self._conn.defineXML(xml)
        # Convert the delta (CoW) image with a backing file to a flat
        # image with no backing file.
        libvirt_utils.extract_snapshot(disk_delta, 'qcow2',
                                       out_path, image_format)
def _volume_snapshot_update_status(self, context, snapshot_id, status):
"""Send a snapshot status update to Cinder.
This method captures and logs exceptions that occur
since callers cannot do anything useful with these exceptions.
Operations on the Cinder side waiting for this will time out if
a failure occurs sending the update.
:param context: security context
:param snapshot_id: id of snapshot being updated
:param status: new status value
"""
try:
self._volume_api.update_snapshot_status(context,
snapshot_id,
status)
except Exception:
LOG.exception(_LE('Failed to send updated snapshot status '
'to volume service.'))
    def _volume_snapshot_create(self, context, instance, domain,
                                volume_id, new_file):
        """Perform volume snapshot.

        Walks the domain's disks, classifies them into the Cinder
        volume to snapshot (matched by serial == volume_id, as a local
        file or as a gluster/netfs network disk) and disks to skip,
        then issues an external disk-only snapshotCreateXML — first
        with quiescing, retrying without it on failure.

        :param context: security context (currently unused here)
        :param instance: instance the volume is attached to
        :param domain: VM that volume is attached to
        :param volume_id: volume UUID to snapshot
        :param new_file: relative path to new qcow2 file present on share
        """
        xml = domain.XMLDesc(0)
        xml_doc = etree.fromstring(xml)
        device_info = vconfig.LibvirtConfigGuest()
        device_info.parse_dom(xml_doc)
        disks_to_snap = []  # to be snapshotted by libvirt
        network_disks_to_snap = []  # network disks (netfs, gluster, etc.)
        disks_to_skip = []  # local disks not snapshotted
        for guest_disk in device_info.devices:
            if (guest_disk.root_name != 'disk'):
                continue
            if (guest_disk.target_dev is None):
                continue
            if (guest_disk.serial is None or guest_disk.serial != volume_id):
                disks_to_skip.append(guest_disk.target_dev)
                continue
            # disk is a Cinder volume with the correct volume_id
            disk_info = {
                'dev': guest_disk.target_dev,
                'serial': guest_disk.serial,
                'current_file': guest_disk.source_path,
                'source_protocol': guest_disk.source_protocol,
                'source_name': guest_disk.source_name,
                'source_hosts': guest_disk.source_hosts,
                'source_ports': guest_disk.source_ports
            }
            # Determine path for new_file based on current path
            if disk_info['current_file'] is not None:
                current_file = disk_info['current_file']
                new_file_path = os.path.join(os.path.dirname(current_file),
                                             new_file)
                disks_to_snap.append((current_file, new_file_path))
            elif disk_info['source_protocol'] in ('gluster', 'netfs'):
                network_disks_to_snap.append((disk_info, new_file))
        if not disks_to_snap and not network_disks_to_snap:
            msg = _('Found no disk to snapshot.')
            raise exception.NovaException(msg)
        snapshot = vconfig.LibvirtConfigGuestSnapshot()
        # Local (file-backed) disks to snapshot externally.
        for current_name, new_filename in disks_to_snap:
            snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
            snap_disk.name = current_name
            snap_disk.source_path = new_filename
            snap_disk.source_type = 'file'
            snap_disk.snapshot = 'external'
            snap_disk.driver_name = 'qcow2'
            snapshot.add_disk(snap_disk)
        # Network disks keep their protocol/hosts/ports, with the new
        # file placed in the same share directory as the old one.
        for disk_info, new_filename in network_disks_to_snap:
            snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
            snap_disk.name = disk_info['dev']
            snap_disk.source_type = 'network'
            snap_disk.source_protocol = disk_info['source_protocol']
            snap_disk.snapshot = 'external'
            snap_disk.source_path = new_filename
            old_dir = disk_info['source_name'].split('/')[0]
            snap_disk.source_name = '%s/%s' % (old_dir, new_filename)
            snap_disk.source_hosts = disk_info['source_hosts']
            snap_disk.source_ports = disk_info['source_ports']
            snapshot.add_disk(snap_disk)
        # All other disks are explicitly excluded from the snapshot.
        for dev in disks_to_skip:
            snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
            snap_disk.name = dev
            snap_disk.snapshot = 'no'
            snapshot.add_disk(snap_disk)
        snapshot_xml = snapshot.to_xml()
        LOG.debug("snap xml: %s", snapshot_xml)
        snap_flags = (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
                      libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
                      libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
        QUIESCE = libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE
        # Prefer a quiesced snapshot; fall back to unquiesced if
        # libvirt rejects the quiesce request.
        try:
            domain.snapshotCreateXML(snapshot_xml,
                                     snap_flags | QUIESCE)
            return
        except libvirt.libvirtError:
            LOG.exception(_LE('Unable to create quiesced VM snapshot, '
                              'attempting again with quiescing disabled.'))
        try:
            domain.snapshotCreateXML(snapshot_xml, snap_flags)
        except libvirt.libvirtError:
            LOG.exception(_LE('Unable to create VM snapshot, '
                              'failing volume_snapshot operation.'))
            raise
def _volume_refresh_connection_info(self, context, instance, volume_id):
bdm = objects.BlockDeviceMapping.get_by_volume_id(context,
volume_id)
driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm)
driver_bdm.refresh_connection_info(context, instance,
self._volume_api, self)
    def volume_snapshot_create(self, context, instance, volume_id,
                               create_info):
        """Create snapshots of a Cinder volume via libvirt.

        On failure the snapshot status is reported to Cinder as
        'error' and the exception re-raised; on success it is set to
        'creating' and a looping call polls Cinder until the snapshot
        leaves that state, refreshing the volume's connection info.

        :param context: security context
        :param instance: VM instance object reference
        :param volume_id: id of volume being snapshotted
        :param create_info: dict of information used to create snapshots
                     - snapshot_id : ID of snapshot
                     - type : qcow2 / <other>
                     - new_file : qcow2 file created by Cinder which
                       becomes the VM's active image after
                       the snapshot is complete
        """
        LOG.debug("volume_snapshot_create: create_info: %(c_info)s",
                  {'c_info': create_info}, instance=instance)
        try:
            virt_dom = self._lookup_by_name(instance.name)
        except exception.InstanceNotFound:
            raise exception.InstanceNotRunning(instance_id=instance.uuid)
        if create_info['type'] != 'qcow2':
            raise exception.NovaException(_('Unknown type: %s') %
                                          create_info['type'])
        snapshot_id = create_info.get('snapshot_id', None)
        if snapshot_id is None:
            raise exception.NovaException(_('snapshot_id required '
                                            'in create_info'))
        try:
            self._volume_snapshot_create(context, instance, virt_dom,
                                         volume_id, create_info['new_file'])
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Error occurred during '
                                  'volume_snapshot_create, '
                                  'sending error status to Cinder.'))
                # Tell Cinder the snapshot failed so it does not wait
                # for its full timeout.
                self._volume_snapshot_update_status(
                    context, snapshot_id, 'error')
        self._volume_snapshot_update_status(
            context, snapshot_id, 'creating')
        def _wait_for_snapshot():
            # Poll Cinder; once the snapshot leaves 'creating', refresh
            # the attachment's connection info and stop the loop.
            snapshot = self._volume_api.get_snapshot(context, snapshot_id)
            if snapshot.get('status') != 'creating':
                self._volume_refresh_connection_info(context, instance,
                                                     volume_id)
                raise loopingcall.LoopingCallDone()
        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_snapshot)
        timer.start(interval=0.5).wait()
    def _volume_snapshot_delete(self, context, instance, volume_id,
                                snapshot_id, delete_info=None):
        """Delete a volume snapshot by merging adjacent qcow2 files.

        Note:
            if file being merged into == active image:
                do a blockRebase (pull) operation
            else:
                do a blockCommit operation
            Files must be adjacent in snap chain.

        :param context: security context (currently unused here)
        :param instance: instance object reference
        :param volume_id: volume UUID
        :param snapshot_id: snapshot UUID (unused currently)
        :param delete_info: {
            'type': 'qcow2',
            'file_to_merge': 'a.img',
            'merge_target_file': 'b.img' or None (if merging file_to_merge
                                 into active image)
            }

        Libvirt blockjob handling required for this method is broken
        in versions of libvirt that do not contain:
        http://libvirt.org/git/?p=libvirt.git;h=0f9e67bfad (1.1.1)
        (Patch is pending in 1.0.5-maint branch as well, but we cannot detect
        libvirt 1.0.5.5 vs. 1.0.5.6 here.)
        """
        if not self._has_min_version(MIN_LIBVIRT_BLOCKJOBINFO_VERSION):
            ver = '.'.join([str(x) for x in MIN_LIBVIRT_BLOCKJOBINFO_VERSION])
            msg = _("Libvirt '%s' or later is required for online deletion "
                    "of volume snapshots.") % ver
            raise exception.Invalid(msg)
        LOG.debug('volume_snapshot_delete: delete_info: %s', delete_info)
        if delete_info['type'] != 'qcow2':
            msg = _('Unknown delete_info type %s') % delete_info['type']
            raise exception.NovaException(msg)
        try:
            virt_dom = self._lookup_by_name(instance.name)
        except exception.InstanceNotFound:
            raise exception.InstanceNotRunning(instance_id=instance.uuid)
        # Find dev name: locate the attached disk whose serial matches
        # the Cinder volume id.
        my_dev = None
        active_disk = None
        xml = virt_dom.XMLDesc(0)
        xml_doc = etree.fromstring(xml)
        device_info = vconfig.LibvirtConfigGuest()
        device_info.parse_dom(xml_doc)
        active_disk_object = None
        for guest_disk in device_info.devices:
            if (guest_disk.root_name != 'disk'):
                continue
            if (guest_disk.target_dev is None or guest_disk.serial is None):
                continue
            if guest_disk.serial == volume_id:
                my_dev = guest_disk.target_dev
                active_disk = guest_disk.source_path
                active_protocol = guest_disk.source_protocol
                active_disk_object = guest_disk
                break
        if my_dev is None or (active_disk is None and active_protocol is None):
            msg = _('Disk with id: %s '
                    'not found attached to instance.') % volume_id
            LOG.debug('Domain XML: %s', xml)
            raise exception.NovaException(msg)
        LOG.debug("found device at %s", my_dev)
        def _get_snap_dev(filename, backing_store):
            # Resolve a network-disk (libgfapi) snapshot file to the
            # 'dev[index]' notation libvirt expects, by walking the
            # disk's backing chain for a matching filename.
            if filename is None:
                msg = _('filename cannot be None')
                raise exception.NovaException(msg)
            # libgfapi delete
            LOG.debug("XML: %s" % xml)
            LOG.debug("active disk object: %s" % active_disk_object)
            # determine reference within backing store for desired image
            filename_to_merge = filename
            matched_name = None
            b = backing_store
            index = None
            # '[0]' refers to the active image itself.
            current_filename = active_disk_object.source_name.split('/')[1]
            if current_filename == filename_to_merge:
                return my_dev + '[0]'
            while b is not None:
                source_filename = b.source_name.split('/')[1]
                if source_filename == filename_to_merge:
                    LOG.debug('found match: %s' % b.source_name)
                    matched_name = b.source_name
                    index = b.index
                    break
                b = b.backing_store
            if matched_name is None:
                msg = _('no match found for %s') % (filename_to_merge)
                raise exception.NovaException(msg)
            LOG.debug('index of match (%s) is %s' % (b.source_name, index))
            my_snap_dev = '%s[%s]' % (my_dev, index)
            return my_snap_dev
        if delete_info['merge_target_file'] is None:
            # pull via blockRebase()
            # Merge the most recent snapshot into the active image
            rebase_disk = my_dev
            rebase_flags = 0
            rebase_base = delete_info['file_to_merge']  # often None
            if active_protocol is not None:
                rebase_base = _get_snap_dev(delete_info['file_to_merge'],
                                            active_disk_object.backing_store)
            rebase_bw = 0
            LOG.debug('disk: %(disk)s, base: %(base)s, '
                      'bw: %(bw)s, flags: %(flags)s',
                      {'disk': rebase_disk,
                       'base': rebase_base,
                       'bw': rebase_bw,
                       'flags': rebase_flags})
            result = virt_dom.blockRebase(rebase_disk, rebase_base,
                                          rebase_bw, rebase_flags)
            if result == 0:
                LOG.debug('blockRebase started successfully')
            # Block until the pull completes; errors abort the wait.
            while self._wait_for_block_job(virt_dom, my_dev,
                                           abort_on_error=True):
                LOG.debug('waiting for blockRebase job completion')
                time.sleep(0.5)
        else:
            # commit with blockCommit()
            my_snap_base = None
            my_snap_top = None
            commit_disk = my_dev
            commit_flags = 0
            if active_protocol is not None:
                my_snap_base = _get_snap_dev(delete_info['merge_target_file'],
                                             active_disk_object.backing_store)
                my_snap_top = _get_snap_dev(delete_info['file_to_merge'],
                                            active_disk_object.backing_store)
                try:
                    # Missing constant means the libvirt python binding
                    # is too old for relative blockcommit.
                    commit_flags |= libvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
                except AttributeError:
                    ver = '.'.join(
                        [str(x) for x in
                         MIN_LIBVIRT_BLOCKCOMMIT_RELATIVE_VERSION])
                    msg = _("Relative blockcommit support was not detected. "
                            "Libvirt '%s' or later is required for online "
                            "deletion of network storage-backed volume "
                            "snapshots.") % ver
                    raise exception.Invalid(msg)
            commit_base = my_snap_base or delete_info['merge_target_file']
            commit_top = my_snap_top or delete_info['file_to_merge']
            bandwidth = 0
            LOG.debug('will call blockCommit with commit_disk=%(commit_disk)s '
                      'commit_base=%(commit_base)s '
                      'commit_top=%(commit_top)s '
                      % {'commit_disk': commit_disk,
                         'commit_base': commit_base,
                         'commit_top': commit_top})
            result = virt_dom.blockCommit(commit_disk, commit_base, commit_top,
                                          bandwidth, commit_flags)
            if result == 0:
                LOG.debug('blockCommit started successfully')
            # Block until the commit completes; errors abort the wait.
            while self._wait_for_block_job(virt_dom, my_dev,
                                           abort_on_error=True):
                LOG.debug('waiting for blockCommit job completion')
                time.sleep(0.5)
def volume_snapshot_delete(self, context, instance, volume_id, snapshot_id,
delete_info):
try:
self._volume_snapshot_delete(context, instance, volume_id,
snapshot_id, delete_info=delete_info)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Error occurred during '
'volume_snapshot_delete, '
'sending error status to Cinder.'))
self._volume_snapshot_update_status(
context, snapshot_id, 'error_deleting')
self._volume_snapshot_update_status(context, snapshot_id, 'deleting')
self._volume_refresh_connection_info(context, instance, volume_id)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot a virtual machine, given an instance reference."""
if reboot_type == 'SOFT':
# NOTE(vish): This will attempt to do a graceful shutdown/restart.
try:
soft_reboot_success = self._soft_reboot(instance)
except libvirt.libvirtError as e:
LOG.debug("Instance soft reboot failed: %s", e)
soft_reboot_success = False
if soft_reboot_success:
LOG.info(_LI("Instance soft rebooted successfully."),
instance=instance)
return
else:
LOG.warn(_LW("Failed to soft reboot instance. "
"Trying hard reboot."),
instance=instance)
return self._hard_reboot(context, instance, network_info,
block_device_info)
    def _soft_reboot(self, instance):
        """Attempt to shutdown and restart the instance gracefully.

        We use shutdown and create here so we can return if the guest
        responded and actually rebooted. Note that this method only
        succeeds if the guest responds to acpi. Therefore we return
        success or failure so we can fall back to a hard reboot if
        necessary.

        :param instance: instance to reboot
        :returns: True if the reboot succeeded
        """
        dom = self._lookup_by_name(instance["name"])
        state = LIBVIRT_POWER_STATE[dom.info()[0]]
        old_domid = dom.ID()
        # NOTE(vish): This check allows us to reboot an instance that
        # is already shutdown.
        if state == power_state.RUNNING:
            dom.shutdown()
        # NOTE(vish): This actually could take slightly longer than the
        # FLAG defines depending on how long the get_info
        # call takes to return.
        self._prepare_pci_devices_for_use(
            pci_manager.get_instance_pci_devs(instance, 'all'))
        # Poll once per second until the guest either shuts down (we
        # then restart it) or reboots by itself (the domain ID changes).
        for x in xrange(CONF.libvirt.wait_soft_reboot_seconds):
            dom = self._lookup_by_name(instance["name"])
            state = LIBVIRT_POWER_STATE[dom.info()[0]]
            new_domid = dom.ID()
            # NOTE(ivoks): By checking domain IDs, we make sure we are
            # not recreating domain that's already running.
            if old_domid != new_domid:
                if state in [power_state.SHUTDOWN,
                             power_state.CRASHED]:
                    LOG.info(_LI("Instance shutdown successfully."),
                             instance=instance)
                    self._create_domain(domain=dom)
                    # Wait until the restarted guest is actually running.
                    timer = loopingcall.FixedIntervalLoopingCall(
                        self._wait_for_running, instance)
                    timer.start(interval=0.5).wait()
                    return True
                else:
                    LOG.info(_LI("Instance may have been rebooted during soft "
                                 "reboot, so return now."), instance=instance)
                    return True
            greenthread.sleep(1)
        # Guest never shut down within the window; caller falls back to
        # a hard reboot.
        return False
    def _hard_reboot(self, context, instance, network_info,
                     block_device_info=None):
        """Reboot a virtual machine, given an instance reference.

        The domain is destroyed and then re-created from regenerated
        guest XML, re-populating any missing backing files along the
        way, so the reboot cannot be ignored by the guest OS. A
        looping call then blocks until the guest reports RUNNING.

        :param context: security context
        :param instance: instance to reboot
        :param network_info: instance network information
        :param block_device_info: instance volume block device info
        """
        self._destroy(instance)
        # Get the system metadata from the instance
        system_meta = utils.instance_sys_meta(instance)
        # Convert the system metadata to image metadata
        image_meta = utils.get_image_from_system_metadata(system_meta)
        if not image_meta:
            # Fall back to fetching metadata from the image service.
            image_ref = instance.get('image_ref')
            image_meta = compute_utils.get_image_metadata(context,
                                                          self._image_api,
                                                          image_ref,
                                                          instance)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            block_device_info,
                                            image_meta)
        # NOTE(vish): This could generate the wrong device_format if we are
        # using the raw backend and the images don't exist yet.
        # The create_images_and_backing below doesn't properly
        # regenerate raw backend images, however, so when it
        # does we need to (re)generate the xml after the images
        # are in place.
        xml = self._get_guest_xml(context, instance, network_info, disk_info,
                                  image_meta=image_meta,
                                  block_device_info=block_device_info,
                                  write_to_disk=True)
        # NOTE (rmk): Re-populate any missing backing files.
        disk_info_json = self._get_instance_disk_info(instance['name'], xml,
                                                      block_device_info)
        instance_dir = libvirt_utils.get_instance_path(instance)
        self._create_images_and_backing(context, instance, instance_dir,
                                        disk_info_json)
        # Initialize all the necessary networking, block devices and
        # start the instance.
        self._create_domain_and_network(context, xml, instance, network_info,
                                        block_device_info, reboot=True,
                                        vifs_already_plugged=True)
        self._prepare_pci_devices_for_use(
            pci_manager.get_instance_pci_devs(instance, 'all'))
        def _wait_for_reboot():
            """Called at an interval until the VM is running again."""
            state = self.get_info(instance).state
            if state == power_state.RUNNING:
                LOG.info(_LI("Instance rebooted successfully."),
                         instance=instance)
                raise loopingcall.LoopingCallDone()
        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_reboot)
        timer.start(interval=0.5).wait()
def pause(self, instance):
"""Pause VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.suspend()
def unpause(self, instance):
"""Unpause paused VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.resume()
    def _clean_shutdown(self, instance, timeout, retry_interval):
        """Attempt to shutdown the instance gracefully.

        :param instance: The instance to be shutdown
        :param timeout: How long to wait in seconds for the instance to
                        shutdown
        :param retry_interval: How often in seconds to signal the instance
                               to shutdown while waiting
        :returns: True if the shutdown succeeded
        """
        # List of states that represent a shutdown instance
        SHUTDOWN_STATES = [power_state.SHUTDOWN,
                           power_state.CRASHED]
        try:
            dom = self._lookup_by_name(instance["name"])
        except exception.InstanceNotFound:
            # If the instance has gone then we don't need to
            # wait for it to shutdown
            return True
        (state, _max_mem, _mem, _cpus, _t) = dom.info()
        state = LIBVIRT_POWER_STATE[state]
        if state in SHUTDOWN_STATES:
            LOG.info(_LI("Instance already shutdown."),
                     instance=instance)
            return True
        LOG.debug("Shutting down instance from state %s", state,
                  instance=instance)
        dom.shutdown()
        retry_countdown = retry_interval
        # Poll once per second for up to `timeout` seconds, re-sending
        # the shutdown signal every `retry_interval` seconds.
        for sec in six.moves.range(timeout):
            dom = self._lookup_by_name(instance["name"])
            (state, _max_mem, _mem, _cpus, _t) = dom.info()
            state = LIBVIRT_POWER_STATE[state]
            if state in SHUTDOWN_STATES:
                LOG.info(_LI("Instance shutdown successfully after %d "
                             "seconds."), sec, instance=instance)
                return True
            # Note(PhilD): We can't assume that the Guest was able to process
            # any previous shutdown signal (for example it may
            # have still been starting up, so within the overall
            # timeout we re-trigger the shutdown every
            # retry_interval
            if retry_countdown == 0:
                retry_countdown = retry_interval
                # Instance could shutdown at any time, in which case we
                # will get an exception when we call shutdown
                try:
                    LOG.debug("Instance in state %s after %d seconds - "
                              "resending shutdown", state, sec,
                              instance=instance)
                    dom.shutdown()
                except libvirt.libvirtError:
                    # Assume this is because its now shutdown, so loop
                    # one more time to clean up.
                    LOG.debug("Ignoring libvirt exception from shutdown "
                              "request.", instance=instance)
                    continue
            else:
                retry_countdown -= 1
            time.sleep(1)
        LOG.info(_LI("Instance failed to shutdown in %d seconds."),
                 timeout, instance=instance)
        return False
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance."""
if timeout:
self._clean_shutdown(instance, timeout, retry_interval)
self._destroy(instance)
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
# We use _hard_reboot here to ensure that all backing files,
# network, and block device connections, etc. are established
# and available before we attempt to start the instance.
self._hard_reboot(context, instance, network_info, block_device_info)
def suspend(self, instance):
"""Suspend the specified instance."""
dom = self._lookup_by_name(instance.name)
self._detach_pci_devices(dom,
pci_manager.get_instance_pci_devs(instance))
self._detach_sriov_ports(instance, dom)
dom.managedSave(0)
def resume(self, context, instance, network_info, block_device_info=None):
"""resume the specified instance."""
xml = self._get_existing_domain_xml(instance, network_info,
block_device_info)
dom = self._create_domain_and_network(context, xml, instance,
network_info, block_device_info=block_device_info,
vifs_already_plugged=True)
self._attach_pci_devices(dom,
pci_manager.get_instance_pci_devs(instance))
self._attach_sriov_ports(context, instance, dom, network_info)
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
# Check if the instance is running already and avoid doing
# anything if it is.
try:
domain = self._lookup_by_name(instance.name)
state = LIBVIRT_POWER_STATE[domain.info()[0]]
ignored_states = (power_state.RUNNING,
power_state.SUSPENDED,
power_state.NOSTATE,
power_state.PAUSED)
if state in ignored_states:
return
except exception.NovaException:
pass
# Instance is not up and could be in an unknown state.
# Be as absolute as possible about getting it back into
# a known and running state.
self._hard_reboot(context, instance, network_info, block_device_info)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Loads a VM using rescue images.
A rescue is normally performed when something goes wrong with the
primary images and data needs to be corrected/recovered. Rescuing
should not edit or over-ride the original image, only allow for
data recovery.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml = self._get_existing_domain_xml(instance, network_info)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)
if image_meta is not None:
rescue_image_id = image_meta.get('id')
else:
rescue_image_id = None
rescue_images = {
'image_id': (rescue_image_id or
CONF.libvirt.rescue_image_id or instance.image_ref),
'kernel_id': (CONF.libvirt.rescue_kernel_id or
instance.kernel_id),
'ramdisk_id': (CONF.libvirt.rescue_ramdisk_id or
instance.ramdisk_id),
}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
None,
image_meta,
rescue=True)
self._create_image(context, instance, disk_info['mapping'],
suffix='.rescue', disk_images=rescue_images,
network_info=network_info,
admin_pass=rescue_password)
xml = self._get_guest_xml(context, instance, network_info, disk_info,
image_meta, rescue=rescue_images,
write_to_disk=True)
self._destroy(instance)
self._create_domain(xml)
def unrescue(self, instance, network_info):
"""Reboot the VM which is being rescued back into primary images.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
xml = libvirt_utils.load_file(unrescue_xml_path)
virt_dom = self._lookup_by_name(instance.name)
self._destroy(instance)
self._create_domain(xml, virt_dom)
libvirt_utils.file_delete(unrescue_xml_path)
rescue_files = os.path.join(instance_dir, "*.rescue")
for rescue_file in glob.iglob(rescue_files):
libvirt_utils.file_delete(rescue_file)
    def poll_rebooting_instances(self, timeout, instances):
        # Compute-driver API hook; the libvirt driver takes no action
        # for instances stuck in a rebooting state.
        pass
def _enable_hairpin(self, xml):
interfaces = self._get_interfaces(xml)
for interface in interfaces:
utils.execute('tee',
'/sys/class/net/%s/brport/hairpin_mode' % interface,
process_input='1',
run_as_root=True,
check_exit_code=[0, 1])
    # NOTE(ilyaalekseyev): Implementation modelled on the multi-NIC
    # support in the xenapi driver (tr3buchet).
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
block_device_info,
image_meta)
self._create_image(context, instance,
disk_info['mapping'],
network_info=network_info,
block_device_info=block_device_info,
files=injected_files,
admin_pass=admin_password)
xml = self._get_guest_xml(context, instance, network_info,
disk_info, image_meta,
block_device_info=block_device_info,
write_to_disk=True)
self._create_domain_and_network(context, xml, instance, network_info,
block_device_info, disk_info=disk_info)
LOG.debug("Instance is running", instance=instance)
def _wait_for_boot():
"""Called at an interval until the VM is running."""
state = self.get_info(instance).state
if state == power_state.RUNNING:
LOG.info(_LI("Instance spawned successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
timer.start(interval=0.5).wait()
def _flush_libvirt_console(self, pty):
out, err = utils.execute('dd',
'if=%s' % pty,
'iflag=nonblock',
run_as_root=True,
check_exit_code=False)
return out
def _append_to_file(self, data, fpath):
LOG.info(_LI('data: %(data)r, fpath: %(fpath)r'),
{'data': data, 'fpath': fpath})
with open(fpath, 'a+') as fp:
fp.write(data)
return fpath
    def get_console_output(self, context, instance):
        """Return the tail of the instance's console log.

        Prefers a file-backed console; otherwise drains a pty console
        into the console log file. Output is capped at MAX_CONSOLE_BYTES.

        :raises NovaException: if no usable console source is found.
        """
        virt_dom = self._lookup_by_name(instance.name)
        xml = virt_dom.XMLDesc(0)
        tree = etree.fromstring(xml)
        # Bucket <console> elements by their 'type' attribute.
        console_types = {}
        # NOTE(comstud): We want to try 'file' types first, then try 'pty'
        # types. We can't use Python 2.7 syntax of:
        # tree.find("./devices/console[@type='file']/source")
        # because we need to support 2.6.
        console_nodes = tree.findall('./devices/console')
        for console_node in console_nodes:
            console_type = console_node.get('type')
            console_types.setdefault(console_type, [])
            console_types[console_type].append(console_node)
        # If the guest has a console logging to a file prefer to use that
        if console_types.get('file'):
            for file_console in console_types.get('file'):
                source_node = file_console.find('./source')
                if source_node is None:
                    continue
                path = source_node.get("path")
                if not path:
                    continue
                # chown so the (non-root) service user can read the log.
                libvirt_utils.chown(path, os.getuid())
                with libvirt_utils.file_open(path, 'rb') as fp:
                    log_data, remaining = utils.last_bytes(fp,
                                                           MAX_CONSOLE_BYTES)
                    if remaining > 0:
                        LOG.info(_LI('Truncated console log returned, '
                                     '%d bytes ignored'), remaining,
                                 instance=instance)
                    return log_data
        # Try 'pty' types
        if console_types.get('pty'):
            # for/else: the else runs only if no pty with a path was found.
            for pty_console in console_types.get('pty'):
                source_node = pty_console.find('./source')
                if source_node is None:
                    continue
                pty = source_node.get("path")
                if not pty:
                    continue
                break
            else:
                msg = _("Guest does not have a console available")
                raise exception.NovaException(msg)
            # Flush the pty into the persistent console log, then tail it.
            self._chown_console_log_for_instance(instance)
            data = self._flush_libvirt_console(pty)
            console_log = self._get_console_log_path(instance)
            fpath = self._append_to_file(data, console_log)
            with libvirt_utils.file_open(fpath, 'rb') as fp:
                log_data, remaining = utils.last_bytes(fp, MAX_CONSOLE_BYTES)
                if remaining > 0:
                    LOG.info(_LI('Truncated console log returned, '
                                 '%d bytes ignored'),
                             remaining, instance=instance)
                return log_data
    @staticmethod
    def get_host_ip_addr():
        # The IP address other hosts use to reach this one, from config.
        return CONF.my_ip
def get_vnc_console(self, context, instance):
def get_vnc_port_for_instance(instance_name):
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
dom = minidom.parseString(xml)
for graphic in dom.getElementsByTagName('graphics'):
if graphic.getAttribute('type') == 'vnc':
return graphic.getAttribute('port')
# NOTE(rmk): We had VNC consoles enabled but the instance in
# question is not actually listening for connections.
raise exception.ConsoleTypeUnavailable(console_type='vnc')
port = get_vnc_port_for_instance(instance.name)
host = CONF.vncserver_proxyclient_address
return ctype.ConsoleVNC(host=host, port=port)
def get_spice_console(self, context, instance):
def get_spice_ports_for_instance(instance_name):
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
# TODO(sleepsonthefloor): use etree instead of minidom
dom = minidom.parseString(xml)
for graphic in dom.getElementsByTagName('graphics'):
if graphic.getAttribute('type') == 'spice':
return (graphic.getAttribute('port'),
graphic.getAttribute('tlsPort'))
# NOTE(rmk): We had Spice consoles enabled but the instance in
# question is not actually listening for connections.
raise exception.ConsoleTypeUnavailable(console_type='spice')
ports = get_spice_ports_for_instance(instance['name'])
host = CONF.spice.server_proxyclient_address
return ctype.ConsoleSpice(host=host, port=ports[0], tlsPort=ports[1])
def get_serial_console(self, context, instance):
for host, port in self._get_serial_ports_from_instance(
instance, mode='bind'):
return ctype.ConsoleSerial(host=host, port=port)
raise exception.ConsoleTypeUnavailable(console_type='serial')
    @staticmethod
    def _supports_direct_io(dirpath):
        """Probe whether files under *dirpath* can be opened with O_DIRECT.

        Writes a 512-byte-aligned test file and inspects the result;
        the probe file is always removed. EINVAL means "not supported";
        any other error is logged and re-raised.
        """
        if not hasattr(os, 'O_DIRECT'):
            LOG.debug("This python runtime does not support direct I/O")
            return False
        testfile = os.path.join(dirpath, ".directio.test")
        hasDirectIO = True
        try:
            f = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
            # Check is the write allowed with 512 byte alignment
            align_size = 512
            # mmap gives a page-aligned buffer, as O_DIRECT requires.
            m = mmap.mmap(-1, align_size)
            m.write(r"x" * align_size)
            os.write(f, m)
            os.close(f)
            LOG.debug("Path '%(path)s' supports direct I/O",
                      {'path': dirpath})
        except OSError as e:
            if e.errno == errno.EINVAL:
                # EINVAL from an O_DIRECT write => filesystem lacks support.
                LOG.debug("Path '%(path)s' does not support direct I/O: "
                          "'%(ex)s'", {'path': dirpath, 'ex': e})
                hasDirectIO = False
            else:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE("Error on '%(path)s' while checking "
                                  "direct I/O: '%(ex)s'"),
                              {'path': dirpath, 'ex': e})
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Error on '%(path)s' while checking direct I/O: "
                              "'%(ex)s'"), {'path': dirpath, 'ex': e})
        finally:
            # Best-effort cleanup of the probe file.
            try:
                os.unlink(testfile)
            except Exception:
                pass
        return hasDirectIO
@staticmethod
def _create_local(target, local_size, unit='G',
fs_format=None, label=None):
"""Create a blank image of specified size."""
libvirt_utils.create_image('raw', target,
'%d%c' % (local_size, unit))
def _create_ephemeral(self, target, ephemeral_size,
fs_label, os_type, is_block_dev=False,
max_size=None, context=None, specified_fs=None):
if not is_block_dev:
self._create_local(target, ephemeral_size)
# Run as root only for block devices.
disk.mkfs(os_type, fs_label, target, run_as_root=is_block_dev,
specified_fs=specified_fs)
@staticmethod
def _create_swap(target, swap_mb, max_size=None, context=None):
"""Create a swap file of specified size."""
libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
utils.mkfs('swap', target)
@staticmethod
def _get_console_log_path(instance):
return os.path.join(libvirt_utils.get_instance_path(instance),
'console.log')
@staticmethod
def _get_disk_config_path(instance, suffix=''):
return os.path.join(libvirt_utils.get_instance_path(instance),
'disk.config' + suffix)
def _chown_console_log_for_instance(self, instance):
console_log = self._get_console_log_path(instance)
if os.path.exists(console_log):
libvirt_utils.chown(console_log, os.getuid())
def _chown_disk_config_for_instance(self, instance):
disk_config = self._get_disk_config_path(instance)
if os.path.exists(disk_config):
libvirt_utils.chown(disk_config, os.getuid())
@staticmethod
def _is_booted_from_volume(instance, disk_mapping):
"""Determines whether the VM is booting from volume
Determines whether the disk mapping indicates that the VM
is booting from a volume.
"""
return ((not bool(instance.get('image_ref')))
or 'disk' not in disk_mapping)
    def _inject_data(self, instance, network_info, admin_pass, files, suffix):
        """Injects data in a disk image.

        Helper used for injecting data in a disk image file system.

        :param instance: dict of instance specifications
        :param network_info: dict of network specifications
        :param admin_pass: admin password string to set (may be discarded
            if password injection is disabled in config)
        :param files: list of files to inject
        :param suffix: image name suffix (e.g. '.rescue')
        """
        # Handles the partition need to be used.
        target_partition = None
        if not instance['kernel_id']:
            # Partition injection only applies to full-disk images
            # (no separate kernel); 0 means "not partitioned".
            target_partition = CONF.libvirt.inject_partition
            if target_partition == 0:
                target_partition = None
        if CONF.libvirt.virt_type == 'lxc':
            target_partition = None
        # Handles the key injection.
        if CONF.libvirt.inject_key and instance.get('key_data'):
            key = str(instance['key_data'])
        else:
            key = None
        # Handles the admin password injection.
        if not CONF.libvirt.inject_password:
            admin_pass = None
        # Handles the network injection.
        net = netutils.get_injected_network_template(
                network_info, libvirt_virt_type=CONF.libvirt.virt_type)
        # Handles the metadata injection
        metadata = instance.get('metadata')
        image_type = CONF.libvirt.images_type
        # Only touch the image if there is actually something to inject.
        if any((key, net, metadata, admin_pass, files)):
            injection_image = self.image_backend.image(
                instance,
                'disk' + suffix,
                image_type)
            img_id = instance['image_ref']
            if not injection_image.check_image_exists():
                LOG.warn(_LW('Image %s not found on disk storage. '
                             'Continue without injecting data'),
                         injection_image.path, instance=instance)
                return
            try:
                disk.inject_data(injection_image.path,
                                 key, net, metadata, admin_pass, files,
                                 partition=target_partition,
                                 use_cow=CONF.use_cow_images,
                                 mandatory=('files',))
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Error injecting data into image '
                                  '%(img_id)s (%(e)s)'),
                              {'img_id': img_id, 'e': e},
                              instance=instance)
    def _create_image(self, context, instance,
                      disk_mapping, suffix='',
                      disk_images=None, network_info=None,
                      block_device_info=None, files=None,
                      admin_pass=None, inject_files=True):
        """Stage all backing images/disks needed to boot the instance.

        Fetches/caches kernel, ramdisk, root, ephemeral and swap images
        as dictated by *disk_mapping*, builds the config drive when
        required, and performs file/password injection otherwise.
        *suffix* (e.g. '.rescue') selects an alternate image set.
        """
        booted_from_volume = self._is_booted_from_volume(
            instance, disk_mapping)
        # Helpers binding the instance/suffix to the image backend.
        def image(fname, image_type=CONF.libvirt.images_type):
            return self.image_backend.image(instance,
                                            fname + suffix, image_type)
        def raw(fname):
            return image(fname, image_type='raw')
        # ensure directories exist and are writable
        fileutils.ensure_tree(libvirt_utils.get_instance_path(instance))
        LOG.info(_LI('Creating image'), instance=instance)
        # NOTE(dprince): for rescue console.log may already exist... chown it.
        self._chown_console_log_for_instance(instance)
        # NOTE(yaguang): For evacuate disk.config already exist in shared
        # storage, chown it.
        self._chown_disk_config_for_instance(instance)
        # NOTE(vish): No need add the suffix to console.log
        libvirt_utils.write_to_file(
            self._get_console_log_path(instance), '', 7)
        if not disk_images:
            disk_images = {'image_id': instance['image_ref'],
                           'kernel_id': instance['kernel_id'],
                           'ramdisk_id': instance['ramdisk_id']}
        # Kernel/ramdisk are always cached as raw images.
        if disk_images['kernel_id']:
            fname = imagecache.get_cache_fname(disk_images, 'kernel_id')
            raw('kernel').cache(fetch_func=libvirt_utils.fetch_image,
                                context=context,
                                filename=fname,
                                image_id=disk_images['kernel_id'],
                                user_id=instance['user_id'],
                                project_id=instance['project_id'])
        if disk_images['ramdisk_id']:
            fname = imagecache.get_cache_fname(disk_images, 'ramdisk_id')
            raw('ramdisk').cache(fetch_func=libvirt_utils.fetch_image,
                                 context=context,
                                 filename=fname,
                                 image_id=disk_images['ramdisk_id'],
                                 user_id=instance['user_id'],
                                 project_id=instance['project_id'])
        inst_type = flavors.extract_flavor(instance)
        # NOTE(ndipanov): Even if disk_mapping was passed in, which
        # currently happens only on rescue - we still don't want to
        # create a base image.
        if not booted_from_volume:
            root_fname = imagecache.get_cache_fname(disk_images, 'image_id')
            size = instance['root_gb'] * units.Gi
            # size=None means "leave at native image size" (rescue or 0 GB).
            if size == 0 or suffix == '.rescue':
                size = None
            backend = image('disk')
            if backend.SUPPORTS_CLONE:
                # Prefer a backend-native clone; fall back to fetching
                # from glance when the image cannot be cloned.
                def clone_fallback_to_fetch(*args, **kwargs):
                    try:
                        backend.clone(context, disk_images['image_id'])
                    except exception.ImageUnacceptable:
                        libvirt_utils.fetch_image(*args, **kwargs)
                fetch_func = clone_fallback_to_fetch
            else:
                fetch_func = libvirt_utils.fetch_image
            backend.cache(fetch_func=fetch_func,
                          context=context,
                          filename=root_fname,
                          size=size,
                          image_id=disk_images['image_id'],
                          user_id=instance['user_id'],
                          project_id=instance['project_id'])
        # Lookup the filesystem type if required
        os_type_with_default = disk.get_fs_type_for_os_type(
            instance['os_type'])
        # Flavor-defined ephemeral disk.
        ephemeral_gb = instance['ephemeral_gb']
        if 'disk.local' in disk_mapping:
            disk_image = image('disk.local')
            fn = functools.partial(self._create_ephemeral,
                                   fs_label='ephemeral0',
                                   os_type=instance["os_type"],
                                   is_block_dev=disk_image.is_block_dev)
            fname = "ephemeral_%s_%s" % (ephemeral_gb, os_type_with_default)
            size = ephemeral_gb * units.Gi
            disk_image.cache(fetch_func=fn,
                             context=context,
                             filename=fname,
                             size=size,
                             ephemeral_size=ephemeral_gb)
        # Additional BDM-defined ephemeral disks.
        for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
                block_device_info)):
            disk_image = image(blockinfo.get_eph_disk(idx))
            specified_fs = eph.get('guest_format')
            if specified_fs and not self.is_supported_fs_format(specified_fs):
                msg = _("%s format is not supported") % specified_fs
                raise exception.InvalidBDMFormat(details=msg)
            fn = functools.partial(self._create_ephemeral,
                                   fs_label='ephemeral%d' % idx,
                                   os_type=instance["os_type"],
                                   is_block_dev=disk_image.is_block_dev)
            size = eph['size'] * units.Gi
            fname = "ephemeral_%s_%s" % (eph['size'], os_type_with_default)
            disk_image.cache(fetch_func=fn,
                             context=context,
                             filename=fname,
                             size=size,
                             ephemeral_size=eph['size'],
                             specified_fs=specified_fs)
        # Swap: BDM-provided swap wins over the flavor's swap setting.
        if 'disk.swap' in disk_mapping:
            mapping = disk_mapping['disk.swap']
            swap_mb = 0
            swap = driver.block_device_info_get_swap(block_device_info)
            if driver.swap_is_usable(swap):
                swap_mb = swap['swap_size']
            elif (inst_type['swap'] > 0 and
                  not block_device.volume_in_mapping(
                    mapping['dev'], block_device_info)):
                swap_mb = inst_type['swap']
            if swap_mb > 0:
                size = swap_mb * units.Mi
                image('disk.swap').cache(fetch_func=self._create_swap,
                                         context=context,
                                         filename="swap_%s" % swap_mb,
                                         size=size,
                                         swap_mb=swap_mb)
        # Config drive
        if configdrive.required_by(instance):
            LOG.info(_LI('Using config drive'), instance=instance)
            extra_md = {}
            if admin_pass:
                extra_md['admin_pass'] = admin_pass
            inst_md = instance_metadata.InstanceMetadata(instance,
                content=files, extra_md=extra_md, network_info=network_info)
            with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
                configdrive_path = self._get_disk_config_path(instance, suffix)
                LOG.info(_LI('Creating config drive at %(path)s'),
                         {'path': configdrive_path}, instance=instance)
                try:
                    cdb.make_drive(configdrive_path)
                except processutils.ProcessExecutionError as e:
                    with excutils.save_and_reraise_exception():
                        LOG.error(_LE('Creating config drive failed '
                                      'with error: %s'),
                                  e, instance=instance)
        # File injection only if needed
        elif inject_files and CONF.libvirt.inject_partition != -2:
            if booted_from_volume:
                LOG.warn(_LW('File injection into a boot from volume '
                             'instance is not supported'), instance=instance)
            self._inject_data(
                instance, network_info, admin_pass, files, suffix)
        if CONF.libvirt.virt_type == 'uml':
            # UML guests need the root disk owned by root.
            libvirt_utils.chown(image('disk').path, 'root')
    def _prepare_pci_devices_for_use(self, pci_devices):
        """Manually detach PCI devices from the host for Xen guests.

        kvm/qemu use libvirt "managed" mode, where detach/re-attach is
        automatic; only Xen requires this explicit detach + reset pass.

        :raises PciDevicePrepareFailed: on any libvirt error; the failed
            device is the one bound to ``dev`` when the error occurred.
        """
        # kvm , qemu support managed mode
        # In managed mode, the configured device will be automatically
        # detached from the host OS drivers when the guest is started,
        # and then re-attached when the guest shuts down.
        if CONF.libvirt.virt_type != 'xen':
            # we do manual detach only for xen
            return
        try:
            for dev in pci_devices:
                libvirt_dev_addr = dev['hypervisor_name']
                libvirt_dev = \
                        self._conn.nodeDeviceLookupByName(libvirt_dev_addr)
                # Note(yjiang5) Spelling for 'dettach' is correct, see
                # http://libvirt.org/html/libvirt-libvirt.html.
                libvirt_dev.dettach()
            # Note(yjiang5): A reset of one PCI device may impact other
            # devices on the same bus, thus we need two separated loops
            # to detach and then reset it.
            for dev in pci_devices:
                libvirt_dev_addr = dev['hypervisor_name']
                libvirt_dev = \
                        self._conn.nodeDeviceLookupByName(libvirt_dev_addr)
                libvirt_dev.reset()
        except libvirt.libvirtError as exc:
            # 'dev' still names the device being processed when the
            # libvirt call failed (loop variable leaks out of the loop).
            raise exception.PciDevicePrepareFailed(id=dev['id'],
                                                   instance_uuid=
                                                   dev['instance_uuid'],
                                                   reason=six.text_type(exc))
def _detach_pci_devices(self, dom, pci_devs):
# for libvirt version < 1.1.1, this is race condition
# so forbid detach if not had this version
if not self._has_min_version(MIN_LIBVIRT_DEVICE_CALLBACK_VERSION):
if pci_devs:
reason = (_("Detaching PCI devices with libvirt < %(ver)s"
" is not permitted") %
{'ver': MIN_LIBVIRT_DEVICE_CALLBACK_VERSION})
raise exception.PciDeviceDetachFailed(reason=reason,
dev=pci_devs)
try:
for dev in pci_devs:
dom.detachDeviceFlags(self._get_guest_pci_device(dev).to_xml(),
libvirt.VIR_DOMAIN_AFFECT_LIVE)
# after detachDeviceFlags returned, we should check the dom to
# ensure the detaching is finished
xml = dom.XMLDesc(0)
xml_doc = etree.fromstring(xml)
guest_config = vconfig.LibvirtConfigGuest()
guest_config.parse_dom(xml_doc)
for hdev in [d for d in guest_config.devices
if isinstance(d, vconfig.LibvirtConfigGuestHostdevPCI)]:
hdbsf = [hdev.domain, hdev.bus, hdev.slot, hdev.function]
dbsf = pci_utils.parse_address(dev['address'])
if [int(x, 16) for x in hdbsf] ==\
[int(x, 16) for x in dbsf]:
raise exception.PciDeviceDetachFailed(reason=
"timeout",
dev=dev)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warn(_LW("Instance disappeared while detaching "
"a PCI device from it."))
else:
raise
def _attach_pci_devices(self, dom, pci_devs):
try:
for dev in pci_devs:
dom.attachDevice(self._get_guest_pci_device(dev).to_xml())
except libvirt.libvirtError:
LOG.error(_LE('Attaching PCI devices %(dev)s to %(dom)s failed.'),
{'dev': pci_devs, 'dom': dom.ID()})
raise
def _prepare_args_for_get_config(self, context, instance):
with utils.temporary_mutation(context, read_deleted="yes"):
flavor = objects.Flavor.get_by_id(context,
instance['instance_type_id'])
image_ref = instance['image_ref']
image_meta = compute_utils.get_image_metadata(
context, self._image_api, image_ref, instance)
return flavor, image_meta
@staticmethod
def _has_sriov_port(network_info):
for vif in network_info:
if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
return True
return False
def _attach_sriov_ports(self, context, instance, dom, network_info=None):
if network_info is None:
network_info = instance.info_cache.network_info
if network_info is None:
return
if self._has_sriov_port(network_info):
flavor, image_meta = self._prepare_args_for_get_config(context,
instance)
for vif in network_info:
if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
cfg = self.vif_driver.get_config(instance,
vif,
image_meta,
flavor,
CONF.libvirt.virt_type)
LOG.debug('Attaching SR-IOV port %(port)s to %(dom)s',
{'port': vif, 'dom': dom.ID()})
dom.attachDevice(cfg.to_xml())
    def _detach_sriov_ports(self, instance, dom):
        """Hot-unplug the instance's SR-IOV (direct) ports from *dom*.

        :raises PciDeviceDetachFailed: when the libvirt version is too
            old to safely support live device detach.
        """
        network_info = instance.info_cache.network_info
        if network_info is None:
            return
        context = nova_context.get_admin_context()
        if self._has_sriov_port(network_info):
            # for libvirt version < 1.1.1, this is race condition
            # so forbid detach if it's an older version
            if not self._has_min_version(
                            MIN_LIBVIRT_DEVICE_CALLBACK_VERSION):
                reason = (_("Detaching SR-IOV ports with"
                           " libvirt < %(ver)s is not permitted") %
                          {'ver': MIN_LIBVIRT_DEVICE_CALLBACK_VERSION})
                raise exception.PciDeviceDetachFailed(reason=reason,
                                                      dev=network_info)
            flavor, image_meta = self._prepare_args_for_get_config(context,
                                                                   instance)
            for vif in network_info:
                if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
                    cfg = self.vif_driver.get_config(instance,
                                                     vif,
                                                     image_meta,
                                                     flavor,
                                                     CONF.libvirt.virt_type)
                    dom.detachDeviceFlags(cfg.to_xml(),
                                          libvirt.VIR_DOMAIN_AFFECT_LIVE)
    def _set_host_enabled(self, enabled,
                          disable_reason=DISABLE_REASON_UNDEFINED):
        """Enables / Disables the compute service on this host.

        This doesn't override non-automatic disablement with an automatic
        setting; thereby permitting operators to keep otherwise
        healthy hosts out of rotation.
        """
        # status_name is keyed by disable_service (the action taken),
        # hence True -> 'disabled'.
        status_name = {True: 'disabled',
                       False: 'enabled'}
        disable_service = not enabled
        ctx = nova_context.get_admin_context()
        try:
            service = objects.Service.get_by_compute_host(ctx, CONF.host)
            if service.disabled != disable_service:
                # Note(jang): this is a quick fix to stop operator-
                # disabled compute hosts from re-enabling themselves
                # automatically. We prefix any automatic reason code
                # with a fixed string. We only re-enable a host
                # automatically if we find that string in place.
                # This should probably be replaced with a separate flag.
                if not service.disabled or (
                        service.disabled_reason and
                        service.disabled_reason.startswith(DISABLE_PREFIX)):
                    service.disabled = disable_service
                    service.disabled_reason = (
                       DISABLE_PREFIX + disable_reason
                       if disable_service else DISABLE_REASON_UNDEFINED)
                    service.save()
                    LOG.debug('Updating compute service status to %s',
                              status_name[disable_service])
                else:
                    LOG.debug('Not overriding manual compute service '
                              'status with: %s',
                              status_name[disable_service])
        except exception.ComputeHostNotFound:
            LOG.warn(_LW('Cannot update service status on host: %s,'
                         'since it is not registered.'), CONF.host)
        except Exception:
            # Never let a status update failure propagate to the caller.
            LOG.warn(_LW('Cannot update service status on host: %s,'
                         'due to an unexpected exception.'), CONF.host,
                     exc_info=True)
    def _get_host_capabilities(self):
        """Returns an instance of config.LibvirtConfigCaps representing
        the capabilities of the host.

        The result is computed once and cached on self._caps; the CPU
        feature list is expanded via baselineCPU when available.
        """
        if not self._caps:
            xmlstr = self._conn.getCapabilities()
            self._caps = vconfig.LibvirtConfigCaps()
            self._caps.parse_str(xmlstr)
            if hasattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'):
                try:
                    features = self._conn.baselineCPU(
                        [self._caps.host.cpu.to_xml()],
                        libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES)
                    # FIXME(wangpan): the return value of baselineCPU should be
                    #                 None or xml string, but libvirt has a bug
                    #                 of it from 1.1.2 which is fixed in 1.2.0,
                    #                 this -1 checking should be removed later.
                    if features and features != -1:
                        cpu = vconfig.LibvirtConfigCPU()
                        cpu.parse_str(features)
                        self._caps.host.cpu.features = cpu.features
                except libvirt.libvirtError as ex:
                    error_code = ex.get_error_code()
                    if error_code == libvirt.VIR_ERR_NO_SUPPORT:
                        # Hypervisor can't expand features; proceed with
                        # the unexpanded capability set.
                        LOG.warn(_LW("URI %(uri)s does not support full set"
                                     " of host capabilities: " "%(error)s"),
                                 {'uri': self.uri(), 'error': ex})
                    else:
                        raise
        return self._caps
def _get_host_uuid(self):
"""Returns a UUID representing the host."""
caps = self._get_host_capabilities()
return caps.host.uuid
    def _get_guest_cpu_model_config(self):
        """Build the guest CPU model config from cpu_mode/cpu_model config.

        :returns: a LibvirtConfigGuestCPU, or None when no CPU config
            applies (mode "none" on non-kvm/qemu hypervisors).
        :raises Invalid: on inconsistent mode/model combinations, or when
            an explicit model is requested on a hypervisor that cannot
            select CPU models.
        """
        mode = CONF.libvirt.cpu_mode
        model = CONF.libvirt.cpu_model
        if (CONF.libvirt.virt_type == "kvm" or
            CONF.libvirt.virt_type == "qemu"):
            # kvm/qemu default to host-model; "none" yields an empty config.
            if mode is None:
                mode = "host-model"
            if mode == "none":
                return vconfig.LibvirtConfigGuestCPU()
        else:
            if mode is None or mode == "none":
                return None
        if ((CONF.libvirt.virt_type != "kvm" and
             CONF.libvirt.virt_type != "qemu")):
            msg = _("Config requested an explicit CPU model, but "
                    "the current libvirt hypervisor '%s' does not "
                    "support selecting CPU models") % CONF.libvirt.virt_type
            raise exception.Invalid(msg)
        # "custom" requires a model name; other modes forbid one.
        if mode == "custom" and model is None:
            msg = _("Config requested a custom CPU model, but no "
                    "model name was provided")
            raise exception.Invalid(msg)
        elif mode != "custom" and model is not None:
            msg = _("A CPU model name should not be set when a "
                    "host CPU model is requested")
            raise exception.Invalid(msg)
        LOG.debug("CPU mode '%(mode)s' model '%(model)s' was chosen",
                  {'mode': mode, 'model': (model or "")})
        cpu = vconfig.LibvirtConfigGuestCPU()
        cpu.mode = mode
        cpu.model = model
        return cpu
def _get_guest_cpu_config(self, flavor, image, guest_cpu_numa):
cpu = self._get_guest_cpu_model_config()
if cpu is None:
return None
topology = hardware.VirtCPUTopology.get_best_config(flavor,
image)
cpu.sockets = topology.sockets
cpu.cores = topology.cores
cpu.threads = topology.threads
cpu.numa = guest_cpu_numa
return cpu
    def _get_guest_disk_config(self, instance, name, disk_mapping, inst_type,
                               image_type=None):
        """Build the libvirt disk config for one named disk of the guest.

        :param name: disk name key into *disk_mapping* (e.g. 'disk.local')
        :raises Invalid: when hw_disk_discard is configured but the
            libvirt/qemu versions do not support discard.
        """
        if CONF.libvirt.hw_disk_discard:
            if not self._has_min_version(MIN_LIBVIRT_DISCARD_VERSION,
                                         MIN_QEMU_DISCARD_VERSION,
                                         REQ_HYPERVISOR_DISCARD):
                msg = (_('Volume sets discard option, but libvirt %(libvirt)s'
                         ' or later is required, qemu %(qemu)s'
                         ' or later is required.') %
                      {'libvirt': MIN_LIBVIRT_DISCARD_VERSION,
                       'qemu': MIN_QEMU_DISCARD_VERSION})
                raise exception.Invalid(msg)
        image = self.image_backend.image(instance,
                                         name,
                                         image_type)
        disk_info = disk_mapping[name]
        return image.libvirt_info(disk_info['bus'],
                                  disk_info['dev'],
                                  disk_info['type'],
                                  self.disk_cachemode,
                                  inst_type['extra_specs'],
                                  self._get_hypervisor_version())
    def _get_guest_storage_config(self, instance, image_meta,
                                  disk_info,
                                  rescue, block_device_info,
                                  inst_type):
        """Assemble all storage-related config devices for the guest.

        Covers the LXC rootfs mount, rescue/root/ephemeral/swap/config
        disks, attached volumes, and an optional SCSI controller.
        Also records default ephemeral/swap device names on the instance.
        """
        devices = []
        disk_mapping = disk_info['mapping']
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)
        mount_rootfs = CONF.libvirt.virt_type == "lxc"
        if mount_rootfs:
            # LXC: the root filesystem is a host directory mount.
            fs = vconfig.LibvirtConfigGuestFilesys()
            fs.source_type = "mount"
            fs.source_dir = os.path.join(
                libvirt_utils.get_instance_path(instance), 'rootfs')
            devices.append(fs)
        else:
            if rescue:
                # Rescue boots from disk.rescue, with the original root
                # disk attached second.
                diskrescue = self._get_guest_disk_config(instance,
                                                         'disk.rescue',
                                                         disk_mapping,
                                                         inst_type)
                devices.append(diskrescue)
                diskos = self._get_guest_disk_config(instance,
                                                     'disk',
                                                     disk_mapping,
                                                     inst_type)
                devices.append(diskos)
            else:
                if 'disk' in disk_mapping:
                    diskos = self._get_guest_disk_config(instance,
                                                         'disk',
                                                         disk_mapping,
                                                         inst_type)
                    devices.append(diskos)
                if 'disk.local' in disk_mapping:
                    disklocal = self._get_guest_disk_config(instance,
                                                            'disk.local',
                                                            disk_mapping,
                                                            inst_type)
                    devices.append(disklocal)
                    instance.default_ephemeral_device = (
                        block_device.prepend_dev(disklocal.target_dev))
                for idx, eph in enumerate(
                    driver.block_device_info_get_ephemerals(
                        block_device_info)):
                    diskeph = self._get_guest_disk_config(
                        instance,
                        blockinfo.get_eph_disk(idx),
                        disk_mapping, inst_type)
                    devices.append(diskeph)
                if 'disk.swap' in disk_mapping:
                    diskswap = self._get_guest_disk_config(instance,
                                                           'disk.swap',
                                                           disk_mapping,
                                                           inst_type)
                    devices.append(diskswap)
                    instance.default_swap_device = (
                        block_device.prepend_dev(diskswap.target_dev))
            if 'disk.config' in disk_mapping:
                # Config drive is always a raw image.
                diskconfig = self._get_guest_disk_config(instance,
                                                         'disk.config',
                                                         disk_mapping,
                                                         inst_type,
                                                         'raw')
                devices.append(diskconfig)
        for vol in block_device.get_bdms_to_connect(block_device_mapping,
                                                    mount_rootfs):
            connection_info = vol['connection_info']
            vol_dev = block_device.prepend_dev(vol['mount_device'])
            info = disk_mapping[vol_dev]
            self._connect_volume(connection_info, info)
            cfg = self._get_volume_config(connection_info, info)
            devices.append(cfg)
            # Persist any connection info updated by _connect_volume.
            vol['connection_info'] = connection_info
            vol.save(nova_context.get_admin_context())
        for d in devices:
            self._set_cache_mode(d)
        if (image_meta and
                image_meta.get('properties', {}).get('hw_scsi_model')):
            hw_scsi_model = image_meta['properties']['hw_scsi_model']
            scsi_controller = vconfig.LibvirtConfigGuestController()
            scsi_controller.type = 'scsi'
            scsi_controller.model = hw_scsi_model
            devices.append(scsi_controller)
        return devices
def _get_host_sysinfo_serial_hardware(self):
"""Get a UUID from the host hardware
Get a UUID for the host hardware reported by libvirt.
This is typically from the SMBIOS data, unless it has
been overridden in /etc/libvirt/libvirtd.conf
"""
return self._get_host_uuid()
def _get_host_sysinfo_serial_os(self):
"""Get a UUID from the host operating system
Get a UUID for the host operating system. Modern Linux
distros based on systemd provide a /etc/machine-id
file containing a UUID. This is also provided inside
systemd based containers and can be provided by other
init systems too, since it is just a plain text file.
"""
with open("/etc/machine-id") as f:
# We want to have '-' in the right place
# so we parse & reformat the value
return str(uuid.UUID(f.read().split()[0]))
def _get_host_sysinfo_serial_auto(self):
if os.path.exists("/etc/machine-id"):
return self._get_host_sysinfo_serial_os()
else:
return self._get_host_sysinfo_serial_hardware()
def _get_guest_config_sysinfo(self, instance):
sysinfo = vconfig.LibvirtConfigGuestSysinfo()
sysinfo.system_manufacturer = version.vendor_string()
sysinfo.system_product = version.product_string()
sysinfo.system_version = version.version_string_with_package()
sysinfo.system_serial = self._sysinfo_serial_func()
sysinfo.system_uuid = instance['uuid']
return sysinfo
def _get_guest_pci_device(self, pci_device):
dbsf = pci_utils.parse_address(pci_device['address'])
dev = vconfig.LibvirtConfigGuestHostdevPCI()
dev.domain, dev.bus, dev.slot, dev.function = dbsf
# only kvm support managed mode
if CONF.libvirt.virt_type in ('xen',):
dev.managed = 'no'
if CONF.libvirt.virt_type in ('kvm', 'qemu'):
dev.managed = 'yes'
return dev
def _get_guest_config_meta(self, context, instance, flavor):
"""Get metadata config for guest."""
meta = vconfig.LibvirtConfigGuestMetaNovaInstance()
meta.package = version.version_string_with_package()
meta.name = instance["display_name"]
meta.creationTime = time.time()
if instance["image_ref"] not in ("", None):
meta.roottype = "image"
meta.rootid = instance["image_ref"]
if context is not None:
ometa = vconfig.LibvirtConfigGuestMetaNovaOwner()
ometa.userid = context.user_id
ometa.username = context.user_name
ometa.projectid = context.project_id
ometa.projectname = context.project_name
meta.owner = ometa
fmeta = vconfig.LibvirtConfigGuestMetaNovaFlavor()
fmeta.name = flavor.name
fmeta.memory = flavor.memory_mb
fmeta.vcpus = flavor.vcpus
fmeta.ephemeral = flavor.ephemeral_gb
fmeta.disk = flavor.root_gb
fmeta.swap = flavor.swap
meta.flavor = fmeta
return meta
def _machine_type_mappings(self):
mappings = {}
for mapping in CONF.libvirt.hw_machine_type:
host_arch, _, machine_type = mapping.partition('=')
mappings[host_arch] = machine_type
return mappings
    def _get_machine_type(self, image_meta, caps):
        """Pick the machine type for the guest, or None for the default.

        Precedence: image 'hw_machine_type' property, else (ARM default,
        then hw_machine_type config mapping).
        """
        # The underlying machine type can be set as an image attribute,
        # or otherwise based on some architecture specific defaults
        mach_type = None
        if (image_meta is not None and image_meta.get('properties') and
               image_meta['properties'].get('hw_machine_type')
               is not None):
            mach_type = image_meta['properties']['hw_machine_type']
        else:
            # For ARM systems we will default to vexpress-a15 for armv7
            # and virt for aarch64
            if caps.host.cpu.arch == arch.ARMV7:
                mach_type = "vexpress-a15"
            if caps.host.cpu.arch == arch.AARCH64:
                mach_type = "virt"
            # If set in the config, use that as the default.
            # NOTE(review): if hw_machine_type is set but has no entry
            # for this arch, mappings.get() returns None and clobbers
            # the ARM default chosen above — confirm this is intended.
            if CONF.libvirt.hw_machine_type:
                mappings = self._machine_type_mappings()
                mach_type = mappings.get(caps.host.cpu.arch)
        return mach_type
@staticmethod
def _create_idmaps(klass, map_strings):
idmaps = []
if len(map_strings) > 5:
map_strings = map_strings[0:5]
LOG.warn(_LW("Too many id maps, only included first five."))
for map_string in map_strings:
try:
idmap = klass()
values = [int(i) for i in map_string.split(":")]
idmap.start = values[0]
idmap.target = values[1]
idmap.count = values[2]
idmaps.append(idmap)
except (ValueError, IndexError):
LOG.warn(_LW("Invalid value for id mapping %s"), map_string)
return idmaps
def _get_guest_idmaps(self):
id_maps = []
if CONF.libvirt.virt_type == 'lxc' and CONF.libvirt.uid_maps:
uid_maps = self._create_idmaps(vconfig.LibvirtConfigGuestUIDMap,
CONF.libvirt.uid_maps)
id_maps.extend(uid_maps)
if CONF.libvirt.virt_type == 'lxc' and CONF.libvirt.gid_maps:
gid_maps = self._create_idmaps(vconfig.LibvirtConfigGuestGIDMap,
CONF.libvirt.gid_maps)
id_maps.extend(gid_maps)
return id_maps
    def _get_cpu_numa_config_from_instance(self, context, instance):
        """Translate the instance's NUMA topology into a libvirt config.

        :returns: LibvirtConfigGuestCPUNUMA, or None when the instance
            has no NUMA topology (or none can be found).
        """
        # TODO(ndipanov): Remove this check once the test_virt_drivers.py
        # is using objects for all calls to _get_running_instance, as this
        # will confirm all code paths are using objects.
        if isinstance(instance, objects.Instance):
            instance_topology = instance.numa_topology
        else:
            try:
                instance_topology = (
                    objects.InstanceNUMATopology.get_by_instance_uuid(
                            context or nova_context.get_admin_context(),
                            instance['uuid']))
            except exception.NumaTopologyNotFound:
                return
        if instance_topology:
            guest_cpu_numa = vconfig.LibvirtConfigGuestCPUNUMA()
            for instance_cell in instance_topology.cells:
                guest_cell = vconfig.LibvirtConfigGuestCPUNUMACell()
                guest_cell.id = instance_cell.id
                guest_cell.cpus = instance_cell.cpuset
                # Instance cell memory is in MB; libvirt wants KiB.
                guest_cell.memory = instance_cell.memory * units.Ki
                guest_cpu_numa.cells.append(guest_cell)
            return guest_cpu_numa
    def _get_guest_numa_config(self, context, instance, flavor,
                               allowed_cpus=None):
        """Returns the config objects for the guest NUMA specs.

        Determines the CPUs that the guest can be pinned to if the guest
        specifies a cell topology and the host supports it. Constructs the
        libvirt XML config object representing the NUMA topology selected
        for the guest. Returns a tuple of:

            (cpu_set, guest_cpu_tune, guest_cpu_numa, guest_numa_tune)

        With the following caveats:

            a) If there is no specified guest NUMA topology, then
               all tuple elements except cpu_set shall be None. cpu_set
               will be populated with the chosen CPUs that the guest
               allowed CPUs fit within, which could be the supplied
               allowed_cpus value if the host doesn't support NUMA
               topologies.

            b) If there is a specified guest NUMA topology, then
               cpu_set will be None and guest_cpu_numa will be the
               LibvirtConfigGuestCPUNUMA object representing the guest's
               NUMA topology. If the host supports NUMA, then guest_cpu_tune
               will contain a LibvirtConfigGuestCPUTune object representing
               the optimized chosen cells that match the host capabilities
               with the instance's requested topology. If the host does
               not support NUMA, then guest_cpu_tune and guest_numa_tune
               will be None.
        """
        topology = self._get_host_numa_topology()
        # We have instance NUMA so translate it to the config class
        guest_cpu_numa = self._get_cpu_numa_config_from_instance(
                context, instance)
        if not guest_cpu_numa:
            # No NUMA topology defined for instance
            vcpus = flavor.vcpus
            memory = flavor.memory_mb
            if topology:
                # Host is NUMA capable so try to keep the instance in a cell
                # Collect the cpusets of every host cell big enough (both in
                # CPUs and memory) to hold the whole instance.
                viable_cells_cpus = []
                for cell in topology.cells:
                    if vcpus <= len(cell.cpuset) and memory <= cell.memory:
                        viable_cells_cpus.append(cell.cpuset)
                if not viable_cells_cpus:
                    # We can't contain the instance in a cell - do nothing for
                    # now.
                    # TODO(ndipanov): Attempt to spread the instance accross
                    # NUMA nodes and expose the topology to the instance as an
                    # optimisation
                    return allowed_cpus, None, None, None
                else:
                    # Pick a viable cell at random to spread instances over
                    # the host cells.
                    pin_cpuset = random.choice(viable_cells_cpus)
                    return pin_cpuset, None, None, None
            else:
                # We have no NUMA topology in the host either
                return allowed_cpus, None, None, None
        else:
            if topology:
                # Now get the CpuTune configuration from the numa_topology
                guest_cpu_tune = vconfig.LibvirtConfigGuestCPUTune()
                guest_numa_tune = vconfig.LibvirtConfigGuestNUMATune()
                numa_mem = vconfig.LibvirtConfigGuestNUMATuneMemory()
                numa_memnodes = []
                # Pair up guest cells with the host cells carrying the same
                # id: pin each guest cell's memory and vCPUs to its host cell.
                for host_cell in topology.cells:
                    for guest_cell in guest_cpu_numa.cells:
                        if guest_cell.id == host_cell.id:
                            node = vconfig.LibvirtConfigGuestNUMATuneMemNode()
                            node.cellid = guest_cell.id
                            node.nodeset = [host_cell.id]
                            node.mode = "strict"
                            numa_memnodes.append(node)
                            numa_mem.nodeset.append(host_cell.id)
                            # Each guest vCPU may float over the whole host
                            # cell's cpuset, not a single pCPU.
                            for cpu in guest_cell.cpus:
                                pin_cpuset = (
                                    vconfig.LibvirtConfigGuestCPUTuneVCPUPin())
                                pin_cpuset.id = cpu
                                pin_cpuset.cpuset = host_cell.cpuset
                                guest_cpu_tune.vcpupin.append(pin_cpuset)
                guest_numa_tune.memory = numa_mem
                guest_numa_tune.memnodes = numa_memnodes
                return None, guest_cpu_tune, guest_cpu_numa, guest_numa_tune
            else:
                return allowed_cpus, None, guest_cpu_numa, None
def _get_guest_os_type(self, virt_type):
"""Returns the guest OS type based on virt type."""
if virt_type == "lxc":
ret = vm_mode.EXE
elif virt_type == "uml":
ret = vm_mode.UML
elif virt_type == "xen":
ret = vm_mode.XEN
else:
ret = vm_mode.HVM
return ret
def _set_guest_for_rescue(self, rescue, guest, inst_path, virt_type,
root_device_name):
if rescue.get('kernel_id'):
guest.os_kernel = os.path.join(inst_path, "kernel.rescue")
if virt_type == "xen":
guest.os_cmdline = "ro root=%s" % root_device_name
else:
guest.os_cmdline = ("root=%s %s" % (root_device_name, CONSOLE))
if virt_type == "qemu":
guest.os_cmdline += " no_timer_check"
if rescue.get('ramdisk_id'):
guest.os_initrd = os.path.join(inst_path, "ramdisk.rescue")
def _set_guest_for_inst_kernel(self, instance, guest, inst_path, virt_type,
root_device_name, image_meta):
guest.os_kernel = os.path.join(inst_path, "kernel")
if virt_type == "xen":
guest.os_cmdline = "ro root=%s" % root_device_name
else:
guest.os_cmdline = ("root=%s %s" % (root_device_name, CONSOLE))
if virt_type == "qemu":
guest.os_cmdline += " no_timer_check"
if instance['ramdisk_id']:
guest.os_initrd = os.path.join(inst_path, "ramdisk")
# we only support os_command_line with images with an explicit
# kernel set and don't want to break nova if there's an
# os_command_line property without a specified kernel_id param
if image_meta:
img_props = image_meta.get('properties', {})
if img_props.get('os_command_line'):
guest.os_cmdline = img_props.get('os_command_line')
def _set_kvm_timers(self, vconfig, clk, image_meta):
# TODO(berrange) One day this should be per-guest
# OS type configurable
tmpit = vconfig.LibvirtConfigGuestTimer()
tmpit.name = "pit"
tmpit.tickpolicy = "delay"
tmrtc = vconfig.LibvirtConfigGuestTimer()
tmrtc.name = "rtc"
tmrtc.tickpolicy = "catchup"
clk.add_timer(tmpit)
clk.add_timer(tmrtc)
guestarch = libvirt_utils.get_arch(image_meta)
if guestarch in (arch.I686, arch.X86_64):
# NOTE(rfolco): HPET is a hardware timer for x86 arch.
# qemu -no-hpet is not supported on non-x86 targets.
tmhpet = vconfig.LibvirtConfigGuestTimer()
tmhpet.name = "hpet"
tmhpet.present = False
clk.add_timer(tmhpet)
def _create_serial_console_devices(self, guest, instance, flavor,
image_meta):
if CONF.serial_console.enabled:
num_ports = hardware.get_number_of_serial_ports(
flavor, image_meta)
for port in six.moves.range(num_ports):
console = vconfig.LibvirtConfigGuestSerial()
console.port = port
console.type = "tcp"
console.listen_host = (
CONF.serial_console.proxyclient_address)
console.listen_port = (
serial_console.acquire_port(
console.listen_host))
guest.add_device(console)
else:
# The QEMU 'pty' driver throws away any data if no
# client app is connected. Thus we can't get away
# with a single type=pty console. Instead we have
# to configure two separate consoles.
consolelog = vconfig.LibvirtConfigGuestSerial()
consolelog.type = "file"
consolelog.source_path = self._get_console_log_path(instance)
guest.add_device(consolelog)
def _add_video_driver(self, guest, image_meta, img_meta_prop, flavor):
VALID_VIDEO_DEVICES = ("vga", "cirrus", "vmvga", "xen", "qxl")
video = vconfig.LibvirtConfigGuestVideo()
# NOTE(ldbragst): The following logic sets the video.type
# depending on supported defaults given the architecture,
# virtualization type, and features. The video.type attribute can
# be overridden by the user with image_meta['properties'], which
# is carried out in the next if statement below this one.
guestarch = libvirt_utils.get_arch(image_meta)
if guest.os_type == vm_mode.XEN:
video.type = 'xen'
elif guestarch in (arch.PPC, arch.PPC64):
# NOTE(ldbragst): PowerKVM doesn't support 'cirrus' be default
# so use 'vga' instead when running on Power hardware.
video.type = 'vga'
elif CONF.spice.enabled:
video.type = 'qxl'
if img_meta_prop.get('hw_video_model'):
video.type = img_meta_prop.get('hw_video_model')
if (video.type not in VALID_VIDEO_DEVICES):
raise exception.InvalidVideoMode(model=video.type)
# Set video memory, only if the flavor's limit is set
video_ram = int(img_meta_prop.get('hw_video_ram', 0))
max_vram = int(flavor.extra_specs.get('hw_video:ram_max_mb', 0))
if video_ram > max_vram:
raise exception.RequestedVRamTooHigh(req_vram=video_ram,
max_vram=max_vram)
if max_vram and video_ram:
video.vram = video_ram
guest.add_device(video)
def _add_qga_device(self, guest, instance):
qga = vconfig.LibvirtConfigGuestChannel()
qga.type = "unix"
qga.target_name = "org.qemu.guest_agent.0"
qga.source_path = ("/var/lib/libvirt/qemu/%s.%s.sock" %
("org.qemu.guest_agent.0", instance['name']))
guest.add_device(qga)
def _add_rng_device(self, guest, flavor):
rng_device = vconfig.LibvirtConfigGuestRng()
rate_bytes = flavor.extra_specs.get('hw_rng:rate_bytes', 0)
period = flavor.extra_specs.get('hw_rng:rate_period', 0)
if rate_bytes:
rng_device.rate_bytes = int(rate_bytes)
rng_device.rate_period = int(period)
rng_path = CONF.libvirt.rng_dev_path
if (rng_path and not os.path.exists(rng_path)):
raise exception.RngDeviceNotExist(path=rng_path)
rng_device.backend = rng_path
guest.add_device(rng_device)
def _set_qemu_guest_agent(self, guest, flavor, instance, img_meta_prop):
qga_enabled = False
# Enable qga only if the 'hw_qemu_guest_agent' is equal to yes
hw_qga = img_meta_prop.get('hw_qemu_guest_agent', 'no')
if hw_qga.lower() == 'yes':
LOG.debug("Qemu guest agent is enabled through image "
"metadata", instance=instance)
qga_enabled = True
if qga_enabled:
self._add_qga_device(guest, instance)
rng_is_virtio = img_meta_prop.get('hw_rng_model') == 'virtio'
rng_allowed_str = flavor.extra_specs.get('hw_rng:allowed', '')
rng_allowed = rng_allowed_str.lower() == 'true'
if rng_is_virtio and rng_allowed:
self._add_rng_device(guest, flavor)
    def _get_guest_config(self, instance, network_info, image_meta,
                          disk_info, rescue=None, block_device_info=None,
                          context=None):
        """Build the complete LibvirtConfigGuest for an instance.

        Assembles CPU/NUMA placement, boot configuration, storage, network
        interfaces, consoles, graphics, guest agent, PCI passthrough,
        watchdog and memory balloon devices based on the virt type, the
        flavor extra specs and the image properties.

        :param rescue: optional dictionary that should contain the key
            'ramdisk_id' if a ramdisk is needed for the rescue image and
            'kernel_id' if a kernel is needed for the rescue image.
        :returns: a vconfig.LibvirtConfigGuest object.
        """
        # Read with read_deleted so the flavor resolves even when the
        # instance (or its flavor) row has been soft-deleted.
        context = context or nova_context.get_admin_context()
        with utils.temporary_mutation(context, read_deleted="yes"):
            flavor = objects.Flavor.get_by_id(context,
                                              instance['instance_type_id'])
        inst_path = libvirt_utils.get_instance_path(instance)
        disk_mapping = disk_info['mapping']
        img_meta_prop = image_meta.get('properties', {}) if image_meta else {}
        virt_type = CONF.libvirt.virt_type
        guest = vconfig.LibvirtConfigGuest()
        guest.virt_type = virt_type
        guest.name = instance['name']
        guest.uuid = instance['uuid']
        # We are using default unit for memory: KiB
        guest.memory = flavor.memory_mb * units.Ki
        guest.vcpus = flavor.vcpus
        # CPU pinning / NUMA placement for the guest.
        allowed_cpus = hardware.get_vcpu_pin_set()
        cpuset, cputune, guest_cpu_numa, guest_numa_tune = \
            self._get_guest_numa_config(
                context, instance, flavor, allowed_cpus)
        guest.cpuset = cpuset
        guest.cputune = cputune
        guest.numatune = guest_numa_tune
        guest.metadata.append(self._get_guest_config_meta(context,
                                                          instance,
                                                          flavor))
        guest.idmaps = self._get_guest_idmaps()
        # Apply quota:cpu_* extra specs on top of any NUMA cputune.
        cputuning = ['shares', 'period', 'quota']
        for name in cputuning:
            key = "quota:cpu_" + name
            if key in flavor.extra_specs:
                if guest.cputune is None:
                    guest.cputune = vconfig.LibvirtConfigGuestCPUTune()
                setattr(guest.cputune, name,
                        int(flavor.extra_specs[key]))
        guest.cpu = self._get_guest_cpu_config(
            flavor, image_meta, guest_cpu_numa)
        if 'root' in disk_mapping:
            root_device_name = block_device.prepend_dev(
                disk_mapping['root']['dev'])
        else:
            root_device_name = None
        if root_device_name:
            # NOTE(yamahata):
            # for nova.api.ec2.cloud.CloudController.get_metadata()
            instance.root_device_name = root_device_name
        guest.os_type = vm_mode.get_from_instance(instance)
        if guest.os_type is None:
            guest.os_type = self._get_guest_os_type(virt_type)
        caps = self._get_host_capabilities()
        if virt_type == "xen":
            if guest.os_type == vm_mode.HVM:
                guest.os_loader = CONF.libvirt.xen_hvmloader_path
            # PAE only makes sense in X86
            if caps.host.cpu.arch in (arch.I686, arch.X86_64):
                guest.pae = True
        if virt_type in ("kvm", "qemu"):
            if caps.host.cpu.arch in (arch.I686, arch.X86_64):
                guest.sysinfo = self._get_guest_config_sysinfo(instance)
                guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()
            guest.os_mach_type = self._get_machine_type(image_meta, caps)
        # Boot configuration: init path (lxc), kernel (uml), or
        # rescue/explicit-kernel/boot-device for full VMs.
        if virt_type == "lxc":
            guest.os_init_path = "/sbin/init"
            guest.os_cmdline = CONSOLE
        elif virt_type == "uml":
            guest.os_kernel = "/usr/bin/linux"
            guest.os_root = root_device_name
        else:
            if rescue:
                self._set_guest_for_rescue(rescue, guest, inst_path, virt_type,
                                           root_device_name)
            elif instance['kernel_id']:
                self._set_guest_for_inst_kernel(instance, guest, inst_path,
                                                virt_type, root_device_name,
                                                image_meta)
            else:
                guest.os_boot_dev = blockinfo.get_boot_order(disk_info)
        if virt_type not in ("lxc", "uml"):
            guest.acpi = guest.apic = True
        # NOTE(mikal): Microsoft Windows expects the clock to be in
        # "localtime". If the clock is set to UTC, then you can use a
        # registry key to let windows know, but Microsoft says this is
        # buggy in http://support.microsoft.com/kb/2687252
        clk = vconfig.LibvirtConfigGuestClock()
        if instance['os_type'] == 'windows':
            LOG.info(_LI('Configuring timezone for windows instance to '
                         'localtime'), instance=instance)
            clk.offset = 'localtime'
        else:
            clk.offset = 'utc'
        guest.set_clock(clk)
        if virt_type == "kvm":
            self._set_kvm_timers(vconfig, clk, image_meta)
        # Disks / volumes.
        storage_configs = self._get_guest_storage_config(
                instance, image_meta, disk_info, rescue, block_device_info,
                flavor)
        for config in storage_configs:
            guest.add_device(config)
        # Network interfaces.
        for vif in network_info:
            config = self.vif_driver.get_config(
                instance, vif, image_meta,
                flavor, virt_type)
            guest.add_device(config)
        if virt_type in ("qemu", "kvm"):
            # Create the serial console char devices
            self._create_serial_console_devices(guest, instance, flavor,
                                                image_meta)
            consolepty = vconfig.LibvirtConfigGuestSerial()
        else:
            consolepty = vconfig.LibvirtConfigGuestConsole()
        consolepty.type = "pty"
        guest.add_device(consolepty)
        # We want a tablet if VNC is enabled, or SPICE is enabled and
        # the SPICE agent is disabled. If the SPICE agent is enabled
        # it provides a paravirt mouse which drastically reduces
        # overhead (by eliminating USB polling).
        #
        # NB: this implies that if both SPICE + VNC are enabled
        # at the same time, we'll get the tablet whether the
        # SPICE agent is used or not.
        need_usb_tablet = False
        if CONF.vnc_enabled:
            need_usb_tablet = CONF.libvirt.use_usb_tablet
        elif CONF.spice.enabled and not CONF.spice.agent_enabled:
            need_usb_tablet = CONF.libvirt.use_usb_tablet
        if need_usb_tablet and guest.os_type == vm_mode.HVM:
            tablet = vconfig.LibvirtConfigGuestInput()
            tablet.type = "tablet"
            tablet.bus = "usb"
            guest.add_device(tablet)
        if CONF.spice.enabled and CONF.spice.agent_enabled and \
                virt_type not in ('lxc', 'uml', 'xen'):
            channel = vconfig.LibvirtConfigGuestChannel()
            channel.target_name = "com.redhat.spice.0"
            guest.add_device(channel)
        # NB some versions of libvirt support both SPICE and VNC
        # at the same time. We're not trying to second guess which
        # those versions are. We'll just let libvirt report the
        # errors appropriately if the user enables both.
        add_video_driver = False
        if ((CONF.vnc_enabled and
             virt_type not in ('lxc', 'uml'))):
            graphics = vconfig.LibvirtConfigGuestGraphics()
            graphics.type = "vnc"
            graphics.keymap = CONF.vnc_keymap
            graphics.listen = CONF.vncserver_listen
            guest.add_device(graphics)
            add_video_driver = True
        if CONF.spice.enabled and \
                virt_type not in ('lxc', 'uml', 'xen'):
            graphics = vconfig.LibvirtConfigGuestGraphics()
            graphics.type = "spice"
            graphics.keymap = CONF.spice.keymap
            graphics.listen = CONF.spice.server_listen
            guest.add_device(graphics)
            add_video_driver = True
        if add_video_driver:
            self._add_video_driver(guest, image_meta, img_meta_prop, flavor)
        # Qemu guest agent only support 'qemu' and 'kvm' hypervisor
        if virt_type in ('qemu', 'kvm'):
            self._set_qemu_guest_agent(guest, flavor, instance, img_meta_prop)
        # PCI passthrough is only supported on xen/qemu/kvm.
        if virt_type in ('xen', 'qemu', 'kvm'):
            for pci_dev in pci_manager.get_instance_pci_devs(instance):
                guest.add_device(self._get_guest_pci_device(pci_dev))
        else:
            if len(pci_manager.get_instance_pci_devs(instance)) > 0:
                raise exception.PciDeviceUnsupportedHypervisor(
                    type=virt_type)
        if 'hw_watchdog_action' in flavor.extra_specs:
            LOG.warn(_LW('Old property name "hw_watchdog_action" is now '
                         'deprecated and will be removed in the next release. '
                         'Use updated property name '
                         '"hw:watchdog_action" instead'))
        # TODO(pkholkin): accepting old property name 'hw_watchdog_action'
        #                should be removed in the next release
        watchdog_action = (flavor.extra_specs.get('hw_watchdog_action') or
                           flavor.extra_specs.get('hw:watchdog_action')
                           or 'disabled')
        # Image property takes precedence over the flavor extra spec.
        if (image_meta is not None and
                image_meta.get('properties', {}).get('hw_watchdog_action')):
            watchdog_action = image_meta['properties']['hw_watchdog_action']
        # NB(sross): currently only actually supported by KVM/QEmu
        if watchdog_action != 'disabled':
            if watchdog_actions.is_valid_watchdog_action(watchdog_action):
                bark = vconfig.LibvirtConfigGuestWatchdog()
                bark.action = watchdog_action
                guest.add_device(bark)
            else:
                raise exception.InvalidWatchdogAction(action=watchdog_action)
        # Memory balloon device only support 'qemu/kvm' and 'xen' hypervisor
        if (virt_type in ('xen', 'qemu', 'kvm') and
                CONF.libvirt.mem_stats_period_seconds > 0):
            balloon = vconfig.LibvirtConfigMemoryBalloon()
            if virt_type in ('qemu', 'kvm'):
                balloon.model = 'virtio'
            else:
                balloon.model = 'xen'
            balloon.period = CONF.libvirt.mem_stats_period_seconds
            guest.add_device(balloon)
        return guest
def _get_guest_xml(self, context, instance, network_info, disk_info,
image_meta=None, rescue=None,
block_device_info=None, write_to_disk=False):
if image_meta is None:
image_ref = instance['image_ref']
image_meta = compute_utils.get_image_metadata(
context, self._image_api, image_ref, instance)
# NOTE(danms): Stringifying a NetworkInfo will take a lock. Do
# this ahead of time so that we don't acquire it while also
# holding the logging lock.
network_info_str = str(network_info)
msg = ('Start _get_guest_xml '
'network_info=%(network_info)s '
'disk_info=%(disk_info)s '
'image_meta=%(image_meta)s rescue=%(rescue)s '
'block_device_info=%(block_device_info)s' %
{'network_info': network_info_str, 'disk_info': disk_info,
'image_meta': image_meta, 'rescue': rescue,
'block_device_info': block_device_info})
# NOTE(mriedem): block_device_info can contain auth_password so we
# need to sanitize the password in the message.
LOG.debug(strutils.mask_password(msg), instance=instance)
conf = self._get_guest_config(instance, network_info, image_meta,
disk_info, rescue, block_device_info,
context)
xml = conf.to_xml()
if write_to_disk:
instance_dir = libvirt_utils.get_instance_path(instance)
xml_path = os.path.join(instance_dir, 'libvirt.xml')
libvirt_utils.write_to_file(xml_path, xml)
LOG.debug('End _get_guest_xml xml=%(xml)s',
{'xml': xml}, instance=instance)
return xml
def _lookup_by_id(self, instance_id):
"""Retrieve libvirt domain object given an instance id.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
"""
try:
return self._conn.lookupByID(instance_id)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_id)
msg = (_("Error from libvirt while looking up %(instance_id)s: "
"[Error Code %(error_code)s] %(ex)s")
% {'instance_id': instance_id,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
def _lookup_by_name(self, instance_name):
"""Retrieve libvirt domain object given an instance name.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
"""
try:
return self._conn.lookupByName(instance_name)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_name)
msg = (_('Error from libvirt while looking up %(instance_name)s: '
'[Error Code %(error_code)s] %(ex)s') %
{'instance_name': instance_name,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
def get_info(self, instance):
"""Retrieve information from libvirt for a specific instance name.
If a libvirt error is encountered during lookup, we might raise a
NotFound exception or Error exception depending on how severe the
libvirt error is.
"""
virt_dom = self._lookup_by_name(instance['name'])
try:
dom_info = virt_dom.info()
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance['name'])
msg = (_('Error from libvirt while getting domain info for '
'%(instance_name)s: [Error Code %(error_code)s] %(ex)s') %
{'instance_name': instance['name'],
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
return hardware.InstanceInfo(state=LIBVIRT_POWER_STATE[dom_info[0]],
max_mem_kb=dom_info[1],
mem_kb=dom_info[2],
num_cpu=dom_info[3],
cpu_time_ns=dom_info[4],
id=virt_dom.ID())
    def _create_domain_setup_lxc(self, instance, block_device_info, disk_info):
        """Prepare and mount the rootfs for an LXC container.

        Chooses the backing disk (the root volume for boot-from-volume,
        the local image otherwise), mounts it under <inst_path>/rootfs,
        and records the rootfs device for later teardown. On any failure
        after the mount, the container setup is rolled back before the
        exception is re-raised.
        """
        inst_path = libvirt_utils.get_instance_path(instance)
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)
        disk_info = disk_info or {}
        disk_mapping = disk_info.get('mapping', [])
        if self._is_booted_from_volume(instance, disk_mapping):
            # Boot from volume: connect the root volume and use its
            # host-side device path as the container's backing disk.
            root_disk = block_device.get_root_bdm(block_device_mapping)
            disk_path = root_disk['connection_info']['data']['device_path']
            disk_info = blockinfo.get_info_from_bdm(
                CONF.libvirt.virt_type, root_disk)
            self._connect_volume(root_disk['connection_info'], disk_info)
            # Get the system metadata from the instance
            system_meta = utils.instance_sys_meta(instance)
            use_cow = system_meta['image_disk_format'] == 'qcow2'
        else:
            image = self.image_backend.image(instance, 'disk')
            disk_path = image.path
            use_cow = CONF.use_cow_images
        container_dir = os.path.join(inst_path, 'rootfs')
        fileutils.ensure_tree(container_dir)
        rootfs_dev = disk.setup_container(disk_path,
                                          container_dir=container_dir,
                                          use_cow=use_cow)
        try:
            # Save rootfs device to disconnect it when deleting the instance
            if rootfs_dev:
                instance.system_metadata['rootfs_device_name'] = rootfs_dev
            if CONF.libvirt.uid_maps or CONF.libvirt.gid_maps:
                # User-namespace containers need the rootfs ownership
                # shifted to match the configured id maps.
                id_maps = self._get_guest_idmaps()
                libvirt_utils.chown_for_id_maps(container_dir, id_maps)
        except Exception:
            with excutils.save_and_reraise_exception():
                self._create_domain_cleanup_lxc(instance)
def _create_domain_cleanup_lxc(self, instance):
inst_path = libvirt_utils.get_instance_path(instance)
container_dir = os.path.join(inst_path, 'rootfs')
try:
state = self.get_info(instance).state
except exception.InstanceNotFound:
# The domain may not be present if the instance failed to start
state = None
if state == power_state.RUNNING:
# NOTE(uni): Now the container is running with its own private
# mount namespace and so there is no need to keep the container
# rootfs mounted in the host namespace
disk.clean_lxc_namespace(container_dir=container_dir)
else:
disk.teardown_container(container_dir=container_dir)
@contextlib.contextmanager
def _lxc_disk_handler(self, instance, block_device_info, disk_info):
"""Context manager to handle the pre and post instance boot,
LXC specific disk operations.
An image or a volume path will be prepared and setup to be
used by the container, prior to starting it.
The disk will be disconnected and unmounted if a container has
failed to start.
"""
if CONF.libvirt.virt_type != 'lxc':
yield
return
self._create_domain_setup_lxc(instance, block_device_info, disk_info)
try:
yield
finally:
self._create_domain_cleanup_lxc(instance)
    def _create_domain(self, xml=None, domain=None,
                       instance=None, launch_flags=0, power_on=True):
        """Create a domain.

        Either domain or xml must be passed in. If both are passed, then
        the domain definition is overwritten from the xml.

        :returns: the (possibly newly defined) libvirt domain object.
        """
        # 'err' always holds a message describing the step currently in
        # flight, so the except block can log exactly what failed.
        err = None
        try:
            if xml:
                err = _LE('Error defining a domain with XML: %s') % xml
                domain = self._conn.defineXML(xml)
            if power_on:
                err = _LE('Error launching a defined domain with XML: %s') \
                    % encodeutils.safe_decode(domain.XMLDesc(0),
                                              errors='ignore')
                domain.createWithFlags(launch_flags)
            if not utils.is_neutron():
                # nova-network only: reflect packets on the bridge so
                # instances can reach their own floating IPs.
                err = _LE('Error enabling hairpin mode with XML: %s') \
                    % encodeutils.safe_decode(domain.XMLDesc(0),
                                              errors='ignore')
                self._enable_hairpin(domain.XMLDesc(0))
        except Exception:
            with excutils.save_and_reraise_exception():
                if err:
                    LOG.error(err)
        return domain
def _neutron_failed_callback(self, event_name, instance):
LOG.error(_LE('Neutron Reported failure on event '
'%(event)s for instance %(uuid)s'),
{'event': event_name, 'uuid': instance.uuid})
if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException()
def _get_neutron_events(self, network_info):
# NOTE(danms): We need to collect any VIFs that are currently
# down that we expect a down->up event for. Anything that is
# already up will not undergo that transition, and for
# anything that might be stale (cache-wise) assume it's
# already up so we don't block on it.
return [('network-vif-plugged', vif['id'])
for vif in network_info if vif.get('active', True) is False]
    def _create_domain_and_network(self, context, xml, instance, network_info,
                                   block_device_info=None, power_on=True,
                                   reboot=False, vifs_already_plugged=False,
                                   disk_info=None):
        """Do required network setup and create domain.

        Attaches volume encryptors, plugs VIFs (optionally waiting for
        Neutron vif-plugged events, starting the domain paused until they
        arrive), applies firewall filtering and creates the domain. On
        VIF-plugging failure or timeout the partially created domain is
        destroyed and cleaned up.

        :returns: the created libvirt domain object.
        """
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)
        for vol in block_device_mapping:
            connection_info = vol['connection_info']
            # On first boot (not reboot) attach encryptors for any
            # encrypted volumes before the domain starts using them.
            if (not reboot and 'data' in connection_info and
                    'volume_id' in connection_info['data']):
                volume_id = connection_info['data']['volume_id']
                encryption = encryptors.get_encryption_metadata(
                    context, self._volume_api, volume_id, connection_info)
                if encryption:
                    encryptor = self._get_volume_encryptor(connection_info,
                                                           encryption)
                    encryptor.attach_volume(context, **encryption)
        timeout = CONF.vif_plugging_timeout
        # Only wait for Neutron events when the hypervisor can start
        # paused, Neutron is in use, VIFs are not yet plugged, and a
        # timeout is configured.
        if (self._conn_supports_start_paused and
                utils.is_neutron() and not
                vifs_already_plugged and power_on and timeout):
            events = self._get_neutron_events(network_info)
        else:
            events = []
        # Start paused only when we are going to wait for plug events.
        launch_flags = events and libvirt.VIR_DOMAIN_START_PAUSED or 0
        domain = None
        try:
            with self.virtapi.wait_for_instance_event(
                    instance, events, deadline=timeout,
                    error_callback=self._neutron_failed_callback):
                self.plug_vifs(instance, network_info)
                self.firewall_driver.setup_basic_filtering(instance,
                                                           network_info)
                self.firewall_driver.prepare_instance_filter(instance,
                                                             network_info)
                with self._lxc_disk_handler(instance, block_device_info,
                                            disk_info):
                    domain = self._create_domain(
                        xml, instance=instance,
                        launch_flags=launch_flags,
                        power_on=power_on)
                self.firewall_driver.apply_instance_filter(instance,
                                                           network_info)
        except exception.VirtualInterfaceCreateException:
            # Neutron reported failure and we didn't swallow it, so
            # bail here
            with excutils.save_and_reraise_exception():
                if domain:
                    domain.destroy()
                self.cleanup(context, instance, network_info=network_info,
                             block_device_info=block_device_info)
        except eventlet.timeout.Timeout:
            # We never heard from Neutron
            LOG.warn(_LW('Timeout waiting for vif plugging callback for '
                         'instance %(uuid)s'), {'uuid': instance['uuid']})
            if CONF.vif_plugging_is_fatal:
                if domain:
                    domain.destroy()
                self.cleanup(context, instance, network_info=network_info,
                             block_device_info=block_device_info)
                raise exception.VirtualInterfaceCreateException()
        # Resume only if domain has been paused
        if launch_flags & libvirt.VIR_DOMAIN_START_PAUSED:
            domain.resume()
        return domain
def _get_all_block_devices(self):
"""Return all block devices in use on this node."""
devices = []
for dom in self._list_instance_domains():
try:
doc = etree.fromstring(dom.XMLDesc(0))
except libvirt.libvirtError as e:
LOG.warn(_LW("couldn't obtain the XML from domain:"
" %(uuid)s, exception: %(ex)s") %
{"uuid": dom.UUIDString(), "ex": e})
continue
except Exception:
continue
ret = doc.findall('./devices/disk')
for node in ret:
if node.get('type') != 'block':
continue
for child in node.getchildren():
if child.tag == 'source':
devices.append(child.get('dev'))
return devices
def _get_interfaces(self, xml):
"""Note that this function takes a domain xml.
Returns a list of all network interfaces for this instance.
"""
doc = None
try:
doc = etree.fromstring(xml)
except Exception:
return []
interfaces = []
ret = doc.findall('./devices/interface')
for node in ret:
devdst = None
for child in list(node):
if child.tag == 'target':
devdst = child.attrib['dev']
if devdst is None:
continue
interfaces.append(devdst)
return interfaces
def _get_vcpu_total(self):
"""Get available vcpu number of physical computer.
:returns: the number of cpu core instances can be used.
"""
if self._vcpu_total != 0:
return self._vcpu_total
try:
total_pcpus = self._conn.getInfo()[2]
except libvirt.libvirtError:
LOG.warn(_LW("Cannot get the number of cpu, because this "
"function is not implemented for this platform. "))
return 0
if CONF.vcpu_pin_set is None:
self._vcpu_total = total_pcpus
return self._vcpu_total
available_ids = hardware.get_vcpu_pin_set()
if sorted(available_ids)[-1] >= total_pcpus:
raise exception.Invalid(_("Invalid vcpu_pin_set config, "
"out of hypervisor cpu range."))
self._vcpu_total = len(available_ids)
return self._vcpu_total
def _get_memory_mb_total(self):
"""Get the total memory size(MB) of physical computer.
:returns: the total amount of memory(MB).
"""
return self._conn.getInfo()[1]
@staticmethod
def _get_local_gb_info():
"""Get local storage info of the compute node in GB.
:returns: A dict containing:
:total: How big the overall usable filesystem is (in gigabytes)
:free: How much space is free (in gigabytes)
:used: How much space is used (in gigabytes)
"""
if CONF.libvirt.images_type == 'lvm':
info = lvm.get_volume_group_info(
CONF.libvirt.images_volume_group)
elif CONF.libvirt.images_type == 'rbd':
info = LibvirtDriver._get_rbd_driver().get_pool_info()
else:
info = libvirt_utils.get_fs_info(CONF.instances_path)
for (k, v) in info.iteritems():
info[k] = v / units.Gi
return info
def _get_vcpu_used(self):
"""Get vcpu usage number of physical computer.
:returns: The total number of vcpu(s) that are currently being used.
"""
total = 0
if CONF.libvirt.virt_type == 'lxc':
return total + 1
for dom in self._list_instance_domains():
try:
vcpus = dom.vcpus()
except libvirt.libvirtError as e:
LOG.warn(_LW("couldn't obtain the vpu count from domain id:"
" %(uuid)s, exception: %(ex)s") %
{"uuid": dom.UUIDString(), "ex": e})
else:
if vcpus is not None and len(vcpus) > 1:
total += len(vcpus[1])
# NOTE(gtt116): give other tasks a chance.
greenthread.sleep(0)
return total
def _get_memory_mb_used(self):
"""Get the used memory size(MB) of physical computer.
:returns: the total usage of memory(MB).
"""
if sys.platform.upper() not in ['LINUX2', 'LINUX3']:
return 0
with open('/proc/meminfo') as fp:
m = fp.read().split()
idx1 = m.index('MemFree:')
idx2 = m.index('Buffers:')
idx3 = m.index('Cached:')
if CONF.libvirt.virt_type == 'xen':
used = 0
for dom in self._list_instance_domains(only_guests=False):
try:
dom_mem = int(dom.info()[2])
except libvirt.libvirtError as e:
LOG.warn(_LW("couldn't obtain the memory from domain:"
" %(uuid)s, exception: %(ex)s") %
{"uuid": dom.UUIDString(), "ex": e})
continue
# skip dom0
if dom.ID() != 0:
used += dom_mem
else:
# the mem reported by dom0 is be greater of what
# it is being used
used += (dom_mem -
(int(m[idx1 + 1]) +
int(m[idx2 + 1]) +
int(m[idx3 + 1])))
# Convert it to MB
return used / units.Ki
else:
avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1]))
# Convert it to MB
return self._get_memory_mb_total() - avail / units.Ki
def _get_hypervisor_type(self):
"""Get hypervisor type.
:returns: hypervisor type (ex. qemu)
"""
return self._conn.getType()
def _get_hypervisor_version(self):
"""Get hypervisor version.
:returns: hypervisor version (ex. 12003)
"""
# NOTE(justinsb): getVersion moved between libvirt versions
# Trying to do be compatible with older versions is a lost cause
# But ... we can at least give the user a nice message
method = getattr(self._conn, 'getVersion', None)
if method is None:
raise exception.NovaException(_("libvirt version is too old"
" (does not support getVersion)"))
# NOTE(justinsb): If we wanted to get the version, we could:
# method = getattr(libvirt, 'getVersion', None)
# NOTE(justinsb): This would then rely on a proper version check
return method()
def _get_hypervisor_hostname(self):
"""Returns the hostname of the hypervisor."""
hostname = self._conn.getHostname()
if not hasattr(self, '_hypervisor_hostname'):
self._hypervisor_hostname = hostname
elif hostname != self._hypervisor_hostname:
LOG.error(_LE('Hostname has changed from %(old)s '
'to %(new)s. A restart is required to take effect.'),
{'old': self._hypervisor_hostname,
'new': hostname})
return self._hypervisor_hostname
def _get_instance_capabilities(self):
"""Get hypervisor instance capabilities
Returns a list of tuples that describe instances the
hypervisor is capable of hosting. Each tuple consists
of the triplet (arch, hypervisor_type, vm_mode).
:returns: List of tuples describing instance capabilities
"""
caps = self._get_host_capabilities()
instance_caps = list()
for g in caps.guests:
for dt in g.domtype:
instance_cap = (
arch.canonicalize(g.arch),
hvtype.canonicalize(dt),
vm_mode.canonicalize(g.ostype))
instance_caps.append(instance_cap)
return instance_caps
def _get_cpu_info(self):
"""Get cpuinfo information.
Obtains cpu feature from virConnect.getCapabilities,
and returns as a json string.
:return: see above description
"""
caps = self._get_host_capabilities()
cpu_info = dict()
cpu_info['arch'] = caps.host.cpu.arch
cpu_info['model'] = caps.host.cpu.model
cpu_info['vendor'] = caps.host.cpu.vendor
topology = dict()
topology['sockets'] = caps.host.cpu.sockets
topology['cores'] = caps.host.cpu.cores
topology['threads'] = caps.host.cpu.threads
cpu_info['topology'] = topology
features = list()
for f in caps.host.cpu.features:
features.append(f.name)
cpu_info['features'] = features
# TODO(berrange): why do we bother converting the
# libvirt capabilities XML into a special JSON format ?
# The data format is different across all the drivers
# so we could just return the raw capabilities XML
# which 'compare_cpu' could use directly
#
# That said, arch_filter.py now seems to rely on
# the libvirt drivers format which suggests this
# data format needs to be standardized across drivers
return jsonutils.dumps(cpu_info)
def _get_pcidev_info(self, devname):
"""Returns a dict of PCI device."""
def _get_device_type(cfgdev):
"""Get a PCI device's device type.
An assignable PCI device can be a normal PCI device,
a SR-IOV Physical Function (PF), or a SR-IOV Virtual
Function (VF). Only normal PCI devices or SR-IOV VFs
are assignable, while SR-IOV PFs are always owned by
hypervisor.
Please notice that a PCI device with SR-IOV
capability but not enabled is reported as normal PCI device.
"""
for fun_cap in cfgdev.pci_capability.fun_capability:
if len(fun_cap.device_addrs) != 0:
if fun_cap.type == 'virt_functions':
return {'dev_type': 'type-PF'}
if fun_cap.type == 'phys_function':
phys_address = "%04x:%02x:%02x.%01x" % (
fun_cap.device_addrs[0][0],
fun_cap.device_addrs[0][1],
fun_cap.device_addrs[0][2],
fun_cap.device_addrs[0][3])
return {'dev_type': 'type-VF',
'phys_function': phys_address}
return {'dev_type': 'type-PCI'}
virtdev = self._conn.nodeDeviceLookupByName(devname)
xmlstr = virtdev.XMLDesc(0)
cfgdev = vconfig.LibvirtConfigNodeDevice()
cfgdev.parse_str(xmlstr)
address = "%04x:%02x:%02x.%1x" % (
cfgdev.pci_capability.domain,
cfgdev.pci_capability.bus,
cfgdev.pci_capability.slot,
cfgdev.pci_capability.function)
device = {
"dev_id": cfgdev.name,
"address": address,
"product_id": "%04x" % cfgdev.pci_capability.product_id,
"vendor_id": "%04x" % cfgdev.pci_capability.vendor_id,
}
# requirement by DataBase Model
device['label'] = 'label_%(vendor_id)s_%(product_id)s' % device
device.update(_get_device_type(cfgdev))
return device
def _pci_device_assignable(self, device):
if device['dev_type'] == 'type-PF':
return False
return self.dev_filter.device_assignable(device)
def _get_pci_passthrough_devices(self):
"""Get host PCI devices information.
Obtains pci devices information from libvirt, and returns
as a JSON string.
Each device information is a dictionary, with mandatory keys
of 'address', 'vendor_id', 'product_id', 'dev_type', 'dev_id',
'label' and other optional device specific information.
Refer to the objects/pci_device.py for more idea of these keys.
:returns: a JSON string containaing a list of the assignable PCI
devices information
"""
# Bail early if we know we can't support `listDevices` to avoid
# repeated warnings within a periodic task
if not getattr(self, '_list_devices_supported', True):
return jsonutils.dumps([])
try:
dev_names = self._conn.listDevices('pci', 0) or []
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
self._list_devices_supported = False
LOG.warn(_LW("URI %(uri)s does not support "
"listDevices: " "%(error)s"),
{'uri': self.uri(), 'error': ex})
return jsonutils.dumps([])
else:
raise
pci_info = []
for name in dev_names:
pci_dev = self._get_pcidev_info(name)
if self._pci_device_assignable(pci_dev):
pci_info.append(pci_dev)
return jsonutils.dumps(pci_info)
def _get_host_numa_topology(self):
if not self._has_min_version(MIN_LIBVIRT_NUMA_TOPOLOGY_VERSION):
return
caps = self._get_host_capabilities()
topology = caps.host.topology
if topology is None or not topology.cells:
return
topology = hardware.VirtNUMAHostTopology(
cells=[hardware.VirtNUMATopologyCellUsage(
cell.id, set(cpu.id for cpu in cell.cpus),
cell.memory / units.Ki)
for cell in topology.cells])
allowed_cpus = hardware.get_vcpu_pin_set()
if allowed_cpus:
for cell in topology.cells:
cell.cpuset &= allowed_cpus
return topology
def get_all_volume_usage(self, context, compute_host_bdms):
"""Return usage info for volumes attached to vms on
a given host.
"""
vol_usage = []
for instance_bdms in compute_host_bdms:
instance = instance_bdms['instance']
for bdm in instance_bdms['instance_bdms']:
vol_stats = []
mountpoint = bdm['device_name']
if mountpoint.startswith('/dev/'):
mountpoint = mountpoint[5:]
volume_id = bdm['volume_id']
LOG.debug("Trying to get stats for the volume %s",
volume_id)
vol_stats = self.block_stats(instance['name'], mountpoint)
if vol_stats:
stats = dict(volume=volume_id,
instance=instance,
rd_req=vol_stats[0],
rd_bytes=vol_stats[1],
wr_req=vol_stats[2],
wr_bytes=vol_stats[3],
flush_operations=vol_stats[4])
LOG.debug(
"Got volume usage stats for the volume=%(volume)s,"
" rd_req=%(rd_req)d, rd_bytes=%(rd_bytes)d, "
"wr_req=%(wr_req)d, wr_bytes=%(wr_bytes)d",
stats, instance=instance)
vol_usage.append(stats)
return vol_usage
def block_stats(self, instance_name, disk_id):
"""Note that this function takes an instance name."""
try:
domain = self._lookup_by_name(instance_name)
return domain.blockStats(disk_id)
except libvirt.libvirtError as e:
errcode = e.get_error_code()
LOG.info(_LI('Getting block stats failed, device might have '
'been detached. Instance=%(instance_name)s '
'Disk=%(disk)s Code=%(errcode)s Error=%(e)s'),
{'instance_name': instance_name, 'disk': disk_id,
'errcode': errcode, 'e': e})
except exception.InstanceNotFound:
LOG.info(_LI('Could not find domain in libvirt for instance %s. '
'Cannot get block stats for device'), instance_name)
def interface_stats(self, instance_name, iface_id):
"""Note that this function takes an instance name."""
domain = self._lookup_by_name(instance_name)
return domain.interfaceStats(iface_id)
def get_console_pool_info(self, console_type):
# TODO(mdragon): console proxy should be implemented for libvirt,
# in case someone wants to use it with kvm or
# such. For now return fake data.
return {'address': '127.0.0.1',
'username': 'fakeuser',
'password': 'fakepassword'}
    def refresh_security_group_rules(self, security_group_id):
        """Refresh a security group's rules via the firewall driver."""
        self.firewall_driver.refresh_security_group_rules(security_group_id)
    def refresh_security_group_members(self, security_group_id):
        """Refresh a security group's members via the firewall driver."""
        self.firewall_driver.refresh_security_group_members(security_group_id)
    def refresh_instance_security_rules(self, instance):
        """Refresh an instance's security rules via the firewall driver."""
        self.firewall_driver.refresh_instance_security_rules(instance)
    def refresh_provider_fw_rules(self):
        """Refresh provider-level firewall rules via the firewall driver."""
        self.firewall_driver.refresh_provider_fw_rules()
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task that records the results in the DB.
:param nodename: will be put in PCI device
:returns: dictionary containing resource info
"""
disk_info_dict = self._get_local_gb_info()
data = {}
# NOTE(dprince): calling capabilities before getVersion works around
# an initialization issue with some versions of Libvirt (1.0.5.5).
# See: https://bugzilla.redhat.com/show_bug.cgi?id=1000116
# See: https://bugs.launchpad.net/nova/+bug/1215593
# Temporary convert supported_instances into a string, while keeping
# the RPC version as JSON. Can be changed when RPC broadcast is removed
data["supported_instances"] = jsonutils.dumps(
self._get_instance_capabilities())
data["vcpus"] = self._get_vcpu_total()
data["memory_mb"] = self._get_memory_mb_total()
data["local_gb"] = disk_info_dict['total']
data["vcpus_used"] = self._get_vcpu_used()
data["memory_mb_used"] = self._get_memory_mb_used()
data["local_gb_used"] = disk_info_dict['used']
data["hypervisor_type"] = self._get_hypervisor_type()
data["hypervisor_version"] = self._get_hypervisor_version()
data["hypervisor_hostname"] = self._get_hypervisor_hostname()
data["cpu_info"] = self._get_cpu_info()
disk_free_gb = disk_info_dict['free']
disk_over_committed = self._get_disk_over_committed_size_total()
available_least = disk_free_gb * units.Gi - disk_over_committed
data['disk_available_least'] = available_least / units.Gi
data['pci_passthrough_devices'] = \
self._get_pci_passthrough_devices()
numa_topology = self._get_host_numa_topology()
if numa_topology:
data['numa_topology'] = numa_topology.to_json()
else:
data['numa_topology'] = None
return data
def check_instance_shared_storage_local(self, context, instance):
"""Check if instance files located on shared storage.
This runs check on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: nova.objects.instance.Instance object
:returns
:tempfile: A dict containing the tempfile info on the destination
host
:None: 1. If the instance path is not existing.
2. If the image backend is shared block storage type.
"""
if self.image_backend.backend().is_shared_block_storage():
return None
dirpath = libvirt_utils.get_instance_path(instance)
if not os.path.exists(dirpath):
return None
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug("Creating tmpfile %s to verify with other "
"compute node that the instance is on "
"the same shared storage.",
tmp_file, instance=instance)
os.close(fd)
return {"filename": tmp_file}
def check_instance_shared_storage_remote(self, context, data):
return os.path.exists(data['filename'])
    def check_instance_shared_storage_cleanup(self, context, data):
        """Remove the shared-storage check file created on this host.

        :param context: security context (unused)
        :param data: dict with 'filename' from
            check_instance_shared_storage_local
        """
        fileutils.delete_if_exists(data["filename"])
def check_can_live_migrate_destination(self, context, instance,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
:returns: a dict containing:
:filename: name of the tmpfile under CONF.instances_path
:block_migration: whether this is block migration
:disk_over_commit: disk-over-commit factor on dest host
:disk_available_mb: available disk space on dest host
"""
disk_available_mb = None
if block_migration:
disk_available_gb = dst_compute_info['disk_available_least']
disk_available_mb = \
(disk_available_gb * units.Ki) - CONF.reserved_host_disk_mb
# Compare CPU
source_cpu_info = src_compute_info['cpu_info']
self._compare_cpu(source_cpu_info)
# Create file on storage, to be checked on source host
filename = self._create_shared_storage_test_file()
return {"filename": filename,
"image_type": CONF.libvirt.images_type,
"block_migration": block_migration,
"disk_over_commit": disk_over_commit,
"disk_available_mb": disk_available_mb}
def check_can_live_migrate_destination_cleanup(self, context,
dest_check_data):
"""Do required cleanup on dest host after check_can_live_migrate calls
:param context: security context
"""
filename = dest_check_data["filename"]
self._cleanup_shared_storage_test_file(filename)
def check_can_live_migrate_source(self, context, instance,
dest_check_data,
block_device_info=None):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
:param block_device_info: result of _get_instance_block_device_info
:returns: a dict containing migration info
"""
# Checking shared storage connectivity
# if block migration, instances_paths should not be on shared storage.
source = CONF.host
dest_check_data.update({'is_shared_instance_path':
self._check_shared_storage_test_file(
dest_check_data['filename'])})
dest_check_data.update({'is_shared_block_storage':
self._is_shared_block_storage(instance, dest_check_data)})
if dest_check_data['block_migration']:
if (dest_check_data['is_shared_block_storage'] or
dest_check_data['is_shared_instance_path']):
reason = _("Block migration can not be used "
"with shared storage.")
raise exception.InvalidLocalStorage(reason=reason, path=source)
self._assert_dest_node_has_enough_disk(context, instance,
dest_check_data['disk_available_mb'],
dest_check_data['disk_over_commit'],
block_device_info)
elif not (dest_check_data['is_shared_block_storage'] or
dest_check_data['is_shared_instance_path']):
reason = _("Live migration can not be used "
"without shared storage.")
raise exception.InvalidSharedStorage(reason=reason, path=source)
# NOTE(mikal): include the instance directory name here because it
# doesn't yet exist on the destination but we want to force that
# same name to be used
instance_path = libvirt_utils.get_instance_path(instance,
relative=True)
dest_check_data['instance_relative_path'] = instance_path
return dest_check_data
def _is_shared_block_storage(self, instance, dest_check_data):
"""Check if all block storage of an instance can be shared
between source and destination of a live migration.
Returns true if the instance is volume backed and has no local disks,
or if the image backend is the same on source and destination and the
backend shares block storage between compute nodes.
:param instance: nova.objects.instance.Instance object
:param dest_check_data: dict with boolean fields image_type,
is_shared_instance_path, and is_volume_backed
"""
if (CONF.libvirt.images_type == dest_check_data.get('image_type') and
self.image_backend.backend().is_shared_block_storage()):
return True
if (dest_check_data.get('is_shared_instance_path') and
self.image_backend.backend().is_file_in_instance_path()):
# NOTE(angdraug): file based image backends (Raw, Qcow2)
# place block device files under the instance path
return True
if (dest_check_data.get('is_volume_backed') and
not bool(jsonutils.loads(
self.get_instance_disk_info(instance['name'])))):
# pylint: disable E1120
return True
return False
def _assert_dest_node_has_enough_disk(self, context, instance,
available_mb, disk_over_commit,
block_device_info=None):
"""Checks if destination has enough disk for block migration."""
# Libvirt supports qcow2 disk format,which is usually compressed
# on compute nodes.
# Real disk image (compressed) may enlarged to "virtual disk size",
# that is specified as the maximum disk size.
# (See qemu-img -f path-to-disk)
# Scheduler recognizes destination host still has enough disk space
# if real disk size < available disk size
# if disk_over_commit is True,
# otherwise virtual disk size < available disk size.
available = 0
if available_mb:
available = available_mb * units.Mi
ret = self.get_instance_disk_info(instance['name'],
block_device_info=block_device_info)
disk_infos = jsonutils.loads(ret)
necessary = 0
if disk_over_commit:
for info in disk_infos:
necessary += int(info['disk_size'])
else:
for info in disk_infos:
necessary += int(info['virt_disk_size'])
# Check that available disk > necessary disk
if (available - necessary) < 0:
reason = (_('Unable to migrate %(instance_uuid)s: '
'Disk of instance is too large(available'
' on destination host:%(available)s '
'< need:%(necessary)s)') %
{'instance_uuid': instance['uuid'],
'available': available,
'necessary': necessary})
raise exception.MigrationPreCheckError(reason=reason)
def _compare_cpu(self, cpu_info):
"""Checks the host cpu is compatible to a cpu given by xml.
"xml" must be a part of libvirt.openAuth(...).getCapabilities().
return values follows by virCPUCompareResult.
if 0 > return value, do live migration.
'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'
:param cpu_info: json string of cpu feature from _get_cpu_info()
:returns:
None. if given cpu info is not compatible to this server,
raise exception.
"""
# NOTE(berendt): virConnectCompareCPU not working for Xen
if CONF.libvirt.virt_type == 'xen':
return 1
info = jsonutils.loads(cpu_info)
LOG.info(_LI('Instance launched has CPU info: %s'), cpu_info)
cpu = vconfig.LibvirtConfigCPU()
cpu.arch = info['arch']
cpu.model = info['model']
cpu.vendor = info['vendor']
cpu.sockets = info['topology']['sockets']
cpu.cores = info['topology']['cores']
cpu.threads = info['topology']['threads']
for f in info['features']:
cpu.add_feature(vconfig.LibvirtConfigCPUFeature(f))
u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
# unknown character exists in xml, then libvirt complains
try:
ret = self._conn.compareCPU(cpu.to_xml(), 0)
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
LOG.error(m, {'ret': e, 'u': u})
if ret <= 0:
LOG.error(m, {'ret': ret, 'u': u})
raise exception.InvalidCPUInfo(reason=m % {'ret': ret, 'u': u})
def _create_shared_storage_test_file(self):
"""Makes tmpfile under CONF.instances_path."""
dirpath = CONF.instances_path
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug("Creating tmpfile %s to notify to other "
"compute nodes that they should mount "
"the same storage.", tmp_file)
os.close(fd)
return os.path.basename(tmp_file)
def _check_shared_storage_test_file(self, filename):
"""Confirms existence of the tmpfile under CONF.instances_path.
Cannot confirm tmpfile return False.
"""
tmp_file = os.path.join(CONF.instances_path, filename)
if not os.path.exists(tmp_file):
return False
else:
return True
def _cleanup_shared_storage_test_file(self, filename):
"""Removes existence of the tmpfile under CONF.instances_path."""
tmp_file = os.path.join(CONF.instances_path, filename)
os.remove(tmp_file)
def ensure_filtering_rules_for_instance(self, instance, network_info):
"""Ensure that an instance's filtering rules are enabled.
When migrating an instance, we need the filtering rules to
be configured on the destination host before starting the
migration.
Also, when restarting the compute service, we need to ensure
that filtering rules exist for all running services.
"""
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance,
network_info)
# nwfilters may be defined in a separate thread in the case
# of libvirt non-blocking mode, so we wait for completion
timeout_count = range(CONF.live_migration_retry_count)
while timeout_count:
if self.firewall_driver.instance_filter_exists(instance,
network_info):
break
timeout_count.pop()
if len(timeout_count) == 0:
msg = _('The firewall filter for %s does not exist')
raise exception.NovaException(msg % instance.name)
greenthread.sleep(1)
    def filter_defer_apply_on(self):
        """Start deferring firewall rule application (see firewall driver)."""
        self.firewall_driver.filter_defer_apply_on()
    def filter_defer_apply_off(self):
        """Stop deferring and apply pending firewall rules."""
        self.firewall_driver.filter_defer_apply_off()
def live_migration(self, context, instance, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Spawning live_migration operation for distributing high-load.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param post_method:
post operation method.
expected nova.compute.manager._post_live_migration.
:param recover_method:
recovery method when any exception occurs.
expected nova.compute.manager._rollback_live_migration.
:param block_migration: if true, do block migration.
:param migrate_data: implementation specific params
"""
# 'dest' will be substituted into 'migration_uri' so ensure
# it does't contain any characters that could be used to
# exploit the URI accepted by libivrt
if not libvirt_utils.is_valid_hostname(dest):
raise exception.InvalidHostname(hostname=dest)
greenthread.spawn(self._live_migration, context, instance, dest,
post_method, recover_method, block_migration,
migrate_data)
def _correct_listen_addr(self, old_xml_str, listen_addrs):
# NB(sross): can't just use LibvirtConfigGuest#parse_str
# here b/c it doesn't capture the entire XML
# description
xml_doc = etree.fromstring(old_xml_str)
# change over listen addresses
for dev in xml_doc.findall('./devices/graphics'):
gr_type = dev.get('type')
listen_tag = dev.find('listen')
if gr_type in ('vnc', 'spice'):
if listen_tag is not None:
listen_tag.set('address', listen_addrs[gr_type])
if dev.get('listen') is not None:
dev.set('listen', listen_addrs[gr_type])
return etree.tostring(xml_doc)
def _check_graphics_addresses_can_live_migrate(self, listen_addrs):
LOCAL_ADDRS = ('0.0.0.0', '127.0.0.1', '::', '::1')
local_vnc = CONF.vncserver_listen in LOCAL_ADDRS
local_spice = CONF.spice.server_listen in LOCAL_ADDRS
if ((CONF.vnc_enabled and not local_vnc) or
(CONF.spice.enabled and not local_spice)):
raise exception.MigrationError(
_('Your libvirt version does not support the'
' VIR_DOMAIN_XML_MIGRATABLE flag or your'
' destination node does not support'
' retrieving listen addresses. In order'
' for live migration to work properly, you'
' must configure the graphics (VNC and/or'
' SPICE) listen addresses to be either'
' the catch-all address (0.0.0.0 or ::) or'
' the local address (127.0.0.1 or ::1).'))
if listen_addrs is not None:
dest_local_vnc = listen_addrs['vnc'] in LOCAL_ADDRS
dest_local_spice = listen_addrs['spice'] in LOCAL_ADDRS
if ((CONF.vnc_enabled and not dest_local_vnc) or
(CONF.spice.enabled and not dest_local_spice)):
LOG.warn(_('Your libvirt version does not support the'
' VIR_DOMAIN_XML_MIGRATABLE flag, and the '
' graphics (VNC and/or SPICE) listen'
' addresses on the destination node do not'
' match the addresses on the source node.'
' Since the source node has listen'
' addresses set to either the catch-all'
' address (0.0.0.0 or ::) or the local'
' address (127.0.0.1 or ::1), the live'
' migration will succeed, but the VM will'
' continue to listen on the current'
' addresses.'))
    def _live_migration(self, context, instance, dest, post_method,
                        recover_method, block_migration=False,
                        migrate_data=None):
        """Do live migration.

        Runs in a green thread spawned by live_migration().  Starts the
        libvirt migration, then polls until the domain disappears from
        this host, at which point post_method is invoked.

        :param context: security context
        :param instance:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :param dest: destination host
        :param post_method:
            post operation method.
            expected nova.compute.manager._post_live_migration.
        :param recover_method:
            recovery method when any exception occurs.
            expected nova.compute.manager._rollback_live_migration.
        :param block_migration: if true, do block migration.
        :param migrate_data: implementation specific params
        """
        # Do live migration.
        try:
            if block_migration:
                flaglist = CONF.libvirt.block_migration_flag.split(',')
            else:
                flaglist = CONF.libvirt.live_migration_flag.split(',')
            # OR the named libvirt flags together into a single bitmask.
            flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
            logical_sum = reduce(lambda x, y: x | y, flagvals)

            dom = self._lookup_by_name(instance["name"])

            # Graphics listen addresses reported by pre_live_migration on
            # the destination (may be absent with older compute nodes).
            pre_live_migrate_data = (migrate_data or {}).get(
                'pre_live_migration_result', {})
            listen_addrs = pre_live_migrate_data.get('graphics_listen_addrs')
            migratable_flag = getattr(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE',
                                      None)

            if migratable_flag is None or listen_addrs is None:
                # Legacy path: the domain XML cannot be rewritten, so
                # first verify the console listen addresses are safe.
                self._check_graphics_addresses_can_live_migrate(listen_addrs)
                dom.migrateToURI(CONF.libvirt.live_migration_uri % dest,
                                 logical_sum,
                                 None,
                                 CONF.libvirt.live_migration_bandwidth)
            else:
                # Rewrite the graphics listen addresses in the migratable
                # XML so consoles keep working on the destination host.
                old_xml_str = dom.XMLDesc(migratable_flag)
                new_xml_str = self._correct_listen_addr(old_xml_str,
                                                        listen_addrs)
                try:
                    dom.migrateToURI2(CONF.libvirt.live_migration_uri % dest,
                                      None,
                                      new_xml_str,
                                      logical_sum,
                                      None,
                                      CONF.libvirt.live_migration_bandwidth)
                except libvirt.libvirtError as ex:
                    # NOTE(mriedem): There is a bug in older versions of
                    # libvirt where the VIR_DOMAIN_XML_MIGRATABLE flag causes
                    # virDomainDefCheckABIStability to not compare the source
                    # and target domain xml's correctly for the CPU model.
                    # We try to handle that error here and attempt the legacy
                    # migrateToURI path, which could fail if the console
                    # addresses are not correct, but in that case we have the
                    # _check_graphics_addresses_can_live_migrate check in place
                    # to catch it.
                    # TODO(mriedem): Remove this workaround when
                    # Red Hat BZ #1141838 is closed.
                    error_code = ex.get_error_code()
                    if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED:
                        LOG.warn(_LW('An error occurred trying to live '
                                     'migrate. Falling back to legacy live '
                                     'migrate flow. Error: %s'), ex,
                                 instance=instance)
                        self._check_graphics_addresses_can_live_migrate(
                            listen_addrs)
                        dom.migrateToURI(
                            CONF.libvirt.live_migration_uri % dest,
                            logical_sum,
                            None,
                            CONF.libvirt.live_migration_bandwidth)
                    else:
                        raise
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Live Migration failure: %s"), e,
                          instance=instance)
                # Let the compute manager roll back before re-raising.
                recover_method(context, instance, dest, block_migration)

        # Waiting for completion of live_migration.
        timer = loopingcall.FixedIntervalLoopingCall(f=None)

        def wait_for_live_migration():
            """waiting for live migration completion."""
            try:
                # get_info raises InstanceNotFound once the domain has
                # disappeared from this (source) host.
                self.get_info(instance).state
            except exception.InstanceNotFound:
                timer.stop()
                post_method(context, instance, dest, block_migration,
                            migrate_data)

        timer.f = wait_for_live_migration
        timer.start(interval=0.5).wait()
def _fetch_instance_kernel_ramdisk(self, context, instance):
"""Download kernel and ramdisk for instance in instance directory."""
instance_dir = libvirt_utils.get_instance_path(instance)
if instance['kernel_id']:
libvirt_utils.fetch_image(context,
os.path.join(instance_dir, 'kernel'),
instance['kernel_id'],
instance['user_id'],
instance['project_id'])
if instance['ramdisk_id']:
libvirt_utils.fetch_image(context,
os.path.join(instance_dir,
'ramdisk'),
instance['ramdisk_id'],
instance['user_id'],
instance['project_id'])
    def rollback_live_migration_at_destination(self, context, instance,
                                               network_info,
                                               block_device_info,
                                               destroy_disks=True,
                                               migrate_data=None):
        """Clean up destination node after a failed live migration.

        Destroys the half-migrated instance (and, when destroy_disks is
        True, its disks) on this host via the regular destroy path.
        """
        self.destroy(context, instance, network_info, block_device_info,
                     destroy_disks, migrate_data)
def pre_live_migration(self, context, instance, block_device_info,
network_info, disk_info, migrate_data=None):
"""Preparation live migration."""
# Steps for volume backed instance live migration w/o shared storage.
is_shared_block_storage = True
is_shared_instance_path = True
is_block_migration = True
instance_relative_path = None
if migrate_data:
is_shared_block_storage = migrate_data.get(
'is_shared_block_storage', True)
is_shared_instance_path = migrate_data.get(
'is_shared_instance_path', True)
is_block_migration = migrate_data.get('block_migration', True)
instance_relative_path = migrate_data.get('instance_relative_path')
if not (is_shared_instance_path and is_shared_block_storage):
# NOTE(mikal): live migration of instances using config drive is
# not supported because of a bug in libvirt (read only devices
# are not copied by libvirt). See bug/1246201
if configdrive.required_by(instance):
raise exception.NoLiveMigrationForConfigDriveInLibVirt()
if not is_shared_instance_path:
# NOTE(mikal): this doesn't use libvirt_utils.get_instance_path
# because we are ensuring that the same instance directory name
# is used as was at the source
if instance_relative_path:
instance_dir = os.path.join(CONF.instances_path,
instance_relative_path)
else:
instance_dir = libvirt_utils.get_instance_path(instance)
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
os.mkdir(instance_dir)
if not is_shared_block_storage:
# Ensure images and backing files are present.
self._create_images_and_backing(context, instance,
instance_dir, disk_info)
if not (is_block_migration or is_shared_instance_path):
# NOTE(angdraug): when block storage is shared between source and
# destination and instance path isn't (e.g. volume backed or rbd
# backed instance), instance path on destination has to be prepared
# Touch the console.log file, required by libvirt.
console_file = self._get_console_log_path(instance)
libvirt_utils.file_open(console_file, 'a').close()
# if image has kernel and ramdisk, just download
# following normal way.
self._fetch_instance_kernel_ramdisk(context, instance)
# Establishing connection to volume server.
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_info = blockinfo.get_info_from_bdm(
CONF.libvirt.virt_type, vol)
self._connect_volume(connection_info, disk_info)
# We call plug_vifs before the compute manager calls
# ensure_filtering_rules_for_instance, to ensure bridge is set up
# Retry operation is necessary because continuously request comes,
# concurrent request occurs to iptables, then it complains.
max_retry = CONF.live_migration_retry_count
for cnt in range(max_retry):
try:
self.plug_vifs(instance, network_info)
break
except processutils.ProcessExecutionError:
if cnt == max_retry - 1:
raise
else:
LOG.warn(_LW('plug_vifs() failed %(cnt)d. Retry up to '
'%(max_retry)d.'),
{'cnt': cnt,
'max_retry': max_retry},
instance=instance)
greenthread.sleep(1)
res_data = {'graphics_listen_addrs': {}}
res_data['graphics_listen_addrs']['vnc'] = CONF.vncserver_listen
res_data['graphics_listen_addrs']['spice'] = CONF.spice.server_listen
return res_data
    def _create_images_and_backing(self, context, instance, instance_dir,
                                   disk_info_json):
        """Recreate an instance's disk images on this host.

        Used on the destination of a block/live migration so the disk
        files and any backing files exist before the domain starts.

        :param context: security context
        :param instance:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :param instance_dir:
            instance path to use, calculated externally to handle block
            migrating an instance with an old style instance path
        :param disk_info_json:
            json strings specified in get_instance_disk_info
        """
        if not disk_info_json:
            disk_info = []
        else:
            disk_info = jsonutils.loads(disk_info_json)

        for info in disk_info:
            base = os.path.basename(info['path'])
            # Get image type and create empty disk image, and
            # create backing file in case of qcow2.
            instance_disk = os.path.join(instance_dir, base)
            if not info['backing_file'] and not os.path.exists(instance_disk):
                libvirt_utils.create_image(info['type'], instance_disk,
                                           info['virt_disk_size'])
            elif info['backing_file']:
                # Creating backing file follows same way as spawning instances.
                cache_name = os.path.basename(info['backing_file'])
                image = self.image_backend.image(instance,
                                                 instance_disk,
                                                 CONF.libvirt.images_type)
                # Pick the fetch/create function by disk kind (backing
                # file name prefix): ephemeral, swap, or a glance-backed
                # root disk.
                if cache_name.startswith('ephemeral'):
                    image.cache(fetch_func=self._create_ephemeral,
                                fs_label=cache_name,
                                os_type=instance["os_type"],
                                filename=cache_name,
                                size=info['virt_disk_size'],
                                ephemeral_size=instance['ephemeral_gb'])
                elif cache_name.startswith('swap'):
                    inst_type = flavors.extract_flavor(instance)
                    swap_mb = inst_type['swap']
                    image.cache(fetch_func=self._create_swap,
                                filename="swap_%s" % swap_mb,
                                size=swap_mb * units.Mi,
                                swap_mb=swap_mb)
                else:
                    image.cache(fetch_func=libvirt_utils.fetch_image,
                                context=context,
                                filename=cache_name,
                                image_id=instance['image_ref'],
                                user_id=instance['user_id'],
                                project_id=instance['project_id'],
                                size=info['virt_disk_size'])

        # if image has kernel and ramdisk, just download
        # following normal way.
        self._fetch_instance_kernel_ramdisk(context, instance)
def post_live_migration(self, context, instance, block_device_info,
migrate_data=None):
# Disconnect from volume server
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
self._disconnect_volume(connection_info, disk_dev)
def post_live_migration_at_source(self, context, instance, network_info):
"""Unplug VIFs from networks at source.
:param context: security context
:param instance: instance object reference
:param network_info: instance network information
"""
self.unplug_vifs(instance, network_info)
    def post_live_migration_at_destination(self, context,
                                           instance,
                                           network_info,
                                           block_migration=False,
                                           block_device_info=None):
        """Post operation of live migration at destination host.

        :param context: security context
        :param instance:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :param network_info: instance network information
        :param block_migration: if true, post operation of block_migration.
        :param block_device_info: instance volume information, used when
            the guest XML must be regenerated on this host
        """
        # Define migrated instance, otherwise, suspend/destroy does not work.
        dom_list = self._conn.listDefinedDomains()
        if instance["name"] not in dom_list:
            # In case of block migration, destination does not have
            # libvirt.xml
            disk_info = blockinfo.get_disk_info(
                CONF.libvirt.virt_type, instance, block_device_info)
            xml = self._get_guest_xml(context, instance,
                                      network_info, disk_info,
                                      block_device_info=block_device_info,
                                      write_to_disk=True)
            # Persist the domain definition so later lifecycle operations
            # (suspend/destroy) can find it on this host.
            self._conn.defineXML(xml)
def _get_instance_disk_info(self, instance_name, xml,
block_device_info=None):
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
volume_devices = set()
for vol in block_device_mapping:
disk_dev = vol['mount_device'].rpartition("/")[2]
volume_devices.add(disk_dev)
disk_info = []
doc = etree.fromstring(xml)
disk_nodes = doc.findall('.//devices/disk')
path_nodes = doc.findall('.//devices/disk/source')
driver_nodes = doc.findall('.//devices/disk/driver')
target_nodes = doc.findall('.//devices/disk/target')
for cnt, path_node in enumerate(path_nodes):
disk_type = disk_nodes[cnt].get('type')
path = path_node.get('file') or path_node.get('dev')
target = target_nodes[cnt].attrib['dev']
if not path:
LOG.debug('skipping disk for %s as it does not have a path',
instance_name)
continue
if disk_type not in ['file', 'block']:
LOG.debug('skipping disk because it looks like a volume', path)
continue
if target in volume_devices:
LOG.debug('skipping disk %(path)s (%(target)s) as it is a '
'volume', {'path': path, 'target': target})
continue
# get the real disk size or
# raise a localized error if image is unavailable
if disk_type == 'file':
dk_size = int(os.path.getsize(path))
elif disk_type == 'block':
dk_size = lvm.get_volume_size(path)
disk_type = driver_nodes[cnt].get('type')
if disk_type == "qcow2":
backing_file = libvirt_utils.get_disk_backing_file(path)
virt_size = disk.get_disk_size(path)
over_commit_size = int(virt_size) - dk_size
else:
backing_file = ""
virt_size = dk_size
over_commit_size = 0
disk_info.append({'type': disk_type,
'path': path,
'virt_disk_size': virt_size,
'backing_file': backing_file,
'disk_size': dk_size,
'over_committed_disk_size': over_commit_size})
return jsonutils.dumps(disk_info)
    def get_instance_disk_info(self, instance_name,
                               block_device_info=None):
        """Return JSON disk info for the named instance's local disks.

        :param instance_name: libvirt domain name to inspect
        :param block_device_info: optional volume attachment information
        :raises InstanceNotFound: if libvirt cannot look up the domain
        """
        try:
            dom = self._lookup_by_name(instance_name)
            xml = dom.XMLDesc(0)
        except libvirt.libvirtError as ex:
            # Translate the libvirt failure into a Nova exception so
            # callers only have to handle InstanceNotFound.
            error_code = ex.get_error_code()
            msg = (_('Error from libvirt while getting description of '
                     '%(instance_name)s: [Error Code %(error_code)s] '
                     '%(ex)s') %
                   {'instance_name': instance_name,
                    'error_code': error_code,
                    'ex': ex})
            LOG.warn(msg)
            raise exception.InstanceNotFound(instance_id=instance_name)

        return self._get_instance_disk_info(instance_name, xml,
                                            block_device_info)
    def _get_disk_over_committed_size_total(self):
        """Return total over committed disk size for all instances."""
        # Disk size that all instance uses : virtual_size - disk_size
        disk_over_committed_size = 0
        for dom in self._list_instance_domains():
            try:
                xml = dom.XMLDesc(0)
                disk_infos = jsonutils.loads(
                    self._get_instance_disk_info(dom.name(), xml))
                for info in disk_infos:
                    disk_over_committed_size += int(
                        info['over_committed_disk_size'])
            except libvirt.libvirtError as ex:
                # A domain may disappear mid-iteration; log and keep going
                # so one bad domain does not abort the whole total.
                error_code = ex.get_error_code()
                LOG.warn(_LW(
                    'Error from libvirt while getting description of '
                    '%(instance_name)s: [Error Code %(error_code)s] %(ex)s'
                ) % {'instance_name': dom.name(),
                     'error_code': error_code,
                     'ex': ex})
            except OSError as e:
                if e.errno == errno.ENOENT:
                    LOG.warn(_LW('Periodic task is updating the host stat, '
                                 'it is trying to get disk %(i_name)s, '
                                 'but disk file was removed by concurrent '
                                 'operations such as resize.'),
                             {'i_name': dom.name()})
                elif e.errno == errno.EACCES:
                    LOG.warn(_LW('Periodic task is updating the host stat, '
                                 'it is trying to get disk %(i_name)s, '
                                 'but access is denied. It is most likely '
                                 'due to a VM that exists on the compute '
                                 'node but is not managed by Nova.'),
                             {'i_name': dom.name()})
                else:
                    # Unexpected OS error: let the periodic task see it.
                    raise
            except exception.VolumeBDMPathNotFound as e:
                LOG.warn(_LW('Periodic task is updating the host stats, '
                             'it is trying to get disk info for %(i_name)s, '
                             'but the backing volume block device was removed '
                             'by concurrent operations such as resize. '
                             'Error: %(error)s'),
                         {'i_name': dom.name(),
                          'error': e})
            # NOTE(gtt116): give other tasks a chance.
            greenthread.sleep(0)
        return disk_over_committed_size
def unfilter_instance(self, instance, network_info):
"""See comments of same method in firewall_driver."""
self.firewall_driver.unfilter_instance(instance,
network_info=network_info)
def get_available_nodes(self, refresh=False):
return [self._get_hypervisor_hostname()]
    def get_host_cpu_stats(self):
        """Return the current CPU state of the host.

        :returns: dict of node-wide CPU statistics from libvirt, with an
            added ``frequency`` key.
        """
        # Extract node's CPU statistics.
        stats = self._conn.getCPUStats(libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0)
        # getInfo() returns various information about the host node
        # No. 3 is the expected CPU frequency.
        stats["frequency"] = self._conn.getInfo()[3]
        return stats
def get_host_uptime(self, host):
"""Returns the result of calling "uptime"."""
# NOTE(dprince): host seems to be ignored for this call and in
# other compute drivers as well. Perhaps we should remove it?
out, err = utils.execute('env', 'LANG=C', 'uptime')
return out
def manage_image_cache(self, context, all_instances):
"""Manage the local cache of images."""
self.image_cache_manager.update(context, all_instances)
    def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize,
                                  shared_storage=False):
        """Used only for cleanup in case migrate_disk_and_power_off fails.

        Restores ``inst_base`` from the ``_resize`` backup directory and,
        for non-shared storage, removes the partially copied directory on
        the remote host ``dest``.
        """
        try:
            if os.path.exists(inst_base_resize):
                # The local copy was renamed aside; put it back.
                utils.execute('rm', '-rf', inst_base)
                utils.execute('mv', inst_base_resize, inst_base)
                if not shared_storage:
                    utils.execute('ssh', dest, 'rm', '-rf', inst_base)
        except Exception:
            # Best-effort cleanup: we are already handling a failure, so
            # any error here is deliberately swallowed.
            pass
    def _is_storage_shared_with(self, dest, inst_base):
        """Check whether ``inst_base`` is on storage shared with ``dest``."""
        # NOTE (rmk): There are two methods of determining whether we are
        #             on the same filesystem: the source and dest IP are the
        #             same, or we create a file on the dest system via SSH
        #             and check whether the source system can also see it.
        shared_storage = (dest == self.get_host_ip_addr())
        if not shared_storage:
            tmp_file = uuid.uuid4().hex + '.tmp'
            tmp_path = os.path.join(inst_base, tmp_file)
            try:
                # Create a probe file remotely; if it also shows up
                # locally, the directory lives on shared storage.
                utils.execute('ssh', dest, 'touch', tmp_path)
                if os.path.exists(tmp_path):
                    shared_storage = True
                    os.unlink(tmp_path)
                else:
                    utils.execute('ssh', dest, 'rm', tmp_path)
            except Exception:
                # Best effort: if the SSH probe fails, assume non-shared.
                pass
        return shared_storage
    def migrate_disk_and_power_off(self, context, instance, dest,
                                   flavor, network_info,
                                   block_device_info=None,
                                   timeout=0, retry_interval=0):
        """Power off the instance and copy its disks to ``dest`` for resize.

        :param context: security context
        :param instance: instance being migrated
        :param dest: address of the destination host
        :param flavor: new flavor; used to reject disk shrink and to decide
            whether the swap disk must be recreated
        :param network_info: instance network information (unused here)
        :param block_device_info: volume attachment information
        :param timeout: graceful shutdown timeout passed to power_off
        :param retry_interval: graceful shutdown poll interval
        :returns: JSON disk-info string (as produced by
            get_instance_disk_info) for use by finish_migration
        :raises InstanceFaultRollback: if the new flavor is smaller than
            the current disks
        :raises MigrationPreCheckError: for LVM backed instances
        """
        LOG.debug("Starting migrate_disk_and_power_off",
                  instance=instance)

        # Checks if the migration needs a disk resize down.
        for kind in ('root_gb', 'ephemeral_gb'):
            if flavor[kind] < instance[kind]:
                reason = _("Unable to resize disk down.")
                raise exception.InstanceFaultRollback(
                    exception.ResizeError(reason=reason))

        disk_info_text = self.get_instance_disk_info(instance['name'],
                block_device_info=block_device_info)
        disk_info = jsonutils.loads(disk_info_text)

        # NOTE(dgenin): Migration is not implemented for LVM backed instances.
        if (CONF.libvirt.images_type == 'lvm' and
                not self._is_booted_from_volume(instance, disk_info_text)):
            reason = "Migration is not supported for LVM backed instances"
            raise exception.MigrationPreCheckError(reason)

        # copy disks to destination
        # rename instance dir to +_resize at first for using
        # shared storage for instance dir (eg. NFS).
        inst_base = libvirt_utils.get_instance_path(instance)
        inst_base_resize = inst_base + "_resize"
        shared_storage = self._is_storage_shared_with(dest, inst_base)

        # try to create the directory on the remote compute node
        # if this fails we pass the exception up the stack so we can catch
        # failures here earlier
        if not shared_storage:
            utils.execute('ssh', dest, 'mkdir', '-p', inst_base)

        self.power_off(instance, timeout, retry_interval)

        # Detach volumes before copying; they are reattached on the
        # destination host.
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)
        for vol in block_device_mapping:
            connection_info = vol['connection_info']
            disk_dev = vol['mount_device'].rpartition("/")[2]
            self._disconnect_volume(connection_info, disk_dev)

        try:
            utils.execute('mv', inst_base, inst_base_resize)
            # if we are migrating the instance with shared storage then
            # create the directory. If it is a remote node the directory
            # has already been created
            if shared_storage:
                dest = None
                utils.execute('mkdir', '-p', inst_base)

            active_flavor = flavors.extract_flavor(instance)
            for info in disk_info:
                # assume inst_base == dirname(info['path'])
                img_path = info['path']
                fname = os.path.basename(img_path)
                from_path = os.path.join(inst_base_resize, fname)

                if (fname == 'disk.swap' and
                    active_flavor.get('swap', 0) != flavor.get('swap', 0)):
                    # To properly resize the swap partition, it must be
                    # re-created with the proper size. This is acceptable
                    # because when an OS is shut down, the contents of the
                    # swap space are just garbage, the OS doesn't bother about
                    # what is in it.
                    # We will not copy over the swap disk here, and rely on
                    # finish_migration/_create_image to re-create it for us.
                    continue

                if info['type'] == 'qcow2' and info['backing_file']:
                    tmp_path = from_path + "_rbase"
                    # merge backing file
                    utils.execute('qemu-img', 'convert', '-f', 'qcow2',
                                  '-O', 'qcow2', from_path, tmp_path)

                    if shared_storage:
                        utils.execute('mv', tmp_path, img_path)
                    else:
                        libvirt_utils.copy_image(tmp_path, img_path, host=dest)
                        utils.execute('rm', '-f', tmp_path)

                else:  # raw or qcow2 with no backing file
                    libvirt_utils.copy_image(from_path, img_path, host=dest)
        except Exception:
            # Roll back the rename/copy before re-raising to the caller.
            with excutils.save_and_reraise_exception():
                self._cleanup_remote_migration(dest, inst_base,
                                               inst_base_resize,
                                               shared_storage)

        return disk_info_text
    def _wait_for_running(self, instance):
        """Looping-call callback: stop looping once *instance* is running."""
        state = self.get_info(instance).state

        if state == power_state.RUNNING:
            LOG.info(_LI("Instance running successfully."), instance=instance)
            # Raising LoopingCallDone is how a FixedIntervalLoopingCall
            # callback signals successful completion.
            raise loopingcall.LoopingCallDone()
@staticmethod
def _disk_size_from_instance(instance, info):
"""Determines the disk size from instance properties
Returns the disk size by using the disk name to determine whether it
is a root or an ephemeral disk, then by checking properties of the
instance returns the size converted to bytes.
Returns 0 if the disk name not match (disk, disk.local).
"""
fname = os.path.basename(info['path'])
if fname == 'disk':
size = instance['root_gb']
elif fname == 'disk.local':
size = instance['ephemeral_gb']
else:
size = 0
return size * units.Gi
@staticmethod
def _disk_raw_to_qcow2(path):
"""Converts a raw disk to qcow2."""
path_qcow = path + '_qcow'
utils.execute('qemu-img', 'convert', '-f', 'raw',
'-O', 'qcow2', path, path_qcow)
utils.execute('mv', path_qcow, path)
@staticmethod
def _disk_qcow2_to_raw(path):
"""Converts a qcow2 disk to raw."""
path_raw = path + '_raw'
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'raw', path, path_raw)
utils.execute('mv', path_raw, path)
    def _disk_resize(self, info, size):
        """Attempts to resize a disk to size

        Attempts to resize a disk by checking the capabilities and
        preparing the format, then calling disk.api.extend.

        Note: Currently only support disk extend.

        :param info: disk info dict; 'type' and 'path' keys are used
        :param size: target size in bytes; falsy means no resize
        """
        # If we have a non partitioned image that we can extend
        # then ensure we're in 'raw' format so we can extend file system.
        fmt, org = [info['type']] * 2
        pth = info['path']
        if (size and fmt == 'qcow2' and
                disk.can_resize_image(pth, size) and
                disk.is_image_partitionless(pth, use_cow=True)):
            self._disk_qcow2_to_raw(pth)
            fmt = 'raw'

        if size:
            use_cow = fmt == 'qcow2'
            disk.extend(pth, size, use_cow=use_cow)

        if fmt != org:
            # back to qcow2 (no backing_file though) so that snapshot
            # will be available
            self._disk_raw_to_qcow2(pth)
    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance,
                         block_device_info=None, power_on=True):
        """Complete a resize/migration on the destination host.

        Resizes the copied disks to the new flavor, regenerates the guest
        XML and boots the domain (when ``power_on`` is True).
        """
        LOG.debug("Starting finish_migration", instance=instance)

        # resize disks. only "disk" and "disk.local" are necessary.
        disk_info = jsonutils.loads(disk_info)
        for info in disk_info:
            size = self._disk_size_from_instance(instance, info)
            if resize_instance:
                self._disk_resize(info, size)
            # Convert back to CoW format if this deployment prefers it.
            if info['type'] == 'raw' and CONF.use_cow_images:
                self._disk_raw_to_qcow2(info['path'])

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            block_device_info,
                                            image_meta)
        # assume _create_image do nothing if a target file exists.
        self._create_image(context, instance, disk_info['mapping'],
                           network_info=network_info,
                           block_device_info=None, inject_files=False)

        xml = self._get_guest_xml(context, instance, network_info, disk_info,
                                  block_device_info=block_device_info,
                                  write_to_disk=True)
        self._create_domain_and_network(context, xml, instance, network_info,
                                        block_device_info, power_on,
                                        vifs_already_plugged=True)
        if power_on:
            # Poll until the guest reports RUNNING before returning.
            timer = loopingcall.FixedIntervalLoopingCall(
                self._wait_for_running,
                instance)
            timer.start(interval=0.5).wait()
def _cleanup_failed_migration(self, inst_base):
"""Make sure that a failed migrate doesn't prevent us from rolling
back in a revert.
"""
try:
shutil.rmtree(inst_base)
except OSError as e:
if e.errno != errno.ENOENT:
raise
    def finish_revert_migration(self, context, instance, network_info,
                                block_device_info=None, power_on=True):
        """Revert a resize on the source host and restart the instance.

        Moves the ``_resize`` backup directory back into place, rebuilds
        the guest XML and boots the domain when ``power_on`` is True.
        """
        LOG.debug("Starting finish_revert_migration",
                  instance=instance)

        inst_base = libvirt_utils.get_instance_path(instance)
        inst_base_resize = inst_base + "_resize"

        # NOTE(danms): if we're recovering from a failed migration,
        # make sure we don't have a left-over same-host base directory
        # that would conflict. Also, don't fail on the rename if the
        # failure happened early.
        if os.path.exists(inst_base_resize):
            self._cleanup_failed_migration(inst_base)
            utils.execute('mv', inst_base_resize, inst_base)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            block_device_info)
        xml = self._get_guest_xml(context, instance, network_info, disk_info,
                                  block_device_info=block_device_info)
        self._create_domain_and_network(context, xml, instance, network_info,
                                        block_device_info, power_on)

        if power_on:
            # Poll until the guest reports RUNNING before returning.
            timer = loopingcall.FixedIntervalLoopingCall(
                self._wait_for_running,
                instance)
            timer.start(interval=0.5).wait()
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
self._cleanup_resize(instance, network_info)
@staticmethod
def _get_io_devices(xml_doc):
"""get the list of io devices from the xml document."""
result = {"volumes": [], "ifaces": []}
try:
doc = etree.fromstring(xml_doc)
except Exception:
return result
blocks = [('./devices/disk', 'volumes'),
('./devices/interface', 'ifaces')]
for block, key in blocks:
section = doc.findall(block)
for node in section:
for child in node.getchildren():
if child.tag == 'target' and child.get('dev'):
result[key].append(child.get('dev'))
return result
    def get_diagnostics(self, instance):
        """Return a flat dict of raw diagnostics for *instance*.

        Keys are built from device names (e.g. ``vda_read_req``,
        ``eth0_rx``); stats calls the hypervisor does not support are
        silently skipped.
        """
        domain = self._lookup_by_name(instance['name'])
        output = {}
        # get cpu time, might launch an exception if the method
        # is not supported by the underlying hypervisor being
        # used by libvirt
        try:
            cputime = domain.vcpus()[0]
            for i in range(len(cputime)):
                output["cpu" + str(i) + "_time"] = cputime[i][2]
        except libvirt.libvirtError:
            pass
        # get io status
        xml = domain.XMLDesc(0)
        dom_io = LibvirtDriver._get_io_devices(xml)
        for guest_disk in dom_io["volumes"]:
            try:
                # blockStats might launch an exception if the method
                # is not supported by the underlying hypervisor being
                # used by libvirt
                stats = domain.blockStats(guest_disk)
                output[guest_disk + "_read_req"] = stats[0]
                output[guest_disk + "_read"] = stats[1]
                output[guest_disk + "_write_req"] = stats[2]
                output[guest_disk + "_write"] = stats[3]
                output[guest_disk + "_errors"] = stats[4]
            except libvirt.libvirtError:
                pass
        for interface in dom_io["ifaces"]:
            try:
                # interfaceStats might launch an exception if the method
                # is not supported by the underlying hypervisor being
                # used by libvirt
                stats = domain.interfaceStats(interface)
                output[interface + "_rx"] = stats[0]
                output[interface + "_rx_packets"] = stats[1]
                output[interface + "_rx_errors"] = stats[2]
                output[interface + "_rx_drop"] = stats[3]
                output[interface + "_tx"] = stats[4]
                output[interface + "_tx_packets"] = stats[5]
                output[interface + "_tx_errors"] = stats[6]
                output[interface + "_tx_drop"] = stats[7]
            except libvirt.libvirtError:
                pass
        output["memory"] = domain.maxMemory()
        # memoryStats might launch an exception if the method
        # is not supported by the underlying hypervisor being
        # used by libvirt
        try:
            mem = domain.memoryStats()
            for key in mem.keys():
                output["memory-" + key] = mem[key]
        except (libvirt.libvirtError, AttributeError):
            pass
        return output
def get_instance_diagnostics(self, instance):
domain = self._lookup_by_name(instance['name'])
xml = domain.XMLDesc(0)
xml_doc = etree.fromstring(xml)
(state, max_mem, mem, num_cpu, cpu_time) = domain.info()
config_drive = configdrive.required_by(instance)
launched_at = timeutils.normalize_time(instance['launched_at'])
uptime = timeutils.delta_seconds(launched_at,
timeutils.utcnow())
diags = diagnostics.Diagnostics(state=power_state.STATE_MAP[state],
driver='libvirt',
config_drive=config_drive,
hypervisor_os='linux',
uptime=uptime)
diags.memory_details.maximum = max_mem / units.Mi
diags.memory_details.used = mem / units.Mi
# get cpu time, might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
cputime = domain.vcpus()[0]
num_cpus = len(cputime)
for i in range(num_cpus):
diags.add_cpu(time=cputime[i][2])
except libvirt.libvirtError:
pass
# get io status
dom_io = LibvirtDriver._get_io_devices(xml)
for guest_disk in dom_io["volumes"]:
try:
# blockStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.blockStats(guest_disk)
diags.add_disk(read_bytes=stats[1],
read_requests=stats[0],
write_bytes=stats[3],
write_requests=stats[2])
except libvirt.libvirtError:
pass
for interface in dom_io["ifaces"]:
try:
# interfaceStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.interfaceStats(interface)
diags.add_nic(rx_octets=stats[0],
rx_errors=stats[2],
rx_drop=stats[3],
rx_packets=stats[1],
tx_octets=stats[4],
tx_errors=stats[6],
tx_drop=stats[7],
tx_packets=stats[5])
except libvirt.libvirtError:
pass
# Update mac addresses of interface if stats have been reported
if len(diags.nic_details) > 0:
ret = xml_doc.findall('./devices/interface')
index = 0
for node in ret:
for child in node.getchildren():
if child.tag == 'mac':
diags.nic_details[index].mac_address = child.get(
'address')
return diags
    def instance_on_disk(self, instance):
        """Report whether this host can reach the instance's disk data."""
        # ensure directories exist and are writable
        instance_path = libvirt_utils.get_instance_path(instance)
        LOG.debug('Checking instance files accessibility %s', instance_path)
        shared_instance_path = os.access(instance_path, os.W_OK)
        # NOTE(flwang): For shared block storage scenario, the file system is
        # not really shared by the two hosts, but the volume of evacuated
        # instance is reachable.
        shared_block_storage = (self.image_backend.backend().
                                is_shared_block_storage())
        return shared_instance_path or shared_block_storage
def inject_network_info(self, instance, nw_info):
self.firewall_driver.setup_basic_filtering(instance, nw_info)
    def _delete_instance_files(self, instance):
        """Delete instance files, tracking cleanup attempts on the object."""
        # NOTE(mikal): a shim to handle this file not using instance objects
        # everywhere. Remove this when that conversion happens.
        context = nova_context.get_admin_context(read_deleted='yes')
        inst_obj = objects.Instance.get_by_uuid(context, instance['uuid'])

        # NOTE(mikal): this code should be pushed up a layer when this shim is
        # removed.
        attempts = int(inst_obj.system_metadata.get('clean_attempts', '0'))
        success = self.delete_instance_files(inst_obj)
        # Record the attempt count; on success mark the instance cleaned
        # so the periodic task stops retrying.
        inst_obj.system_metadata['clean_attempts'] = str(attempts + 1)
        if success:
            inst_obj.cleaned = True
        inst_obj.save(context)
    def delete_instance_files(self, instance):
        """Delete the instance's on-disk directory.

        :returns: True if the files are gone (or were never there),
            False if a path still exists and the caller should retry.
        """
        target = libvirt_utils.get_instance_path(instance)
        # A resize may be in progress
        target_resize = target + '_resize'
        # Other threads may attempt to rename the path, so renaming the path
        # to target + '_del' (because it is atomic) and iterating through
        # twice in the unlikely event that a concurrent rename occurs between
        # the two rename attempts in this method. In general this method
        # should be fairly thread-safe without these additional checks, since
        # other operations involving renames are not permitted when the task
        # state is not None and the task state should be set to something
        # other than None by the time this method is invoked.
        target_del = target + '_del'
        for i in six.moves.range(2):
            try:
                utils.execute('mv', target, target_del)
                break
            except Exception:
                pass
            try:
                utils.execute('mv', target_resize, target_del)
                break
            except Exception:
                pass
        # Either the target or target_resize path may still exist if all
        # rename attempts failed.
        remaining_path = None
        for p in (target, target_resize):
            if os.path.exists(p):
                remaining_path = p
                break

        # A previous delete attempt may have been interrupted, so target_del
        # may exist even if all rename attempts during the present method
        # invocation failed due to the absence of both target and
        # target_resize.
        if not remaining_path and os.path.exists(target_del):
            LOG.info(_LI('Deleting instance files %s'), target_del,
                     instance=instance)
            remaining_path = target_del
            try:
                shutil.rmtree(target_del)
            except OSError as e:
                # Deletion failure is logged, not raised: the return value
                # below tells the caller a retry is needed.
                LOG.error(_LE('Failed to cleanup directory %(target)s: '
                              '%(e)s'), {'target': target_del, 'e': e},
                          instance=instance)

        # It is possible that the delete failed, if so don't mark the instance
        # as cleaned.
        if remaining_path and os.path.exists(remaining_path):
            LOG.info(_LI('Deletion of %s failed'), remaining_path,
                     instance=instance)
            return False

        LOG.info(_LI('Deletion of %s complete'), target_del, instance=instance)
        return True
    @property
    def need_legacy_block_device_info(self):
        # This driver consumes the new-style block device info format
        # directly, so no legacy translation is required by the manager.
        return False
    def default_root_device_name(self, instance, image_meta, root_bdm):
        """Return the default root device path (e.g. '/dev/vda')."""
        # Pick the disk bus for both disk and cdrom so get_root_info can
        # choose the right one based on the root block device mapping.
        disk_bus = blockinfo.get_disk_bus_for_device_type(
            CONF.libvirt.virt_type, image_meta, "disk")
        cdrom_bus = blockinfo.get_disk_bus_for_device_type(
            CONF.libvirt.virt_type, image_meta, "cdrom")
        root_info = blockinfo.get_root_info(
            CONF.libvirt.virt_type, image_meta, root_bdm, disk_bus,
            cdrom_bus)
        return block_device.prepend_dev(root_info['dev'])
    def default_device_names_for_instance(self, instance, root_device_name,
                                          *block_device_lists):
        """Populate default device names for the instance's block devices."""
        # Only the first three lists (ephemerals, swap, block device
        # mappings) are used; any extra positional lists are ignored.
        ephemerals, swap, block_device_mapping = block_device_lists[:3]
        blockinfo.default_device_names(CONF.libvirt.virt_type,
                                       nova_context.get_admin_context(),
                                       instance, root_device_name,
                                       ephemerals, swap,
                                       block_device_mapping)
def is_supported_fs_format(self, fs_type):
return fs_type in [disk.FS_FORMAT_EXT2, disk.FS_FORMAT_EXT3,
disk.FS_FORMAT_EXT4, disk.FS_FORMAT_XFS]
| {
"content_hash": "b13f3548c05538c9a16b2d4a8195628d",
"timestamp": "",
"source": "github",
"line_count": 6425,
"max_line_length": 79,
"avg_line_length": 43.57463035019455,
"alnum_prop": 0.5432926023424189,
"repo_name": "luzheqi1987/nova-annotation",
"id": "90a725e1fabfc28a88d187b09e8f1316b86b3620",
"size": "280894",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/virt/libvirt/driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15206909"
},
{
"name": "Shell",
"bytes": "18273"
}
],
"symlink_target": ""
} |
from typing import AsyncIterator
from aiogen.utils import AsyncTestCase
from aiogen.agenerator import agenerator, async_yield, async_yield_from
from aiogen.abuiltins import anext
@agenerator
async def ay(start) -> AsyncIterator:
    """Yield *start*, then yield whatever value was sent back in.

    The value sent after the second yield becomes the generator's
    return value (delivered via StopAsyncIteration).
    """
    r1 = await async_yield(start)
    r2 = await async_yield(r1)
    return r2
@agenerator
async def ayf(start) -> AsyncIterator:
    """Delegate to ay() twice via async_yield_from.

    The first delegation's return value seeds the second; the second
    delegation's return value is returned.
    """
    r1 = await async_yield_from(ay(start))
    r2 = await async_yield_from(ay(r1))
    return r2
class TestLoop(AsyncTestCase):
    """``async for`` over the decorated generators, with and without break."""

    async def test_loop(self):
        # First iteration produces the start value; the second yields the
        # value sent back in (None under plain iteration).
        i = 0
        async for val in ay(1):
            if i == 0: self.assertEqual(val, 1)
            if i == 1: self.assertEqual(val, None)
            i += 1

    async def test_break(self):
        # Breaking out of the loop must close the generator cleanly.
        i = 0
        async for val in ay(1):
            if i == 0: self.assertEqual(val, 1)
            if i > 0: break
            i += 1

    async def test_loop_ayf(self):
        # ayf delegates to ay twice, so four values are yielded in total.
        i = 0
        async for val in ayf(1):
            if i == 0: self.assertEqual(val, 1)
            if i == 1: self.assertEqual(val, None)
            if i == 2: self.assertEqual(val, None)
            if i == 3: self.assertEqual(val, None)
            i += 1

    async def test_break_ayf(self):
        # Break mid-delegation must also finalize the inner generator.
        i = 0
        async for val in ayf(1):
            if i == 0: self.assertEqual(val, 1)
            if i == 1: self.assertEqual(val, None)
            if i == 2: self.assertEqual(val, None)
            if i > 2: break
            i += 1
class TestASend(AsyncTestCase):
    """asend() delivers values into the paused generator."""

    async def test_asend(self):
        gen = ay(1)
        self.assertEqual(await anext(gen), 1)
        # The sent value becomes the result of the pending async_yield.
        self.assertEqual(await gen.asend(2), 2)
        with self.assertRaises(StopAsyncIteration) as cm:
            await gen.asend(3)
        # The generator's return value rides on the StopAsyncIteration.
        self.assertEqual(cm.exception.args[0], 3)

    async def test_asend_ayf(self):
        # Values sent into the delegating generator reach the inner one.
        gen = ayf(1)
        self.assertEqual(await anext(gen), 1)
        self.assertEqual(await gen.asend(2), 2)
        self.assertEqual(await gen.asend(3), 3)
        self.assertEqual(await gen.asend(4), 4)
        with self.assertRaises(StopAsyncIteration) as cm:
            await gen.asend(5)
        self.assertEqual(cm.exception.args[0], 5)
class TestAThrow(AsyncTestCase):
    """athrow() injects exceptions into the generator at various points.

    The ``as cm`` bindings in the original were never read, so they have
    been removed (unused-variable cleanup); only the raise matters here.
    """

    async def test_athrow_first(self):
        # Throwing before the first anext() must still propagate.
        gen = ay(1)
        with self.assertRaises(ValueError):
            await gen.athrow(ValueError())

    async def test_athrow_after_anext(self):
        gen = ay(1)
        await anext(gen)
        with self.assertRaises(ValueError):
            await gen.athrow(ValueError())

    async def test_anext_after_athrow(self):
        # A generator killed by athrow() is exhausted afterwards.
        gen = ay(1)
        with self.assertRaises(ValueError):
            await gen.athrow(ValueError())
        with self.assertRaises(StopAsyncIteration):
            await anext(gen)

    async def test_athrow_after_athrow(self):
        # A second throw into a finished generator re-raises the new error.
        gen = ay(1)
        with self.assertRaises(ValueError):
            await gen.athrow(ValueError())
        with self.assertRaises(TypeError):
            await gen.athrow(TypeError())
class TestAClose(AsyncTestCase):
    """aclose() finalizes the generator; later calls see it exhausted.

    The ``as cm`` bindings in the original were never read, so they have
    been removed (unused-variable cleanup).
    """

    async def test_aclose_first(self):
        gen = ay(1)
        await gen.aclose()
        with self.assertRaises(StopAsyncIteration):
            await anext(gen)

    async def test_aclose_after_anext(self):
        gen = ay(1)
        await anext(gen)
        await gen.aclose()
        with self.assertRaises(StopAsyncIteration):
            await anext(gen)

    async def test_anext_after_aclose(self):
        # Exhaustion after close is sticky: every anext() raises.
        gen = ay(1)
        await gen.aclose()
        with self.assertRaises(StopAsyncIteration):
            await anext(gen)
        with self.assertRaises(StopAsyncIteration):
            await anext(gen)

    async def test_athrow_after_aclose(self):
        # Throwing into a closed generator re-raises the thrown error.
        gen = ay(1)
        await gen.aclose()
        with self.assertRaises(StopAsyncIteration):
            await anext(gen)
        with self.assertRaises(ValueError):
            await gen.athrow(ValueError())
| {
"content_hash": "8672b7ef0c26dd96e7b08aa1a89e17a7",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 71,
"avg_line_length": 30.56923076923077,
"alnum_prop": 0.5915953699043784,
"repo_name": "germn/aiogen",
"id": "4e2ea23a1a6c290e235adf90461755fc473e7104",
"size": "3974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_agenerator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31884"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
#
# In the current iteration, there is a client object that can be loaded from
# the filesystem into the database and its settings loaded from the database.
# There are no special settings (e.g. active/inactive).
#
# I have no idea how this will be used, but it is nice^H^H^H^H, unit tested code,
# so let us keep it around a bit longer
#
# Ah but this assumes that the settings file is in `emission/clients/` and we
# just deleted that entire directory. Changing this to conf for now...
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import *
from builtins import object
import json
import logging
import dateutil.parser
from datetime import datetime
# Our imports
from emission.core.get_database import get_profile_db, get_client_db
class Client(object):
    """A client record stored in the client database.

    Wraps lookup of the record by name, reading its settings, and
    loading/updating the record from the on-disk settings file.
    """

    def __init__(self, clientName):
        # TODO: write background process to ensure that there is only one
        # client with each name. Maybe clean up unused clients?
        self.clientName = clientName
        self.settings_filename = "conf/clients/%s.settings.json" % self.clientName
        self.__reload()

    # Smart settings call, which returns the override settings if the client is
    # active, and
    def getSettings(self):
        """Return the client_settings dict stored for this client."""
        logging.debug("For client %s, returning settings %s" % (self.clientName, self.clientJSON['client_settings']))
        return self.clientJSON['client_settings']

    def __reload(self):
        """Re-read this client's record from the database (None if absent)."""
        self.clientJSON = None
        if self.clientName is not None:
            self.clientJSON = get_client_db().find_one({'name': self.clientName})

    # Figure out if the JSON object here should always be passed in
    # Having it be passed in is a lot more flexible
    # Let's compromise for now by passing it in and seeing how much of a hassle it is
    # That will also ensure that the update_client script is not a complete NOP
    def __update(self, newEntry):
        """Upsert newEntry as this client's DB record and refresh the cache."""
        get_client_db().update({'name': self.clientName}, newEntry, upsert = True)
        self.__reload()

    def update(self, createKey = True):
        """Load the settings file into the database, optionally minting a key.

        :param createKey: when True, generate a fresh UUID4 key for the entry
        :returns: the entry's key
        :raises KeyError: if createKey is False and the settings file has no
            'key' field (pre-existing behavior)
        """
        import uuid
        # BUG FIX: the settings file was previously opened without being
        # closed (json.load(open(...))); use a context manager so the
        # handle is always released.
        with open(self.settings_filename) as settings_file:
            newEntry = json.load(settings_file)
        if createKey:
            newEntry['key'] = str(uuid.uuid4())
        # logging.info("Updating with new entry %s" % newEntry)
        self.__update(newEntry)
        return newEntry['key']

    def getClientKey(self):
        """Return the stored key for this client, or None if not loaded."""
        if self.clientJSON is None:
            return None
        logging.debug("About to return %s from JSON %s" % (self.clientJSON['key'], self.clientJSON))
        return self.clientJSON['key']

    def clientSpecificSetters(self, uuid, sectionId, predictedModeMap):
        # Hook for subclasses; the base client defines no custom setters.
        return None
| {
"content_hash": "4485c448838d455c939cd9b81df5fa6a",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 115,
"avg_line_length": 37.625,
"alnum_prop": 0.7212993724621631,
"repo_name": "sunil07t/e-mission-server",
"id": "13f17984a131118b479da3b6558a4509ec34032b",
"size": "2709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "emission/core/wrapper/client.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "445"
},
{
"name": "CSS",
"bytes": "711874"
},
{
"name": "HTML",
"bytes": "122542"
},
{
"name": "JavaScript",
"bytes": "6962852"
},
{
"name": "Jupyter Notebook",
"bytes": "99521529"
},
{
"name": "Python",
"bytes": "1800632"
},
{
"name": "Shell",
"bytes": "2299"
},
{
"name": "Smarty",
"bytes": "3456"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add the nullable target_image_title column."""
        # Adding field 'FancyBoxPlugin.target_image_title'
        db.add_column('cmsplugin_fancyboxplugin', 'target_image_title', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'FancyBoxPlugin.target_image_title'
db.delete_column('cmsplugin_fancyboxplugin', 'target_image_title')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cmsplugin_fancybox.fancyboxplugin': {
'Meta': {'object_name': 'FancyBoxPlugin', 'db_table': "'cmsplugin_fancyboxplugin'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'link_image': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'fancyboxplugin_set_link_image'", 'null': 'True', 'blank': 'True', 'to': "orm['filer.Image']"}),
'link_image_height': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'link_image_width': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'link_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'popup_text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'target_image': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fancyboxplugin_set_target_image'", 'null': 'True', 'to': "orm['filer.Image']"}),
'target_image_height': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'target_image_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'target_image_width': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_file_type_plugin_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['cmsplugin_fancybox']
| {
"content_hash": "8933674701f2275235ac8d129eddcc29",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 210,
"avg_line_length": 84.51470588235294,
"alnum_prop": 0.5597703149469289,
"repo_name": "MegaMark16/django-cms-fancybox",
"id": "52a248fab067e9fbf885e0a1f0359bb87c72a2ed",
"size": "11512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmsplugin_fancybox/migrations/0002_auto__add_field_fancyboxplugin_target_image_title.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "37267"
},
{
"name": "Python",
"bytes": "41368"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Reset the `map` model's table name to Django's default.

    AlterModelTable with table=None removes the custom db_table set earlier.
    """
    dependencies = [
        ('map', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelTable(
            name='map',
            table=None,
        ),
    ]
| {
"content_hash": "eac44780666e07023bbaafdaf19144e3",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 40,
"avg_line_length": 17.941176470588236,
"alnum_prop": 0.5737704918032787,
"repo_name": "sebastianlan/wedfairy-api",
"id": "20ffa8f6b672bcedc220bab9f575be6b2f6da6ec",
"size": "329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "map/migrations/0002_auto_20151022_1429.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34284"
}
],
"symlink_target": ""
} |
import os
from django import template
register = template.Library()
@register.simple_tag
def get_altair_scripts(dashboard_slug):
    """Template tag: list include paths for every altair script of a dashboard.

    Paths are template-relative; the files themselves live under ``templates/``.
    Order follows ``os.listdir`` (filesystem order, not sorted).
    """
    script_dir = "dashboards/" + dashboard_slug + "/altair_scripts"
    return [script_dir + "/" + filename
            for filename in os.listdir("templates/" + script_dir)]
| {
"content_hash": "14924b760fe7266df4a6ff53bd4f472f",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 61,
"avg_line_length": 25,
"alnum_prop": 0.6771428571428572,
"repo_name": "synw/django-chartflo",
"id": "7e44a21990f0ce2954cdd2c0eaf1445be7abda4f",
"size": "374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chartflo/templatetags/chartflo_tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "584281"
},
{
"name": "HTML",
"bytes": "3590"
},
{
"name": "JavaScript",
"bytes": "107908"
},
{
"name": "Python",
"bytes": "18204"
}
],
"symlink_target": ""
} |
from django import VERSION
from django.core.management.commands.loaddata import Command as LoadDataCommand
# Because this command is used (instead of default loaddata), then settings have been imported
# and we can safely import MT modules
from wagtail_modeltranslation import settings as mt_settings
from wagtail_modeltranslation.utils import auto_populate
ALLOWED = (None, False, 'all', 'default', 'required')
# Human-readable variant of ALLOWED for error messages ('0' stands in for the
# false-y spellings accepted on the command line).
ALLOWED_FOR_PRINT = ', '.join(str(choice) for choice in (0,) + ALLOWED[1:])
def check_mode(option, opt_str, value, parser, namespace=None):
    """Option callback: normalise '0'/'false' to False, validate against
    ALLOWED, and store the result on the target namespace (or parser.values)."""
    if value == '0' or value.lower() == 'false':
        value = False
    if value not in ALLOWED:
        raise ValueError("%s option can be only one of: %s" % (opt_str, ALLOWED_FOR_PRINT))
    target = namespace or parser.values
    setattr(target, option.dest, value)
class Command(LoadDataCommand):
    """`loaddata` override adding a --populate MODE flag that loads fixtures
    inside modeltranslation's auto_populate() context."""
    leave_locale_alone = mt_settings.LOADDATA_RETAIN_LOCALE # Django 1.6
    # Help text for the --populate option; reused by both the optparse and
    # argparse registration branches below.
    help = ('Using this option will cause fixtures to be loaded under auto-population MODE.' +
            'Allowed values are: %s' % ALLOWED_FOR_PRINT)
    # Django < 1.8 management commands use optparse; newer ones use argparse.
    if VERSION < (1, 8):
        from optparse import make_option
        option_list = LoadDataCommand.option_list + (
            make_option('--populate', action='callback', callback=check_mode, type='string',
                        dest='populate', metavar='MODE', help=help),
        )
    else:
        import argparse
        class CheckAction(argparse.Action):
            # Delegate validation/normalisation to the shared check_mode().
            def __call__(self, parser, namespace, value, option_string=None):
                check_mode(self, option_string, value, parser, namespace)
        def add_arguments(self, parser):
            """Register --populate on top of the stock loaddata arguments."""
            super(Command, self).add_arguments(parser)
            parser.add_argument('--populate', action=self.CheckAction, type=str, dest='populate',
                                metavar='MODE', help=self.help)
    def __init__(self):
        super(Command, self).__init__()
        # Django < 1.6 lacks leave_locale_alone; remember the active language
        # so handle() can restore it before loading fixtures.
        if mt_settings.LOADDATA_RETAIN_LOCALE and VERSION < (1, 6):
            from django.utils import translation
            self.locale = translation.get_language()
    def handle(self, *fixture_labels, **options):
        """Load fixtures; when --populate was given, wrap loading in auto_populate(mode)."""
        if self.can_import_settings and hasattr(self, 'locale'):
            from django.utils import translation
            translation.activate(self.locale)
        mode = options.get('populate')
        if mode is not None:
            with auto_populate(mode):
                return super(Command, self).handle(*fixture_labels, **options)
        else:
            return super(Command, self).handle(*fixture_labels, **options)
| {
"content_hash": "0ca45b5c085462dcdd14f8dc59a8c2c1",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 97,
"avg_line_length": 42.42622950819672,
"alnum_prop": 0.6418083462132921,
"repo_name": "tomdyson/wagtail-modeltranslation",
"id": "b22d1d67a650b3d5a2713a561a6799b5877054f1",
"size": "2588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wagtail_modeltranslation/management/commands/loaddata.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "1819"
},
{
"name": "Python",
"bytes": "109340"
}
],
"symlink_target": ""
} |
from __future__ import division
def find_combinations(total, a, b, c, d):
    """Count operator assignments making ((a op1 b) op2 c) op3 d equal `total`.

    Each of op1..op3 is drawn independently from {+, -, *, /} (true division,
    per the module's `from __future__ import division`), so 64 combinations
    are examined.  Combinations that would divide by zero are skipped.

    NOTE(review): the original body was left unfinished (syntax error at
    `for i in range()`); left-to-right evaluation over the four operators is
    the assumed intent — confirm against the commented example call below.
    """
    from itertools import product
    from operator import add, sub, mul, truediv
    counter = 0
    for op1, op2, op3 in product((add, sub, mul, truediv), repeat=3):
        try:
            result = op3(op2(op1(a, b), c), d)
        except ZeroDivisionError:
            continue
        if result == total:
            counter += 1
    return counter
def sum(i, j):
    """Return i + j.  NOTE: this intentionally keeps the original name, which shadows the builtin `sum` in this module."""
    total = i + j
    return total
def substract(i, j):
    """Return i - j (the 'substract' spelling is kept for interface compatibility)."""
    difference = i - j
    return difference
def multiply(i, j):
    """Return the product i * j."""
    product = i * j
    return product
def divide(i, j):
    """Return i / j using true division (raises ZeroDivisionError when j == 0)."""
    quotient = i / j
    return quotient
#print find_combinations(42, 3, 8, 9, 7) | {
"content_hash": "89f0d41d0fb6cb84d40b201bd1f23279",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 41,
"avg_line_length": 12.92,
"alnum_prop": 0.5851393188854489,
"repo_name": "agodi/Algorithms",
"id": "c19b922170e7cfdb96fd9e2bf083f767db2d15cc",
"size": "323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/CombinationsFinder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "54591"
},
{
"name": "Python",
"bytes": "24885"
}
],
"symlink_target": ""
} |
from pytest import raises
import pytest
parametrize = pytest.mark.parametrize
from simple_web_generator import metadata
from simple_web_generator.window import Window
from simple_web_generator.main import main, get_windows
import click
from click.testing import CliRunner
import yaml
class TestMain:
    @parametrize('helparg', ['--help'])
    def test_help(self, helparg, capsys):
        """--help must print a usage banner and exit with status 0."""
        with raises(SystemExit) as exc_info:
            main(['progname', helparg])
        captured_out, _captured_err = capsys.readouterr()
        # A usage message must be present; exact wording is not asserted.
        assert 'usage' in captured_out.lower()
        # argparse-style --help exits cleanly.
        assert exc_info.value.code == 0
class TestYAMLParsing:
    def test_basic_windows(self):
        """Smoke-test get_windows() on a sample YAML template."""
        with click.open_file('tests/sample_files/test1.yaml', 'r') as f:
            # safe_load: PyYAML >= 6 requires an explicit Loader for yaml.load();
            # safe_load behaves identically for plain-data templates and avoids
            # constructing arbitrary Python objects from the YAML stream.
            template = yaml.safe_load(f)
            result = get_windows(template)
            #Should return list of windows
            #assert result == [Window(**{'id': 'header', 'name': 'header'}), Window(**{'id': 'main', 'name': 'about'}), Window(**{'id': 'footer'})]
| {
"content_hash": "d6214cd50d8ae5e75c91eb000102dec6",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 143,
"avg_line_length": 35.03030303030303,
"alnum_prop": 0.6557093425605537,
"repo_name": "Slepice1/simple-web-generator",
"id": "dd4d3d274bcfce5acdcaec3b8fffbe4d9a781fad",
"size": "1180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45878"
}
],
"symlink_target": ""
} |
import re
import datetime
import struct
import dateutil
from HTMLParser import HTMLParser
from lxml.html.diff import tokenize, fixup_ins_del_tags, htmldiff_tokens
from lxml.etree import ParserError, XMLSyntaxError
import lxml.html, lxml.etree
from lxml.html.clean import Cleaner
from itertools import chain
from django.utils.dateformat import DateFormat
from django.utils.html import strip_tags as strip_tags_django
from django.conf import settings
from utils.tornado_escape import linkify as linkify_tornado
from utils.tornado_escape import xhtml_unescape as xhtml_unescape_tornado
from vendor import reseekfile
# COMMENTS_RE = re.compile('\<![ \r\n\t]*(--([^\-]|[\r\n]|-[^\-])*--[ \r\n\t]*)\>')
# Lazy match between '<!--' and '-->'.  Compiled without re.DOTALL, so
# comments spanning multiple lines are NOT matched — presumably intentional
# for performance, but confirm before relying on full comment removal.
COMMENTS_RE = re.compile('\<!--.*?--\>')
def story_score(story, bottom_delta=None):
    """Score a story in [0, 1]: 30% recency, 55% statistics (stubbed to 0),
    15% intelligence (stubbed to 1).

    `story` is a mapping with a datetime under 'story_date'; assumes the story
    is unread and within the unread window.  `bottom_delta` is the age at which
    the recency component reaches 0 (defaults to settings.DAYS_OF_UNREAD).
    """
    if not bottom_delta:
        bottom_delta = datetime.timedelta(days=settings.DAYS_OF_UNREAD)
    now = datetime.datetime.utcnow()
    age = now - story['story_date']
    def whole_seconds(td):
        # same arithmetic as the original lambda: microseconds are ignored
        return td.seconds + (td.days * 86400)
    # Recency: 1.0 for brand-new stories, declining linearly to 0 at bottom_delta.
    date_score = max(0, 1 - (whole_seconds(age) / float(whole_seconds(bottom_delta))))
    # Statistics / intelligence components are currently placeholders.
    statistics_score = 0
    intelligence_score = 1
    return (30/100. * date_score) + (55/100. * statistics_score) + (15/100. * intelligence_score)
def format_story_link_date__short(date, now=None):
    """Compactly format `date` relative to `now`: time-only for today,
    'Yesterday, …' for yesterday, otherwise 'DD Mon YYYY, h:mmam/pm'."""
    if not now: now = datetime.datetime.now()
    # Days elapsed since the story.  The original computed date - now, which
    # labelled *tomorrow* as "Yesterday" and pushed actual yesterday into the
    # long format; now - date matches the sibling format_story_link_date__long.
    diff = now.date() - date.date()
    if diff.days == 0:
        return date.strftime('%I:%M%p').lstrip('0').lower()
    elif diff.days == 1:
        return 'Yesterday, ' + date.strftime('%I:%M%p').lstrip('0').lower()
    else:
        return date.strftime('%d %b %Y, ') + date.strftime('%I:%M%p').lstrip('0').lower()
def format_story_link_date__long(date, now=None):
    """Verbosely format a story date: 'Today, …'/'Yesterday, …' or a full date.

    Uses Django's DateFormat tokens (F jS, l, g:ia); trailing periods from
    'a.m./p.m.'-style output are stripped via replace('.', '').
    """
    if not now: now = datetime.datetime.utcnow()
    diff = now.date() - date.date()
    parsed_date = DateFormat(date)
    if diff.days == 0:
        return 'Today, ' + parsed_date.format('F jS ') + date.strftime('%I:%M%p').lstrip('0').lower()
    elif diff.days == 1:
        return 'Yesterday, ' + parsed_date.format('F jS g:ia').replace('.','')
    # timetuple()[7] is tm_yday (day of year): same day-of-year -> omit the year.
    # NOTE(review): this matches the same calendar day in *any* year, not only
    # the current year — confirm this is the intended "omit year" condition.
    elif date.date().timetuple()[7] == now.date().timetuple()[7]:
        return parsed_date.format('l, F jS g:ia').replace('.','')
    else:
        return parsed_date.format('l, F jS, Y g:ia').replace('.','')
def _extract_date_tuples(date):
    """Return (DateFormat(date), (y,m,d) of date, (y,m,d) of UTC today, (y,m,d) of local yesterday).

    NOTE(review): today_tuple is derived from utcnow() while yesterday_tuple
    comes from local today() — around midnight these can disagree; confirm
    whether the mix is intentional.
    """
    parsed_date = DateFormat(date)
    date_tuple = datetime.datetime.timetuple(date)[:3]
    today_tuple = datetime.datetime.timetuple(datetime.datetime.utcnow())[:3]
    today = datetime.datetime.today()
    yesterday_tuple = datetime.datetime.timetuple(today - datetime.timedelta(1))[:3]
    return parsed_date, date_tuple, today_tuple, yesterday_tuple
def pre_process_story(entry):
    """Normalise a feedparser entry in place: publish date, guid, content, title, author.

    Mutates and returns `entry` (a feedparser entry mapping).
    """
    # Prefer the parsed struct_time fields; fall back to dateutil on the raw string.
    publish_date = entry.get('published_parsed') or entry.get('updated_parsed')
    if publish_date:
        publish_date = datetime.datetime(*publish_date[:6])
    if not publish_date and entry.get('published'):
        try:
            publish_date = dateutil.parser.parse(entry.get('published')).replace(tzinfo=None)
        except ValueError:
            pass
    if publish_date:
        entry['published'] = publish_date
    else:
        entry['published'] = datetime.datetime.utcnow()
    # Clamp dates more than a day in the future back to "now".
    if entry['published'] > datetime.datetime.now() + datetime.timedelta(days=1):
        entry['published'] = datetime.datetime.now()
    # entry_link = entry.get('link') or ''
    # protocol_index = entry_link.find("://")
    # if protocol_index != -1:
    #     entry['link'] = (entry_link[:protocol_index+3]
    #                     + urlquote(entry_link[protocol_index+3:]))
    # else:
    #     entry['link'] = urlquote(entry_link)
    if isinstance(entry.get('guid'), dict):
        entry['guid'] = unicode(entry['guid'])
    # Normalize story content/summary
    if entry.get('content'):
        entry['story_content'] = entry['content'][0].get('value', '').strip()
    else:
        summary = entry.get('summary') or ''
        entry['story_content'] = summary.strip()
    # Add each media enclosure as a Download link
    # (only the first 5 of each list; skip URLs already embedded in the content)
    for media_content in chain(entry.get('media_content', [])[:5], entry.get('links', [])[:5]):
        media_url = media_content.get('url', '')
        media_type = media_content.get('type', '')
        if media_url and media_type and entry['story_content'] and media_url not in entry['story_content']:
            media_type_name = media_type.split('/')[0]
            if 'audio' in media_type and media_url:
                entry['story_content'] += """<br><br>
                    <audio controls="controls" preload="none">
                        <source src="%(media_url)s" type="%(media_type)s" />
                    </audio>""" % {
                    'media_url': media_url,
                    'media_type': media_type
                }
            elif 'image' in media_type and media_url:
                entry['story_content'] += """<br><br><img src="%s" />""" % media_url
                continue
            elif media_content.get('rel') == 'alternative' or 'text' in media_content.get('type'):
                continue
            elif media_type_name in ['application']:
                continue
            entry['story_content'] += """<br><br>
            Download %(media_type)s: <a href="%(media_url)s">%(media_url)s</a>""" % {
                'media_type': media_type_name,
                'media_url': media_url,
            }
    # Guarantee a guid; fall back through id, link, then the publish date.
    entry['guid'] = entry.get('guid') or entry.get('id') or entry.get('link') or str(entry.get('published'))
    # Derive a title from the content when the feed omits one.
    if not entry.get('title') and entry.get('story_content'):
        story_title = strip_tags(entry['story_content'])
        if len(story_title) > 80:
            story_title = story_title[:80] + '...'
        entry['title'] = story_title
    entry['title'] = strip_tags(entry.get('title'))
    entry['author'] = strip_tags(entry.get('author'))
    return entry
class bunch(dict):
    """Dictionary whose items can also be read and written as attributes.

    During __init__, attribute assignment behaves normally (so `attribute`
    becomes a real instance attribute); once initialisation completes, setting
    an attribute stores a dict item instead — unless the name is already a real
    instance attribute, in which case the attribute is updated in place.
    Reading a missing key via attribute access returns None instead of raising.
    """
    def __init__(self, indict=None, attribute=None):
        if indict is None:
            indict = {}
        # set any attributes here - before initialisation
        # these remain as normal attributes
        self.attribute = attribute
        dict.__init__(self, indict)
        self.__initialised = True
        # after initialisation, setting attributes is the same as setting an item
    def __getattr__(self, item):
        """Maps values to attributes.

        Only called if there *isn't* an attribute with this name.
        """
        try:
            return self.__getitem__(item)
        except KeyError:
            return None
    def __setattr__(self, item, value):
        """Maps attributes to values (only once we are initialised)."""
        # `dict.has_key` is Python-2-only and long deprecated; the `in`
        # operator is behaviourally identical and also works on Python 3.
        if '_bunch__initialised' not in self.__dict__:
            # still inside __init__: set a real attribute
            return dict.__setattr__(self, item, value)
        elif item in self.__dict__:
            # any normal attributes are handled normally
            dict.__setattr__(self, item, value)
        else:
            self.__setitem__(item, value)
class MLStripper(HTMLParser):
    """HTMLParser subclass that keeps only text nodes; used to strip markup."""
    def __init__(self):
        # Chain to HTMLParser.__init__ (which itself calls reset()) so all base
        # parser state is initialised — calling self.reset() alone misses
        # attributes that newer HTMLParser versions set only in __init__
        # (e.g. convert_charrefs), which breaks feed().
        HTMLParser.__init__(self)
        self.fed = []
    def handle_data(self, d):
        # accumulate raw text chunks found between tags
        self.fed.append(d)
    def get_data(self):
        """Return the collected text chunks joined by single spaces."""
        return ' '.join(self.fed)
def strip_tags(html):
    """Strip HTML tags from `html` via Django's strip_tags; '' for falsy input.

    The original carried an MLStripper-based fallback *after* the return
    statement — provably unreachable dead code — which has been removed.
    """
    if not html:
        return ''
    return strip_tags_django(html)
def strip_comments(html_string):
    """Remove HTML comments (<!-- ... -->) using the module-level COMMENTS_RE.

    NOTE(review): COMMENTS_RE is compiled without re.DOTALL, so comments that
    span multiple lines are left untouched — confirm that is acceptable.
    """
    return COMMENTS_RE.sub('', html_string)
def strip_comments__lxml2(html_string=""):
if not html_string: return html_string
tree = lxml.html.fromstring(html_string)
comments = tree.xpath('//comment()')
for c in comments:
p = c.getparent()
p.remove(c)
return lxml.etree.tostring(tree)
def strip_comments__lxml(html_string=""):
if not html_string: return html_string
params = {
'comments': True,
'scripts': False,
'javascript': False,
'style': False,
'links': False,
'meta': False,
'page_structure': False,
'processing_instructions': False,
'embedded': False,
'frames': False,
'forms': False,
'annoying_tags': False,
'remove_tags': None,
'allow_tags': None,
'remove_unknown_tags': True,
'safe_attrs_only': False,
}
try:
cleaner = Cleaner(**params)
html = lxml.html.fromstring(html_string)
clean_html = cleaner.clean_html(html)
return lxml.etree.tostring(clean_html)
except XMLSyntaxError:
return html_string
def linkify(*args, **kwargs):
    """Linkify text via Tornado's linkify, then unescape the XHTML entities it adds."""
    return xhtml_unescape_tornado(linkify_tornado(*args, **kwargs))
def truncate_chars(value, max_length):
    """Truncate `value` to at most `max_length` characters, preferring to cut
    at a word boundary, and append '...' when truncation occurred."""
    if len(value) <= max_length:
        return value
    clipped = value[:max_length]
    # If the cut landed mid-word, back up to the previous space (when one exists).
    if value[max_length] != " ":
        last_space = clipped.rfind(" ")
        if last_space != -1:
            clipped = clipped[:last_space]
    return clipped + "..."
def image_size(datastream):
    """Sniff (content_type, width, height) from the first bytes of an image stream.

    Supports GIF, PNG (two header layouts) and JPEG; returns ('', -1, -1)
    when the format is not recognised.
    """
    # ReseekFile lets us peek at the header and then rewind for JPEG scanning.
    datastream = reseekfile.ReseekFile(datastream)
    data = str(datastream.read(30))
    size = len(data)
    height = -1
    width = -1
    content_type = ''
    # handle GIFs
    if (size >= 10) and data[:6] in ('GIF87a', 'GIF89a'):
        # Check to see if content_type is correct
        content_type = 'image/gif'
        # GIF dimensions: little-endian uint16 pair at offset 6
        w, h = struct.unpack("<HH", data[6:10])
        width = int(w)
        height = int(h)
    # See PNG 2. Edition spec (http://www.w3.org/TR/PNG/)
    # Bytes 0-7 are below, 4-byte chunk length, then 'IHDR'
    # and finally the 4-byte width, height
    elif ((size >= 24) and data.startswith('\211PNG\r\n\032\n')
          and (data[12:16] == 'IHDR')):
        content_type = 'image/png'
        w, h = struct.unpack(">LL", data[16:24])
        width = int(w)
        height = int(h)
    # Maybe this is for an older PNG version.
    elif (size >= 16) and data.startswith('\211PNG\r\n\032\n'):
        # Check to see if we have the right content type
        content_type = 'image/png'
        w, h = struct.unpack(">LL", data[8:16])
        width = int(w)
        height = int(h)
    # handle JPEGs
    elif (size >= 2) and data.startswith('\377\330'):
        content_type = 'image/jpeg'
        datastream.seek(0)
        datastream.read(2)
        b = datastream.read(1)
        try:
            w = 0
            h = 0
            # Walk the JPEG marker segments until SOS (0xDA); the SOF0-SOF3
            # markers (0xC0-0xC3) carry the image dimensions.
            while (b and ord(b) != 0xDA):
                while (ord(b) != 0xFF): b = datastream.read(1)
                while (ord(b) == 0xFF): b = datastream.read(1)
                if (ord(b) >= 0xC0 and ord(b) <= 0xC3):
                    datastream.read(3)
                    h, w = struct.unpack(">HH", datastream.read(4))
                    break
                else:
                    # skip this segment: its big-endian length includes the 2 length bytes
                    datastream.read(int(struct.unpack(">H", datastream.read(2))[0])-2)
                b = datastream.read(1)
            width = int(w)
            height = int(h)
        except struct.error:
            pass
        except ValueError:
            pass
    return content_type, width, height
def htmldiff(old_html, new_html):
    """Return new_html with <ins>/<del> markup highlighting changes from old_html.

    Falls back to returning new_html unchanged when either input fails to tokenize.
    """
    try:
        old_html_tokens = tokenize(old_html, include_hrefs=False)
        new_html_tokens = tokenize(new_html, include_hrefs=False)
    except (KeyError, ParserError):
        return new_html
    result = htmldiff_tokens(old_html_tokens, new_html_tokens)
    result = ''.join(result).strip()
    return fixup_ins_del_tags(result)
"content_hash": "f58ce4f51b38cd494ee40b9f1e6da5c1",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 124,
"avg_line_length": 36.33030303030303,
"alnum_prop": 0.576695304028693,
"repo_name": "huihoo/reader",
"id": "25d92768022370611667a7fa0ad71ba72232066d",
"size": "11989",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/story_functions.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import sys
from . import defaults
class OxyioSettings(object):
    """Attribute container seeded from the package `defaults` module and
    optionally overlaid with UPPERCASE attributes of another settings module."""
    def __init__(self):
        # Start from the package defaults
        self.apply_attrs(defaults)
    def load_module(self, name):
        """Import `name` and overlay its UPPERCASE attributes onto this object."""
        # The sys.modules hack below breaks the import
        from importlib import import_module
        self.apply_attrs(import_module(name))
    def apply_attrs(self, module):
        """Copy every UPPERCASE attribute of `module` onto self."""
        for attr_name in dir(module):
            if attr_name.isupper():
                setattr(self, attr_name, getattr(module, attr_name))
# Replace this module object in sys.modules with a ready-made settings
# instance, so `import ...settings` yields the OxyioSettings object directly.
sys.modules[__name__] = OxyioSettings()
| {
"content_hash": "ed79b33d475290a56bd6ca59328a1e53",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 68,
"avg_line_length": 25.391304347826086,
"alnum_prop": 0.6438356164383562,
"repo_name": "oxyio/oxyio",
"id": "9b597e66b2ad06f8b8df894846b844aebbea977c",
"size": "709",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "oxyio/settings/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "57487"
},
{
"name": "HTML",
"bytes": "38296"
},
{
"name": "JavaScript",
"bytes": "26598"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "114316"
},
{
"name": "Shell",
"bytes": "556"
}
],
"symlink_target": ""
} |
import pytest
from lcs.agents.acs2er.ACS2ER import ACS2ER
from lcs.agents.acs2er.Configuration import Configuration
class TestACS2ER:
    """Experience replay should start learning only after er_min_samples transitions."""
    @pytest.fixture
    def cfg(self):
        return Configuration(
            classifier_length=2,
            number_of_possible_actions=2,
            er_buffer_size=100,
            er_min_samples=20,
            er_samples_number=2)
    def test_explore_10_trials_singlestep_20_min_samples_learning_not_begins(self, cfg):
        # 10 single-step trials yield only 10 samples — below er_min_samples,
        # so no classifiers should have been created yet.
        recorded_actions = []
        environment = EnvMock(recorded_actions, 1)
        agent = ACS2ER(cfg)
        agent.explore(environment, 10)
        assert len(agent.get_population()) == 0
        assert len(agent.replay_memory) == 10
    def test_explore_50_trials_threestepspertrial_20_min_samples_learning_begins(self, cfg):
        # 150 samples exceed er_min_samples, so learning kicks in; the replay
        # buffer is capped at er_buffer_size (100).
        recorded_actions = []
        environment = EnvMock(recorded_actions, 3)
        agent = ACS2ER(cfg)
        agent.explore(environment, 50)
        assert len(agent.get_population()) > 0
        assert len(agent.replay_memory) == 100
class ActionSpaceMock:
    """Stub action space whose sample() always picks action 1."""
    def sample(self):
        return 1


class EnvMock:
    """Minimal gym-like stub: each trial lasts `trial_length` steps.

    Intermediate steps return (['1', '0'], reward 1, not done); the final
    step of a trial returns (['1', '1'], reward 10, done) and resets the
    per-trial counter.  Every action taken is appended to `steps`.
    """
    def __init__(self, steps, trial_length):
        self.steps = steps
        self.trial_length = trial_length
        self.trial_steps_count = 0
        self.action_space = ActionSpaceMock()

    def reset(self):
        return ['0', '0']

    def step(self, action):
        self.steps.append(action)
        self.trial_steps_count += 1
        if self.trial_steps_count < self.trial_length:
            return ['1', '0'], 1, False, None
        # trial finished: restart the per-trial counter and pay the big reward
        self.trial_steps_count = 0
        return ['1', '1'], 10, True, None
| {
"content_hash": "5e7ac6e1e2ed49986e47ec9f3069d34c",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 92,
"avg_line_length": 26.6875,
"alnum_prop": 0.5690866510538641,
"repo_name": "ParrotPrediction/pyalcs",
"id": "25dcf110cabdb176990aa5d939ad61d33b22bd51",
"size": "1708",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/lcs/agents/acs2er/test_ACS2ER.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "119"
},
{
"name": "Makefile",
"bytes": "82"
},
{
"name": "Python",
"bytes": "502854"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class TickangleValidator(_plotly_utils.basevalidators.AngleValidator):
    """Angle validator for the `mesh3d.colorbar.tickangle` attribute
    (plotly auto-generated validator; edit_type defaults to 'colorbars')."""

    def __init__(
        self, plotly_name="tickangle", parent_name="mesh3d.colorbar", **kwargs
    ):
        edit_type = kwargs.pop("edit_type", "colorbars")
        super(TickangleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
| {
"content_hash": "8b2d4489f64e5c014a18528e6e202cec",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 78,
"avg_line_length": 33,
"alnum_prop": 0.6153846153846154,
"repo_name": "plotly/plotly.py",
"id": "1a0c0043b8c74e3805219621ef0b2d22bdbfd7a4",
"size": "429",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/mesh3d/colorbar/_tickangle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import inspect
import os
import glob
import argparse
import importlib
from crawlers.base import BaseCrawler
def build_args():
    """Parse the runner's command line.

    Positional ``paths``: one or more crawler module paths.
    Options: ``--width``/``--height`` (viewport size, defaults 1024x1920)
    and ``--debug`` (flag, default off).
    """
    cli = argparse.ArgumentParser()
    cli.add_argument(nargs='+', default='', dest='paths', type=str)
    cli.add_argument('--width', dest='width', default=1024, type=int)
    cli.add_argument('--height', dest='height', default=1920, type=int)
    cli.add_argument('--debug', dest='debug', default=False, action='store_true')
    return cli.parse_args()
def main():
    """Import each crawler module named on the command line and run every
    direct BaseCrawler subclass found inside it."""
    import sys
    sys.path.append('')
    args = build_args()
    targets = []
    for paths in args.paths:
        # Strip the extension to get an import name.
        # NOTE(review): path separators are kept, so 'pkg/mod.py' becomes
        # 'pkg/mod', which import_module cannot import — presumably callers
        # pass top-level file names or dotted paths; confirm.
        x = os.path.splitext(paths)[0]
        m = importlib.import_module(x)
        for klass in inspect.getmembers(m, inspect.isclass):
            # __bases__ check: only DIRECT subclasses of BaseCrawler are run.
            if BaseCrawler in klass[1].__bases__:
                targets.append(klass[1])
    for Crawler in targets:
        c = Crawler(args)
        c.run()
if __name__ == "__main__":
    main()
| {
"content_hash": "a0f13326998c0417f27111b802bd38f0",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 84,
"avg_line_length": 26.54054054054054,
"alnum_prop": 0.6242362525458248,
"repo_name": "altnight/individual-sandbox",
"id": "d62cdd00649cf24ca22661177d3be40788b14bed",
"size": "982",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diary/20171022/crawlers/run.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "62"
},
{
"name": "Dockerfile",
"bytes": "359"
},
{
"name": "HTML",
"bytes": "28098"
},
{
"name": "JavaScript",
"bytes": "33696"
},
{
"name": "Makefile",
"bytes": "4501"
},
{
"name": "Python",
"bytes": "30715"
},
{
"name": "Ruby",
"bytes": "1803"
},
{
"name": "Shell",
"bytes": "764"
}
],
"symlink_target": ""
} |
"""Int value conversion.
This module is a utility for reasoning about intervals when computing filter selectivities,
and generating parameters for pagination. Since integers are the easiest type to deal with
in this context, when we encounter a different type we represent it as an int, do all the
computation in the integer domain, and transfer the computation back into the original domain.
In order to be able to reason about value intervals and successor/predecessor values, we
make sure these mappings to integers are increasing bijective functions.
This kind of mapping is easy to do for int, uuid and datetime types, but not possible for other
types, like string. When the need for other types arises, the precise interface for range
reasoning can be defined and implemented separately for each type.
"""
import datetime
from typing import Any
from uuid import UUID
from ..schema import is_meta_field
from ..schema.schema_info import QueryPlanningSchemaInfo, UUIDOrdering
from .helpers import (
get_uuid_ordering,
is_date_field_type,
is_datetime_field_type,
is_int_field_type,
is_uuid4_type,
)
# UUIDs are defined in RFC-4122 as a 128-bit identifier. This means that the minimum UUID value
# (represented as a natural number) is 0, and the maximal value is 2^128-1.
MIN_UUID_INT = 0
MAX_UUID_INT = 2 ** 128 - 1
DATETIME_EPOCH_TZ_NAIVE = datetime.datetime(1970, 1, 1)
def swap_uuid_prefix_and_suffix(uuid_string: str) -> str:
    """Swap the first 12 and last 12 hex digits of a uuid string.

    Different databases implement uuid comparison differently (see
    UUIDOrdering). Rewriting a uuid this way lets the LastSixBytesFirst
    ordering be implemented on top of plain LeftToRight comparison.
    The function is its own inverse.

    Args:
        uuid_string: uuid string in the canonical 8-4-4-4-12 form

    Returns:
        the input with the first and last 12 hex digits swapped

    Raises:
        AssertionError: if the input does not have 8-4-4-4-12 segments
    """
    segments = uuid_string.split("-")
    segment_lengths = tuple(len(segment) for segment in segments)
    if segment_lengths != (8, 4, 4, 4, 12):
        raise AssertionError(f"Unexpected segment lengths {segment_lengths} in {uuid_string}")
    leading_twelve = segments[0] + segments[1]
    trailing_twelve = segments[4]
    return "-".join(
        [
            trailing_twelve[:8],
            trailing_twelve[8:],
            segments[2],
            segments[3],
            leading_twelve,
        ]
    )
def field_supports_range_reasoning(
    schema_info: QueryPlanningSchemaInfo, vertex_class: str, property_field: str
) -> bool:
    """Return whether range reasoning is supported. See module docstring for definition."""
    # Meta fields never support range reasoning.
    if is_meta_field(property_field):
        return False
    # Range reasoning works for uuid4, int, datetime and date fields.
    type_predicates = (
        is_uuid4_type,
        is_int_field_type,
        is_datetime_field_type,
        is_date_field_type,
    )
    return any(
        predicate(schema_info, vertex_class, property_field)
        for predicate in type_predicates
    )
def convert_int_to_field_value(
    schema_info: QueryPlanningSchemaInfo, vertex_class: str, property_field: str, int_value: int
) -> Any:
    """Return the given integer's corresponding property field value.

    See module docstring for details. The int_value is expected to be in the range of
    convert_field_value_to_int.

    Args:
        schema_info: QueryPlanningSchemaInfo
        vertex_class: str, name of vertex class to which the property field belongs.
        property_field: str, name of property field that the value refers to.
        int_value: int, integer value which will be represented as a property field value.

    Returns:
        Any, the given integer's corresponding property field value.

    Raises:
        AssertionError, if the given int_value is outside the range of valid values for the
        given uuid property field, or if conversion should be supported but is not.
        NotImplementedError, if the field's type has no integer representation.
    """
    if is_int_field_type(schema_info, vertex_class, property_field):
        # Ints are their own representation.
        return int_value
    elif is_datetime_field_type(schema_info, vertex_class, property_field):
        # Inverse of convert_field_value_to_int: microseconds since the naive epoch.
        return DATETIME_EPOCH_TZ_NAIVE + datetime.timedelta(microseconds=int_value)
    elif is_date_field_type(schema_info, vertex_class, property_field):
        # Inverse of date.toordinal().
        return datetime.date.fromordinal(int_value)
    elif is_uuid4_type(schema_info, vertex_class, property_field):
        if not MIN_UUID_INT <= int_value <= MAX_UUID_INT:
            raise AssertionError(
                "Integer value {} could not be converted to UUID, as it "
                "is not in the range of valid UUIDs {} - {}: {} {}".format(
                    int_value, MIN_UUID_INT, MAX_UUID_INT, vertex_class, property_field
                )
            )
        uuid_string = str(UUID(int=int(int_value)))
        # The string form must match the database's comparison order.
        ordering = get_uuid_ordering(schema_info, vertex_class, property_field)
        if ordering == UUIDOrdering.LeftToRight:
            return uuid_string
        elif ordering == UUIDOrdering.LastSixBytesFirst:
            return swap_uuid_prefix_and_suffix(uuid_string)
        else:
            raise AssertionError(
                f"Unexpected ordering for {vertex_class}.{property_field}: {ordering}"
            )
    elif field_supports_range_reasoning(schema_info, vertex_class, property_field):
        # Should be unreachable: every type supporting range reasoning is handled above.
        raise AssertionError(
            "Could not represent int {} as {} {}, but should be able to.".format(
                int_value, vertex_class, property_field
            )
        )
    else:
        raise NotImplementedError(
            "Could not represent int {} as {} {}.".format(int_value, vertex_class, property_field)
        )
def convert_field_value_to_int(
    schema_info: QueryPlanningSchemaInfo, vertex_class: str, property_field: str, value: Any
) -> int:
    """Return the integer representation of a property field value."""
    if is_int_field_type(schema_info, vertex_class, property_field):
        # Ints are their own representation.
        return value
    if is_datetime_field_type(schema_info, vertex_class, property_field):
        # Microseconds since the (naive) epoch; tzinfo is dropped first.
        naive_value = value.replace(tzinfo=None)
        return (naive_value - DATETIME_EPOCH_TZ_NAIVE) // datetime.timedelta(
            microseconds=1
        )
    if is_date_field_type(schema_info, vertex_class, property_field):
        return value.toordinal()
    if is_uuid4_type(schema_info, vertex_class, property_field):
        # Normalize to LeftToRight ordering before reading the 128-bit value.
        ordering = get_uuid_ordering(schema_info, vertex_class, property_field)
        if ordering == UUIDOrdering.LeftToRight:
            canonical_uuid = value
        elif ordering == UUIDOrdering.LastSixBytesFirst:
            canonical_uuid = swap_uuid_prefix_and_suffix(value)
        else:
            raise AssertionError(
                f"Unexpected ordering for {vertex_class}.{property_field}: {ordering}"
            )
        return UUID(canonical_uuid).int
    if field_supports_range_reasoning(schema_info, vertex_class, property_field):
        # Should be unreachable: every type supporting range reasoning is handled above.
        raise AssertionError(
            "Could not represent {} {} value {} as int, but should be able to".format(
                vertex_class, property_field, value
            )
        )
    raise NotImplementedError(
        "Could not represent {} {} value {} as int.".format(vertex_class, property_field, value)
    )
| {
"content_hash": "7e74e0c80afb302532db442794f4eef4",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 100,
"avg_line_length": 41.409356725146196,
"alnum_prop": 0.6744810055076966,
"repo_name": "kensho-technologies/graphql-compiler",
"id": "4572e302b21703a8e19ff6e09dafba54183c0887",
"size": "7132",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "graphql_compiler/cost_estimation/int_value_conversion.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8213336"
},
{
"name": "Shell",
"bytes": "12556"
}
],
"symlink_target": ""
} |
import DNANode
class DNASignText(DNANode.DNANode):
COMPONENT_CODE = 7
def __init__(self):
DNANode.DNANode.__init__(self, '')
self.letters = '' | {
"content_hash": "e0d620e42520123d4059f7405970ae6e",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 42,
"avg_line_length": 21.875,
"alnum_prop": 0.5771428571428572,
"repo_name": "DedMemez/ODS-August-2017",
"id": "206fe9a7dff7291364962b2207646e3ad0ffe461",
"size": "260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dna/DNASignText.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10152014"
},
{
"name": "Shell",
"bytes": "707"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
import json
from django.core.urlresolvers import reverse
from django.views.generic import DetailView, ListView, RedirectView, UpdateView, View
from braces.views import CsrfExemptMixin, JsonRequestResponseMixin, JSONResponseMixin
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import User
class UserDetailView(LoginRequiredMixin, DetailView):
    """Display a single User; login required, looked up by username."""
    model = User
    # These next two lines tell the view to index lookups by username
    slug_field = 'username'
    slug_url_kwarg = 'username'
class UserRedirectView(LoginRequiredMixin, RedirectView):
    """Send the logged-in user to their own detail page (temporary redirect)."""

    permanent = False

    def get_redirect_url(self):
        # Resolve the detail URL for whoever is making the request.
        username = self.request.user.username
        return reverse('users:detail', kwargs={'username': username})
class UserUpdateView(LoginRequiredMixin, UpdateView):
    """Let the logged-in user edit their own ``name`` field."""

    fields = ['name', ]
    model = User

    def get_success_url(self):
        # After a successful update, return to the user's own detail page.
        username = self.request.user.username
        return reverse('users:detail', kwargs={'username': username})

    def get_object(self):
        # Always edit the requesting user's record, never someone else's.
        return User.objects.get(username=self.request.user.username)
class UserListView(LoginRequiredMixin, ListView):
    """List all Users; login required."""
    model = User
    # These next two lines tell the view to index lookups by username
    slug_field = 'username'
    slug_url_kwarg = 'username'
| {
"content_hash": "143fb09b216a78af1f1e9c0f45afff41",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 85,
"avg_line_length": 33.391304347826086,
"alnum_prop": 0.7194010416666666,
"repo_name": "Tuteria/Recruitment-test",
"id": "204f7f630511b1f04a5b0964189a5e1b0bd590cf",
"size": "1560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tuteria_application_test/users/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "782"
},
{
"name": "Dockerfile",
"bytes": "1017"
},
{
"name": "HTML",
"bytes": "21085"
},
{
"name": "JavaScript",
"bytes": "817"
},
{
"name": "Python",
"bytes": "54202"
},
{
"name": "SCSS",
"bytes": "1360"
},
{
"name": "Shell",
"bytes": "8041"
}
],
"symlink_target": ""
} |
"""
Django settings for opencomap project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from os.path import abspath, dirname, join, normpath
from django.contrib import messages
# Directory layout: BASE_DIR / DJANGO_ROOT point at the Django package,
# SITE_ROOT at the repository root that holds templates/ and static/.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
SITE_ROOT = dirname(DJANGO_ROOT)
TEMPLATE_DIRS = (
    normpath(join(SITE_ROOT, 'templates')),
)
STATICFILES_DIRS = (
    normpath(join(SITE_ROOT, 'static')),
)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# Application definition
INSTALLED_APPS = (
    # Django apps
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.gis',
    'django.contrib.sites',
    # third-party apps
    'django_hstore',
    'oauth2_provider',
    'easy_thumbnails',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    # geokey apps
    'geokey.projects',
    'geokey.categories',
    'geokey.contributions',
    'geokey.users',
    'geokey.applications',
    'geokey.superusertools',
    'geokey.extensions',
    'geokey.subsets',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # Adds cross-origin headers for the public API.
    'geokey.core.middleware.XsSharing',
    # 'core.middleware.TerminalLogging',
)
OAUTH2_PROVIDER = {
    # this is the list of available scopes
    'SCOPES': {'read': 'Read scope', 'write': 'Write scope'},
    'CLIENT_SECRET_GENERATOR_LENGTH': 40
}
# Use the project's own Application model instead of oauth2_provider's default.
OAUTH2_PROVIDER_APPLICATION_MODEL = 'applications.Application'
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        # 'rest_framework.authentication.BasicAuthentication',
        'rest_framework.authentication.SessionAuthentication',
        'oauth2_provider.ext.rest_framework.OAuth2Authentication',
    ),
}
# Map Django message levels onto Bootstrap alert CSS classes.
MESSAGE_TAGS = {
    messages.DEBUG: 'debug',
    messages.INFO: 'info',
    messages.SUCCESS: 'success',
    messages.WARNING: 'warning',
    messages.ERROR: 'danger'
}
AUTH_USER_MODEL = 'users.User'
AUTHENTICATION_BACKENDS = (
    # Needed to login by username in Django admin, regardless of `allauth`
    "django.contrib.auth.backends.ModelBackend",
    # `allauth` specific authentication methods, such as login by e-mail
    "allauth.account.auth_backends.AuthenticationBackend"
)
SITE_ID = 1
# Login/logout flow: all account pages live under /admin/account/.
LOGIN_REDIRECT_URL = '/admin/dashboard/'
LOGIN_URL = '/admin/account/login/'
ACCOUNT_LOGOUT_REDIRECT_URL = '/admin/account/login/'
ACCOUNT_USER_MODEL_USERNAME_FIELD = 'display_name'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USERNAME_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_LOGOUT_ON_GET = True
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL = '/admin/account/login/'
ACCOUNT_EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL = '/admin/dashboard/'
# Custom allauth forms supplied by the geokey.users app.
ACCOUNT_FORMS = {
    'signup': 'geokey.users.forms.UserRegistrationForm',
    'change_password': 'geokey.users.forms.CustomPasswordChangeForm',
    'reset_password_from_key': 'geokey.users.forms.CustomResetPasswordKeyForm'
}
TEMPLATE_CONTEXT_PROCESSORS = (
    "django.core.context_processors.request",
    "django.contrib.auth.context_processors.auth",
    "geokey.core.context_processors.project_settings",
    "django.contrib.messages.context_processors.messages",
    "allauth.account.context_processors.account",
    "allauth.socialaccount.context_processors.socialaccount",
)
APPEND_SLASH = True
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
ENABLE_VIDEO = False
| {
"content_hash": "55e41fdc3d54a610dc5d361d1ed69bc3",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 78,
"avg_line_length": 29.095890410958905,
"alnum_prop": 0.7231638418079096,
"repo_name": "nagyistoce/geokey",
"id": "62ee9445396f04726051d75fdbcb7725b6644806",
"size": "4248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geokey/core/settings/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15102"
},
{
"name": "HTML",
"bytes": "198094"
},
{
"name": "Handlebars",
"bytes": "7769"
},
{
"name": "JavaScript",
"bytes": "277022"
},
{
"name": "Python",
"bytes": "846818"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
from setuptools import setup
NAME = 'conda_rpms'
DIR = os.path.abspath(os.path.dirname(__file__))
def extract_packages():
    """Discover importable packages under the project package directory."""
    root = os.path.join(DIR, NAME)
    # Dotted names are derived from each path relative to root's parent dir.
    offset = len(os.path.dirname(root)) + 1
    return [
        dpath[offset:].replace(os.path.sep, '.')
        for dpath, dnames, fnames in os.walk(root)
        if os.path.exists(os.path.join(dpath, '__init__.py'))
    ]
def extract_version():
    """Parse __version__ from the package's __init__.py without importing it."""
    version = None
    fname = os.path.join(DIR, NAME, '__init__.py')
    with open(fname) as fin:
        for line in fin:
            if not line.startswith('__version__'):
                continue
            _, version = line.split('=')
            version = version.strip()[1:-1]  # Remove quotation.
            break
    return version
def read(*parts):
    """Return the UTF-8 decoded contents of a file under DIR, or None if absent."""
    fname = os.path.join(DIR, *parts)
    if not os.path.isfile(fname):
        return None
    with open(fname, 'rb') as fh:
        return fh.read().decode('utf-8')
def extract_description():
    """Return the long description from README.rst, falling back to 'conda-rpms'."""
    description = read('README.rst')
    # Only a missing file (None) triggers the fallback; empty text is kept.
    return 'conda-rpms' if description is None else description
def extract_requirements():
    """Return the requirement strings listed in requirements.txt.

    Returns an empty list when the file is missing: read() returns None in
    that case, and the previous implementation crashed with AttributeError
    on None.splitlines().
    """
    require = read('requirements.txt')
    if require is None:
        return []
    return [r.strip() for r in require.splitlines()]
# Keyword arguments forwarded verbatim to setuptools.setup() below.
setup_args = dict(
    name = NAME,
    version = extract_version(),
    description = 'conda-rpms',
    long_description = extract_description(),
    platforms = ['Linux', 'Mac OS X', 'Windows'],
    # Ship the RPM spec templates alongside the package code.
    package_data = {NAME: ['templates/*.template']},
    license = 'BSD 3-clause',
    packages = extract_packages(),
    classifiers = [
        'License :: OSI Approved :: BSD License',
        'Development Status :: 1 - Planning Development Status',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Operating System :: OS Independent',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Topic :: Scientific/Engineering',
        'Topic :: Software Development :: Libraries'],
    install_requires = extract_requirements(),
    test_suite = '{}.tests'.format(NAME),
)
if __name__ == "__main__":
    setup(**setup_args)
| {
"content_hash": "fc0f309b6491247b4b8b5d200865efc8",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 68,
"avg_line_length": 29.463414634146343,
"alnum_prop": 0.5807119205298014,
"repo_name": "pelson/conda-rpms",
"id": "0b428729a6f0fb61bcfff5b3202bbe8ef327e640",
"size": "2439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "415"
},
{
"name": "HTML",
"bytes": "12221"
},
{
"name": "JavaScript",
"bytes": "1424280"
},
{
"name": "Python",
"bytes": "48836"
},
{
"name": "Shell",
"bytes": "2893"
}
],
"symlink_target": ""
} |
''' Provide helper methods for IMOSChecker class
'''
import datetime
import numpy as np
import re
from numpy import amax
from numpy import amin
from compliance_checker.base import BaseCheck
from compliance_checker.base import Result
from compliance_checker.cf.util import units_convertible
CHECK_VARIABLE = 1
CHECK_GLOBAL_ATTRIBUTE = 0
CHECK_VARIABLE_ATTRIBUTE = 3
OPERATOR_EQUAL = 1
OPERATOR_MIN = 2
OPERATOR_MAX = 3
OPERATOR_WITHIN = 4
OPERATOR_DATE_FORMAT = 5
OPERATOR_SUB_STRING = 6
OPERATOR_CONVERTIBLE = 7
OPERATOR_EMAIL = 8
def is_monotonic(array):
    """
    Check whether an array is strictly monotonic
    (strictly increasing everywhere, or strictly decreasing everywhere).
    """
    steps = np.diff(array)
    return np.all(steps > 0) or np.all(steps < 0)
def is_valid_email(email):
    """Email validation, checks for syntactically invalid email.

    Accepts host names (user@example.com) as well as bracketed IPv4 hosts
    (user@[127.0.0.1]). Returns True when the address matches, else False.
    """
    # Bug fix: the previous pattern ended the numeric branch with "{1,3\}".
    # The escaped brace is not a valid quantifier, so Python's re treated
    # "{1,3}" as literal text and IP-address hosts could never match.
    emailregex = \
        "^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$"
    return re.match(emailregex, email) is not None
def is_numeric(variable_type):
    """
    Check whether a numpy dtype (or scalar type) is numeric (integer,
    including byte, or floating point).

    Rewritten with np.issubdtype instead of membership in hard-coded type
    lists: the old lists referenced np.int (removed in NumPy 1.24, so the
    call raised AttributeError) and np.float128 (not available on all
    platforms, e.g. Windows).
    """
    return bool(
        np.issubdtype(variable_type, np.integer)
        or np.issubdtype(variable_type, np.floating)
    )
def vertical_coordinate_type(dataset, variable):
    """Return None if the given variable does not appear to be a vertical
    coordinate. Otherwise return the likely type of the coordinate
    ('height', 'depth' or 'unknown'). A type is returned if the
    variable is not listed as an ancillary variable and meets any
    of the conditions:
     * variable name includes 'depth' or 'height' (case-insensitive),
       but not 'quality_control'
     * standard_name is 'depth' or 'height'
     * positive attribute is 'up' or 'down'
     * axis is 'Z' (type is then 'unknown')
    """
    # NOTE(review): `dataset` here is the checker wrapper (dataset.dataset is
    # the underlying netCDF file), unlike the find_* helpers below which take
    # the raw file — confirm at call sites.
    ancillary_variables = find_ancillary_variables(dataset.dataset)
    # skip ancillary variables
    if variable in ancillary_variables:
        return None
    name = getattr(variable, 'name', '')
    # skip QC variables
    if name.endswith('_quality_control'):
        return None
    if 'depth' in name.lower():
        return 'depth'
    if 'height' in name.lower():
        return 'height'
    standard_name = getattr(variable, 'standard_name', '')
    if standard_name in ('depth', 'height'):
        return standard_name
    positive = getattr(variable, 'positive', '')
    if positive == 'down':
        return 'depth'
    if positive == 'up':
        return 'height'
    if getattr(variable, 'axis', '') == 'Z':
        return 'unknown'
    return None
def find_variables_from_attribute(dataset, variable, attribute_name):
    ''' Get variables based on a variable attribute such as coordinates.

    Returns the dataset variables whose names appear (space-separated) in
    the given attribute of `variable`; names not present in the dataset
    are silently skipped, and a missing attribute yields an empty list.
    '''
    variables = []
    variable_names = getattr(variable, attribute_name, None)
    if variable_names is not None:
        for variable_name in variable_names.split(' '):
            # dict.has_key() was removed in Python 3; `in` works in 2 and 3.
            if variable_name in dataset.variables:
                variables.append(dataset.variables[variable_name])
    return variables
def find_auxiliary_coordinate_variables(dataset):
    ''' Find all auxiliary coordinate variables in the dataset.

    Collects, across every variable, the variables named in `coordinates`
    attributes. (The previous docstring was a copy-paste of the ancillary
    variable helper's.) Uses .items() instead of the Python-2-only
    .iteritems() so the helper works on both Python 2 and 3.
    '''
    auxiliary_coordinate_variables = []
    for name, var in dataset.variables.items():
        auxiliary_coordinate_variables.extend(
            find_variables_from_attribute(dataset, var, 'coordinates'))
    return auxiliary_coordinate_variables
def find_ancillary_variables_by_variable(dataset, variable):
    ''' Find all ancillary variables associated with a variable.

    Thin wrapper: resolves the names listed in the variable's
    `ancillary_variables` attribute against the dataset.
    '''
    return find_variables_from_attribute(dataset, variable, 'ancillary_variables')
def find_ancillary_variables(dataset):
    ''' Find all ancillary variables.

    Collects, across every variable in the dataset, the variables named in
    `ancillary_variables` attributes. Uses .items() instead of the
    Python-2-only .iteritems() so the helper works on both Python 2 and 3.
    '''
    ancillary_variables = []
    for name, var in dataset.variables.items():
        ancillary_variables.extend(find_variables_from_attribute(dataset, var, \
                                   'ancillary_variables'))
    return ancillary_variables
def find_data_variables(dataset, coordinate_variables, ancillary_variables):
    """
    Finds all variables that could be considered Data variables.

    Returns a list of variables (the previous docstring incorrectly said a
    dictionary). Excludes variables that are:
        - coordinate variables
        - auxiliary coordinate variables
        - ancillary variables
        - dimensionless
        - non-numeric

    Results are NOT CACHED.
    """
    data_variables = []
    auxiliary_coordinate_variables = find_auxiliary_coordinate_variables(dataset)
    # .items() instead of the Python-2-only .iteritems() for 2/3 compatibility.
    for name, var in dataset.variables.items():
        if var not in coordinate_variables and var not in \
                ancillary_variables and var.dimensions and var not in \
                auxiliary_coordinate_variables \
                and is_numeric(var.dtype):
            data_variables.append(var)
    return data_variables
def find_quality_control_variables(dataset):
    ''' Find all quality control variables in a given netcdf file.

    A variable is considered quality-control if ANY of:
      - its name ends with '_quality_control'
      - its standard_name ends with 'status_flag'
      - its long_name contains 'status_flag' or 'quality flag'
      - it has a quality_control_conventions attribute
    '''
    quality_control_variables = []
    # NOTE(review): iteritems() and basestring below make this function
    # Python-2 only; porting it requires a coordinated 2/3-compat change.
    for name, var in dataset.variables.iteritems():
        if name.endswith('_quality_control'):
            quality_control_variables.append(var)
            continue
        standard_name = getattr(var, 'standard_name', None)
        if standard_name is not None and standard_name.endswith('status_flag'):
            quality_control_variables.append(var)
            continue
        long_name = getattr(var, 'long_name', None)
        if long_name is not None and isinstance(long_name, basestring):
            if 'status_flag' in long_name or 'quality flag' in long_name:
                quality_control_variables.append(var)
                continue
        if hasattr(var, 'quality_control_conventions'):
            quality_control_variables.append(var)
            continue
    return quality_control_variables
def check_present(name, data, check_type, result_name, check_priority, reasoning=None):
    """
    Help method to check whether a variable, variable attribute
    or a global attribute presents.

    params:
        name (tuple): variable name and attribute name.
                      For global attribute, only attribute name present.
        data (Dataset): netcdf data file
        check_type (int): CHECK_VARIABLE, CHECK_GLOBAL_ATTRIBUTE,
                          CHECK_VARIABLE_ATTRIBUTE
        result_name: the result name to display
        check_priority (int): the check priority
        reasoning (str): reason string for failed check
    return:
        result (Result): result for the check
    """
    passed = True
    reasoning_out = None
    if check_type == CHECK_GLOBAL_ATTRIBUTE:
        result_name_out = result_name or ('globalattr', name[0], 'present')
        if name[0] not in data.dataset.ncattrs():
            reasoning_out = reasoning or ["Attribute %s not present" % name[0]]
            passed = False
    if check_type == CHECK_VARIABLE or\
       check_type == CHECK_VARIABLE_ATTRIBUTE:
        result_name_out = result_name or ('var', name[0], 'present')
        variable = data.dataset.variables.get(name[0], None)
        # Bug fix: use an identity test instead of `variable == None`.
        # netCDF variable objects overload `==` (element-wise comparison),
        # so equality against None is unreliable; `is None` is correct.
        if variable is None:
            reasoning_out = reasoning or ['Variable %s not present' % name[0]]
            passed = False
        elif check_type == CHECK_VARIABLE_ATTRIBUTE:
            result_name_out = result_name or ('var', name[0], name[1], 'present')
            if name[1] not in variable.ncattrs():
                reasoning_out = reasoning or ["Variable attribute %s:%s not present" % tuple(name)]
                passed = False
    result = Result(check_priority, passed, result_name_out, reasoning_out)
    return result
def check_value(name, value, operator, ds, check_type, result_name, check_priority, reasoning=None, skip_check_present=False):
    """
    Help method to compare attribute to value or a variable
    to a value. It also returns a Result object based on whether
    the check is successful or not.

    params:
        name (tuple): variable name and attribute name.
                      For global attribute, only attribute name present.
        value (str): expected value
        operator (int): OPERATOR_EQUAL, OPERATOR_MAX, OPERATOR_MIN
        ds (Dataset): netcdf data file
        check_type (int): CHECK_VARIABLE, CHECK_GLOBAL_ATTRIBUTE,
                          CHECK_VARIABLE_ATTRIBUTE
        result_name: the result name to display
        check_priority (int): the check priority
        reasoning (str): reason string for failed check
        skip_check_present (boolean): flag to allow check only performed
                                      if attribute is present
    return:
        result (Result): result for the check, or None when the target is
        absent and skip_check_present is set
    """
    # First make sure the attribute/variable exists; on failure the presence
    # Result is returned as-is (unless skip_check_present suppresses it).
    result = check_present(name, ds, check_type,
                           result_name,
                           check_priority)
    if result.value:
        result = None
        retrieved_value = None
        passed = True
        reasoning_out = None
        if check_type == CHECK_GLOBAL_ATTRIBUTE:
            retrieved_value = getattr(ds.dataset, name[0])
            retrieved_name = name[0]
        if check_type == CHECK_VARIABLE:
            variable = ds.dataset.variables.get(name[0], None)
            retrieved_name = name[0]
        if check_type == CHECK_VARIABLE_ATTRIBUTE:
            variable = ds.dataset.variables.get(name[0], None)
            retrieved_value = getattr(variable, name[1])
            retrieved_name = '%s:%s' % name
        # NOTE(review): OPERATOR_MIN/OPERATOR_MAX read `variable`, which is
        # only bound for CHECK_VARIABLE / CHECK_VARIABLE_ATTRIBUTE; calling
        # them with CHECK_GLOBAL_ATTRIBUTE would raise UnboundLocalError.
        if operator == OPERATOR_EQUAL:
            if retrieved_value != value:
                passed = False
                reasoning_out = reasoning or \
                    ["Attribute %s should be equal to '%s'" % (retrieved_name, str(value))]
        if operator == OPERATOR_MIN:
            min_value = amin(variable.__array__())
            if not np.isclose(min_value, float(value)):
                passed = False
                reasoning_out = reasoning or \
                    ["Minimum value of %s (%f) does not match attributes (%f)" % \
                     (retrieved_name, min_value, float(value))]
        if operator == OPERATOR_MAX:
            max_value = amax(variable.__array__())
            if not np.isclose(max_value, float(value)):
                passed = False
                reasoning_out = reasoning or \
                    ["Maximum value of %s (%f) does not match attributes (%f)" % \
                     (retrieved_name, max_value, float(value))]
        if operator == OPERATOR_DATE_FORMAT:
            try:
                datetime.datetime.strptime(retrieved_value, value)
            except ValueError:
                passed = False
                reasoning_out = reasoning or \
                    ["Attribute %s is not in correct date/time format (%s)" % \
                     (retrieved_name, value)]
        if operator == OPERATOR_SUB_STRING:
            if value not in retrieved_value:
                passed = False
                reasoning_out = reasoning or \
                    ["Attribute %s should contain the substring '%s'" % \
                     (retrieved_name, value)]
        if operator == OPERATOR_CONVERTIBLE:
            if not units_convertible(retrieved_value, value):
                passed = False
                reasoning_out = reasoning or \
                    ["Units %s should be equivalent to %s" % (retrieved_name, value)]
        if operator == OPERATOR_EMAIL:
            if not is_valid_email(retrieved_value):
                passed = False
                reasoning_out = reasoning or ["Attribute %s is not a valid email address" % \
                                              retrieved_name]
        if operator == OPERATOR_WITHIN:
            if retrieved_value not in value:
                passed = False
                reasoning_out = reasoning or ["Attribute %s is not in the expected range (%s)" % \
                                              (retrieved_name, str(value))]
        result = Result(check_priority, passed, result_name, reasoning_out)
    else:
        if skip_check_present:
            result = None
    return result
def check_attribute_type(name, expected_type, ds, check_type, result_name, check_priority, reasoning=None, skip_check_present=False):
    """
    Check global data attribute and ensure it has the right type.
    params:
        name (tuple): attribute name
        expected_type (class): expected type, or a list of acceptable
                               numpy dtypes
        ds (Dataset): netcdf data file
        check_type (int): CHECK_VARIABLE, CHECK_GLOBAL_ATTRIBUTE,
                          CHECK_VARIABLE_ATTRIBUTE
        result_name: the result name to display
        check_priority (int): the check priority
        reasoning (str): reason string for failed check
        skip_check_present (boolean): flag to allow check only performed
                                      if attribute is present
    return:
        result (Result): result for the check, or None when the target is
        absent and skip_check_present is set
    """
    # Presence is always verified at HIGH priority, regardless of
    # check_priority, before the type comparison is attempted.
    result = check_present(name, ds, check_type,
                           result_name,
                           BaseCheck.HIGH)
    if result.value:
        if check_type == CHECK_GLOBAL_ATTRIBUTE:
            attribute_value = getattr(ds.dataset, name[0])
            attribute_name = 'Attribute ' + name[0]
        if check_type == CHECK_VARIABLE_ATTRIBUTE:
            attribute_value = getattr(ds.dataset.variables[name[0]], name[1])
            attribute_name = 'Attribute %s:%s' % name
        if check_type == CHECK_VARIABLE:
            attribute_value = ds.dataset.variables[name[0]]
            attribute_name = 'Variable ' + name[0]
        # Values with a dtype (variables, numpy scalars) are compared by
        # dtype; everything else falls back to isinstance().
        dtype = getattr(attribute_value, 'dtype', None)
        passed = True
        if dtype is not None:
            if type(expected_type) is list:
                if dtype not in expected_type:
                    passed = False
            elif dtype != expected_type:
                passed = False
        else:
            try:
                if not isinstance(attribute_value, expected_type):
                    passed = False
            except TypeError:
                # expected_type was not usable with isinstance (e.g. a list).
                passed = False
        if not passed:
            if not reasoning:
                reasoning = ["%s should have type %s" % (attribute_name, str(expected_type))]
            result = Result(check_priority, False, result_name, reasoning)
        else:
            result = Result(check_priority, True, result_name, None)
    else:
        if skip_check_present:
            result = None
    return result
| {
"content_hash": "f1569d5eefc80fc6d1b333b1ed9c1272",
"timestamp": "",
"source": "github",
"line_count": 426,
"max_line_length": 133,
"avg_line_length": 34.906103286384976,
"alnum_prop": 0.5926025554808338,
"repo_name": "petejan/compliance-checker",
"id": "121b521b0b044527199111dd1cb31110e4d5ac67",
"size": "14870",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compliance_checker/imos/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "522241"
}
],
"symlink_target": ""
} |
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Regression test: grabbing a render window as an RGBA image must preserve
# the alpha channel (transparent background) when alpha bit planes exist.
# Create the RenderWindow and Renderer
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
renWin.SetAlphaBitPlanes(1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# create a default polygonal sphere
sphere = vtk.vtkSphereSource()
sphmapper = vtk.vtkPolyDataMapper()
sphmapper.SetInputConnection(sphere.GetOutputPort())
sphactor = vtk.vtkActor()
sphactor.SetMapper(sphmapper)
# Add the actors to the renderer, set the background to initial
# color (which is also transparent), set size.
ren1.AddActor(sphactor)
ren1.SetBackground(0.1, 0.2, 0.4)
renWin.SetSize(256, 256)
# render first image
renWin.Render()
# Skip (exit cleanly) when the platform can't provide alpha bit planes.
if 0 == renWin.GetAlphaBitPlanes():
    print("Failed to find a visual with alpha bit planes.")
    exit(0)
else:
    print("GetAlphaBitPlanes: " + str(renWin.GetAlphaBitPlanes()))
# create window to image filter, grabbing RGB and alpha
w2i = vtk.vtkWindowToImageFilter()
w2i.SetInput(renWin)
w2i.SetInputBufferTypeToRGBA()
# grab window
w2i.Update()
# copy the output
# (DeepCopy so the snapshot survives re-rendering the window below)
outputData = w2i.GetOutput().NewInstance()
outputData.DeepCopy(w2i.GetOutput())
# set up mappers and actors to display the image
im = vtk.vtkImageMapper()
im.SetColorWindow(255)
im.SetColorLevel(127.5)
im.SetInputData(outputData)
ia2 = vtk.vtkActor2D()
ia2.SetMapper(im)
# now, change the image (background is now green)
sphactor.SetScale(2, 2, 2)
ren1.SetBackground(0, 1, 0)
# add the image of the sphere (keeping the original sphere too)
ren1.AddActor(ia2)
ren1.SetViewport(0, 0, 1, 1)
# render result (the polygonal sphere appears behind a smaller image
# of itself). Background of original image is transparent, so you
# can see through it back to the larger sphere and new background.
renWin.Render()
#iren.Start();
| {
"content_hash": "b6ae1085514148c22060ec7e1853f4b5",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 68,
"avg_line_length": 27.464788732394368,
"alnum_prop": 0.7384615384615385,
"repo_name": "hlzz/dotfiles",
"id": "18f0ce87dcbdd449158b4f444c11da5a971d5773",
"size": "1973",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphics/VTK-7.0.0/Rendering/Core/Testing/Python/TestWindowToImageTransparency.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "1240"
},
{
"name": "Arc",
"bytes": "38"
},
{
"name": "Assembly",
"bytes": "449468"
},
{
"name": "Batchfile",
"bytes": "16152"
},
{
"name": "C",
"bytes": "102303195"
},
{
"name": "C++",
"bytes": "155056606"
},
{
"name": "CMake",
"bytes": "7200627"
},
{
"name": "CSS",
"bytes": "179330"
},
{
"name": "Cuda",
"bytes": "30026"
},
{
"name": "D",
"bytes": "2152"
},
{
"name": "Emacs Lisp",
"bytes": "14892"
},
{
"name": "FORTRAN",
"bytes": "5276"
},
{
"name": "Forth",
"bytes": "3637"
},
{
"name": "GAP",
"bytes": "14495"
},
{
"name": "GLSL",
"bytes": "438205"
},
{
"name": "Gnuplot",
"bytes": "327"
},
{
"name": "Groff",
"bytes": "518260"
},
{
"name": "HLSL",
"bytes": "965"
},
{
"name": "HTML",
"bytes": "2003175"
},
{
"name": "Haskell",
"bytes": "10370"
},
{
"name": "IDL",
"bytes": "2466"
},
{
"name": "Java",
"bytes": "219109"
},
{
"name": "JavaScript",
"bytes": "1618007"
},
{
"name": "Lex",
"bytes": "119058"
},
{
"name": "Lua",
"bytes": "23167"
},
{
"name": "M",
"bytes": "1080"
},
{
"name": "M4",
"bytes": "292475"
},
{
"name": "Makefile",
"bytes": "7112810"
},
{
"name": "Matlab",
"bytes": "1582"
},
{
"name": "NSIS",
"bytes": "34176"
},
{
"name": "Objective-C",
"bytes": "65312"
},
{
"name": "Objective-C++",
"bytes": "269995"
},
{
"name": "PAWN",
"bytes": "4107117"
},
{
"name": "PHP",
"bytes": "2690"
},
{
"name": "Pascal",
"bytes": "5054"
},
{
"name": "Perl",
"bytes": "485508"
},
{
"name": "Pike",
"bytes": "1338"
},
{
"name": "Prolog",
"bytes": "5284"
},
{
"name": "Python",
"bytes": "16799659"
},
{
"name": "QMake",
"bytes": "89858"
},
{
"name": "Rebol",
"bytes": "291"
},
{
"name": "Ruby",
"bytes": "21590"
},
{
"name": "Scilab",
"bytes": "120244"
},
{
"name": "Shell",
"bytes": "2266191"
},
{
"name": "Slash",
"bytes": "1536"
},
{
"name": "Smarty",
"bytes": "1368"
},
{
"name": "Swift",
"bytes": "331"
},
{
"name": "Tcl",
"bytes": "1911873"
},
{
"name": "TeX",
"bytes": "11981"
},
{
"name": "Verilog",
"bytes": "3893"
},
{
"name": "VimL",
"bytes": "595114"
},
{
"name": "XSLT",
"bytes": "62675"
},
{
"name": "Yacc",
"bytes": "307000"
},
{
"name": "eC",
"bytes": "366863"
}
],
"symlink_target": ""
} |
def positions_count_for_all_ballot_items_doc_template_values(url_root):
    """
    Build the documentation template values for positionsCountForAllBallotItems.

    :param url_root: root URL used by the docs page when rendering links
    :return: dict of values consumed by the API documentation renderer
    """
    required_params = [
        {
            'name': 'voter_device_id',
            'value': 'string',  # boolean, integer, long, string
            'description': 'An 88 character unique identifier linked to a voter record on the server',
        },
        {
            'name': 'api_key',
            'value': 'string (from post, cookie, or get (in that order))',  # boolean, integer, long, string
            'description': 'The unique key provided to any organization using the WeVoteServer APIs',
        },
    ]
    optional_params = [
        {
            'name': 'google_civic_election_id',
            'value': 'integer',  # boolean, integer, long, string
            'description': 'The unique identifier for a particular election. If not provided, return all positions'
                           ' for this voter.',
        },
    ]
    # Example JSON response shown verbatim on the documentation page.
    api_response = '{\n' \
                   ' "success": boolean,\n' \
                   ' "status": string,\n' \
                   ' "google_civic_election_id: integer,\n' \
                   ' "ballot_item_list": list\n' \
                   ' [\n' \
                   ' "ballot_item_we_vote_id": string,\n' \
                   ' "support_count": integer,\n' \
                   ' "oppose_count": integer,\n' \
                   ' "support_we_vote_id_list": list\n' \
                   ' [\n' \
                   ' "organization or friend we_vote_id": string,\n' \
                   ' ],\n' \
                   ' "support_name_list": list\n' \
                   ' [\n' \
                   ' "Speaker Display Name": string,\n' \
                   ' ],\n' \
                   ' "oppose_we_vote_list": list\n' \
                   ' [\n' \
                   ' "organization or friend we_vote_id": string,\n' \
                   ' ],\n' \
                   ' "oppose_name_list": list\n' \
                   ' [\n' \
                   ' "Speaker Display Name": string,\n' \
                   ' ],\n' \
                   ' ],\n' \
                   '}'
    return {
        'api_name': 'positionsCountForAllBallotItems',
        'api_slug': 'positionsCountForAllBallotItems',
        'api_introduction':
            "Retrieve all positions held by this voter in one list.",
        'try_now_link': 'apis_v1:positionsCountForAllBallotItemsView',
        'try_now_link_variables_dict': {},
        'url_root': url_root,
        'get_or_post': 'GET',
        'required_query_parameter_list': required_params,
        'optional_query_parameter_list': optional_params,
        'api_response': api_response,
        'api_response_notes':
            "",
        'potential_status_codes_list': [],
    }
| {
"content_hash": "4aaa84f39194b1a3bec5fc80372722af",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 116,
"avg_line_length": 42.171052631578945,
"alnum_prop": 0.4630265210608424,
"repo_name": "jainanisha90/WeVoteServer",
"id": "45a193bbf6e900e82c34bf73274772ec19816638",
"size": "3344",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "apis_v1/documentation_source/positions_count_for_all_ballot_items_doc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3612"
},
{
"name": "HTML",
"bytes": "1003027"
},
{
"name": "Python",
"bytes": "7489854"
},
{
"name": "Shell",
"bytes": "611"
}
],
"symlink_target": ""
} |
"""This example lists all creative groups.
Tags: creativeGroups.list
"""
__author__ = ('api.jimper@gmail.com (Jonathon Imperiosi)')
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'profile_id', type=int,
help='The ID of the profile to get creative groups for')
def main(argv):
    """Authenticate, then page through and print all creative groups.

    Args:
        argv: command-line arguments; profile_id is parsed via argparser.
    """
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'dfareporting', 'v2.1', __doc__, __file__, parents=[argparser],
        scope=['https://www.googleapis.com/auth/dfareporting',
               'https://www.googleapis.com/auth/dfatrafficking'])
    profile_id = flags.profile_id
    try:
        # Construct the request.
        request = service.creativeGroups().list(profileId=profile_id)
        while True:
            # Execute request and print response.
            response = request.execute()
            # The API omits 'creativeGroups' when there are no results;
            # .get() avoids a KeyError in that case.
            groups = response.get('creativeGroups', [])
            for group in groups:
                print ('Found creative group with ID %s and name "%s".'
                       % (group['id'], group['name']))
            # BUG FIX: pagination previously went through
            # service.advertisers().list_next(), which pages the wrong
            # collection; creative groups must be paged with
            # creativeGroups().list_next().  'nextPageToken' is also
            # absent on the final page, so use .get() to stop cleanly.
            if groups and response.get('nextPageToken'):
                request = service.creativeGroups().list_next(request, response)
            else:
                break
    except client.AccessTokenRefreshError:
        print ('The credentials have been revoked or expired, please re-run the '
               'application to re-authorize')
if __name__ == '__main__':
    main(sys.argv)
| {
"content_hash": "1501c1bf41b90c4fb419f1c58418a3cf",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 77,
"avg_line_length": 28.0188679245283,
"alnum_prop": 0.6713804713804714,
"repo_name": "vanant/googleads-dfa-reporting-samples",
"id": "a52dee6cee7c0582c9addec6858cb0e7f8dac58c",
"size": "2103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/v2.1/get_creative_groups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "414621"
},
{
"name": "CSS",
"bytes": "2434"
},
{
"name": "Java",
"bytes": "379788"
},
{
"name": "PHP",
"bytes": "401830"
},
{
"name": "Python",
"bytes": "346799"
},
{
"name": "Ruby",
"bytes": "154945"
}
],
"symlink_target": ""
} |
from Bio import SeqIO
import sys
# Deduplicate a FASTA file by record ID: the first record with a given ID
# wins; later records with the same ID are dropped.
# A set gives O(1) membership tests; the original list made this O(n) per
# record (O(n^2) overall) on large FASTA files.
seen_names = set()
output_file = sys.argv[1] + "_dedup"
# 'with' guarantees the output handle is closed even if parsing raises.
with open(output_file, "w") as outh:
    for record in SeqIO.parse(sys.argv[1], "fasta"):
        if record.id in seen_names:
            continue
        seen_names.add(record.id)
        outh.write(">" + str(record.id) + "\n" + str(record.seq) + "\n")
| {
"content_hash": "227a0fc2ba8b6ceba102add17bcc3e53",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 72,
"avg_line_length": 24.928571428571427,
"alnum_prop": 0.6045845272206304,
"repo_name": "Tancata/phylo",
"id": "953f1753211479fb1221be30a7595724ef71b620",
"size": "349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dedup_sequences.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "155099"
},
{
"name": "Shell",
"bytes": "1600"
}
],
"symlink_target": ""
} |
"""
NBtrain.py -
Model generator for langid.py
Marco Lui, January 2013
Based on research by Marco Lui and Tim Baldwin.
Copyright 2013 Marco Lui <saffsd@gmail.com>. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list
of conditions and the following disclaimer in the documentation and/or other materials
provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those of the
authors and should not be interpreted as representing official policies, either expressed
or implied, of the copyright holder.
"""
MAX_CHUNK_SIZE = 100 # maximum number of files to tokenize at once
NUM_BUCKETS = 64 # number of buckets to use in k-v pair generation
import base64, bz2, cPickle
import os, sys, argparse, csv
import array
import numpy as np
import tempfile
import marshal
import atexit, shutil
import multiprocessing as mp
import gzip
from collections import deque, defaultdict
from contextlib import closing
from common import chunk, unmarshal_iter, read_features, index, MapPool
def state_trace(text):
  """
  Tally how many times each scanner state is entered while consuming text.
  Relies on the module-global next-move array set up by the worker
  initializer (setup_pass_tokenize).
  """
  global __nm_arr
  counts = defaultdict(int)
  state = 0
  for ch in text:
    # Next-move table is indexed by (current state, byte value).
    state = __nm_arr[(state << 8) + ord(ch)]
    counts[state] += 1
  return counts
def setup_pass_tokenize(nm_arr, output_states, tk_output, b_dirs, line_level):
  """
  Set the global next-move array used by the aho-corasick scanner.

  Runs once in each worker process (multiprocessing initializer) so that
  pass_tokenize can read these values as module globals.

  @param nm_arr shared next-move array for the scanner
  @param output_states states that emit features
  @param tk_output mapping from state to feature ids
  @param b_dirs bucket output directories
  @param line_level if true, treat each line of a file as a document
  """
  global __nm_arr, __output_states, __tk_output, __b_dirs, __line_level
  __nm_arr = nm_arr
  __output_states = output_states
  __tk_output = tk_output
  __b_dirs = b_dirs
  __line_level = line_level
def pass_tokenize(arg):
  """
  Tokenize documents and do counts for each feature
  Split this into buckets chunked over features rather than documents

  chunk_paths contains label, path pairs because we only know the
  labels per-path, but in line mode there will be multiple documents
  per path and we don't know how many those are.

  @param arg (chunk_id, [(label, path), ...]) pair for one work chunk
  @returns (chunk_id, documents processed, number of k-v pairs written,
            per-document label list)
  """
  global __output_states, __tk_output, __b_dirs, __line_level
  chunk_id, chunk_paths = arg
  term_freq = defaultdict(int)
  # Tokenize each document and add to a count of (doc_id, f_id) frequencies
  doc_count = 0
  labels = []
  for label, path in chunk_paths:
    with open(path) as f:
      if __line_level:
        # each line is treated as a document
        for text in f:
          count = state_trace(text)
          # Only states that emit features contribute counts.
          for state in (set(count) & __output_states):
            for f_id in __tk_output[state]:
              term_freq[doc_count, f_id] += count[state]
          doc_count += 1
          labels.append(label)
      else:
        text = f.read()
        count = state_trace(text)
        for state in (set(count) & __output_states):
          for f_id in __tk_output[state]:
            term_freq[doc_count, f_id] += count[state]
        doc_count += 1
        labels.append(label)
  # Distribute the aggregated counts into buckets
  # Each worker appends to its own per-process file in every bucket
  # directory, so no cross-process locking is needed.
  __procname = mp.current_process().name
  __buckets = [gzip.open(os.path.join(p,__procname+'.index'), 'a') for p in __b_dirs]
  bucket_count = len(__buckets)
  for doc_id, f_id in term_freq:
    # Hash of the feature id decides the bucket, so all counts for one
    # feature land in the same bucket and can be aggregated in pass_ptc.
    bucket_index = hash(f_id) % bucket_count
    count = term_freq[doc_id, f_id]
    item = ( f_id, chunk_id, doc_id, count )
    __buckets[bucket_index].write(marshal.dumps(item))
  for f in __buckets:
    f.close()
  return chunk_id, doc_count, len(term_freq), labels
def setup_pass_ptc(cm, num_instances, chunk_offsets):
  """
  Worker-process initializer for pass_ptc: publish the class map,
  instance count and per-chunk document offsets as module globals.
  """
  global __cm, __num_instances, __chunk_offsets
  __cm = cm
  __num_instances = num_instances
  __chunk_offsets = chunk_offsets
def pass_ptc(b_dir):
  """
  Take a bucket, form a feature map, compute the count of
  each feature in each class.

  @param b_dir path to the bucket directory
  @returns (read_count, f_ids, prod)
  """
  global __cm, __num_instances, __chunk_offsets
  # One row per feature, one column per document instance.
  terms = defaultdict(lambda : np.zeros((__num_instances,), dtype='int'))
  read_count = 0
  for path in os.listdir(b_dir):
    if path.endswith('.index'):
      # Each .index file was written by one worker in pass_tokenize.
      for f_id, chunk_id, doc_id, count in unmarshal_iter(os.path.join(b_dir, path)):
        # doc_id is chunk-local; translate it to a global instance index.
        index = doc_id + __chunk_offsets[chunk_id]
        terms[f_id][index] = count
        read_count += 1
  f_ids, f_vs = zip(*terms.items())
  fm = np.vstack(f_vs)
  # The calculation of the term-class distribution is done per-chunk rather
  # than globally for memory efficiency reasons.
  prod = np.dot(fm, __cm)
  return read_count, f_ids, prod
def learn_nb_params(items, num_langs, tk_nextmove, tk_output, temp_path, args):
  """
  Learn the Naive Bayes parameters (per-class prior and term-class
  distribution) over the given training items.

  @param items label, path pairs
  @param num_langs number of language classes
  @param tk_nextmove scanner next-move table
  @param tk_output mapping from scanner state to feature ids
  @param temp_path directory in which to create the bucket workspace
  @param args parsed command-line arguments (jobs, chunksize, buckets, line)
  @returns (nb_pc, nb_ptc) arrays of log class priors and log P(t|c)
  """
  global outdir
  print "learning NB parameters on {} items".format(len(items))
  # Generate the feature map
  # The next-move table is shared (read-only) across worker processes.
  nm_arr = mp.Array('i', tk_nextmove, lock=False)
  if args.jobs:
    tasks = args.jobs * 2
  else:
    tasks = mp.cpu_count() * 2
  # Ensure chunksize of at least 1, but not exceeding specified chunksize
  chunksize = max(1, min(len(items) / tasks, args.chunksize))
  outdir = tempfile.mkdtemp(prefix="NBtrain-",suffix='-buckets', dir=temp_path)
  b_dirs = [ os.path.join(outdir,"bucket{0}".format(i)) for i in range(args.buckets) ]
  for d in b_dirs:
    os.mkdir(d)
  output_states = set(tk_output)
  # Divide all the items to be processed into chunks, and enumerate each chunk.
  item_chunks = list(chunk(items, chunksize))
  num_chunks = len(item_chunks)
  print "about to tokenize {} chunks".format(num_chunks)
  pass_tokenize_arg = enumerate(item_chunks)
  pass_tokenize_params = (nm_arr, output_states, tk_output, b_dirs, args.line)
  with MapPool(args.jobs, setup_pass_tokenize, pass_tokenize_params) as f:
    pass_tokenize_out = f(pass_tokenize, pass_tokenize_arg)
    write_count = 0
    chunk_sizes = {}
    chunk_labels = []
    # Results are consumed inside the pool context because the map may be lazy.
    for i, (chunk_id, doc_count, writes, labels) in enumerate(pass_tokenize_out):
      write_count += writes
      chunk_sizes[chunk_id] = doc_count
      chunk_labels.append((chunk_id, labels))
      print "processed chunk ID:{0} ({1}/{2}) [{3} keys]".format(chunk_id, i+1, num_chunks, writes)
  print "wrote a total of %d keys" % write_count
  num_instances = sum(chunk_sizes.values())
  print "processed a total of %d instances" % num_instances
  # chunk_offsets[i] is the global index of the first document of chunk i.
  chunk_offsets = {}
  for i in range(len(chunk_sizes)):
    chunk_offsets[i] = sum(chunk_sizes[x] for x in range(i))
  # Build CM based on re-ordeing chunk
  # cm is a boolean (instances x classes) membership matrix.
  cm = np.zeros((num_instances, num_langs), dtype='bool')
  for chunk_id, chunk_label in chunk_labels:
    for doc_id, lang_id in enumerate(chunk_label):
      index = doc_id + chunk_offsets[chunk_id]
      cm[index, lang_id] = True
  pass_ptc_params = (cm, num_instances, chunk_offsets)
  with MapPool(args.jobs, setup_pass_ptc, pass_ptc_params) as f:
    pass_ptc_out = f(pass_ptc, b_dirs)
    def pass_ptc_progress():
      for i,v in enumerate(pass_ptc_out):
        yield v
        print "processed chunk ({0}/{1})".format(i+1, len(b_dirs))
    reads, ids, prods = zip(*pass_ptc_progress())
    read_count = sum(reads)
  print "read a total of %d keys (%d short)" % (read_count, write_count - read_count)
  num_features = max( i for v in tk_output.values() for i in v) + 1
  prod = np.zeros((num_features, cm.shape[1]), dtype=int)
  prod[np.concatenate(ids)] = np.vstack(prods)
  # This is where the smoothing occurs
  # (add-one / Laplace smoothing of the term-class counts, in log space)
  ptc = np.log(1 + prod) - np.log(num_features + prod.sum(0))
  nb_ptc = array.array('d')
  for term_dist in ptc.tolist():
    nb_ptc.extend(term_dist)
  pc = np.log(cm.sum(0))
  nb_pc = array.array('d', pc)
  return nb_pc, nb_ptc
@atexit.register
def cleanup():
  """Remove the temporary bucket workspace on interpreter exit, if any."""
  global outdir
  try:
    path = outdir
  except NameError:
    # learn_nb_params was never run, so there is nothing to clean up.
    return
  shutil.rmtree(path)
if __name__ == "__main__":
  # Command-line entry point: read the training index and scanner from
  # MODEL_DIR, learn the NB parameters, and write a langid.py model.
  parser = argparse.ArgumentParser()
  parser.add_argument("-j","--jobs", type=int, metavar='N', help="spawn N processes (set to 1 for no paralleization)")
  parser.add_argument("-t", "--temp", metavar='TEMP_DIR', help="store buckets in TEMP_DIR instead of in MODEL_DIR/buckets")
  parser.add_argument("-s", "--scanner", metavar='SCANNER', help="use SCANNER for feature counting")
  parser.add_argument("-o", "--output", metavar='OUTPUT', help="output langid.py-compatible model to OUTPUT")
  #parser.add_argument("-i","--index",metavar='INDEX',help="read list of training document paths from INDEX")
  parser.add_argument("model", metavar='MODEL_DIR', help="read index and produce output in MODEL_DIR")
  parser.add_argument("--chunksize", type=int, help='maximum chunk size (number of files)', default=MAX_CHUNK_SIZE)
  parser.add_argument("--buckets", type=int, metavar='N', help="distribute features into N buckets", default=NUM_BUCKETS)
  parser.add_argument("--line", action="store_true", help="treat each line in a file as a document")
  args = parser.parse_args()
  # Resolve paths, falling back to conventional locations inside MODEL_DIR.
  if args.temp:
    temp_path = args.temp
  else:
    temp_path = os.path.join(args.model, 'buckets')
  if args.scanner:
    scanner_path = args.scanner
  else:
    scanner_path = os.path.join(args.model, 'LDfeats.scanner')
  if args.output:
    output_path = args.output
  else:
    output_path = os.path.join(args.model, 'model')
  index_path = os.path.join(args.model, 'paths')
  lang_path = os.path.join(args.model, 'lang_index')
  # display paths
  print "model path:", args.model
  print "temp path:", temp_path
  print "scanner path:", scanner_path
  print "output path:", output_path
  if args.line:
    print "treating each LINE as a document"
  # read list of training files
  with open(index_path) as f:
    reader = csv.reader(f)
    items = [ (int(l),p) for _,l,p in reader ]
  # read scanner
  with open(scanner_path) as f:
    tk_nextmove, tk_output, _ = cPickle.load(f)
  # read list of languages in order
  with open(lang_path) as f:
    reader = csv.reader(f)
    langs = zip(*reader)[0]
  nb_classes = langs
  nb_pc, nb_ptc = learn_nb_params(items, len(langs), tk_nextmove, tk_output, temp_path, args)
  # output the model
  # Model is pickled, bz2-compressed and base64-encoded, matching the
  # format langid.py expects to load.
  model = nb_ptc, nb_pc, nb_classes, tk_nextmove, tk_output
  string = base64.b64encode(bz2.compress(cPickle.dumps(model)))
  with open(output_path, 'w') as f:
    f.write(string)
  print "wrote model to %s (%d bytes)" % (output_path, len(string))
| {
"content_hash": "5cface19d99162d5ac89b8ed42d760f9",
"timestamp": "",
"source": "github",
"line_count": 321,
"max_line_length": 123,
"avg_line_length": 35.88785046728972,
"alnum_prop": 0.6608506944444444,
"repo_name": "wikiteams/github-gender-studies",
"id": "89a6acdc05599643c5a69a782d6197fddc4a675e",
"size": "11543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sources/gender_checker/deprecated/langid/train/NBtrain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "155"
},
{
"name": "Makefile",
"bytes": "35"
},
{
"name": "Python",
"bytes": "1891258"
}
],
"symlink_target": ""
} |
r"""A backend module to load and dump (Java) properties files.
- Format to support: Java Properties file, e.g.
http://docs.oracle.com/javase/1.5.0/docs/api/java/util/Properties.html
- Requirements: None (built-in)
- Development Status :: 4 - Beta
- Limitations:
- Key and value separator of white spaces is not supported
- Keys contain escaped white spaces is not supported
- Special options: None
Changelog:
.. versionchanged:: 0.7.0
- Fix handling of empty values, pointed by @ajays20078
- Fix handling of values contain strings start with '#' or '!' by
@ajays20078
.. versionadded:: 0.2
- Added native Java properties parser instead of a plugin utilizes
pyjavaproperties module.
"""
import os
import re
import typing
import warnings
from . import base
_COMMENT_MARKERS: typing.Tuple[str, ...] = ('#', '!')
def parseline(line: str) -> typing.Tuple[typing.Optional[str], str]:
    """Parse a line of Java properties file.

    :param line:
        A string to parse, must not start with ' ', '#' or '!' (comment)
    :return: A tuple of (key, value), both key and value may be None
    """
    # Split once on the first unescaped '=' or ':' (optionally preceded by
    # whitespace).  ``maxsplit`` is passed by keyword because the positional
    # form of re.split's maxsplit is deprecated since Python 3.13.
    pair = re.split(r"(?:\s+)?(?:(?<!\\)[=:])", line.strip(), maxsplit=1)
    key = pair[0].rstrip()
    if len(pair) < 2:
        # No separator found: warn and treat the whole line as a key with
        # an empty value (key is None when the line was blank).
        warnings.warn(f'Invalid line found: {line}', SyntaxWarning)
        return (key or None, '')
    return (key, pair[1].strip())
def _pre_process_line(
    line: str,
    comment_markers: typing.Tuple[str, ...] = _COMMENT_MARKERS
):
    """Pre-process a properties line: drop empty and comment lines.

    :param line:
        A string not starting w/ any white spaces and ending w/ line breaks.
        It may be empty. see also: :func:`load`.
    :param comment_markers: Comment markers, e.g. '#' (hash)
    :return: the line itself, or None when it should be skipped
    """
    if not line:
        return None
    # A line is a comment only when its FIRST character is a marker; a
    # marker appearing later in the line does not start a comment.
    if line.startswith(comment_markers):
        return None
    return line
def unescape(in_s: str) -> str:
    """Remove backslash escapes from *in_s* and return the result."""
    # Each backslash-plus-character pair collapses to the bare character.
    return re.sub(r'\\(.)', lambda m: m.group(1), in_s)
def _escape_char(in_c: str) -> str:
    """Escape some special characters in java .properties files."""
    # Only ':', '=' and '\' need a backslash prefix; everything else
    # passes through unchanged.
    if in_c in (':', '=', '\\'):
        return '\\' + in_c
    return in_c
def escape(in_s: str) -> str:
    """Backslash-escape ':', '=' and '\\' throughout the given str."""
    specials = (':', '=', '\\')
    return ''.join('\\' + c if c in specials else c for c in in_s)
def load(stream, container=dict, comment_markers=_COMMENT_MARKERS):
    """Load data from a java properties files given as ``stream``.

    :param stream: A file or file like object of Java properties files
    :param container:
        Factory function to create a dict-like object to store properties
    :param comment_markers: Comment markers, e.g. '#' (hash)
    :return: Dict-like object holding properties
    """
    ret = container()
    # Accumulates the logical line across backslash continuations.
    prev = ""
    for line in stream:
        line = _pre_process_line(prev + line.strip().rstrip(),
                                 comment_markers)
        # I don't think later case may happen but just in case.
        if line is None or not line:
            continue
        prev = ""  # re-initialize for later use.
        if line.endswith("\\"):
            # Trailing backslash: logical line continues on the next
            # physical line; stash what we have so far.
            prev += line.rstrip(" \\")
            continue
        (key, val) = parseline(line)
        if key is None:
            warnings.warn(f'Failed to parse the line: {line}')
            continue
        ret[key] = unescape(val)
    return ret
class Parser(base.StreamParser):
    """Parser for Java properties files."""
    # Backend identification and capabilities consumed by anyconfig's
    # backend registry.
    _cid = 'properties'
    _type = 'properties'
    _extensions = ['properties']
    _ordered = True
    _dict_opts = ['ac_dict']
    def load_from_stream(self, stream, container, **kwargs):
        """Load config from given file like object 'stream'.

        :param stream: A file or file like object of Java properties files
        :param container: callble to make a container object
        :param kwargs: optional keyword parameters (ignored)

        :return: Dict-like object holding config parameters
        """
        return load(stream, container=container)
    def dump_to_stream(self, cnf, stream, **kwargs):
        """Dump config 'cnf' to a file or file-like object 'stream'.

        :param cnf: Java properties config data to dump
        :param stream: Java properties file or file like object
        :param kwargs: backend-specific optional keyword parameters :: dict
        """
        # Values are escaped on the way out so a round-trip through
        # load() recovers the original strings.
        for key, val in cnf.items():
            stream.write(f'{key} = {escape(val)}{os.linesep}')
# vim:sw=4:ts=4:et:
| {
"content_hash": "9c4c74d68ab3180f576c724ea1c4f1db",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 76,
"avg_line_length": 29.115384615384617,
"alnum_prop": 0.618229854689564,
"repo_name": "ssato/python-anyconfig",
"id": "ceef40cb0a0ea816926f716c0a641cafbdce79a0",
"size": "4643",
"binary": false,
"copies": "1",
"ref": "refs/heads/next",
"path": "src/anyconfig/backend/properties.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jinja",
"bytes": "568"
},
{
"name": "Python",
"bytes": "348779"
},
{
"name": "Shell",
"bytes": "3456"
}
],
"symlink_target": ""
} |
from flask import Flask
# BUG FIX: the 'flask.ext' namespace was removed in Flask 1.0; extensions
# must be imported under their own package name.
from flask_cors import CORS
from views.root import root
from apis.root import api_root
app = Flask(__name__)
app.register_blueprint(root)
app.register_blueprint(api_root)
app.config['CORS_ALLOW_HEADERS'] = "Content-Type"
cors = CORS(app)
@app.after_request
def add_header(response):
    """Attach compatibility and cache-control headers to every response."""
    response.headers['X-UA-Compatible'] = 'IE=Edge, chrome=1'
    # max-age=0 forces revalidation so clients always see fresh content.
    response.headers['Cache-Control'] = 'public, max-age=0'
    return response
if __name__ == "__main__":
    app.run(debug=True)
| {
"content_hash": "088b75df9a812a580d61dd11e6b367cf",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 61,
"avg_line_length": 23.181818181818183,
"alnum_prop": 0.703921568627451,
"repo_name": "supistar/OandaOrderbook",
"id": "c105f667600a3ec555513d82a7bb32fed8190a06",
"size": "557",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1331"
},
{
"name": "HTML",
"bytes": "2176"
},
{
"name": "JavaScript",
"bytes": "113313"
},
{
"name": "Python",
"bytes": "3738"
}
],
"symlink_target": ""
} |
"""Interface for consistency group snapshots extension."""
from six.moves.urllib import parse
from manilaclient import api_versions
from manilaclient import base
from manilaclient.openstack.common.apiclient import base as common_base
RESOURCES_PATH = '/cgsnapshots'
RESOURCE_PATH = '/cgsnapshots/%s'
RESOURCE_PATH_ACTION = '/cgsnapshots/%s/action'
RESOURCES_NAME = 'cgsnapshots'
RESOURCE_NAME = 'cgsnapshot'
MEMBERS_RESOURCE_NAME = 'cgsnapshot_members'
class ConsistencyGroupSnapshot(common_base.Resource):
    """A group of snapshots taken of multiple shares.

    All mutating operations delegate to the manager that created this
    resource.
    """
    def __repr__(self):
        return "<Consistency Group Snapshot: %s>" % self.id
    def update(self, **kwargs):
        """Update this consistency group snapshot."""
        self.manager.update(self, **kwargs)
    def delete(self):
        """Delete this consistency group snapshot."""
        self.manager.delete(self)
    def reset_state(self, state):
        """Update the consistency group snapshot with the provided state."""
        self.manager.reset_state(self, state)
class ConsistencyGroupSnapshotManager(base.ManagerWithFind):
    """Manage :class:`ConsistencyGroupSnapshot` resources.

    Methods are wrapped with ``api_versions.wraps`` so the correct
    implementation is selected for the negotiated microversion; actions
    were renamed from 'os-*' to bare names at microversion 2.7.
    """
    resource_class = ConsistencyGroupSnapshot
    @api_versions.wraps("2.4")
    def create(self, consistency_group_id, name=None, description=None):
        """Create a consistency group snapshot.

        :param consistency_group_id: ID of the consistency group to snapshot
        :param name: text - name of the new cg snapshot
        :param description: text - description of the cg snapshot
        :rtype: :class:`ConsistencyGroupSnapshot`
        """
        body = {
            'consistency_group_id': consistency_group_id,
            'name': name,
            'description': description,
        }
        return self._create(RESOURCES_PATH,
                            {RESOURCE_NAME: body},
                            RESOURCE_NAME)
    @api_versions.wraps("2.4")
    def get(self, cg_snapshot):
        """Get a consistency group snapshot.

        :param cg_snapshot: either cg snapshot object or text with
            its ID.
        :rtype: :class:`ConsistencyGroupSnapshot`
        """
        consistency_group_id = common_base.getid(cg_snapshot)
        return self._get(RESOURCE_PATH % consistency_group_id,
                         RESOURCE_NAME)
    @api_versions.wraps("2.4")
    def update(self, cg_snapshot, **kwargs):
        """Updates a consistency group snapshot.

        :param cg_snapshot: either consistency group snapshot object or text
            with its ID.
        :rtype: :class:`ConsistencyGroupSnapshot`
        """
        # No-op when no fields were supplied; avoids an empty PUT.
        if not kwargs:
            return
        body = {RESOURCE_NAME: kwargs}
        cg_snapshot_id = common_base.getid(cg_snapshot)
        return self._update(RESOURCE_PATH % cg_snapshot_id,
                            body,
                            RESOURCE_NAME)
    @api_versions.wraps("2.4")
    def list(self, detailed=True, search_opts=None):
        """Get a list of all consistency group snapshots.

        :param detailed: Whether to return detailed snapshot info or not.
        :param search_opts: dict with search options to filter out snapshots.
            available keys are below (('name1', 'name2', ...), 'type'):
            - ('all_tenants', int)
            - ('offset', int)
            - ('limit', int)
            Note, that member context will have restricted set of
            available search options.
        :rtype: list of :class:`ConsistencyGroupSnapshot`
        """
        if search_opts is None:
            search_opts = {}
        query_string = self._query_string_helper(search_opts)
        if detailed:
            path = RESOURCES_PATH + '/detail%s' % (query_string,)
        else:
            path = RESOURCES_PATH + '%s' % (query_string,)
        return self._list(path, RESOURCES_NAME)
    def _do_delete(self, cg_snapshot, force=False, action_name='force_delete'):
        """Delete a consistency group snapshot.

        :param cg_snapshot: either a cg snapshot object or text wit its ID.
        :param force: when True, POST the force-delete action instead of a
            plain DELETE (action name varies by microversion).
        :param action_name: action to POST when forcing deletion
        """
        cg_id = common_base.getid(cg_snapshot)
        body = None
        if force:
            body = {action_name: None}
        if body:
            self.api.client.post(RESOURCE_PATH_ACTION % cg_id, body=body)
        else:
            self._delete(RESOURCE_PATH % cg_id)
    @api_versions.wraps("2.4", "2.6")
    def delete(self, cg_snapshot, force=False):
        return self._do_delete(cg_snapshot, force, 'os-force_delete')
    # Same public name re-bound for the newer microversion; 'noqa'
    # suppresses the redefinition warning.
    @api_versions.wraps("2.7")  # noqa
    def delete(self, cg_snapshot, force=False):
        return self._do_delete(cg_snapshot, force, 'force_delete')
    @api_versions.wraps("2.4")
    def members(self, cg_snapshot, search_opts=None):
        """Get a list of consistency group snapshot members.

        :param search_opts: dict with search options to filter out members.
            - ('offset', int)
            - ('limit', int)
        :rtype: list of :class:`ConsistencyGroupSnapshot`
        """
        consistency_group_id = common_base.getid(cg_snapshot)
        if search_opts is None:
            search_opts = {}
        query_string = self._query_string_helper(search_opts)
        path = RESOURCES_PATH + '/%s/members%s' % (consistency_group_id,
                                                   query_string,)
        return self._list(path, MEMBERS_RESOURCE_NAME)
    def _do_reset_state(self, cg_snapshot, state, action_name):
        """Update the specified consistency group with the provided state."""
        body = {action_name: {'status': state}}
        cg_id = common_base.getid(cg_snapshot)
        url = RESOURCE_PATH_ACTION % cg_id
        return self.api.client.post(url, body=body)
    @api_versions.wraps("2.4", "2.6")
    def reset_state(self, cg_snapshot, state):
        return self._do_reset_state(cg_snapshot, state, 'os-reset_status')
    @api_versions.wraps("2.7")  # noqa
    def reset_state(self, cg_snapshot, state):
        return self._do_reset_state(cg_snapshot, state, 'reset_status')
    def _query_string_helper(self, search_opts):
        """Build a sorted, URL-encoded query string from non-empty options."""
        q_string = parse.urlencode(
            sorted([(k, v) for (k, v) in list(search_opts.items()) if v]))
        if q_string:
            q_string = "?%s" % (q_string,)
        else:
            q_string = ''
        return q_string
| {
"content_hash": "8f177bf99a35cef85613af753280d834",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 79,
"avg_line_length": 34.71508379888268,
"alnum_prop": 0.5992919214676536,
"repo_name": "sniperganso/python-manilaclient",
"id": "08c6a179b2f63ece2a02eda609326a5788e8af75",
"size": "6840",
"binary": false,
"copies": "1",
"ref": "refs/heads/bp/data-service-migration-api",
"path": "manilaclient/v2/consistency_group_snapshots.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "755723"
},
{
"name": "Shell",
"bytes": "11199"
}
],
"symlink_target": ""
} |
"""
.. dialect:: firebird+kinterbasdb
:name: kinterbasdb
:dbapi: kinterbasdb
:connectstring: firebird+kinterbasdb://user:password@host:port/path/to/db[?key=value&key=value...]
:url: https://firebirdsql.org/index.php?op=devel&sub=python
Arguments
----------
The Kinterbasdb backend accepts the ``enable_rowcount`` and ``retaining``
arguments accepted by the :mod:`sqlalchemy.dialects.firebird.fdb` dialect.
In addition, it also accepts the following:
* ``type_conv`` - select the kind of mapping done on the types: by default
SQLAlchemy uses 200 with Unicode, datetime and decimal support. See
the linked documents below for further information.
* ``concurrency_level`` - set the backend policy with regards to threading
issues: by default SQLAlchemy uses policy 1. See the linked documents
below for further information.
.. seealso::
https://sourceforge.net/projects/kinterbasdb
https://kinterbasdb.sourceforge.net/dist_docs/usage.html#adv_param_conv_dynamic_type_translation
https://kinterbasdb.sourceforge.net/dist_docs/usage.html#special_issue_concurrency
""" # noqa
import decimal
from re import match
from .base import FBDialect
from .base import FBExecutionContext
from ... import types as sqltypes
from ... import util
class _kinterbasdb_numeric(object):
    """Mixin supplying a bind processor that sends Decimals as strings."""

    def bind_processor(self, dialect):
        """Return a converter applied to bound parameter values."""

        def process(value):
            # Decimal values are stringified for the driver; everything
            # else passes through unchanged.
            return str(value) if isinstance(value, decimal.Decimal) else value

        return process
# Numeric type whose Decimal bind parameters are stringified by the mixin.
class _FBNumeric_kinterbasdb(_kinterbasdb_numeric, sqltypes.Numeric):
    pass
# Float type whose Decimal bind parameters are stringified by the mixin.
class _FBFloat_kinterbasdb(_kinterbasdb_numeric, sqltypes.Float):
    pass
class FBExecutionContext_kinterbasdb(FBExecutionContext):
    """Execution context that honours the ``enable_rowcount`` option."""

    @property
    def rowcount(self):
        """DBAPI rowcount, or -1 when rowcount support is disabled."""
        enabled = self.execution_options.get(
            "enable_rowcount", self.dialect.enable_rowcount
        )
        return self.cursor.rowcount if enabled else -1
class FBDialect_kinterbasdb(FBDialect):
    """Firebird dialect backed by the kinterbasdb DBAPI driver."""

    driver = "kinterbasdb"
    supports_statement_cache = True
    supports_sane_rowcount = False
    supports_sane_multi_rowcount = False
    execution_ctx_cls = FBExecutionContext_kinterbasdb
    supports_native_decimal = True

    colspecs = util.update_copy(
        FBDialect.colspecs,
        {
            sqltypes.Numeric: _FBNumeric_kinterbasdb,
            sqltypes.Float: _FBFloat_kinterbasdb,
        },
    )

    def __init__(
        self,
        type_conv=200,
        concurrency_level=1,
        enable_rowcount=True,
        retaining=False,
        **kwargs
    ):
        """Configure the dialect.

        :param type_conv: kinterbasdb type-conversion scheme; the default
            200 provides Unicode, datetime and decimal support.
        :param concurrency_level: kinterbasdb threading policy.
        :param enable_rowcount: when True, expose the DBAPI rowcount.
        :param retaining: use retaining commit/rollback semantics.
        """
        super(FBDialect_kinterbasdb, self).__init__(**kwargs)
        self.enable_rowcount = enable_rowcount
        self.type_conv = type_conv
        self.concurrency_level = concurrency_level
        self.retaining = retaining
        if enable_rowcount:
            self.supports_sane_rowcount = True

    @classmethod
    def dbapi(cls):
        # Imported lazily so the dialect module loads without the driver.
        return __import__("kinterbasdb")

    def do_execute(self, cursor, statement, parameters, context=None):
        # kinterbasdb does not accept None; it wants an empty list
        # when there are no arguments.
        cursor.execute(statement, parameters or [])

    def do_rollback(self, dbapi_connection):
        dbapi_connection.rollback(self.retaining)

    def do_commit(self, dbapi_connection):
        dbapi_connection.commit(self.retaining)

    def create_connect_args(self, url):
        """Translate a SQLAlchemy URL into kinterbasdb connect arguments,
        initializing the DBAPI module on first use."""
        opts = url.translate_connect_args(username="user")
        if opts.get("port"):
            opts["host"] = "%s/%s" % (opts["host"], opts["port"])
            del opts["port"]
        opts.update(url.query)

        util.coerce_kw_type(opts, "type_conv", int)

        type_conv = opts.pop("type_conv", self.type_conv)
        concurrency_level = opts.pop(
            "concurrency_level", self.concurrency_level
        )

        if self.dbapi is not None:
            initialized = getattr(self.dbapi, "initialized", None)
            if initialized is None:
                # CVS rev 1.96 changed the name of the attribute:
                # https://kinterbasdb.cvs.sourceforge.net/viewvc/kinterbasdb/
                # Kinterbasdb-3.0/__init__.py?r1=1.95&r2=1.96
                initialized = getattr(self.dbapi, "_initialized", False)
            if not initialized:
                self.dbapi.init(
                    type_conv=type_conv, concurrency_level=concurrency_level
                )
        return ([], opts)

    def _get_server_version_info(self, connection):
        """Get the version of the Firebird server used by a connection.

        Returns a tuple of (`major`, `minor`, `build`), three integers
        representing the version of the attached server.
        """
        # This is the simpler approach (the other uses the services api),
        # that for backward compatibility reasons returns a string like
        #   LI-V6.3.3.12981 Firebird 2.0
        # where the first version is a fake one resembling the old
        # Interbase signature.
        fbconn = connection.connection
        version = fbconn.server_version
        return self._parse_version_info(version)

    def _parse_version_info(self, version):
        """Parse a server version banner into a version tuple tagged with
        'firebird' or 'interbase'."""
        m = match(
            r"\w+-V(\d+)\.(\d+)\.(\d+)\.(\d+)( \w+ (\d+)\.(\d+))?", version
        )
        if not m:
            raise AssertionError(
                "Could not determine version from string '%s'" % version
            )

        # Fix (PEP 8 / E711): compare against None with `is not`, not `!=`.
        if m.group(5) is not None:
            return tuple([int(x) for x in m.group(6, 7, 4)] + ["firebird"])
        else:
            return tuple([int(x) for x in m.group(1, 2, 3)] + ["interbase"])

    def is_disconnect(self, e, connection, cursor):
        """Return True when the DBAPI error indicates a dropped connection."""
        if isinstance(
            e, (self.dbapi.OperationalError, self.dbapi.ProgrammingError)
        ):
            msg = str(e)
            return (
                "Error writing data to the connection" in msg
                or "Unable to complete network request to host" in msg
                or "Invalid connection state" in msg
                or "Invalid cursor state" in msg
                or "connection shutdown" in msg
            )
        else:
            return False
dialect = FBDialect_kinterbasdb
| {
"content_hash": "1c4fa4e76959f39dcfd881f3caa0f48b",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 102,
"avg_line_length": 31.96923076923077,
"alnum_prop": 0.6191851138915624,
"repo_name": "monetate/sqlalchemy",
"id": "102222de0acb9485103d508f796794a75f53df28",
"size": "6479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/sqlalchemy/dialects/firebird/kinterbasdb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "49142"
},
{
"name": "Python",
"bytes": "11790244"
}
],
"symlink_target": ""
} |
from dcbase.models import UserProfile
from django.forms import ModelForm
class UserProfileForm(ModelForm):
    """ModelForm exposing the user-editable UserProfile preferences."""

    class Meta:
        model = UserProfile
        # Only the preference fields are editable through this form.
        fields = ['language', 'timezone']
| {
"content_hash": "6708ed07b1191b37be4916e079f1e56c",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 41,
"avg_line_length": 24.375,
"alnum_prop": 0.7128205128205128,
"repo_name": "tctimmeh/dc-django-base",
"id": "57316b2bb185025f47280c38227355e448cb34d4",
"size": "195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dcbase/forms/userProfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1475"
},
{
"name": "HTML",
"bytes": "33639"
},
{
"name": "JavaScript",
"bytes": "3078"
},
{
"name": "Makefile",
"bytes": "514"
},
{
"name": "Python",
"bytes": "76148"
}
],
"symlink_target": ""
} |
from django.http import Http404, JsonResponse
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.shortcuts import render, get_object_or_404, redirect
from django.utils.decorators import method_decorator
from django.utils.text import slugify
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import ListView, DetailView, View
from TCA.administration.utils import get_user_type
from TCA.administration.models import Course, Teacher, Father, Student
from TCA.utils.models.shortcuts import get_object_or_none
from TCA.utils.text import slugifile
from TCA.posts.models import Post, ImagePost, PDFPost, FilePost
class PostListView(ListView):
    """Generic list view over all Post objects."""

    model = Post
class PostDetailView(DetailView):
    """Generic detail view for a single Post."""

    model = Post
@login_required
def allowed_posts(request, grade_id=None):
    """Render the list of posts the current user is allowed to see.

    - admins see every post
    - teachers see posts for the courses they teach
    - fathers see posts for their sons' grades (narrowed to ``grade_id``
      when it is one of those grades)
    - students see posts for their own grade

    Raises Http404 for any other user type.
    """
    user = request.user
    user_type = get_user_type(user)
    if user_type == 'admin':
        # BUG FIX: the original condition was `in ['teacher', 'admin']`,
        # which swallowed teachers here and made the teacher-specific
        # branch below unreachable — teachers saw every post instead of
        # only the posts of their own courses.
        posts = Post.objects.all()
    elif user_type == 'teacher':
        teacher = Teacher.objects.get(user=user)
        courses = [course.id for course in teacher.courses.all()]
        posts = Post.objects.filter(course__id__in=courses)
    elif user_type == 'father':
        father = Father.objects.get(user=user)
        sons = father.sons.all()
        int_grades = [int(s.grade.id) for s in sons]
        # Narrow to the requested grade only when it belongs to a son.
        if grade_id is not None and int(grade_id) in int_grades:
            grades = [grade_id]
        else:
            grades = int_grades
        posts = Post.objects.filter(course__grade__id__in=grades)
    elif user_type == 'student':
        student = Student.objects.get(user=user)
        posts = Post.objects.filter(course__grade=student.grade)
    else:
        raise Http404('No está autorizado para ver está página.')
    context = {'post_list': posts}
    return render(request, 'posts/post_list.html', context)
class PostView(View):
    """Create or edit a Post for a course (teachers and staff only)."""

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        """Require an authenticated user for every HTTP method."""
        return super(PostView, self).dispatch(*args, **kwargs)

    def get(self, request, course_key, id=None):
        """Render the post form, pre-filled when editing an existing post."""
        self._is_teacher_or_staff(request)
        course = get_object_or_404(Course, key=course_key)
        context = {'course': course}
        if id:
            context['post'] = get_object_or_404(Post, id=id)
        return render(request, 'posts/post_form.html', context)

    def post(self, request, course_key, id=None):
        """Create or update a post from the submitted form data."""
        course = get_object_or_404(Course, key=course_key)
        post = get_object_or_404(Post, id=id) if id else Post()
        post.author = request.user
        post.headline = request.POST['headline']
        post.slug = slugify(request.POST['headline'])
        post.text = request.POST['text']
        post.course = course
        post.save()
        post = self._add_content_elements(post, request)
        return redirect(reverse('dashboards.course', args=[course_key]))

    def _is_teacher_or_staff(self, request):
        """Raise Http404 unless the user is a teacher or a staff member."""
        user = request.user
        if get_user_type(user) != 'teacher' and not user.is_staff:
            raise Http404('No está autorizado para ver está página.')

    def _add_content_elements(self, post, request):
        """Attach any uploaded image, file and pdf to the post (in order)."""
        post = self._add_image(post, request)
        post = self._add_file(post, request)
        return self._add_pdf(post, request)

    def _add_image(self, post, request):
        """Create or replace the post's ImagePost from an uploaded 'image'."""
        upload = request.FILES.get('image', None)
        if upload is None:
            return post
        image_post = get_object_or_none(ImagePost, post=post)
        if image_post is None:
            image_post = ImagePost(post=post)
        image_post.image.save(slugifile(upload.name), upload)
        return post

    def _add_file(self, post, request):
        """Create or replace the post's FilePost from an uploaded 'file'."""
        upload = request.FILES.get('file', None)
        if upload is None:
            return post
        file_post = get_object_or_none(FilePost, post=post)
        if file_post is None:
            file_post = FilePost(post=post)
        file_post.file.save(slugifile(upload.name), upload)
        return post

    def _add_pdf(self, post, request):
        """Create or replace the post's PDFPost from an uploaded 'pdf'."""
        upload = request.FILES.get('pdf', None)
        if upload is None:
            return post
        pdf_post = get_object_or_none(PDFPost, post=post)
        if pdf_post is None:
            pdf_post = PDFPost(post=post)
        pdf_post.pdf.save(slugifile(upload.name), upload)
        return post
class DeletePost(View):
    """Delete a post (or one of its attachment records) via AJAX POST."""

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        """Require an authenticated user."""
        return super(DeletePost, self).dispatch(*args, **kwargs)

    @method_decorator(csrf_exempt)
    def post(self, request, model, id):
        """Delete the instance of ``model`` with primary key ``id``."""
        self._is_teacher_or_staff(request)
        instance = get_object_or_404(self.get_model(model), id=id)
        instance.delete()
        return JsonResponse({
            'model': model,
            'id': id,
            'message': 'Se ha eliminado correctamente el objeto.'
        })

    def get_model(self, model):
        """Map a model name string to its model class."""
        return {
            'Post': Post,
            'ImagePost': ImagePost,
            'FilePost': FilePost,
            'PDFPost': PDFPost
        }[model]

    def _is_teacher_or_staff(self, request):
        """Raise Http404 unless the user is a teacher or a staff member."""
        user = request.user
        if get_user_type(user) != 'teacher' and not user.is_staff:
            raise Http404('No está autorizado para realizar esta acción.')
| {
"content_hash": "bb8a869d4238604264527bca6dca0c43",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 74,
"avg_line_length": 35.21084337349398,
"alnum_prop": 0.61830624465355,
"repo_name": "JosmanPS/tsebaoth-christian-academy",
"id": "d528e977f9cd76690ba6f569a4b4f29c7b88c9cc",
"size": "5878",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TCA/posts/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "155074"
},
{
"name": "HTML",
"bytes": "51352"
},
{
"name": "JavaScript",
"bytes": "302885"
},
{
"name": "Python",
"bytes": "88031"
}
],
"symlink_target": ""
} |
import os
from subliminal import download_best_subtitles, save_subtitles
from subliminal.video import Episode
from subliminal.core import search_external_subtitles
from babelfish.language import Language
class Subtitler:
    def __init__(self, languages, providers):
        # languages: IETF language codes (parsed via Language.fromietf);
        # providers: subliminal provider names used for the downloads.
        self.languages = languages
        self.providers = providers
def subtitle(self, episodes):
# Parse babelfish languages
bb_lang = {Language.fromietf(l) for l in self.languages}
# Create subliminal episode set
sub_episodes = set()
for episode in episodes:
ep_path = os.path.join(episode['dir'], episode['filename'])
sub_episode = Episode.fromguess(ep_path, episode)
# Look for external subtitles (not done automatically, apparently)
sub_episode.subtitle_languages |= set(search_external_subtitles(sub_episode.name).values())
sub_episodes.add(sub_episode)
# download subtitles in the specified language
subl_subtitles = download_best_subtitles(sub_episodes, bb_lang, providers=self.providers)
for video, subtitles in subl_subtitles.items():
save_subtitles(video, subtitles)
# save subtitle languages in episode dict | {
"content_hash": "c49b5f192cb24879996889c20d77085c",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 103,
"avg_line_length": 31.375,
"alnum_prop": 0.6788844621513944,
"repo_name": "rkohser/gustaf2",
"id": "f7f43d3dbe815e422dee5f434812cd2cbacb19e4",
"size": "1255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "back/fs/subtitler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "467"
},
{
"name": "CSS",
"bytes": "179"
},
{
"name": "HTML",
"bytes": "3255"
},
{
"name": "JavaScript",
"bytes": "1645"
},
{
"name": "Python",
"bytes": "9151"
},
{
"name": "TypeScript",
"bytes": "12672"
}
],
"symlink_target": ""
} |
def getIncreaseAUCDict():
    """Return an assertion dict extended with increases-AUC specific keys.

    Starts from the base assertion/evidence dict and adds the
    pharmacokinetic interaction attributes, all initialised to None.
    """
    extra_keys = (
        "contVal", "numericVal", "numOfSubjects", "objectDose",
        "precipDose", "evidenceVal", "object", "precip",
    )
    assertion = getAssertionDict()
    assertion.update(dict.fromkeys(extra_keys))
    return assertion
## inhibits, substrate of
def getAssertionDict():
    """Return a fresh assertion/evidence dict.

    Every field starts as None except ``source``, which is always 'DIKB'.
    """
    fields = (
        "objectURI", "valueURI", "assertType", "homepage", "label",
        "evidence", "evidenceRole", "source", "evidenceType",
        "dateAnnotated", "whoAnnotated", "evidenceStatement",
        "evidenceSource", "researchStatementLabel", "asrt",
    )
    assertion = dict.fromkeys(fields)
    assertion["source"] = "DIKB"
    return assertion
| {
"content_hash": "3ad59a958b143632ee8f751a2141699b",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 159,
"avg_line_length": 40.294117647058826,
"alnum_prop": 0.6583941605839416,
"repo_name": "dbmi-pitt/DIKB-Micropublication",
"id": "5c84e88b9a930b4602ca38a1b27b11a35f20d8b8",
"size": "942",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/mp-scripts/PDDI_Model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3682138"
},
{
"name": "R",
"bytes": "4656"
},
{
"name": "Shell",
"bytes": "786"
}
],
"symlink_target": ""
} |
import io
import unittest
from unittest.mock import patch
from kattis import k_amultiplicationgame
###############################################################################
class SampleInput(unittest.TestCase):
    """Problem statement sample inputs and outputs."""

    def test_sample_input(self):
        """Run and assert problem statement sample input and output."""
        cases = [
            ('162', 'Stan wins.'),
            ('17', 'Ollie wins.'),
            ('34012226', 'Stan wins.'),
            ('4294967295', 'Stan wins.'),
        ]
        stdin_text = ''.join(number + '\n' for number, _ in cases)
        expected = ''.join(verdict + '\n' for _, verdict in cases)
        with patch('sys.stdin', io.StringIO(stdin_text)) as stdin,\
                patch('sys.stdout', new_callable=io.StringIO) as stdout:
            k_amultiplicationgame.main()
        self.assertEqual(stdout.getvalue(), expected)
        self.assertEqual(stdin.read(), '')
###############################################################################
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    unittest.main()
| {
"content_hash": "8018ff270b8cd5886c7204b9338b88e1",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 79,
"avg_line_length": 33.111111111111114,
"alnum_prop": 0.5209731543624161,
"repo_name": "ivanlyon/exercises",
"id": "5b3c7f20c7d58ae2ae85ad07a23fe7503b8e1477",
"size": "1192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_k_amultiplicationgame.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1283"
},
{
"name": "HTML",
"bytes": "9068"
},
{
"name": "Python",
"bytes": "96419"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from django.contrib import messages
from django.forms import widgets
from django.forms.utils import flatatt
from django.db.models import JSONField
from django.templatetags.static import static
from django.utils.html import format_html
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
from cms.models.placeholderpluginmodel import PlaceholderReference
from cmsplugin_cascade.clipboard.utils import deserialize_to_clipboard, serialize_from_placeholder
from cmsplugin_cascade.models import CascadeClipboard
class JSONAdminWidget(widgets.Textarea):
    """Admin textarea for a JSONField with a copy-to-clipboard button."""

    def __init__(self):
        # Fixed size suited to the admin change-form layout.
        super().__init__(attrs={'cols': '40', 'rows': '3'})

    def render(self, name, value, attrs=None, renderer=None):
        """Render the textarea plus a clipboard button and a status line.

        ``attrs`` defaults to None in the Django widget API; the original
        implementation crashed with a TypeError (``dict(None, ...)``) in
        that case, so it is normalized to an empty dict first.
        """
        if value is None:
            value = ''
        attrs = attrs or {}  # guard: dict(None, ...) / None.get() would raise
        final_attrs = self.build_attrs(self.attrs, extra_attrs=dict(attrs, name=name))
        id_data = attrs.get('id', 'id_data')
        clippy_url = static('cascade/admin/clippy.svg')
        return format_html('<textarea{0}>\r\n{1}</textarea> '
            '<button data-clipboard-target="#{2}" type="button" title="{4}" class="clip-btn">'
            '<img src="{3}" alt="{4}">'
            '</button>\n'
            '<div class="status-line"><label></label><strong id="pasted_success">{5}</strong>'
            '<strong id="copied_success">{6}</strong></div>',
            flatatt(final_attrs), str(value), id_data, clippy_url,
            _("Copy to Clipboard"),
            _("Successfully pasted JSON data"),
            _("Successfully copied JSON data"))
@admin.register(CascadeClipboard)
class CascadeClipboardAdmin(admin.ModelAdmin):
    """Admin for persisted CMS clipboard snapshots.

    Adds two pseudo-submit buttons: one copies the current CMS clipboard
    into this object's JSON ``data``, the other restores ``data`` back
    into the CMS clipboard.
    """

    fields = ['identifier', ('created_by', 'created_at', 'last_accessed_at'), 'save_clipboard', 'restore_clipboard', 'data']
    readonly_fields = ['created_by', 'created_at', 'last_accessed_at', 'save_clipboard', 'restore_clipboard']
    formfield_overrides = {
        JSONField: {'widget': JSONAdminWidget},
    }
    list_display = ['identifier', 'created_by', 'created_at']

    class Media:
        css = {'all': ['cascade/css/admin/clipboard.css']}
        js = ['admin/js/jquery.init.js', 'cascade/js/admin/clipboard.js']

    def save_clipboard(self, obj):
        # Renders the "persist clipboard" pseudo-submit button.
        return format_html('<input type="submit" value="{}" class="default pull-left" name="save_clipboard" />',
                           _("Insert Data"))
    save_clipboard.short_description = _("From CMS Clipboard")

    def restore_clipboard(self, obj):
        # Renders the "restore clipboard" pseudo-submit button.
        return format_html('<input type="submit" value="{}" class="default pull-left" name="restore_clipboard" />',
                           _("Restore Data"))
    restore_clipboard.short_description = _("To CMS Clipboard")

    def save_model(self, request, obj, form, change):
        """Handle the save/restore buttons, then persist the object."""
        if request.POST.get('save_clipboard'):
            placeholder_reference = PlaceholderReference.objects.last()
            if placeholder_reference:
                placeholder = placeholder_reference.placeholder_ref
                obj.data = serialize_from_placeholder(placeholder, self.admin_site)
            request.POST = request.POST.copy()
            request.POST['_continue'] = True
            messages.add_message(request, messages.INFO, _("The clipboard's content has been persisted for later."))
        if request.POST.get('restore_clipboard'):
            # The original code tested this flag twice in a row; the two
            # blocks are merged here, preserving the execution order (the
            # second POST.copy() was redundant).
            request.POST = request.POST.copy()
            request.POST['_continue'] = True
            messages.add_message(request, messages.INFO, _("Persisted content has been restored to the clipboard."))
            deserialize_to_clipboard(request, obj.data)
            obj.last_accessed_at = now()
        super().save_model(request, obj, form, change)
| {
"content_hash": "4fbcc8b2d0901a0138df62a8269bb3f0",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 124,
"avg_line_length": 48.08860759493671,
"alnum_prop": 0.6425375098710187,
"repo_name": "jrief/djangocms-cascade",
"id": "72a40e01fab9ec77f74bfc5404ce6bede53ec91b",
"size": "3799",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmsplugin_cascade/clipboard/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20152"
},
{
"name": "HTML",
"bytes": "30924"
},
{
"name": "JavaScript",
"bytes": "106622"
},
{
"name": "Python",
"bytes": "424314"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import BaseUserManager
class UserManager(BaseUserManager):
    """Manager for an email-keyed user model."""

    def create_user(self, email, password=None, **kwargs):
        """Create and save a user keyed by ``email``.

        Extra keyword arguments are forwarded to the model constructor
        (the original implementation silently discarded them, so fields
        passed through ``create_superuser`` were lost).

        Raises:
            ValueError: when no email is given.
        """
        if not email:
            raise ValueError('Users must have an email address')
        user = self.model(email=self.normalize_email(email), **kwargs)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, email, password, **kwargs):
        """Create a user flagged as staff.

        NOTE(review): only ``is_staff`` is set here; if the model also
        defines ``is_superuser`` it is probably meant to be set too —
        confirm against the model definition.
        """
        user = self.create_user(email, password=password, **kwargs)
        user.is_staff = True
        user.save(using=self._db)
        return user
| {
"content_hash": "dbe17ee43be654cbe732f01b2248404e",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 67,
"avg_line_length": 31.736842105263158,
"alnum_prop": 0.6533996683250415,
"repo_name": "mongkok/defocus",
"id": "b7018631d32906f0a9742ca0dc8aa952059b680a",
"size": "603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "defocus/users/managers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5215"
},
{
"name": "Makefile",
"bytes": "3210"
},
{
"name": "Python",
"bytes": "39366"
},
{
"name": "Shell",
"bytes": "1712"
}
],
"symlink_target": ""
} |
"""
This module is inspired by the open-sourced project Changes:
https://github.com/dropbox/changes
"""
from __future__ import absolute_import
import os
import os.path
from contextlib import contextmanager
from subprocess32 import Popen, PIPE, check_call
from easyci.utils import contextmanagers
class CommandError(Exception):
    """Raised when a VCS command exits with a non-zero status."""

    def __init__(self, cmd, retcode, stdout, stderr):
        self.cmd = cmd
        self.retcode = retcode
        self.stdout = stdout
        self.stderr = stderr

    def __unicode__(self):
        details = (self.cmd, self.retcode, self.stdout, self.stderr)
        return '%s returned %d:\nSTDOUT: %r\nSTDERR: %r' % details  # pragma: no cover

    def __str__(self):
        # Python-2 era bytes conversion, kept for compatibility.
        return self.__unicode__().encode('utf-8')  # pragma: no cover
class Vcs(object):
    """Abstract interface over a version-control working copy.

    Subclasses implement the VCS-specific operations; this base class
    provides command execution, an existence check and a disposable-copy
    context manager.
    """

    def __init__(self, path=None):
        """Initialize a new Vcs object for a repository located at `path`.

        When `path` is None, `get_working_directory` is used to locate
        the working copy.

        Args:
            path (str) - optional. The path to the repo working directory.
        """
        # `self.path` must exist (as None) before run() may be called
        # inside get_working_directory().
        self.path = None
        self.path = path if path is not None else self.get_working_directory()
        assert self.exists()

    def run(self, *args, **kwargs):
        """Execute `args` as a subprocess and return its stdout.

        Raises:
            CommandError - when the command exits non-zero.
        """
        if self.path is not None:
            # self.path is only None while __init__ is still resolving it
            kwargs.setdefault('cwd', self.path)
        # NOTE the environment is deliberately emptied; if we ever copy
        # the parent environment instead, GIT_WORK_TREE must be removed.
        kwargs['env'] = {}
        kwargs['stdout'] = PIPE
        kwargs['stderr'] = PIPE
        child = Popen(args, **kwargs)
        out, err = child.communicate()
        if child.returncode:
            raise CommandError(args[0], child.returncode, out, err)
        return out

    def exists(self):
        """Check if the working directory exists

        Returns:
            bool - True if the working directory exists
        """
        return os.path.exists(self.path)

    def get_working_directory(self):
        """Get the working directory for this repo.

        Returns:
            str - the path to the working directory

        Raises:
            CommandError
        """
        raise NotImplementedError  # pragma: no cover

    def install_hook(self, hook_name, hook_content):
        """Install the repository hook for this repo.

        Args:
            hook_name (str)
            hook_content (str)
        """
        raise NotImplementedError  # pragma: no cover

    def remove_ignored_files(self):
        """Remove files ignored by the repository"""
        raise NotImplementedError  # pragma: no cover

    def remove_unstaged_files(self):
        """Remove all unstaged files without touching ignored files.

        TODO this may be specific to git?
        """
        raise NotImplementedError  # pragma: no cover

    def clear(self, target_commit):
        """Reset the repository to `target_commit`, removing any staged,
        unstaged, and untracked files.

        Args:
            target_commit (str): the commit ID

        Raises:
            CommandError - if the commit does not exist
        """
        raise NotImplementedError  # pragma: no cover

    def private_dir(self):
        """Get the private directory associated with this repo, which is
        untracked by the repo.

        Returns:
            str - absolute path
        """
        raise NotImplementedError  # pragma: no cover

    def repository_dir(self):
        """Get the directory used by the VCS to store repository info,
        e.g. .git for git.

        Returns:
            str - absolute path
        """
        raise NotImplementedError  # pragma: no cover

    def get_signature(self):
        """Get the signature of the current state of the repository

        Returns:
            str
        """
        raise NotImplementedError  # pragma: no cover

    def ignore_patterns_file(self):
        """The ignore patterns file for this repo type,
        e.g. .gitignore for git.

        Returns:
            str - file name
        """
        raise NotImplementedError  # pragma: no cover

    def path_is_ignored(self, path):
        """Given a path, check if the path would be ignored.

        Returns:
            boolean
        """
        raise NotImplementedError  # pragma: no cover

    def get_ignored_files(self):
        """Return the list of files being ignored in this repository.

        Note that file names, not directories, are returned: for an
        ignored directory a/ containing b.txt and c.txt, the result is
        a/b.txt and a/c.txt, not a/.

        Returns:
            List[str] - list of ignored files. The paths are relative to the repo.
        """
        raise NotImplementedError  # pragma: no cover

    @contextmanager
    def temp_copy(self):
        """Yield a temporary, disposable copy of this repository; the
        copy is deleted at the end of the context.

        The following are not copied:
        - ignored files
        - easyci private directory (.git/eci for git)

        Yields:
            Vcs
        """
        with contextmanagers.temp_dir() as temp_dir:
            temp_root_path = os.path.join(temp_dir, 'root')
            source = os.path.join(self.path, '')  # trailing slash for rsync
            check_call([
                'rsync', '-r',
                "--exclude={}".format(self.private_dir()),
                "--filter=dir-merge,- {}".format(self.ignore_patterns_file()),
                source, temp_root_path,
            ])
            yield self.__class__(path=temp_root_path)
| {
"content_hash": "27413d312b4ef5e37aa54eca5f4fc999",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 115,
"avg_line_length": 28.364532019704434,
"alnum_prop": 0.5830149357415769,
"repo_name": "naphatkrit/easyci",
"id": "61d7527ff3961ad47881f00ed8bd6153a1a23da6",
"size": "5758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "easyci/vcs/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "371"
},
{
"name": "Python",
"bytes": "73597"
}
],
"symlink_target": ""
} |
from django import http
from django.conf import settings
from utils import next_redirect, confirmation_view
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils.html import escape
from django.views.decorators.http import require_POST
from django.contrib import comments
from django.contrib.comments import signals
class CommentPostBadRequest(http.HttpResponseBadRequest):
    """HTTP 400 returned when a comment post is invalid.

    With ``DEBUG`` enabled the response body explains why the post was
    rejected (for debugging purposes); in production an opaque 400 page
    is returned instead.
    """

    def __init__(self, why):
        super(CommentPostBadRequest, self).__init__()
        if not settings.DEBUG:
            return
        self.content = render_to_string("comments/400-debug.html", {"why": why})
def post_comment(request, next=None):
    """
    Post a comment.

    HTTP POST is required. If ``POST['submit'] == "preview"`` or if there are
    errors a preview template, ``comments/preview.html``, will be rendered.

    On success, redirects to the ``comment_done`` confirmation view (or to
    the ``next`` target); malformed or forged posts get an HTTP 400.
    """
    # Fill out some initial data fields from an authenticated user, if present
    data = request.POST.copy()
    if hasattr(request, 'user') and request.user.is_authenticated():
        if not data.get('name', ''):
            data["name"] = request.user.get_full_name() or request.user.username
        if not data.get('email', ''):
            data["email"] = request.user.email

    # Check to see if the POST data overrides the view's next argument.
    next = data.get("next", next)

    # Look up the object we're trying to comment about
    ctype = data.get("content_type")
    object_pk = data.get("object_pk")
    if ctype is None or object_pk is None:
        return CommentPostBadRequest("Missing content_type or object_pk field.")
    try:
        # ctype is "app_label.model"; split(".", 1) feeds get_model its
        # two arguments.
        model = models.get_model(*ctype.split(".", 1))
        target = model._default_manager.get(pk=object_pk)
    except TypeError:
        # get_model got the wrong number of pieces (no dot in ctype).
        return CommentPostBadRequest(
            "Invalid content_type value: %r" % escape(ctype))
    except AttributeError:
        # get_model returned None; _default_manager lookup failed.
        return CommentPostBadRequest(
            "The given content-type %r does not resolve to a valid model." % \
            escape(ctype))
    except ObjectDoesNotExist:
        return CommentPostBadRequest(
            "No object matching content-type %r and object PK %r exists." % \
            (escape(ctype), escape(object_pk)))

    # Do we want to preview the comment?
    preview = "preview" in data

    # Construct the comment form
    form = comments.get_form()(target, data=data)

    # Check security information
    if form.security_errors():
        return CommentPostBadRequest(
            "The comment form failed security verification: %s" % \
            escape(str(form.security_errors())))

    # If there are errors or if we requested a preview show the comment
    if form.errors or preview:
        # Most specific template first: per-model, then per-app, then global.
        template_list = [
            "comments/%s_%s_preview.html" % tuple(str(model._meta).split(".")),
            "comments/%s_preview.html" % model._meta.app_label,
            "comments/preview.html",
        ]
        return render_to_response(
            template_list, {
                "comment" : form.data.get("comment", ""),
                "form" : form,
                "next": next,
            },
            RequestContext(request, {})
        )

    # Otherwise create the comment
    comment = form.get_comment_object()
    comment.ip_address = request.META.get("REMOTE_ADDR", None)
    if hasattr(request, 'user') and request.user.is_authenticated():
        comment.user = request.user

    # Signal that the comment is about to be saved
    responses = signals.comment_will_be_posted.send(
        sender  = comment.__class__,
        comment = comment,
        request = request
    )

    # Any receiver may veto the save by returning exactly False.
    for (receiver, response) in responses:
        if response == False:
            return CommentPostBadRequest(
                "comment_will_be_posted receiver %r killed the comment" % receiver.__name__)

    # Save the comment and signal that it was saved
    comment.save()
    signals.comment_was_posted.send(
        sender  = comment.__class__,
        comment = comment,
        request = request
    )

    return next_redirect(data, next, comment_done, c=comment._get_pk_val())
# Comments may only be created via HTTP POST.
post_comment = require_POST(post_comment)

comment_done = confirmation_view(
    template = "comments/posted.html",
    doc = """Display a "comment was posted" success page."""
)
| {
"content_hash": "52224b09c19e4d1eaa9cbe56b0f20bc3",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 92,
"avg_line_length": 37.216,
"alnum_prop": 0.6444539982803096,
"repo_name": "grangier/django-11599",
"id": "e35e1fb80ed6982a353f9e78f4899a406c80e458",
"size": "4652",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/contrib/comments/views/comments.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "80589"
},
{
"name": "Python",
"bytes": "4902708"
},
{
"name": "Shell",
"bytes": "1608"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
try:
from django.conf.urls import url, include
except ImportError:
# for Django version less than 1.4
from django.conf.urls.defaults import url, include # NOQA
from raven.contrib.django.resolver import RouteResolver
# Nested conf included below under the 'example/' prefix; the trailing
# '', '' presumably mimic Django's (urlconf, app_namespace, namespace)
# include triple — verify against the Django version under test.
included_url_conf = (
    url(r'^foo/bar/(?P<param>[\w]+)', lambda x: ''),
), '', ''

# Top-level url conf exercised by the resolver tests below.
example_url_conf = (
    url(r'^api/(?P<project_id>[\w_-]+)/store/$', lambda x: ''),
    url(r'^example/', include(included_url_conf)),
)
def test_no_match():
    """A path matching no pattern resolves to itself."""
    assert RouteResolver().resolve('/foo/bar', example_url_conf) == '/foo/bar'
def test_simple_match():
    """A path with no parameterized pattern is returned unchanged."""
    assert RouteResolver().resolve('/report/', example_url_conf) == '/report/'
def test_complex_match():
    """A parameterized pattern replaces the value with its group name."""
    resolved = RouteResolver().resolve('/api/1234/store/', example_url_conf)
    assert resolved == '/api/{project_id}/store/'
def test_included_match():
    """Patterns inside an include() are resolved through the prefix."""
    resolved = RouteResolver().resolve('/example/foo/bar/baz', example_url_conf)
    assert resolved == '/example/foo/bar/{param}'
| {
"content_hash": "f010ae2e868b1af28d2c06901d95495d",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 71,
"avg_line_length": 27.285714285714285,
"alnum_prop": 0.6553228621291448,
"repo_name": "johansteffner/raven-python",
"id": "3960161940a7b49abda3291d379bba39f57ab215",
"size": "1146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/contrib/django/test_resolver.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "101"
},
{
"name": "Makefile",
"bytes": "663"
},
{
"name": "Python",
"bytes": "404876"
},
{
"name": "Shell",
"bytes": "489"
}
],
"symlink_target": ""
} |
import yaml
class Event(yaml.YAMLObject):
    """An event description deserialized from YAML (tag ``!Event``).

    Instances are normally constructed by ``yaml.load`` from documents
    tagged ``!Event``, so ``__init__`` assigns nothing itself.  The large
    commented-out section below is an earlier validation / C++ payload
    generation draft (note the Python 2 syntax: ``basestring``, ``print``
    statements); it is kept for reference only and is never executed.
    """
    yaml_tag = u'!Event'
    def __init__(self):
        # Attributes (Id, metadata, application, payload, parameters) are
        # populated by the YAML loader, not by this constructor.
        pass
        #self.Id = Id
        #self.metadata = metadata
        #self.application = application
        #self.payload = payload
        #self.parameters = parameters
    # def validate(self):
    #
    #     self._expand_parameter_presets()
    #     return self._validate_parameter_scalars() and self._validate_payload() and self._validate_dimensions()
    #
    # def _expand_parameter_presets(self):
    #     for preset in self.parameters["presets"]:
    #         for parameter in self.application.presets[preset]:
    #             if parameter in self.parameters["scalars"]:
    #                 logger.info("Ambiguous parameter definition: " + str(parameter) + ". Given values: " + str(
    #                     self.application.presets[preset][parameter]) + "," + str(
    #                     self.parameters["scalars"][parameter]) + " --> Maximum chosen.")
    #                 self.parameters["scalars"].update({
    #                     parameter: max(self.application.presets[preset][parameter],
    #                                    self.parameters["scalars"][parameter])})
    #             else:
    #                 self.parameters["scalars"].update({parameter: self.application.presets[preset][parameter]})
    #
    # def _validate_parameter_scalars(self):
    #     for scalar in self.parameters["scalars"]:
    #         if not (self._validate_parameter_name(scalar) and self._validate_parameter_minimum(
    #                 scalar) and self._validate_parameter_maximum(scalar)):
    #             return False
    #     return True
    #
    # def _validate_payload(self):
    #     payload_keys = self.payload.get("key")
    #     for key in payload_keys:
    #         field = self.payload["fields"][key]
    #         if field is None:
    #             return False
    #     return True
    #
    # def _validate_parameter_name(self, parameter):
    #     application_parameter = self.application.parameters[parameter]
    #     if application_parameter is None:
    #         return False
    #     return True
    #
    # def _validate_parameter_minimum(self, parameter):
    #     application_minimum = self.application.parameters[parameter]["min"]
    #     return self.parameters["scalars"].get(parameter) >= application_minimum
    #
    # def _validate_parameter_maximum(self, parameter):
    #     application_maximum = self.application.parameters[parameter]["max"]
    #     return self.parameters["scalars"].get(parameter) <= application_maximum
    #
    # def _validate_dimensions(self):
    #     return True
    #
    # def generate_payload_cpp(self):
    #     payload_cpp = ""
    #     fields = self.payload["fields"]
    #
    #     for field in fields:
    #
    #         # handle simple field
    #         if isinstance(fields[field], basestring):
    #             print "simple field"
    #
    #         # handle range field
    #         if isinstance(fields[field], list) and "range" in fields[field][1]:
    #             print "range field"
    #
    #         # handle enum field
    #         if isinstance(fields[field], list) and "values" in fields[field][1]:
    #             print "enum field"
"content_hash": "7fd0ae93533ac83ab737c3d675d12685",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 113,
"avg_line_length": 40.54320987654321,
"alnum_prop": 0.5624238733252132,
"repo_name": "ClockworkOrigins/m2etis",
"id": "7db065af28ffc9bc04307749efccb91faa60d4b8",
"size": "3284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "configurator/missile/descriptions_old/spec/event.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "13275"
},
{
"name": "C++",
"bytes": "1044746"
},
{
"name": "CMake",
"bytes": "187386"
},
{
"name": "Python",
"bytes": "136028"
},
{
"name": "Shell",
"bytes": "1079"
}
],
"symlink_target": ""
} |
import ConfigParser
import base64
import json
import urllib2
import sys
# Read Fuel deployment and TestRail reporting settings from the environment
# config dropped onto the node (Python 2 script: print statements below).
parser = ConfigParser.SafeConfigParser()
parser.read('/root/env.conf')
fuel_ip = dict(parser.items('fuel'))['fuel_ip']
interface = dict(parser.items('fuel'))['interface']
create_new_run = dict(parser.items('testrail'))['create_new_run']
suite_id = dict(parser.items('testrail'))['suite_id']
cluster_id = dict(parser.items('fuel'))['cluster_id']
between_nodes = dict(parser.items('testrail'))['between_nodes']
# Config values are plain strings; normalise the flag to a real boolean.
between_nodes = True if between_nodes == "true" else False
version = str(dict(parser.items('fuel'))['version'])
print "create new run: {}".format(create_new_run)
if create_new_run == "true":
    print "suite_id: {}".format(suite_id)
# Testrail API
class APIClient:
    """Minimal TestRail API v2 client (Python 2, urllib2-based).

    ``user`` and ``password`` must be assigned after construction; they are
    sent as HTTP Basic auth on every request.
    """
    def __init__(self, base_url):
        self.user = ''
        self.password = ''
        if not base_url.endswith('/'):
            base_url += '/'
        # All v2 endpoints hang off index.php?/api/v2/ on the TestRail host.
        self.__url = base_url + 'index.php?/api/v2/'
    def send_get(self, uri):
        # GET the given API uri and return the decoded JSON result.
        return self.__send_request('GET', uri, None)
    def send_post(self, uri, data):
        # POST a JSON-serialisable payload to the given API uri.
        return self.__send_request('POST', uri, data)
    def __send_request(self, method, uri, data):
        """Issue the request; raise APIError on an HTTP error status."""
        url = self.__url + uri
        request = urllib2.Request(url)
        if (method == 'POST'):
            request.add_data(json.dumps(data))
        auth = base64.b64encode('%s:%s' % (self.user, self.password))
        request.add_header('Authorization', 'Basic %s' % auth)
        request.add_header('Content-Type', 'application/json')
        e = None
        try:
            response = urllib2.urlopen(request).read()
        except urllib2.HTTPError as e:
            # Keep the HTTPError: its body may carry a TestRail error message
            # which is decoded below and folded into the APIError text.
            response = e.read()
        if response:
            result = json.loads(response)
        else:
            result = {}
        # NOTE(review): `e != None` works here, but `e is not None` is the
        # idiomatic identity check.
        if e != None:
            if result and 'error' in result:
                error = '"' + result['error'] + '"'
            else:
                error = 'No additional error message received'
            raise APIError('TestRail API returned HTTP %s (%s)' %
                (e.code, error))
        return result
class APIError(Exception):
    """Raised when the TestRail API responds with an HTTP error status."""
    pass
# Module-level TestRail client used by all helpers below.
# NOTE(review): credentials are hard-coded in plain text here -- they should
# be moved into /root/env.conf (or another secret store), not the repository.
client = APIClient('https://mirantis.testrail.com/')
client.user = 'sgudz@mirantis.com'
client.password = 'qwertY123'
def get_run_id():
    """Return the TestRail run id to report results into.

    When ``create_new_run`` is enabled in the config, a fresh run is created
    under the configured suite (assigned to user 89, including all cases)
    and its id returned; otherwise the pre-existing ``run_id`` from the
    config is used.
    """
    create_new_run = dict(parser.items('testrail'))['create_new_run']
    if create_new_run == "true":
        run_name = dict(parser.items('testrail'))['run_name']
        suite_id = int(dict(parser.items('testrail'))['suite_id'])
        # The payload is built as a JSON text template and re-parsed; note
        # this breaks if run_name contains characters needing JSON escaping.
        data_str = """{"suite_id": %(suite_id)s, "name": "%(name)s", "assignedto_id": 89, "include_all": true}""" %{"suite_id": suite_id, "name": run_name}
        data = json.loads(data_str)
        # Project id 3 is hard-coded into the endpoint.
        result = client.send_post('add_run/3', data)
        return result['id']
    else:
        return dict(parser.items('testrail'))['run_id']
def get_tests_ids(run_id):
    """Return the ids of every test contained in the given TestRail run."""
    tests = client.send_get('get_tests/{}'.format(run_id))
    return [test['id'] for test in tests]
def get_token_id(fuel_ip):
    """Authenticate against Keystone v2 on *fuel_ip* and return a token id."""
    body = '{"auth": {"tenantName": "admin", "passwordCredentials": {"username": "admin", "password": "admin"}}}'
    request = urllib2.Request(
        'http://{}:5000/v2.0/tokens'.format(fuel_ip),
        data=body,
        headers={'Content-Type': 'application/json', 'Accept': 'application/json'})
    token_data = json.load(urllib2.urlopen(request))
    return token_data['access']['token']['id']
def get_neutron_conf(fuel_ip, token_id):
    """Fetch the cluster's neutron network configuration from the Fuel API."""
    request = urllib2.Request(
        'http://{0}:8000/api/clusters/{1}/network_configuration/neutron'.format(fuel_ip, cluster_id),
        headers={'X-Auth-Token': token_id})
    return json.load(urllib2.urlopen(request))
def get_nodes(fuel_ip, token_id):
    """Return the ids of all nodes belonging to the configured cluster."""
    request = urllib2.Request(
        'http://{0}:8000/api/nodes/?cluster_id={1}'.format(fuel_ip, cluster_id),
        headers={'X-Auth-Token': token_id})
    nodes_data = json.load(urllib2.urlopen(request))
    return [node['id'] for node in nodes_data]
def get_cluster_attributes(fuel_ip, token_id):
    """Return the attribute tree of the configured cluster from the Fuel API."""
    request = urllib2.Request(
        'http://{0}:8000/api/clusters/{1}/attributes'.format(fuel_ip, cluster_id),
        headers={'X-Auth-Token': token_id})
    return json.load(urllib2.urlopen(request))
def get_computes(fuel_ip, token_id):
    """Return the ids of the cluster nodes carrying the 'compute' role."""
    headers = {'X-Auth-Token': token_id}
    computes = []
    # One extra API round-trip per node to inspect its role list.
    for node_id in get_nodes(fuel_ip, token_id):
        request = urllib2.Request(
            'http://{0}:8000/api/nodes/{1}'.format(fuel_ip, node_id),
            headers=headers)
        node_info = json.load(urllib2.urlopen(request))
        if 'compute' in node_info['roles']:
            computes.append(node_id)
    return computes
def get_offloading(fuel_ip, token_id):
    """Map each cluster node to an offloading state for the configured NIC.

    Returns a dict ``'Node-<id>' -> True | False | "Default"`` derived from
    the offloading-mode states the Fuel API reports for ``interface``.
    """
    headers = {'X-Auth-Token': token_id}
    offloading_nodes = {}
    for node in get_nodes(fuel_ip, token_id):
        url = 'http://{0}:8000/api/nodes/{1}/interfaces'.format(fuel_ip, node)
        req = urllib2.Request(url, headers=headers)
        content = urllib2.urlopen(req)
        interface_data = json.load(content)
        # Narrow the interface list down to the NIC named in the config.
        for item in interface_data:
            if item['name'] == interface:
                interface_data = item
        state_list = []
        for item in interface_data['offloading_modes']:
            state_list.append(item['state'])
        # NOTE(review): this loop leaves `state` reflecting only the *last*
        # mode in the list (None -> "Default", falsy -> False, else True).
        # If the intent is "all/any modes enabled", the aggregation should
        # use all()/any() instead -- confirm before changing.
        for item in state_list:
            if item is None:
                state = "Default"
            elif not item:
                state = False
            else:
                state = True
        offloading_nodes["Node-" + str(node)] = state
    return offloading_nodes
if __name__ == "__main__":
    # Determine the cluster topology, pick the matching TestRail test, parse
    # shaker bandwidth results from the config, and push them to TestRail.
    token_id = get_token_id(fuel_ip)
    median = 0
    stdev = 0
    run_id = get_run_id()
    # The run is expected to contain exactly eight tests, one per supported
    # topology combination (DVR/L3HA x VLAN/VXLAN x offloading variants).
    test1, test2, test3, test4, test5, test6, test7, test8 = get_tests_ids(run_id)
    seg_type = get_neutron_conf(fuel_ip, token_id)['networking_parameters']['segmentation_type']
    if seg_type == 'vlan':
        vlan = True
        vxlan = False
    else:
        vlan = False
        vxlan = True
    dvr = get_cluster_attributes(fuel_ip, token_id)['editable']['neutron_advanced_configuration']['neutron_dvr']['value']
    l3ha = get_cluster_attributes(fuel_ip, token_id)['editable']['neutron_advanced_configuration']['neutron_l3_ha']['value']
    nodes = get_nodes(fuel_ip, token_id)
    # Offloading counts as enabled only when BOTH sampled computes have it.
    compute_id1 = get_computes(fuel_ip, token_id)[0]
    compute_id2 = get_computes(fuel_ip, token_id)[1]
    offloading_compute1 = get_offloading(fuel_ip, token_id)['Node-{}'.format(compute_id1)]
    offloading_compute2 = get_offloading(fuel_ip, token_id)['Node-{}'.format(compute_id2)]
    if offloading_compute1 and offloading_compute2:
        offloading = True
    else:
        offloading = False
    # Dispatch the topology onto one of the eight TestRail tests; order
    # matters (the more specific offloading cases are checked first).
    if dvr and vxlan and offloading:
        test_id = test3
    elif dvr and vlan and offloading:
        test_id = test4
    elif dvr and vxlan:
        test_id = test1
    elif dvr and vlan:
        test_id = test2
    elif l3ha and vxlan and offloading and between_nodes:
        test_id = test5
    elif l3ha and vxlan and offloading:
        test_id = test6
    elif l3ha and vlan and offloading and between_nodes:
        test_id = test7
    elif l3ha and vlan and offloading:
        test_id = test8
    else:
        print "Wrong cluster config. DVR: {0}, L3HA: {1}, VLAN: {2}, VXLAN: {3}, BETWEEN_NODES: {4}, OFFLOADING: {5}".format(dvr, l3ha, vlan, vxlan, between_nodes, offloading)
        #raise ClusterError("DVR: {0}, L3HA: {1}, VLAN: {2}, VXLAN: {3}, BETWEEN_NODES: {4}, OFFLOADING: {5}".format(dvr, l3ha, vlan, vxlan, between_nodes, offloading))
        #sys.exit("Wrong cluster config")
        # NOTE(review): execution continues with test_id = 0, so the
        # get_test/0 calls below will hit the API with a bogus id; restoring
        # the sys.exit above looks intended.
        test_id = 0
    # Baseline expectations are stored on the TestRail test case steps:
    # step 0 holds the expected median, step 1 the expected deviation.
    base_median = client.send_get('get_test/{}'.format(test_id))['custom_test_case_steps'][0]['expected']
    base_stdev = client.send_get('get_test/{}'.format(test_id))['custom_test_case_steps'][1]['expected']
    print "Test ID for testing: {}".format(test_id)
    print "DVR: {0}, L3HA: {1}, VLAN: {2}, VXLAN: {3}, BETWEEN_NODES: {4}, OFFLOADING: {5}".format(dvr, l3ha, vlan, vxlan, between_nodes, offloading)
    # Shaker output is stored as JSON text inside the env config; the loop
    # keeps the bandwidth stats of the LAST record that has them.
    content = dict(parser.items('test_json'))['json_data']
    json_data = json.loads(content)
    item = [each for each in json_data['records']]
    for i in range(len(item)):
        try:
            median = int(round(json_data['records'][item[i]]['stats']['bandwidth']['median'], 0))
            stdev = int(round(json_data['records'][item[i]]['stats']['bandwidth']['stdev'], 0))
        except KeyError:
            continue
    # TestRail status ids: 1 = passed, 5 = failed.  Fail the median check
    # below 90% of baseline, and the deviation check above 120% of baseline.
    test_glob_status = test_custom_median_status = test_custom_stdev_status = 1
    if median < float(base_median) * 0.9:
        test_glob_status = test_custom_median_status = 5
    if stdev > float(base_stdev) * 1.2:
        test_custom_stdev_status = 5
    ### Collecting results
    custom_test_res = [{'status_id': test_custom_median_status, 'content': 'Check [network bandwidth, Median; Mbps]',
                        'expected': str(base_median), 'actual': str(median)},
                       {'status_id': test_custom_stdev_status, 'content': 'Check [deviation; pcs]', 'expected': str(base_stdev),
                        'actual': str(stdev)}]
    glob_test_result = {'test_id': test_id, 'status_id': test_glob_status, 'version': str(version),
                        'custom_test_case_steps_results': custom_test_res}
    results_all_dict = {'results': [glob_test_result]}
    client.send_post('add_results/{}'.format(run_id), results_all_dict)
| {
"content_hash": "27af1bddd5485260240c284ef1c0db55",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 175,
"avg_line_length": 40.252066115702476,
"alnum_prop": 0.6039421004003696,
"repo_name": "vortex610/mos",
"id": "5e0b85f3118ceb23e153c0b0c3b69380bd66008f",
"size": "9741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run_tests/shaker_run/addresult.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "36203"
},
{
"name": "JavaScript",
"bytes": "5776"
},
{
"name": "Makefile",
"bytes": "2015"
},
{
"name": "Mako",
"bytes": "3223"
},
{
"name": "PLpgSQL",
"bytes": "5639"
},
{
"name": "PowerShell",
"bytes": "6417"
},
{
"name": "Python",
"bytes": "446313"
},
{
"name": "Shell",
"bytes": "141451"
}
],
"symlink_target": ""
} |
from os import path
from setuptools import find_packages, setup
this_directory = path.abspath(path.dirname(__file__))
# Reuse the README as the PyPI long description (rendered as reStructuredText,
# see long_description_content_type below).
with open(path.join(this_directory, "README.rst")) as fd:
    long_description = fd.read()
setup(
    name="pyoffers",
    url="https://github.com/Stranger6667/pyoffers",
    version="0.7.0",
    license="MIT",
    author="Dmitry Dygalo",
    author_email="dadygalo@gmail.com",
    maintainer="Dmitry Dygalo",
    maintainer_email="dadygalo@gmail.com",
    keywords=["hasoffers", "api", "client"],
    description="Python client library for HasOffers API",
    long_description=long_description,
    long_description_content_type="text/x-rst",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "Operating System :: OS Independent",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
    ],
    include_package_data=True,
    # Sources live under src/ (the "src layout").
    packages=find_packages(where="src"),
    package_dir={"": "src"},
    install_requires=["requests"],
)
| {
"content_hash": "35d71fdf94773d19488e3aa699133ba0",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 70,
"avg_line_length": 35.142857142857146,
"alnum_prop": 0.6321138211382114,
"repo_name": "Stranger6667/pyoffers",
"id": "aeddf8413e6852678efd04b1794fc84a694f6c27",
"size": "1492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "74265"
}
],
"symlink_target": ""
} |
from .default_file_handler import * | {
"content_hash": "101d82b9d263ea388417736215b079ba",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 35,
"avg_line_length": 35,
"alnum_prop": 0.8,
"repo_name": "vinoth3v/In",
"id": "7d9c06e5dd1f16c544753e7c8a2a9392371b084f",
"size": "35",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "In/filer/path/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "33032"
},
{
"name": "Python",
"bytes": "779047"
}
],
"symlink_target": ""
} |
""" Defines convenience pyparsing constructs and token converters.
Based on sparser.py by Tim Cera timcera@earthlink.net.
"""
#------------------------------------------------------------------------------
# Imports:
#------------------------------------------------------------------------------
from pyparsing import \
TokenConverter, oneOf, string, Literal, Group, Word, Optional, Combine, \
sglQuotedString, dblQuotedString, restOfLine, nums
#------------------------------------------------------------------------------
# "ToBoolean" class:
#------------------------------------------------------------------------------
class ToBoolean(TokenConverter):
    """Token converter that coerces the matched token to ``bool``."""
    def postParse(self, instring, loc, tokenlist):
        """Return the truth value of the first parsed token."""
        first_token = tokenlist[0]
        return bool(first_token)
#------------------------------------------------------------------------------
# "ToInteger" class:
#------------------------------------------------------------------------------
class ToInteger(TokenConverter):
    """Token converter that coerces the matched token to ``int``."""
    def postParse(self, instring, loc, tokenlist):
        """Return the first parsed token converted to an integer."""
        first_token = tokenlist[0]
        return int(first_token)
#------------------------------------------------------------------------------
# "ToFloat" class:
#------------------------------------------------------------------------------
class ToFloat(TokenConverter):
    """Token converter that coerces the matched token to ``float``."""
    def postParse(self, instring, loc, tokenlist):
        """Return the first parsed token converted to a float."""
        first_token = tokenlist[0]
        return float(first_token)
#------------------------------------------------------------------------------
# Convenience pyparsing constructs
#------------------------------------------------------------------------------
decimal_sep = "."
sign = oneOf("+ -")
scolon = Literal(";").suppress()
# Comment styles of the supported input formats.
matlab_comment = Group(Literal('%') + restOfLine).suppress()
psse_comment = Literal('@!') + Optional(restOfLine)
# part of printables without decimal_sep, +, -
# NOTE(review): relies on the Python 2 `string.replace` module function.
special_chars = string.replace(
    '!"#$%&\'()*,./:;<=>?@[\\]^_`{|}~', decimal_sep, ""
)
# Numeric token grammars; the To* converters attached below turn the matched
# text into real Python bool/int/float values at parse time.
boolean = ToBoolean(ToInteger(Word("01", exact=1))).setName("bool")
integer = ToInteger(
    Combine(Optional(sign) + Word(nums))
).setName("integer")
positive_integer = ToInteger(
    Combine(Optional("+") + Word(nums))
).setName("integer")
negative_integer = ToInteger(
    Combine("-" + Word(nums))
).setName("integer")
# Real numbers: optional sign, digits, optional fraction, optional exponent.
real = ToFloat(
    Combine(
        Optional(sign) +
        Word(nums) +
        Optional(decimal_sep + Word(nums)) +
        Optional(oneOf("E e") + Optional(sign) + Word(nums))
    )
).setName("real")
#real = ToFloat(
#    Combine(Optional(sign) +
#        Word(nums) +
#        decimal_sep +
#        Optional(Word(nums)) +
#        Optional(oneOf("E e") +
#        Word(nums)))).setName("real")
positive_real = ToFloat(
    Combine(
        Optional("+") + Word(nums) + decimal_sep + Optional(Word(nums)) +
        Optional(oneOf("E e") + Word(nums))
    )
).setName("real")
negative_real = ToFloat(
    Combine(
        "-" + Word(nums) + decimal_sep + Optional(Word(nums)) +
        Optional(oneOf("E e") + Word(nums))
    )
).setName("real")
# Either single- or double-quoted string literal.
q_string = (sglQuotedString | dblQuotedString).setName("q_string")
# add other characters we should skip over between interesting fields
#integer_junk = Optional(
#    Suppress(Word(alphas + special_chars + decimal_sep))
#).setName("integer_junk")
#
#real_junk = Optional(
#    Suppress(Word(alphas + special_chars))
#).setName("real_junk")
#
#q_string_junk = SkipTo(q_string).setName("q_string_junk")
# punctuation
colon = Literal(":")
lbrace = Literal("{")
rbrace = Literal("}")
lbrack = Literal("[")
rbrack = Literal("]")
lparen = Literal("(")
rparen = Literal(")")
equals = Literal("=")
comma = Literal(",")
dot = Literal(".")
slash = Literal("/")
bslash = Literal("\\")
star = Literal("*")
semi = Literal(";")
at = Literal("@")
minus = Literal("-")
# Comma that is matched but dropped from the parse results.
comma_sep = comma.suppress()
#------------------------------------------------------------------------------
# A convenient function for calculating a unique name given a list of
# existing names.
#------------------------------------------------------------------------------
def make_unique_name(base, existing=(), format="%s_%s"):
    """Return a name, unique within a context, based on the specified name.

    Parameters:
        base: the desired base name of the generated unique name.
        existing: a sequence of existing names to avoid returning.
        format: a formatting specification combining *base* with a counter,
            used when *base* itself is taken (the counter starts at 2).

    Returns:
        ``base`` unchanged when it is free, otherwise the first of
        ``"base_2"``, ``"base_3"``, ... not present in *existing*.
    """
    # Immutable default replaces the previous `existing=[]` mutable-default
    # anti-pattern; behaviour is identical since `existing` is only read.
    count = 2
    name = base
    while name in existing:
        name = format % (base, count)
        count += 1
    return name
# EOF -------------------------------------------------------------------------
| {
"content_hash": "c8d912beb44a4e3f35f3ac455bf2b0ad",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 79,
"avg_line_length": 29.85542168674699,
"alnum_prop": 0.4943502824858757,
"repo_name": "rwl/pylon",
"id": "9dc0f7325f9896434ae2c1471608971b78905232",
"size": "5705",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pylon/io/parsing_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1168"
},
{
"name": "HTML",
"bytes": "369"
},
{
"name": "MATLAB",
"bytes": "943460"
},
{
"name": "Python",
"bytes": "988513"
}
],
"symlink_target": ""
} |
import requests
import time
from io import BytesIO
from picamera import PiCamera
# Create an in-memory stream
print('Raspberry Pi laundry camera started')
stream = BytesIO()
# Setup the camera
# Rotate 90 degrees
# ROI is 0.6, 0.22, 0.2, 0.2
camera = PiCamera()
camera.resolution = (1280, 1024)
camera.rotation = 90
camera.zoom = (0.4, 0.05, 0.4, 0.5)
# Camera warm-up time?
camera.start_preview()
time.sleep(2)
print('Camera is warmed up.')
# Capture three JPEG snapshots, ~15 s apart, POSTing each to the back end.
for x in range(0, 3):
    camera.capture(stream, 'jpeg')
    # Send the image to the back end
    statusCode = 0
    try:
        res = requests.post(url='http://envy5:8888/img',
                            data=stream.getvalue(),
                            headers={'Content-Type': 'image/jpeg'})
        statusCode = res.status_code
    finally:
        # NOTE(review): try/finally without an except clause -- a failed POST
        # still prints 'Response: 0' here but then propagates and aborts the
        # loop; confirm that is intended.
        print('Response: ', statusCode)
    # Reset the stream
    stream.seek(0)
    stream.truncate()
    # Sleep for 15 seconds
    time.sleep(15)
| {
"content_hash": "9912d1b138e536073f8a456476653bde",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 52,
"avg_line_length": 20.761904761904763,
"alnum_prop": 0.6811926605504587,
"repo_name": "kenahrens/iot-laundry",
"id": "355c2b93a804eba6fdf6c755d2fc20d4d1cdca80",
"size": "1224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "raspberrypi/backup/rpi-laundry-thrice.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "639"
},
{
"name": "Dockerfile",
"bytes": "980"
},
{
"name": "HTML",
"bytes": "1722"
},
{
"name": "Java",
"bytes": "6551"
},
{
"name": "JavaScript",
"bytes": "26093"
},
{
"name": "PowerShell",
"bytes": "45"
},
{
"name": "Python",
"bytes": "10292"
},
{
"name": "Shell",
"bytes": "142"
}
],
"symlink_target": ""
} |
__author__ = 'Tom Schaul, tom@idsia.ch'
from pybrain.utilities import abstractMethod
class Environment(object):
    """ The general interface for whatever we would like to model, learn about,
        predict, or simply interact in. We can perform actions, and access
        (partial) observations.
    """

    # the number of action values the environment accepts
    indim = 0

    # the number of sensor values the environment produces
    outdim = 0

    # discrete state space
    discreteStates = False

    # discrete action space
    discreteActions = False

    # number of possible actions for discrete action space
    numActions = None

    def getSensors(self):
        """ the currently visible state of the world (the observation may be
            stochastic - repeated calls returning different values)

            :rtype: by default, this is assumed to be a numpy array of doubles
            :note: This function is abstract and has to be implemented.
        """
        # Subclasses must override; calling on the base class raises.
        abstractMethod()

    def performAction(self, action):
        """ perform an action on the world that changes it's internal state (maybe
            stochastically).
            :key action: an action that should be executed in the Environment.
            :type action: by default, this is assumed to be a numpy array of doubles
            :note: This function is abstract and has to be implemented.
        """
        # Subclasses must override; calling on the base class raises.
        abstractMethod()

    def reset(self):
        """ Most environments will implement this optional method that allows for
            reinitialization.
        """
        # Optional hook: the base implementation is intentionally a no-op.
| {
"content_hash": "35120e425e6f9f64a709177b9697e978",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 84,
"avg_line_length": 32.705882352941174,
"alnum_prop": 0.6223021582733813,
"repo_name": "rbalda/neural_ocr",
"id": "8897b3c17d831cf88f7fa77947d7d25ac43ed096",
"size": "1668",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "env/lib/python2.7/site-packages/pybrain/rl/environments/environment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "497604"
},
{
"name": "C++",
"bytes": "3309990"
},
{
"name": "CSS",
"bytes": "135235"
},
{
"name": "FORTRAN",
"bytes": "10375"
},
{
"name": "HTML",
"bytes": "215390"
},
{
"name": "JavaScript",
"bytes": "206780"
},
{
"name": "Jupyter Notebook",
"bytes": "16254"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "26980034"
},
{
"name": "Shell",
"bytes": "3895"
}
],
"symlink_target": ""
} |
"""This example updates a campaign by setting its status to PAUSED.
To get campaigns, run get_campaigns.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: CampaignService.mutate
"""
__author__ = ('api.kwinter@gmail.com (Kevin Winter)'
'Joseph DiLallo')
from googleads import adwords
CAMPAIGN_ID = 'INSERT_CAMPAIGN_ID_HERE'
def main(client, campaign_id):
    """Pause the campaign identified by *campaign_id* via CampaignService."""
    campaign_service = client.GetService('CampaignService', version='v201406')
    # A single SET operation flips the campaign's status to PAUSED.
    operation = {
        'operator': 'SET',
        'operand': {
            'id': campaign_id,
            'status': 'PAUSED'
        }
    }
    response = campaign_service.mutate([operation])
    # Report every campaign the mutate call touched.
    for campaign in response['value']:
        print ('Campaign with name \'%s\' and id \'%s\' was updated.'
               % (campaign['name'], campaign['id']))
if __name__ == '__main__':
    # Initialize client object.
    # Credentials/properties are read from ~/googleads.yaml (see module docstring).
    adwords_client = adwords.AdWordsClient.LoadFromStorage()
    main(adwords_client, CAMPAIGN_ID)
| {
"content_hash": "f56645984bea8f3ac83b4188bf92de27",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 77,
"avg_line_length": 27.23913043478261,
"alnum_prop": 0.6855546687948922,
"repo_name": "dietrichc/streamline-ppc-reports",
"id": "f93143e3b27190e7ecde63f1bd8f377a4618a09d",
"size": "1871",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/adwords/v201406/basic_operations/update_campaign.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "492"
},
{
"name": "JavaScript",
"bytes": "504"
},
{
"name": "Python",
"bytes": "2235969"
}
],
"symlink_target": ""
} |
from matplotlib import pyplot
from shapely.geometry import Polygon
from shapely import affinity
from descartes.patch import PolygonPatch
from figures import SIZE, BLUE, GRAY, set_limits, add_origin
# Side-by-side demo of shapely.affinity.scale: gray = original triangle,
# blue = scaled copy.
fig = pyplot.figure(1, figsize=SIZE, dpi=90)
triangle = Polygon([(1, 1), (2, 3), (3, 1)])
# 1
ax = fig.add_subplot(121)
patch = PolygonPatch(triangle, facecolor=GRAY, edgecolor=GRAY,
                     alpha=0.5, zorder=1)
# Negative yfact mirrors the triangle vertically about its center.
triangle_a = affinity.scale(triangle, xfact=1.5, yfact=-1)
patch_a = PolygonPatch(triangle_a, facecolor=BLUE, edgecolor=BLUE,
                       alpha=0.5, zorder=2)
ax.add_patch(patch)
ax.add_patch(patch_a)
add_origin(ax, triangle, 'center')
ax.set_title("a) xfact=1.5, yfact=-1")
set_limits(ax, 0, 5, 0, 4)
# 2
ax = fig.add_subplot(122)
patch = PolygonPatch(triangle, facecolor=GRAY, edgecolor=GRAY,
                     alpha=0.5, zorder=1)
# Scaling about an explicit origin keeps the point (1, 1) fixed.
triangle_b = affinity.scale(triangle, xfact=2, origin=(1, 1))
patch_b = PolygonPatch(triangle_b, facecolor=BLUE, edgecolor=BLUE,
                       alpha=0.5, zorder=2)
ax.add_patch(patch)
ax.add_patch(patch_b)
add_origin(ax, triangle, (1, 1))
ax.set_title("b) xfact=2, origin=(1, 1)")
set_limits(ax, 0, 5, 0, 4)
pyplot.show()
| {
"content_hash": "c8d7beb36f98e5969633aaa09fb3491a",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 66,
"avg_line_length": 26.434782608695652,
"alnum_prop": 0.6620065789473685,
"repo_name": "jdmcbr/Shapely",
"id": "e2ecf7abe8dad4cac66fbe25240616ebb5d6045d",
"size": "1216",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "docs/code/scale.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "376703"
},
{
"name": "Shell",
"bytes": "2189"
}
],
"symlink_target": ""
} |
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import warnings
from google.api_core import gapic_v1, grpc_helpers, operations_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
import grpc # type: ignore
from google.cloud.retail_v2.types import import_config
from google.cloud.retail_v2.types import product
from google.cloud.retail_v2.types import product as gcr_product
from google.cloud.retail_v2.types import product_service
from .base import DEFAULT_CLIENT_INFO, ProductServiceTransport
class ProductServiceGrpcTransport(ProductServiceTransport):
"""gRPC backend transport for ProductService.
Service for ingesting [Product][google.cloud.retail.v2.Product]
information of the customer's website.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
    def __init__(
        self,
        *,
        host: str = "retail.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: Optional[grpc.Channel] = None,
        api_mtls_endpoint: Optional[str] = None,
        client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
        client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        api_audience: Optional[str] = None,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            api_audience (Optional[str]): Forwarded verbatim to the base
                transport; presumably the intended JWT audience override —
                confirm against the base transport class.

        Raises:
          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        self._operations_client: Optional[operations_v1.OperationsClient] = None

        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)

        # Resolve channel/TLS configuration. Precedence: an explicit channel
        # wins outright; otherwise the deprecated mTLS arguments, then the
        # newer client_cert_source_for_mtls, determine the SSL credentials.
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )

        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
            api_audience=api_audience,
        )

        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                # -1 disables gRPC's default 4 MiB message-size caps.
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )

        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
    @classmethod
    def create_channel(
        cls,
        host: str = "retail.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> grpc.Channel:
        """Create and return a gRPC channel object.

        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.

        Returns:
            grpc.Channel: A gRPC channel object.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        # Defaults for scopes and host come from class-level attributes so
        # subclasses can override them without touching this method.
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service."""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
return self._operations_client
@property
def create_product(
self,
) -> Callable[[product_service.CreateProductRequest], gcr_product.Product]:
r"""Return a callable for the create product method over gRPC.
Creates a [Product][google.cloud.retail.v2.Product].
Returns:
Callable[[~.CreateProductRequest],
~.Product]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_product" not in self._stubs:
self._stubs["create_product"] = self.grpc_channel.unary_unary(
"/google.cloud.retail.v2.ProductService/CreateProduct",
request_serializer=product_service.CreateProductRequest.serialize,
response_deserializer=gcr_product.Product.deserialize,
)
return self._stubs["create_product"]
@property
def get_product(
self,
) -> Callable[[product_service.GetProductRequest], product.Product]:
r"""Return a callable for the get product method over gRPC.
Gets a [Product][google.cloud.retail.v2.Product].
Returns:
Callable[[~.GetProductRequest],
~.Product]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_product" not in self._stubs:
self._stubs["get_product"] = self.grpc_channel.unary_unary(
"/google.cloud.retail.v2.ProductService/GetProduct",
request_serializer=product_service.GetProductRequest.serialize,
response_deserializer=product.Product.deserialize,
)
return self._stubs["get_product"]
@property
def list_products(
self,
) -> Callable[
[product_service.ListProductsRequest], product_service.ListProductsResponse
]:
r"""Return a callable for the list products method over gRPC.
Gets a list of [Product][google.cloud.retail.v2.Product]s.
Returns:
Callable[[~.ListProductsRequest],
~.ListProductsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_products" not in self._stubs:
self._stubs["list_products"] = self.grpc_channel.unary_unary(
"/google.cloud.retail.v2.ProductService/ListProducts",
request_serializer=product_service.ListProductsRequest.serialize,
response_deserializer=product_service.ListProductsResponse.deserialize,
)
return self._stubs["list_products"]
@property
def update_product(
self,
) -> Callable[[product_service.UpdateProductRequest], gcr_product.Product]:
r"""Return a callable for the update product method over gRPC.
Updates a [Product][google.cloud.retail.v2.Product].
Returns:
Callable[[~.UpdateProductRequest],
~.Product]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_product" not in self._stubs:
self._stubs["update_product"] = self.grpc_channel.unary_unary(
"/google.cloud.retail.v2.ProductService/UpdateProduct",
request_serializer=product_service.UpdateProductRequest.serialize,
response_deserializer=gcr_product.Product.deserialize,
)
return self._stubs["update_product"]
@property
def delete_product(
self,
) -> Callable[[product_service.DeleteProductRequest], empty_pb2.Empty]:
r"""Return a callable for the delete product method over gRPC.
Deletes a [Product][google.cloud.retail.v2.Product].
Returns:
Callable[[~.DeleteProductRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_product" not in self._stubs:
self._stubs["delete_product"] = self.grpc_channel.unary_unary(
"/google.cloud.retail.v2.ProductService/DeleteProduct",
request_serializer=product_service.DeleteProductRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_product"]
@property
def import_products(
self,
) -> Callable[[import_config.ImportProductsRequest], operations_pb2.Operation]:
r"""Return a callable for the import products method over gRPC.
Bulk import of multiple
[Product][google.cloud.retail.v2.Product]s.
Request processing may be synchronous. Non-existing items are
created.
Note that it is possible for a subset of the
[Product][google.cloud.retail.v2.Product]s to be successfully
updated.
Returns:
Callable[[~.ImportProductsRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "import_products" not in self._stubs:
self._stubs["import_products"] = self.grpc_channel.unary_unary(
"/google.cloud.retail.v2.ProductService/ImportProducts",
request_serializer=import_config.ImportProductsRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["import_products"]
@property
def set_inventory(
self,
) -> Callable[[product_service.SetInventoryRequest], operations_pb2.Operation]:
r"""Return a callable for the set inventory method over gRPC.
Updates inventory information for a
[Product][google.cloud.retail.v2.Product] while respecting the
last update timestamps of each inventory field.
This process is asynchronous and does not require the
[Product][google.cloud.retail.v2.Product] to exist before
updating fulfillment information. If the request is valid, the
update will be enqueued and processed downstream. As a
consequence, when a response is returned, updates are not
immediately manifested in the
[Product][google.cloud.retail.v2.Product] queried by
[ProductService.GetProduct][google.cloud.retail.v2.ProductService.GetProduct]
or
[ProductService.ListProducts][google.cloud.retail.v2.ProductService.ListProducts].
When inventory is updated with
[ProductService.CreateProduct][google.cloud.retail.v2.ProductService.CreateProduct]
and
[ProductService.UpdateProduct][google.cloud.retail.v2.ProductService.UpdateProduct],
the specified inventory field value(s) will overwrite any
existing value(s) while ignoring the last update time for this
field. Furthermore, the last update time for the specified
inventory fields will be overwritten to the time of the
[ProductService.CreateProduct][google.cloud.retail.v2.ProductService.CreateProduct]
or
[ProductService.UpdateProduct][google.cloud.retail.v2.ProductService.UpdateProduct]
request.
If no inventory fields are set in
[CreateProductRequest.product][google.cloud.retail.v2.CreateProductRequest.product],
then any pre-existing inventory information for this product
will be used.
If no inventory fields are set in
[SetInventoryRequest.set_mask][google.cloud.retail.v2.SetInventoryRequest.set_mask],
then any existing inventory information will be preserved.
Pre-existing inventory information can only be updated with
[ProductService.SetInventory][google.cloud.retail.v2.ProductService.SetInventory],
[ProductService.AddFulfillmentPlaces][google.cloud.retail.v2.ProductService.AddFulfillmentPlaces],
and
[ProductService.RemoveFulfillmentPlaces][google.cloud.retail.v2.ProductService.RemoveFulfillmentPlaces].
The returned [Operation][]s will be obsolete after 1 day, and
[GetOperation][] API will return NOT_FOUND afterwards.
If conflicting updates are issued, the [Operation][]s associated
with the stale updates will not be marked as
[done][Operation.done] until being obsolete.
This feature is only available for users who have Retail Search
enabled. Please enable Retail Search on Cloud Console before
using this feature.
Returns:
Callable[[~.SetInventoryRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "set_inventory" not in self._stubs:
self._stubs["set_inventory"] = self.grpc_channel.unary_unary(
"/google.cloud.retail.v2.ProductService/SetInventory",
request_serializer=product_service.SetInventoryRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["set_inventory"]
@property
def add_fulfillment_places(
self,
) -> Callable[
[product_service.AddFulfillmentPlacesRequest], operations_pb2.Operation
]:
r"""Return a callable for the add fulfillment places method over gRPC.
Incrementally adds place IDs to
[Product.fulfillment_info.place_ids][google.cloud.retail.v2.FulfillmentInfo.place_ids].
This process is asynchronous and does not require the
[Product][google.cloud.retail.v2.Product] to exist before
updating fulfillment information. If the request is valid, the
update will be enqueued and processed downstream. As a
consequence, when a response is returned, the added place IDs
are not immediately manifested in the
[Product][google.cloud.retail.v2.Product] queried by
[ProductService.GetProduct][google.cloud.retail.v2.ProductService.GetProduct]
or
[ProductService.ListProducts][google.cloud.retail.v2.ProductService.ListProducts].
The returned [Operation][]s will be obsolete after 1 day, and
[GetOperation][] API will return NOT_FOUND afterwards.
If conflicting updates are issued, the [Operation][]s associated
with the stale updates will not be marked as
[done][Operation.done] until being obsolete.
This feature is only available for users who have Retail Search
enabled. Please enable Retail Search on Cloud Console before
using this feature.
Returns:
Callable[[~.AddFulfillmentPlacesRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "add_fulfillment_places" not in self._stubs:
self._stubs["add_fulfillment_places"] = self.grpc_channel.unary_unary(
"/google.cloud.retail.v2.ProductService/AddFulfillmentPlaces",
request_serializer=product_service.AddFulfillmentPlacesRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["add_fulfillment_places"]
@property
def remove_fulfillment_places(
self,
) -> Callable[
[product_service.RemoveFulfillmentPlacesRequest], operations_pb2.Operation
]:
r"""Return a callable for the remove fulfillment places method over gRPC.
Incrementally removes place IDs from a
[Product.fulfillment_info.place_ids][google.cloud.retail.v2.FulfillmentInfo.place_ids].
This process is asynchronous and does not require the
[Product][google.cloud.retail.v2.Product] to exist before
updating fulfillment information. If the request is valid, the
update will be enqueued and processed downstream. As a
consequence, when a response is returned, the removed place IDs
are not immediately manifested in the
[Product][google.cloud.retail.v2.Product] queried by
[ProductService.GetProduct][google.cloud.retail.v2.ProductService.GetProduct]
or
[ProductService.ListProducts][google.cloud.retail.v2.ProductService.ListProducts].
The returned [Operation][]s will be obsolete after 1 day, and
[GetOperation][] API will return NOT_FOUND afterwards.
If conflicting updates are issued, the [Operation][]s associated
with the stale updates will not be marked as
[done][Operation.done] until being obsolete.
This feature is only available for users who have Retail Search
enabled. Please enable Retail Search on Cloud Console before
using this feature.
Returns:
Callable[[~.RemoveFulfillmentPlacesRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "remove_fulfillment_places" not in self._stubs:
self._stubs["remove_fulfillment_places"] = self.grpc_channel.unary_unary(
"/google.cloud.retail.v2.ProductService/RemoveFulfillmentPlaces",
request_serializer=product_service.RemoveFulfillmentPlacesRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["remove_fulfillment_places"]
@property
def add_local_inventories(
self,
) -> Callable[
[product_service.AddLocalInventoriesRequest], operations_pb2.Operation
]:
r"""Return a callable for the add local inventories method over gRPC.
Updates local inventory information for a
[Product][google.cloud.retail.v2.Product] at a list of places,
while respecting the last update timestamps of each inventory
field.
This process is asynchronous and does not require the
[Product][google.cloud.retail.v2.Product] to exist before
updating inventory information. If the request is valid, the
update will be enqueued and processed downstream. As a
consequence, when a response is returned, updates are not
immediately manifested in the
[Product][google.cloud.retail.v2.Product] queried by
[ProductService.GetProduct][google.cloud.retail.v2.ProductService.GetProduct]
or
[ProductService.ListProducts][google.cloud.retail.v2.ProductService.ListProducts].
Local inventory information can only be modified using this
method.
[ProductService.CreateProduct][google.cloud.retail.v2.ProductService.CreateProduct]
and
[ProductService.UpdateProduct][google.cloud.retail.v2.ProductService.UpdateProduct]
has no effect on local inventories.
The returned [Operation][]s will be obsolete after 1 day, and
[GetOperation][] API will return NOT_FOUND afterwards.
If conflicting updates are issued, the [Operation][]s associated
with the stale updates will not be marked as
[done][Operation.done] until being obsolete.
This feature is only available for users who have Retail Search
enabled. Please enable Retail Search on Cloud Console before
using this feature.
Returns:
Callable[[~.AddLocalInventoriesRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "add_local_inventories" not in self._stubs:
self._stubs["add_local_inventories"] = self.grpc_channel.unary_unary(
"/google.cloud.retail.v2.ProductService/AddLocalInventories",
request_serializer=product_service.AddLocalInventoriesRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["add_local_inventories"]
@property
def remove_local_inventories(
self,
) -> Callable[
[product_service.RemoveLocalInventoriesRequest], operations_pb2.Operation
]:
r"""Return a callable for the remove local inventories method over gRPC.
Remove local inventory information for a
[Product][google.cloud.retail.v2.Product] at a list of places at
a removal timestamp.
This process is asynchronous. If the request is valid, the
removal will be enqueued and processed downstream. As a
consequence, when a response is returned, removals are not
immediately manifested in the
[Product][google.cloud.retail.v2.Product] queried by
[ProductService.GetProduct][google.cloud.retail.v2.ProductService.GetProduct]
or
[ProductService.ListProducts][google.cloud.retail.v2.ProductService.ListProducts].
Local inventory information can only be removed using this
method.
[ProductService.CreateProduct][google.cloud.retail.v2.ProductService.CreateProduct]
and
[ProductService.UpdateProduct][google.cloud.retail.v2.ProductService.UpdateProduct]
has no effect on local inventories.
The returned [Operation][]s will be obsolete after 1 day, and
[GetOperation][] API will return NOT_FOUND afterwards.
If conflicting updates are issued, the [Operation][]s associated
with the stale updates will not be marked as
[done][Operation.done] until being obsolete.
This feature is only available for users who have Retail Search
enabled. Please enable Retail Search on Cloud Console before
using this feature.
Returns:
Callable[[~.RemoveLocalInventoriesRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "remove_local_inventories" not in self._stubs:
self._stubs["remove_local_inventories"] = self.grpc_channel.unary_unary(
"/google.cloud.retail.v2.ProductService/RemoveLocalInventories",
request_serializer=product_service.RemoveLocalInventoriesRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["remove_local_inventories"]
    def close(self):
        """Release the underlying gRPC channel."""
        self.grpc_channel.close()
    @property
    def kind(self) -> str:
        """Transport discriminator: this implementation is "grpc"."""
        return "grpc"
# Public re-export surface of this module.
__all__ = ("ProductServiceGrpcTransport",)
| {
"content_hash": "e49d21e9d38338d8cbed6eda60c26a62",
"timestamp": "",
"source": "github",
"line_count": 708,
"max_line_length": 112,
"avg_line_length": 44.932203389830505,
"alnum_prop": 0.6418647051427134,
"repo_name": "googleapis/python-retail",
"id": "b8d625eaa70544c43eae6feffff0b0c5a8b1c315",
"size": "32412",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/retail_v2/services/product_service/transports/grpc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "7420556"
},
{
"name": "Shell",
"bytes": "30660"
}
],
"symlink_target": ""
} |
import os
import sys
import json
import logging
import six
import unittest2
from st2client import models
LOG = logging.getLogger(__name__)

# Endpoint used by the fakes below; nothing is expected to listen here --
# HTTP traffic is mocked in the tests themselves.
FAKE_ENDPOINT = 'http://127.0.0.1:8268'

# Canned resource payloads used as API response bodies in CLI tests.
RESOURCES = [
    {
        "id": "123",
        "name": "abc",
    },
    {
        "id": "456",
        "name": "def"
    }
]
class FakeResource(models.Resource):
    """Minimal concrete Resource used to exercise ResourceManager."""
    # _plural drives how the resource collection is referred to.
    _plural = 'FakeResources'
class FakeResponse(object):
    """Lightweight stand-in for an HTTP response object.

    Stores a body, status code and reason phrase; ``json()`` decodes the
    body, and ``raise_for_status()`` always raises with the reason.
    """

    def __init__(self, text, status_code, reason):
        self.text, self.status_code, self.reason = text, status_code, reason

    def json(self):
        """Decode the stored body as JSON."""
        return json.loads(self.text)

    def raise_for_status(self):
        """Unconditionally raise, carrying the reason phrase."""
        raise Exception(self.reason)
class FakeClient(object):
    """Client double exposing a single FakeResource manager."""

    def __init__(self):
        manager = models.ResourceManager(FakeResource, FAKE_ENDPOINT)
        self.managers = {'FakeResource': manager}
class FakeApp(object):
    """Application double exposing just the ``client`` attribute."""
    def __init__(self):
        self.client = FakeClient()
class BaseCLITestCase(unittest2.TestCase):
    """Base test case that sanitizes ST2 env vars and captures stdio.

    Subclasses can set ``capture_output = False`` to leave sys.stdout and
    sys.stderr untouched.
    """
    capture_output = True  # if True, stdout and stderr are saved to self.stdout and self.stderr

    # NOTE(review): these buffers are class-level, so they are shared across
    # instances until setUp() replaces them (which only happens when
    # capture_output is True).
    stdout = six.moves.StringIO()
    stderr = six.moves.StringIO()

    DEFAULT_SKIP_CONFIG = '1'

    def setUp(self):
        """Scrub ST2 env vars and (optionally) redirect stdout/stderr."""
        super(BaseCLITestCase, self).setUp()

        # Setup environment: remove any ambient ST2 configuration so tests
        # run hermetically.
        for var in ['ST2_BASE_URL', 'ST2_AUTH_URL', 'ST2_API_URL',
                    'ST2_AUTH_TOKEN', 'ST2_CONFIG_FILE', 'ST2_API_KEY']:
            if var in os.environ:
                del os.environ[var]

        os.environ['ST2_CLI_SKIP_CONFIG'] = self.DEFAULT_SKIP_CONFIG

        if self.capture_output:
            # Make sure we reset it for each test class instance
            self.stdout = six.moves.StringIO()
            self.stderr = six.moves.StringIO()
            sys.stdout = self.stdout
            sys.stderr = self.stderr

    def tearDown(self):
        """Restore the interpreter's original stdout/stderr."""
        super(BaseCLITestCase, self).tearDown()

        if self.capture_output:
            # Reset to original stdout and stderr.
            sys.stdout = sys.__stdout__
            sys.stderr = sys.__stderr__
| {
"content_hash": "8fdf6c577fa012200cbb38cf2d83199b",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 96,
"avg_line_length": 22.934782608695652,
"alnum_prop": 0.5748815165876777,
"repo_name": "tonybaloney/st2",
"id": "9f40ff0329d430af1804eacfe775c8d82f0d313b",
"size": "2890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "st2client/tests/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "46066"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "4278891"
},
{
"name": "Shell",
"bytes": "47687"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
} |
import numpy as np
import torch
import torchvision.models as models
from torch import nn as nn
from rlkit.pythonplusplus import identity
from rlkit.torch.core import PyTorchModule
class PretrainedCNN(PyTorchModule):
    """Torchvision backbone (optionally frozen) followed by an FC head.

    The forward input is a flat vector per sample: the first
    ``input_width * input_height * input_channels`` entries are the image;
    the next ``added_fc_input_size`` entries (if any) are concatenated
    directly into the FC head after the conv features.
    """
    # Uses a pretrained CNN architecture from torchvision
    def __init__(
        self,
        input_width,
        input_height,
        input_channels,
        output_size,
        hidden_sizes=None,
        added_fc_input_size=0,
        batch_norm_fc=False,
        init_w=1e-4,
        hidden_init=nn.init.xavier_uniform_,
        hidden_activation=nn.ReLU(),
        output_activation=identity,
        output_conv_channels=False,
        model_architecture=models.resnet18,
        model_pretrained=True,
        model_freeze=False,
    ):
        """Build the backbone and (unless output_conv_channels) the FC head.

        Args:
            input_width/input_height/input_channels: image dimensions used to
                split and reshape the flat input vector.
            output_size: size of the final linear layer's output.
            hidden_sizes: widths of intermediate FC layers (default: none).
            added_fc_input_size: extra flat features appended after the image.
            batch_norm_fc: add BatchNorm1d after each hidden FC layer.
            init_w: uniform init range for FC weights and biases.
            hidden_init: accepted for API compatibility but unused in this
                class (no reference anywhere in the body).
            hidden_activation: module applied after each hidden FC layer.
                NOTE(review): the default ``nn.ReLU()`` instance is shared
                across all instances using the default (harmless for the
                stateless ReLU, but worth knowing).
            output_activation: applied to the final linear output.
            output_conv_channels: if True, forward() returns raw conv
                features and no FC head is built.
            model_architecture/model_pretrained/model_freeze: torchvision
                constructor, pretrained-weights flag, and whether to freeze
                all backbone parameters.
        """
        if hidden_sizes is None:
            hidden_sizes = []
        super().__init__()
        self.hidden_sizes = hidden_sizes
        self.input_width = input_width
        self.input_height = input_height
        self.input_channels = input_channels
        self.output_size = output_size
        self.output_activation = output_activation
        self.hidden_activation = hidden_activation
        self.batch_norm_fc = batch_norm_fc
        self.added_fc_input_size = added_fc_input_size
        self.conv_input_length = self.input_width * self.input_height * self.input_channels
        self.output_conv_channels = output_conv_channels
        # Drop the torchvision model's final (classification) layer and keep
        # the remaining children as a feature extractor.
        self.pretrained_model = nn.Sequential(*list(model_architecture(
            pretrained=model_pretrained).children())[:-1])
        if model_freeze:
            for child in self.pretrained_model.children():
                for param in child.parameters():
                    param.requires_grad = False
        self.fc_layers = nn.ModuleList()
        self.fc_norm_layers = nn.ModuleList()
        # use torch rather than ptu because initially the model is on CPU
        test_mat = torch.zeros(
            1,
            self.input_channels,
            self.input_width,
            self.input_height,
        )
        # find output dim of conv_layers by trial and add norm conv layers
        # NOTE(review): this probe is shaped (1, C, W, H) while forward()
        # reshapes to (N, C, H, W); equivalent only for square inputs --
        # confirm intent before relying on non-square images.
        test_mat = self.pretrained_model(test_mat)
        self.conv_output_flat_size = int(np.prod(test_mat.shape))
        if self.output_conv_channels:
            self.last_fc = None
        else:
            fc_input_size = self.conv_output_flat_size
            # used only for injecting input directly into fc layers
            fc_input_size += added_fc_input_size
            for idx, hidden_size in enumerate(hidden_sizes):
                fc_layer = nn.Linear(fc_input_size, hidden_size)
                fc_input_size = hidden_size
                fc_layer.weight.data.uniform_(-init_w, init_w)
                fc_layer.bias.data.uniform_(-init_w, init_w)
                self.fc_layers.append(fc_layer)
                if self.batch_norm_fc:
                    norm_layer = nn.BatchNorm1d(hidden_size)
                    self.fc_norm_layers.append(norm_layer)
            self.last_fc = nn.Linear(fc_input_size, output_size)
            self.last_fc.weight.data.uniform_(-init_w, init_w)
            self.last_fc.bias.data.uniform_(-init_w, init_w)

    def forward(self, input, return_last_activations=False):
        """Run the conv backbone (and optionally the FC head) on a batch.

        ``input`` is (batch, conv_input_length + added_fc_input_size).
        Returns conv features if ``output_conv_channels``, the last hidden
        activations if ``return_last_activations``, else the activated
        output of ``last_fc``.
        """
        conv_input = input.narrow(start=0,
                                  length=self.conv_input_length,
                                  dim=1).contiguous()
        # reshape from batch of flattened images into (channels, w, h)
        h = conv_input.view(conv_input.shape[0],
                            self.input_channels,
                            self.input_height,
                            self.input_width)
        h = self.apply_forward_conv(h)
        if self.output_conv_channels:
            return h
        # flatten channels for fc layers
        h = h.view(h.size(0), -1)
        if self.added_fc_input_size != 0:
            # Tail of the flat input bypasses the conv stack entirely.
            extra_fc_input = input.narrow(
                start=self.conv_input_length,
                length=self.added_fc_input_size,
                dim=1,
            )
            h = torch.cat((h, extra_fc_input), dim=1)
        h = self.apply_forward_fc(h)
        if return_last_activations:
            return h
        return self.output_activation(self.last_fc(h))

    def apply_forward_conv(self, h):
        # Backbone feature extraction only.
        return self.pretrained_model(h)

    def apply_forward_fc(self, h):
        # Hidden FC stack with optional batch-norm and the configured
        # activation; excludes last_fc (applied by the caller).
        for i, layer in enumerate(self.fc_layers):
            h = layer(h)
            if self.batch_norm_fc:
                h = self.fc_norm_layers[i](h)
            h = self.hidden_activation(h)
        return h
| {
"content_hash": "28c4aa6389129ab9d647f312d93c7117",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 91,
"avg_line_length": 35.86153846153846,
"alnum_prop": 0.5634920634920635,
"repo_name": "vitchyr/rlkit",
"id": "4e4aee3d21ec2108984cc0f17a5b04bc19859726",
"size": "4662",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rlkit/torch/networks/pretrained_cnn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "3338"
},
{
"name": "Python",
"bytes": "355210"
}
],
"symlink_target": ""
} |
import rospy
from sensor_msgs.msg import LaserScan
class laser_reader:
    """Subscribe to /scan and report the closest range in a fixed window.

    The window covers beam indices 300-339 of each LaserScan message.
    """

    def __init__(self):
        # Callbacks fire once per received scan.
        self.laser_sub = rospy.Subscriber("/scan",
                                          LaserScan,
                                          self.callback)

    def callback(self, data):
        """Print the beam count and the window minimum; return the minimum.

        ``data.range_max`` caps the result, so an empty window (or one whose
        readings all exceed the sensor maximum) yields ``range_max`` — the
        same behavior as the original accumulator loop. The return value is
        new and backward-compatible (rospy ignores callback return values).

        Fix: the original used Python-2-only ``print`` statements; the
        function-call form below works under both Python 2 and 3.
        """
        window = data.ranges[300:340]
        min_range = data.range_max
        if window:
            min_range = min(min_range, min(window))
        print(len(data.ranges))
        print(min_range)
        return min_range
# Fix: rospy requires init_node() before any Subscriber is created; the
# original instantiated laser_reader (which subscribes) first, which rospy
# rejects with ROSInitException.
rospy.init_node('laser_reader')
ic = laser_reader()
rospy.spin()
| {
"content_hash": "ca0b09355cac001cf2dea846b7802589",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 56,
"avg_line_length": 23.73913043478261,
"alnum_prop": 0.5128205128205128,
"repo_name": "LCAS/teaching",
"id": "22f60393b0a1630917d17a4f129699cf3cd2c8b5",
"size": "569",
"binary": false,
"copies": "1",
"ref": "refs/heads/lcas_melodic",
"path": "cmp3103m-code-fragments/scripts/laser_reader.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "225768"
},
{
"name": "CMake",
"bytes": "38516"
},
{
"name": "EmberScript",
"bytes": "6519"
},
{
"name": "HTML",
"bytes": "2216"
},
{
"name": "Jupyter Notebook",
"bytes": "19698"
},
{
"name": "MATLAB",
"bytes": "1491"
},
{
"name": "Pawn",
"bytes": "414"
},
{
"name": "Python",
"bytes": "61493"
},
{
"name": "Shell",
"bytes": "839"
}
],
"symlink_target": ""
} |
from ..rman_constants import RFB_PREFS_NAME
import bpy
def get_addon_prefs():
    """Return the RenderMan addon's preferences, or None if not registered.

    Looks up the addon under RFB_PREFS_NAME first; if that exact key is
    missing (e.g. the addon directory was renamed), falls back to scanning
    all registered addons for a key containing RFB_PREFS_NAME.

    Returns:
        (AddonPreferences or None) - the preferences object.
    """
    try:
        addon = bpy.context.preferences.addons[RFB_PREFS_NAME]
        return addon.preferences
    # Fix: narrowed from a bare ``except:`` — a missing key (KeyError) or a
    # context without preferences yet (AttributeError) are the expected
    # failure modes; anything else should surface instead of being hidden.
    except (KeyError, AttributeError):
        # try looking for all variants of RFB_PREFS_NAME
        for k, v in bpy.context.preferences.addons.items():
            if RFB_PREFS_NAME in k:
                # Fix: return the preferences object, matching the primary
                # path above; the original returned the Addon wrapper ``v``
                # itself, which callers like get_pref() cannot use.
                return v.preferences
    return None
def get_pref(pref_name='', default=None):
    """ Return the value of a preference

    Args:
        pref_name (str) - name of the preference to look up
        default (AnyType) - default to return, if pref_name doesn't exist

    Returns:
        (AnyType) - preference value.
    """
    prefs = get_addon_prefs()
    if prefs:
        return getattr(prefs, pref_name, default)
    # No preferences object available: fall back to the shipped defaults
    # table when no explicit default was supplied.
    if default is None:
        from ..preferences import __DEFAULTS__
        default = __DEFAULTS__.get(pref_name, None)
    return default
def get_bl_temp_dir():
    """Return Blender's configured temporary-directory path."""
    filepaths = bpy.context.preferences.filepaths
    return filepaths.temporary_directory
"content_hash": "1a787e6c21f7fb52606b0502e433a7b7",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 73,
"avg_line_length": 29.257142857142856,
"alnum_prop": 0.625,
"repo_name": "prman-pixar/RenderManForBlender",
"id": "dad83defdfa445b4421d1fec8590dd30cc2c91de",
"size": "1024",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "rfb_utils/prefs_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "45529"
},
{
"name": "Python",
"bytes": "1741308"
}
],
"symlink_target": ""
} |
import sys
sys.path.append('../../../src/')
import cgmap as cg
import mdtraj as md
from mdtraj.core.element import Element
import md_check as check
############################### config #####################################
# Input trajectory/topology and the per-bead atom-name mapping files.
input_traj = "dppc.trr"
input_top = "dppc.pdb"
input_maps = ["mapping_bead_1_dppc",
              "mapping_bead_2_dppc",
              "mapping_bead_3_dppc"]
output_traj_null_mass = "dppc_nm.trr"
output_top_null_mass = "dppc_nm.pdb"
output_traj_native = "dppc_native.trr"
output_top_native = "dppc_native.pdb"
reference_traj = "dppc_nm.trr"
reference_top = "dppc_nm.pdb"
output_dir ='./output/'
input_dir ='./input/'
reference_dir ='./reference/'
#collection of names of molecules.
lipid_types = ['DPPC']
############################### config proc ################################
fq_input_maps = [ input_dir + loc for loc in input_maps ]
#read maps for each bead from files.
#list of lists of strings (one atom name per line in each mapping file).
mapping_atom_names_dppc = [ [ l.strip() for l in open(mp_file,'r').readlines()]
                            for mp_file in fq_input_maps ]
#index strings for which to atom query the trajectory when making beads.
#list of lists of strings.
name_lists = [ " or ".join( ["name %s"%mn for mn in mapping_names ])
               for mapping_names in mapping_atom_names_dppc ]
#names of cg beads created.
label_lists = ['DPH','DPM','DPT']
############################### run null mass ############################
### pull in trajectories
trj = md.load(input_dir + input_traj,top=input_dir + input_top)
#the types of each molecule in the trajectory.
molecule_types = [lipid_types.index(r.name) for r in trj.top.residues]
#preprocess trajectory content by adding new parts
# All elements are forced to hydrogen, giving every atom equal ("null")
# mass so the center-of-mass map reduces to a geometric average.
for a in trj.top.atoms: a.element = Element.getBySymbol('H')
#actual map command: dump the call parameters instead of mapping directly,
#then invoke cg_by_index with them (exercises the return_call code path).
call_params = cg.map_molecules( trj = trj,
                                selection_list = [ name_lists ],
                                bead_label_list = [ label_lists ],
                                molecule_types = molecule_types,
                                mapping_function = 'com',
                                return_call = True)
cg_trj = cg.cg_by_index(call_params[0],call_params[1],call_params[2],
                        mapping_function=call_params[3]['mapping_function'])
cg_trj.save(output_dir + output_traj_null_mass)
cg_trj[0].save(output_dir + output_top_null_mass)
############################### check results ###############################
# reloading results from disk and comparing against the stored reference.
cg_traj_null_mass = cg_trj.load(output_dir + output_traj_null_mass,
                                top=output_dir + output_top_null_mass)
ref_cg_traj = cg_trj.load(reference_dir + reference_traj,
                          top=reference_dir + reference_top)
result_null = check.md_content_equality(cg_traj_null_mass,ref_cg_traj)
# Exit status communicates pass/fail to the test harness.
sys.exit(check.check_result_to_exitval(result_null))
| {
"content_hash": "461e65aa82e060096c5aa099ac7783da",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 80,
"avg_line_length": 34.03529411764706,
"alnum_prop": 0.5737988247493951,
"repo_name": "uchicago-voth/cgmap",
"id": "a7eb44f8d896a497397ec46575e7c3c7a6ba636a",
"size": "3078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/molecular_map_test/same_molecules_dump_call_cop/test_same_molecules_cop.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "64237"
},
{
"name": "Shell",
"bytes": "2111"
}
],
"symlink_target": ""
} |
# coding=utf-8
from django.db import models
from plugins.arduino.models import Arduino, set_command
MODEL = 'OnOffSwitch'
class OnOffSwitch(models.Model):
    """
    Model for switches with two states (on/off), driven by an Arduino pin.

    (Docstring translated from Russian.)
    """
    CONTAINER = 'home'
    TYPE = 'OnOffSwitch'
    WIDGET_TYPE = 'positioned'
    # System (slug) name of the switch; the Russian verbose_name strings
    # below are user-facing and must stay as-is.
    name = models.SlugField(
        max_length=20,
        verbose_name='Системное имя',
        unique=True
    )
    controller = models.ForeignKey(
        Arduino,
        verbose_name='Контроллер Arduino',
    )
    controller_pin = models.PositiveSmallIntegerField(
        verbose_name='Вывод (pin) на Arduino',
    )
    # Current on/off state; updated only via set_result(), not the admin.
    state = models.BooleanField(
        default=False,
        editable=False,
    )
    last_changed = models.DateTimeField(
        auto_now=True,
        verbose_name='Время определения состояния',
    )
    class Meta(object):
        db_table = 'home_onoffswitch_ext'
        verbose_name = 'Переключатель'
        verbose_name_plural = 'Переключатели'
    def __unicode__(self):
        return self.name
    def set_command(self):
        # Queue a state-query command for this switch's pin on the Arduino.
        cmd = 'sw_state:%d' % self.controller_pin
        set_command(self, cmd)
    def set_result(self, result):
        # The controller replies '0'/'1'; any other payload is ignored.
        # Saving only on change keeps last_changed meaningful (auto_now).
        if result == '0' or result == '1':
            result = bool(int(result))
            if self.state != result:
                self.state = result
                self.save()
| {
"content_hash": "67c8f29eae0d3bac99282b54cf832eff",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 55,
"avg_line_length": 24.210526315789473,
"alnum_prop": 0.5927536231884057,
"repo_name": "sug4rok/Servus",
"id": "73fb830ed7de406785db01e8747635f9422466d3",
"size": "1499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Servus/plugins/arduino_on_off_switch/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "13156"
},
{
"name": "CSS",
"bytes": "82380"
},
{
"name": "HTML",
"bytes": "34128"
},
{
"name": "JavaScript",
"bytes": "97398"
},
{
"name": "Python",
"bytes": "159966"
}
],
"symlink_target": ""
} |
import torch as T
import numpy as np
from . import core
class Model(T.nn.Module):
    """Two-layer MLP mapping a 6-dim state vector to 4 action values."""

    def __init__(self, hidden_size):
        super().__init__()
        self.in_features = T.nn.Linear(6, hidden_size)
        # Re-initialise the first layer's weights with a scaled-down
        # uniform range (0.2 x the usual 1/sqrt(fan_in) bound).
        bound = .2 * (self.in_features.in_features) ** -.5
        T.nn.init.uniform_(self.in_features.weight, -bound, bound)
        self.predict = T.nn.Linear(hidden_size, 4)

    def forward(self, x):
        hidden = self.in_features(x)
        return self.predict(T.relu(hidden))
class Agent:
    """n-step Q-learning agent for the hover game (core.Game).

    Actions are pairs of booleans -- presumably two thruster on/off
    flags; TODO confirm against core.Game.step_multi.
    """
    ACTIONS = [(False, False), (False, True), (True, False), (True, True)]
    def __init__(self):
        # Fixed state (settings)
        self.discount = .95           # reward discount factor per action step
        self.ticks_per_action = 10    # game ticks simulated per chosen action
        self.greed = .9               # probability of acting greedily in training
        self.qsteps = 5               # n in n-step Q-learning bootstrapping
        self.max_buffer = 10000       # replay-buffer cap (trimmed at game end)
        self.update_sample = 100      # minibatch size per gradient update
        # Transient state
        self.model = Model(100)
        self.buffer = []              # replay memory: [state, action, n-step return]
        self.random = np.random.RandomState()
        self.pre_buffer = []          # last <= qsteps transitions awaiting bootstrap
        self.opt = T.optim.Adam(self.model.parameters())
    def _update(self, log):
        # One SGD step on a uniformly sampled minibatch, once enough
        # memories have been collected.
        if self.update_sample <= len(self.buffer):
            self.opt.zero_grad()
            indices = self.random.randint(0, len(self.buffer), self.update_sample)
            states = T.FloatTensor([self.buffer[idx][0] for idx in indices])
            actions = T.LongTensor([self.buffer[idx][1] for idx in indices])
            rewards = T.FloatTensor([self.buffer[idx][2] for idx in indices])
            # Q-value of the action actually taken in each sampled state.
            y = self.model(states)[T.arange(self.update_sample), actions]
            # Scaled MSE towards the stored n-step returns.
            loss = (1 - self.discount) * ((y - rewards) ** 2).mean()
            log.append('loss', loss=float(loss))
            loss.backward()
            self.opt.step()
    def _act(self, state, greedy):
        # Epsilon-greedy: argmax-Q with probability `greed` (or always,
        # when evaluating), otherwise a uniformly random action index.
        if greedy or self.random.rand() < self.greed:
            return int(T.argmax(self.model(T.FloatTensor(state))))
        else:
            return self.random.randint(len(self.ACTIONS))
    def _add_memory(self, state, action, reward, log):
        # Once a transition is qsteps old, complete its n-step return with
        # a bootstrapped max-Q estimate of the current state and commit it.
        if self.qsteps == len(self.pre_buffer):
            sar = self.pre_buffer.pop(0)
            sar[2] += (self.discount ** self.qsteps) * float(T.max(self.model(T.FloatTensor(state))))
            self.buffer.append(sar)
        self.pre_buffer.append([state, action, 0])
        # Propagate the fresh reward back through all pending transitions,
        # discounted by how many steps ago each was taken.
        for n in range(len(self.pre_buffer)):
            self.pre_buffer[-1 - n][2] += reward * (self.discount ** n)
        self._update(log)
    def _flush_and_trim_buffer(self):
        # Episode over: commit pending transitions without bootstrapping
        # and cap the replay buffer at its newest max_buffer entries.
        self.buffer += self.pre_buffer
        self.pre_buffer = []
        self.buffer = self.buffer[-self.max_buffer:]
    def train(self, log):
        """Train the agent on a single game."""
        game = core.Game()
        while True:
            state = game.state
            action = self._act(state, greedy=False)
            outcome = game.step_multi(self.ACTIONS[action], self.ticks_per_action)
            # Reward: 1 while the game continues or on success, 0 on failure.
            self._add_memory(state, action, outcome is None or outcome.success, log)
            if outcome:
                self._flush_and_trim_buffer()
                log.append('outcome', **outcome.to_json())
                return
    def __call__(self, state):
        """Evaluation policy - greedy as anything."""
        return self.ACTIONS[self._act(state, greedy=True)]
| {
"content_hash": "7e782713a06b25d855a6a995292fa779",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 101,
"avg_line_length": 36.19101123595506,
"alnum_prop": 0.5650419124495498,
"repo_name": "DouglasOrr/Snippets",
"id": "7bbdceba6beffcfc2ad7b788adb1d3eb18550779",
"size": "3221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hover/hover/qlearning.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "61098"
},
{
"name": "CSS",
"bytes": "2839"
},
{
"name": "Clojure",
"bytes": "42894"
},
{
"name": "Dockerfile",
"bytes": "3894"
},
{
"name": "HTML",
"bytes": "17302"
},
{
"name": "Haml",
"bytes": "2454"
},
{
"name": "Haskell",
"bytes": "277"
},
{
"name": "Java",
"bytes": "127511"
},
{
"name": "JavaScript",
"bytes": "12117"
},
{
"name": "Jupyter Notebook",
"bytes": "33198"
},
{
"name": "Python",
"bytes": "137390"
},
{
"name": "Ruby",
"bytes": "8897"
},
{
"name": "Rust",
"bytes": "32172"
},
{
"name": "Shell",
"bytes": "3263"
}
],
"symlink_target": ""
} |
import ConfigParser
import string, os, sys
class Secret(object):
    """Baidu API credentials loaded from 'secret.ini'."""
    # Class-level placeholders; overwritten per instance in __init__.
    apiKey = "apiKey"
    secretKey = "secretKey"

    def __init__(self):
        parser = ConfigParser.ConfigParser()
        parser.read("secret.ini")
        # Values come from the [baidu_secret] section of the ini file.
        self.apiKey = parser.get("baidu_secret", "apikey")
        self.secretKey = parser.get("baidu_secret", "secretkey")
| {
"content_hash": "6f8b19b5600c1c3516af56ec686b9bcd",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 54,
"avg_line_length": 18.705882352941178,
"alnum_prop": 0.6729559748427673,
"repo_name": "fuzhouch/amberalertcn",
"id": "4dff6bbf74e94ade8a50143cc7e975dfdd90abcf",
"size": "360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/amberalertcn/api/v1/Secret.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "49063"
},
{
"name": "Python",
"bytes": "44347"
}
],
"symlink_target": ""
} |
from numpy import sin, cos, deg2rad, transpose, dot, arcsin, arctan2, zeros,\
ndarray, array, rad2deg, pi
from premat import premat
def precess(ra0, dec0, equinox1, equinox2, fk4=False, radian=False):
    """Precess equatorial coordinates from EQUINOX1 to EQUINOX2.

    Port of the IDL Astronomy Library PRECESS procedure.  Algorithm from
    Taff, "Computational Spherical Astronomy" (1983), p. 24 (FK4); FK5
    constants from the Astronomical Almanac Explanatory Supplement 1992,
    Table 3.211.1.  Converted to Python by Sergey Koposov, July 2010;
    original IDL history: W. Landsman (1986-2003), I. Freedman (1994).

    Parameters
    ----------
    ra0, dec0 : scalar or ndarray
        Input right ascension and declination, in DEGREES unless
        ``radian`` is True.
    equinox1 : float
        Original equinox of the coordinates (e.g. 1950.0).
    equinox2 : float
        Equinox to precess to (e.g. 2000.0).
    fk4 : bool
        If True use the FK4 (B1950.0) system; default is FK5 (J2000.0).
        Use BPRECESS/JPRECESS to convert *between* FK4 and FK5.
    radian : bool
        If True, inputs and outputs are in radians rather than degrees.

    Returns
    -------
    ra, dec : ndarray
        Precessed coordinates.  RA is normalised to [0, 360) degrees
        (or [0, 2*pi)).  NOTE: scalar inputs are wrapped in one-element
        arrays; the conversion back to scalars (the `scal` flag) is
        commented out below, so arrays are returned even for scalars.

    Notes
    -----
    Accuracy decreases near declination +/-90 degrees.  PRECESS should
    not be used more than 2.5 centuries from 2000 on the FK5 system
    (1950.0 on the FK4 system).
    """
    scal = True
    if isinstance(ra0, ndarray):
        ra = ra0.copy()
        dec = dec0.copy()
        scal = False
    else:
        ra=array([ra0])
        dec=array([dec0])
    npts = ra.size
    if not radian:
        ra_rad = deg2rad(ra) #Convert to double precision if not already
        dec_rad = deg2rad(dec)
    else:
        ra_rad = ra
        dec_rad = dec
    # Direction cosines of the input positions, one row per point.
    a = cos(dec_rad)
    x = zeros((npts, 3))
    x[:,0] = a * cos(ra_rad)
    x[:,1] = a * sin(ra_rad)
    x[:,2] = sin(dec_rad)
    # Use PREMAT function to get precession matrix from Equinox1 to Equinox2
    r = premat(equinox1, equinox2, fk4=fk4)
    x2 = transpose(dot(transpose(r), transpose(x))) #rotate to get output direction cosines
    ra_rad = zeros(npts) + arctan2(x2[:,1], x2[:,0])
    dec_rad = zeros(npts) + arcsin(x2[:,2])
    if not radian:
        ra = rad2deg(ra_rad)
        ra = ra + (ra < 0.) * 360.e0 #RA between 0 and 360 degrees
        dec = rad2deg(dec_rad)
    else:
        ra = ra_rad
        dec = dec_rad
        ra = ra + (ra < 0.) * 2.0e0 * pi
    # NOTE(review): the scalar-unwrapping below was left disabled in the
    # original port; `scal` is currently dead.
    # if do#print:
    #print 'Equinox (%.2f): %f,%f' % (equinox2, ra, dec)
    # if scal:
    #    ra, dec = ra[0], dec[0]
    return ra, dec
| {
"content_hash": "daa61235d31d578cabae5c3ec7507f5a",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 95,
"avg_line_length": 38.775193798449614,
"alnum_prop": 0.5833666533386646,
"repo_name": "plasidu/phoenix4iraf",
"id": "882ab7ac030e15edb68d7664d079d54681dfa83b",
"size": "5002",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "astrolibpy/astrolib/precess.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gnuplot",
"bytes": "155"
},
{
"name": "Python",
"bytes": "258947"
},
{
"name": "Shell",
"bytes": "139"
}
],
"symlink_target": ""
} |
_version = (0, 0, 1)
_name = 'Paste.py'
import ConfigParser
import os
import sys
from common import exception
from lib import logger
def version():
    """Return the version as a dotted string, e.g. '0.0.1'."""
    return '.'.join(str(part) for part in _version)
def version_tuple():
    """Return the version as a tuple of ints, e.g. (0, 0, 1)."""
    return tuple(_version)
def full_version():
    """Return the 'name/version' identification string."""
    return '%s/%s' % (_name, version())
Raise = object()
_global_filename = '/etc/paste.conf'
_user_filename = os.path.expanduser('~/.pasterc')
class FileConfig(object):
    """Configuration backed by one or more INI files.

    Paths take the form ``section.option``; a path without a dot reads
    from the DEFAULT section.  Pass the ``Raise`` sentinel as *default*
    to turn missing/invalid entries into exceptions instead of fallbacks.
    """
    def __init__(self, filename = None):
        self.rc = ConfigParser.RawConfigParser()
        if filename is None:
            # No explicit file: load the standard locations (the global
            # file only exists on posix systems).
            if os.name == 'posix':
                filelist = [_global_filename, _user_filename]
            else:
                filelist = [_user_filename]
            logger.debug('config loaded: ' + str(self.rc.read(filelist)))
        else:
            self.rc.read(filename)
        self._filename = filename
    def _split(self, path):
        # 'section.option' -> (section, option); bare names map to DEFAULT.
        try:
            section, entry = path.rsplit('.', 1)
        except ValueError:
            section = 'DEFAULT'
            entry = path
        if section.lower() == 'default':
            section = 'DEFAULT'
        return section, entry
    def get(self, path, default=None):
        """Return the raw string value at *path*, or *default* if absent."""
        logger.debug('config get: ' + path)
        section, entry = self._split(path)
        if section != 'DEFAULT' and not self.rc.has_section(section):
            if default is Raise:
                raise exception.NoSuchOption('No such section: ' + section)
            return default
        try:
            return self.rc.get(section, entry)
        except ConfigParser.NoOptionError as e:
            if default is Raise:
                raise exception.NoSuchOption(e)
            return default
    def require(self, path):
        """Like get(), but raise NoSuchOption when the entry is missing."""
        return self.get(path, default=Raise)
    def getint(self, path, default=None):
        """Return the entry as int, or *default* when missing/invalid."""
        try:
            return int(self.get(path, default))
        except (TypeError, ValueError) as e:
            # TypeError covers int(None): previously a missing entry with
            # a non-numeric default escaped as an uncaught TypeError.
            if default is Raise:
                raise exception.InvalidValue(e)
            return default
    def getfloat(self, path, default=None):
        """Return the entry as float, or *default* when missing/invalid."""
        try:
            return float(self.get(path, default))
        except (TypeError, ValueError) as e:
            # TypeError covers float(None) for a missing entry (see getint).
            if default is Raise:
                raise exception.InvalidValue(e)
            return default
    def getboolean(self, path, default=None):
        """Return the entry as bool.

        Accepts 1/yes/true/on/y and 0/no/false/off/n, case-insensitive.
        """
        val = self.get(path, default)
        if isinstance(val, bool):
            return val
        if val is default:
            # Entry missing: the default came back unchanged.
            return val
        _val = val.lower()
        if _val in ['1', 'yes', 'true', 'on', 'y']:
            return True
        if _val in ['0', 'no', 'false', 'off', 'n']:
            return False
        if default is Raise:
            raise exception.InvalidValue(val + ' is True or False?')
        return default
    def getsection(self, section):
        """Return every option of *section* as a dict; raise if absent."""
        if section.lower() == 'default' :
            section = 'DEFAULT'
        if section != 'DEFAULT' and not self.rc.has_section(section):
            raise exception.NoSuchOption(section)
        return dict(self.rc.items(section))
    def set(self, path, val):
        """Store *val* at *path*, converting it to its string form."""
        logger.debug('config set: %s=%r' % (path, val))
        if val is None: # Do not change.
            return
        section, entry = self._split(path)
        if section != 'DEFAULT' and not self.rc.has_section(section):
            self.rc.add_section(section)
        if isinstance(val, str):
            self.rc.set(section, entry, val)
        elif isinstance(val, unicode):
            self.rc.set(section, entry, val.encode('UTF-8'))
        elif isinstance(val, bool):
            # bool is a subclass of int, so this test must come before the
            # numeric branch (it used to be unreachable, storing 'True'
            # instead of the intended '1').
            self.rc.set(section, entry, '1' if val else '0')
        elif isinstance(val, (int, float, long)):
            self.rc.set(section, entry, str(val))
        else:
            raise ValueError('Invalid type for val')
    def remove(self, path, check=False):
        """Delete the entry at *path*; with check=True, raise when absent."""
        logger.debug('remove key: ' + path)
        section, entry = self._split(path)
        if section != 'DEFAULT' and not self.rc.has_section(section):
            if check:
                raise exception.NoSuchOption('No such section: ' + section)
            return
        res = self.rc.remove_option(section, entry)
        if check and not res:
            raise exception.NoSuchOption('No such config entry: ' + path)
        return res
    def saveTo(self, fd):
        """Serialise the configuration to an open file object."""
        self.rc.write(fd)
    def save(self):
        """Write the configuration back to the file it was loaded from."""
        if not isinstance(self._filename, (str, unicode)):
            raise ValueError('Invalid filename.')
        logger.info('saving to ' + self._filename)
        with open(self._filename, 'w') as f:
            self.saveTo(f)
    def dump(self, fd=sys.stderr):
        """Pretty-print every section and option (debug helper)."""
        for section in self.rc.sections() + ['DEFAULT']:
            print >>fd, 'Section ' + section
            for k, v in self.rc.items(section):
                print >>fd, 'Option %s=%s' % (k, str(v))
            print >>fd, 'EndSection'
            print >>fd
        print >>fd, '===================='
class RuntimeConfig(FileConfig):
    """A FileConfig variant holding raw Python objects.

    Values are stored as-is (no string conversion), so the config can
    carry arbitrary runtime state but can no longer be serialised.
    """
    def set(self, path, val):
        if val is None:  # Do not change.
            return
        if '.' in path:
            section, entry = path.rsplit('.', 1)
        else:
            section, entry = 'DEFAULT', path
        if section.lower() == 'default':
            section = 'DEFAULT'
        if section != 'DEFAULT' and not self.rc.has_section(section):
            self.rc.add_section(section)
        self.rc.set(section, entry, val)
    def saveTo(self, fd):
        raise NotImplementedError('RuntimeConfig contains not serializable information.')
_instance = None
_global_instance = None
_user_instance = None
def getConfig():
    """Return the process-wide RuntimeConfig singleton, creating it lazily."""
    global _instance
    if _instance is not None:
        return _instance
    logger.debug('creating config instance')
    _instance = RuntimeConfig()
    return _instance
def getGlobalConfig():
    """Return the lazily-created FileConfig for the system-wide file.

    Raises OSError on non-posix platforms, which have no global config.
    """
    if os.name != 'posix':
        raise OSError('No global config supported in your platform, currently only posix are supported.')
    global _global_instance
    if _global_instance is not None:
        return _global_instance
    logger.debug('opening global config')
    _global_instance = FileConfig(_global_filename)
    return _global_instance
def getUserConfig():
    """Return the lazily-created FileConfig for the per-user config file."""
    global _user_instance
    if _user_instance is not None:
        return _user_instance
    logger.debug('opening user config')
    _user_instance = FileConfig(_user_filename)
    return _user_instance
| {
"content_hash": "84af45aa36a20100693694a3695c569a",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 99,
"avg_line_length": 25.378995433789953,
"alnum_prop": 0.662108672184239,
"repo_name": "jackyyf/paste.py",
"id": "dc92ca46b23b47b1b3870c2093e70efbd6207785",
"size": "5607",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lib/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38644"
}
],
"symlink_target": ""
} |
from tweepy.api import API
from celery.decorators import task
@task()
def update_twitter_profile(user):
    """Celery task: refresh *user*'s local profile from their Twitter data.

    Best-effort: any failure to resolve the profile or the Twitter user
    leaves the local data untouched.
    """
    api = API()
    twitter_user = None
    try:
        profile = user.get_profile()
        twitter_user = api.get_user(user_id=profile.twitter_profile.twitter_id)
    except Exception:
        # Was a bare ``except:`` -- narrowed so SystemExit and
        # KeyboardInterrupt are not swallowed; best-effort semantics kept.
        twitter_user = None
    if twitter_user:
        # First whitespace-separated token is the first name; the rest
        # (possibly empty) is the last name.
        name_parts = twitter_user.name.split(" ")
        profile.user.first_name = name_parts[0]
        profile.user.last_name = " ".join(name_parts[1:])
        profile.user.save()
        profile.website = twitter_user.url
        profile.profile_image_url = twitter_user.profile_image_url
        profile.description = twitter_user.description
        profile.twitter_name = twitter_user.screen_name
        profile.location = twitter_user.location
        profile.save()
| {
"content_hash": "8abae56de104b058bfa244f485240fa0",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 77,
"avg_line_length": 32.833333333333336,
"alnum_prop": 0.6345177664974619,
"repo_name": "fxdgear/beersocial",
"id": "0188584a71d54bc26d6215cfc6c91db5110f0112",
"size": "788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "socialbeer/members/tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "216423"
},
{
"name": "Python",
"bytes": "107389"
}
],
"symlink_target": ""
} |
"""Classes and methods to describe contract Providers."""
class Provider(object):
    """A Pact provider participating in a contract."""

    def __init__(self, name):
        """Create a new Provider.

        :param name: The provider's name, shown in the Pact when it is
            published.
        :type name: str
        """
        self.name = name
| {
"content_hash": "f230ace53bd0a3b087eb82d8eb671b24",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 78,
"avg_line_length": 24,
"alnum_prop": 0.5638888888888889,
"repo_name": "pact-foundation/pact-python",
"id": "543f0152dbd56dd40046b2846dba485a3eb4e9f1",
"size": "360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pact/provider.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "552"
},
{
"name": "Makefile",
"bytes": "3106"
},
{
"name": "Python",
"bytes": "210480"
},
{
"name": "Shell",
"bytes": "1918"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import *
# Old-style (Django <= 1.4) prefixed urlpatterns: the app root maps to
# the 'index' view.  The view prefix must NOT end with a dot, because
# patterns() joins prefix and view name with '.'; the previous trailing
# dot produced the unresolvable path 'aquaticore.about.views..index'.
urlpatterns = patterns('aquaticore.about.views',
    (r'^$', 'index'),
)
| {
"content_hash": "6730b47aa66ee2c9b900cb693bcfe931",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 49,
"avg_line_length": 28,
"alnum_prop": 0.55,
"repo_name": "rockerBOO/aquaticore",
"id": "44d033be87979c73ba966541e70d95f66ef03a8a",
"size": "140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aquaticore/about/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13106"
},
{
"name": "HTML",
"bytes": "44504"
},
{
"name": "JavaScript",
"bytes": "11994"
},
{
"name": "PHP",
"bytes": "17127"
},
{
"name": "Python",
"bytes": "51374"
}
],
"symlink_target": ""
} |
"""Support for MQTT climate devices."""
import logging
import voluptuous as vol
from homeassistant.components import climate, mqtt
from homeassistant.components.climate import (
PLATFORM_SCHEMA as CLIMATE_PLATFORM_SCHEMA,
ClimateDevice,
)
from homeassistant.components.climate.const import (
ATTR_HVAC_MODE,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
DEFAULT_MAX_TEMP,
DEFAULT_MIN_TEMP,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_AUX_HEAT,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_SWING_MODE,
SUPPORT_TARGET_TEMPERATURE,
PRESET_AWAY,
SUPPORT_TARGET_TEMPERATURE_RANGE,
PRESET_NONE,
)
from homeassistant.components.fan import SPEED_HIGH, SPEED_LOW, SPEED_MEDIUM
from homeassistant.const import (
ATTR_TEMPERATURE,
CONF_DEVICE,
CONF_NAME,
CONF_VALUE_TEMPLATE,
PRECISION_HALVES,
PRECISION_TENTHS,
PRECISION_WHOLE,
STATE_ON,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from . import (
ATTR_DISCOVERY_HASH,
CONF_QOS,
CONF_RETAIN,
CONF_UNIQUE_ID,
MQTT_BASE_PLATFORM_SCHEMA,
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
subscription,
)
from .discovery import MQTT_DISCOVERY_NEW, clear_discovery_hash
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "MQTT HVAC"
CONF_ACTION_TEMPLATE = "action_template"
CONF_ACTION_TOPIC = "action_topic"
CONF_AUX_COMMAND_TOPIC = "aux_command_topic"
CONF_AUX_STATE_TEMPLATE = "aux_state_template"
CONF_AUX_STATE_TOPIC = "aux_state_topic"
CONF_AWAY_MODE_COMMAND_TOPIC = "away_mode_command_topic"
CONF_AWAY_MODE_STATE_TEMPLATE = "away_mode_state_template"
CONF_AWAY_MODE_STATE_TOPIC = "away_mode_state_topic"
CONF_CURRENT_TEMP_TEMPLATE = "current_temperature_template"
CONF_CURRENT_TEMP_TOPIC = "current_temperature_topic"
CONF_FAN_MODE_COMMAND_TOPIC = "fan_mode_command_topic"
CONF_FAN_MODE_LIST = "fan_modes"
CONF_FAN_MODE_STATE_TEMPLATE = "fan_mode_state_template"
CONF_FAN_MODE_STATE_TOPIC = "fan_mode_state_topic"
CONF_HOLD_COMMAND_TOPIC = "hold_command_topic"
CONF_HOLD_STATE_TEMPLATE = "hold_state_template"
CONF_HOLD_STATE_TOPIC = "hold_state_topic"
CONF_HOLD_LIST = "hold_modes"
CONF_MODE_COMMAND_TOPIC = "mode_command_topic"
CONF_MODE_LIST = "modes"
CONF_MODE_STATE_TEMPLATE = "mode_state_template"
CONF_MODE_STATE_TOPIC = "mode_state_topic"
CONF_PAYLOAD_OFF = "payload_off"
CONF_PAYLOAD_ON = "payload_on"
CONF_POWER_COMMAND_TOPIC = "power_command_topic"
CONF_POWER_STATE_TEMPLATE = "power_state_template"
CONF_POWER_STATE_TOPIC = "power_state_topic"
CONF_PRECISION = "precision"
CONF_SEND_IF_OFF = "send_if_off"
CONF_SWING_MODE_COMMAND_TOPIC = "swing_mode_command_topic"
CONF_SWING_MODE_LIST = "swing_modes"
CONF_SWING_MODE_STATE_TEMPLATE = "swing_mode_state_template"
CONF_SWING_MODE_STATE_TOPIC = "swing_mode_state_topic"
CONF_TEMP_COMMAND_TOPIC = "temperature_command_topic"
CONF_TEMP_HIGH_COMMAND_TOPIC = "temperature_high_command_topic"
CONF_TEMP_HIGH_STATE_TEMPLATE = "temperature_high_state_template"
CONF_TEMP_HIGH_STATE_TOPIC = "temperature_high_state_topic"
CONF_TEMP_LOW_COMMAND_TOPIC = "temperature_low_command_topic"
CONF_TEMP_LOW_STATE_TEMPLATE = "temperature_low_state_template"
CONF_TEMP_LOW_STATE_TOPIC = "temperature_low_state_topic"
CONF_TEMP_STATE_TEMPLATE = "temperature_state_template"
CONF_TEMP_STATE_TOPIC = "temperature_state_topic"
CONF_TEMP_INITIAL = "initial"
CONF_TEMP_MAX = "max_temp"
CONF_TEMP_MIN = "min_temp"
CONF_TEMP_STEP = "temp_step"
TEMPLATE_KEYS = (
CONF_AUX_STATE_TEMPLATE,
CONF_AWAY_MODE_STATE_TEMPLATE,
CONF_CURRENT_TEMP_TEMPLATE,
CONF_FAN_MODE_STATE_TEMPLATE,
CONF_HOLD_STATE_TEMPLATE,
CONF_MODE_STATE_TEMPLATE,
CONF_POWER_STATE_TEMPLATE,
CONF_ACTION_TEMPLATE,
CONF_SWING_MODE_STATE_TEMPLATE,
CONF_TEMP_HIGH_STATE_TEMPLATE,
CONF_TEMP_LOW_STATE_TEMPLATE,
CONF_TEMP_STATE_TEMPLATE,
)
TOPIC_KEYS = (
CONF_AUX_COMMAND_TOPIC,
CONF_AUX_STATE_TOPIC,
CONF_AWAY_MODE_COMMAND_TOPIC,
CONF_AWAY_MODE_STATE_TOPIC,
CONF_CURRENT_TEMP_TOPIC,
CONF_FAN_MODE_COMMAND_TOPIC,
CONF_FAN_MODE_STATE_TOPIC,
CONF_HOLD_COMMAND_TOPIC,
CONF_HOLD_STATE_TOPIC,
CONF_MODE_COMMAND_TOPIC,
CONF_MODE_STATE_TOPIC,
CONF_POWER_COMMAND_TOPIC,
CONF_POWER_STATE_TOPIC,
CONF_ACTION_TOPIC,
CONF_SWING_MODE_COMMAND_TOPIC,
CONF_SWING_MODE_STATE_TOPIC,
CONF_TEMP_COMMAND_TOPIC,
CONF_TEMP_HIGH_COMMAND_TOPIC,
CONF_TEMP_HIGH_STATE_TOPIC,
CONF_TEMP_LOW_COMMAND_TOPIC,
CONF_TEMP_LOW_STATE_TOPIC,
CONF_TEMP_STATE_TOPIC,
)
SCHEMA_BASE = CLIMATE_PLATFORM_SCHEMA.extend(MQTT_BASE_PLATFORM_SCHEMA.schema)
PLATFORM_SCHEMA = (
SCHEMA_BASE.extend(
{
vol.Optional(CONF_AUX_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_AUX_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_AUX_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_AWAY_MODE_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_AWAY_MODE_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_AWAY_MODE_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_CURRENT_TEMP_TEMPLATE): cv.template,
vol.Optional(CONF_CURRENT_TEMP_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
vol.Optional(CONF_FAN_MODE_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(
CONF_FAN_MODE_LIST,
default=[HVAC_MODE_AUTO, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH],
): cv.ensure_list,
vol.Optional(CONF_FAN_MODE_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_FAN_MODE_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_HOLD_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_HOLD_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_HOLD_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_HOLD_LIST, default=list): cv.ensure_list,
vol.Optional(CONF_MODE_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(
CONF_MODE_LIST,
default=[
HVAC_MODE_AUTO,
HVAC_MODE_OFF,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
],
): cv.ensure_list,
vol.Optional(CONF_MODE_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_MODE_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PAYLOAD_ON, default="ON"): cv.string,
vol.Optional(CONF_PAYLOAD_OFF, default="OFF"): cv.string,
vol.Optional(CONF_POWER_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_POWER_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_POWER_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_PRECISION): vol.In(
[PRECISION_TENTHS, PRECISION_HALVES, PRECISION_WHOLE]
),
vol.Optional(CONF_RETAIN, default=mqtt.DEFAULT_RETAIN): cv.boolean,
vol.Optional(CONF_SEND_IF_OFF, default=True): cv.boolean,
vol.Optional(CONF_ACTION_TEMPLATE): cv.template,
vol.Optional(CONF_ACTION_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_SWING_MODE_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(
CONF_SWING_MODE_LIST, default=[STATE_ON, HVAC_MODE_OFF]
): cv.ensure_list,
vol.Optional(CONF_SWING_MODE_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_SWING_MODE_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_TEMP_INITIAL, default=21): cv.positive_int,
vol.Optional(CONF_TEMP_MIN, default=DEFAULT_MIN_TEMP): vol.Coerce(float),
vol.Optional(CONF_TEMP_MAX, default=DEFAULT_MAX_TEMP): vol.Coerce(float),
vol.Optional(CONF_TEMP_STEP, default=1.0): vol.Coerce(float),
vol.Optional(CONF_TEMP_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_TEMP_HIGH_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_TEMP_HIGH_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_TEMP_HIGH_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_TEMP_LOW_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_TEMP_LOW_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_TEMP_LOW_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_TEMP_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_TEMP_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_UNIQUE_ID): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
}
)
.extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema)
.extend(mqtt.MQTT_JSON_ATTRS_SCHEMA.schema)
)
async def async_setup_platform(
    hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None
):
    """Set up MQTT climate device through configuration.yaml.

    ``discovery_info`` is part of the platform-setup signature but is
    unused here; MQTT discovery goes through async_setup_entry instead.
    """
    await _async_setup_entity(hass, config, async_add_entities)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up MQTT climate device dynamically through MQTT discovery."""

    async def async_discover(discovery_payload):
        """Discover and add a MQTT climate device."""
        # Initialise before the try: if pop() itself raises (payload
        # without a discovery hash), the except block previously read an
        # unbound local and masked the real error with UnboundLocalError.
        discovery_hash = None
        try:
            discovery_hash = discovery_payload.pop(ATTR_DISCOVERY_HASH)
            config = PLATFORM_SCHEMA(discovery_payload)
            await _async_setup_entity(
                hass, config, async_add_entities, config_entry, discovery_hash
            )
        except Exception:
            # Drop the half-registered discovery entry, then re-raise.
            if discovery_hash:
                clear_discovery_hash(hass, discovery_hash)
            raise

    async_dispatcher_connect(
        hass, MQTT_DISCOVERY_NEW.format(climate.DOMAIN, "mqtt"), async_discover
    )
async def _async_setup_entity(
    hass, config, async_add_entities, config_entry=None, discovery_hash=None
):
    """Set up the MQTT climate devices."""
    # Shared by YAML setup (entry/hash left as None) and MQTT discovery
    # (both supplied); creates exactly one entity per validated config.
    async_add_entities([MqttClimate(hass, config, config_entry, discovery_hash)])
class MqttClimate(
    MqttAttributes,
    MqttAvailability,
    MqttDiscoveryUpdate,
    MqttEntityDeviceInfo,
    ClimateDevice,
):
    """Representation of an MQTT climate device.

    Each feature (HVAC mode, fan mode, swing mode, target temperatures,
    away/hold/aux) has an optional command topic (outgoing) and state topic
    (incoming).  When a feature's state topic is not configured the entity
    runs that feature optimistically: it updates its own state right after
    publishing a command (see _setup_from_config and the _set_* helpers).
    """
    def __init__(self, hass, config, config_entry, discovery_hash):
        """Initialize the climate device."""
        self._config = config
        self._unique_id = config.get(CONF_UNIQUE_ID)
        self._sub_state = None
        self.hass = hass
        self._action = None
        self._aux = False
        self._away = False
        self._current_fan_mode = None
        self._current_operation = None
        self._current_swing_mode = None
        self._current_temp = None
        self._hold = None
        self._target_temp = None
        self._target_temp_high = None
        self._target_temp_low = None
        self._topic = None
        self._unit_of_measurement = hass.config.units.temperature_unit
        self._value_templates = None
        # Populates topics, optimistic defaults and value templates.
        self._setup_from_config(config)
        device_config = config.get(CONF_DEVICE)
        MqttAttributes.__init__(self, config)
        MqttAvailability.__init__(self, config)
        MqttDiscoveryUpdate.__init__(self, discovery_hash, self.discovery_update)
        MqttEntityDeviceInfo.__init__(self, device_config, config_entry)
    async def async_added_to_hass(self):
        """Handle being added to home assistant."""
        await super().async_added_to_hass()
        await self._subscribe_topics()
    async def discovery_update(self, discovery_payload):
        """Handle updated discovery message."""
        # Re-validate the new payload and re-apply it to every mixin,
        # then resubscribe so topic changes take effect.
        config = PLATFORM_SCHEMA(discovery_payload)
        self._config = config
        self._setup_from_config(config)
        await self.attributes_discovery_update(config)
        await self.availability_discovery_update(config)
        await self.device_info_discovery_update(config)
        await self._subscribe_topics()
        self.async_write_ha_state()
    def _setup_from_config(self, config):
        """(Re)Setup the entity."""
        self._topic = {key: config.get(key) for key in TOPIC_KEYS}
        # set to None in non-optimistic mode
        self._target_temp = (
            self._current_fan_mode
        ) = self._current_operation = self._current_swing_mode = None
        self._target_temp_low = None
        self._target_temp_high = None
        if self._topic[CONF_TEMP_STATE_TOPIC] is None:
            self._target_temp = config[CONF_TEMP_INITIAL]
        if self._topic[CONF_TEMP_LOW_STATE_TOPIC] is None:
            self._target_temp_low = config[CONF_TEMP_INITIAL]
        if self._topic[CONF_TEMP_HIGH_STATE_TOPIC] is None:
            self._target_temp_high = config[CONF_TEMP_INITIAL]
        if self._topic[CONF_FAN_MODE_STATE_TOPIC] is None:
            self._current_fan_mode = SPEED_LOW
        if self._topic[CONF_SWING_MODE_STATE_TOPIC] is None:
            self._current_swing_mode = HVAC_MODE_OFF
        if self._topic[CONF_MODE_STATE_TOPIC] is None:
            self._current_operation = HVAC_MODE_OFF
        self._action = None
        self._away = False
        self._hold = None
        self._aux = False
        # Value-template resolution order: identity function by default,
        # CONF_VALUE_TEMPLATE as a catch-all for every key, and finally
        # per-key templates override the catch-all.
        value_templates = {}
        for key in TEMPLATE_KEYS:
            value_templates[key] = lambda value: value
        if CONF_VALUE_TEMPLATE in config:
            value_template = config.get(CONF_VALUE_TEMPLATE)
            value_template.hass = self.hass
            value_templates = {
                key: value_template.async_render_with_possible_json_value
                for key in TEMPLATE_KEYS
            }
        for key in TEMPLATE_KEYS & config.keys():
            tpl = config[key]
            value_templates[key] = tpl.async_render_with_possible_json_value
            tpl.hass = self.hass
        self._value_templates = value_templates
    async def _subscribe_topics(self):
        """(Re)Subscribe to topics."""
        topics = {}
        qos = self._config[CONF_QOS]
        # Helper: register a message callback only when the corresponding
        # state topic is actually configured.
        def add_subscription(topics, topic, msg_callback):
            if self._topic[topic] is not None:
                topics[topic] = {
                    "topic": self._topic[topic],
                    "msg_callback": msg_callback,
                    "qos": qos,
                }
        def render_template(msg, template_name):
            template = self._value_templates[template_name]
            return template(msg.payload)
        @callback
        def handle_action_received(msg):
            """Handle receiving action via MQTT."""
            payload = render_template(msg, CONF_ACTION_TEMPLATE)
            self._action = payload
            self.async_write_ha_state()
        add_subscription(topics, CONF_ACTION_TOPIC, handle_action_received)
        @callback
        def handle_temperature_received(msg, template_name, attr):
            """Handle temperature coming via MQTT."""
            payload = render_template(msg, template_name)
            try:
                setattr(self, attr, float(payload))
                self.async_write_ha_state()
            except ValueError:
                _LOGGER.error("Could not parse temperature from %s", payload)
        @callback
        def handle_current_temperature_received(msg):
            """Handle current temperature coming via MQTT."""
            handle_temperature_received(
                msg, CONF_CURRENT_TEMP_TEMPLATE, "_current_temp"
            )
        add_subscription(
            topics, CONF_CURRENT_TEMP_TOPIC, handle_current_temperature_received
        )
        @callback
        def handle_target_temperature_received(msg):
            """Handle target temperature coming via MQTT."""
            handle_temperature_received(msg, CONF_TEMP_STATE_TEMPLATE, "_target_temp")
        add_subscription(
            topics, CONF_TEMP_STATE_TOPIC, handle_target_temperature_received
        )
        @callback
        def handle_temperature_low_received(msg):
            """Handle target temperature low coming via MQTT."""
            handle_temperature_received(
                msg, CONF_TEMP_LOW_STATE_TEMPLATE, "_target_temp_low"
            )
        add_subscription(
            topics, CONF_TEMP_LOW_STATE_TOPIC, handle_temperature_low_received
        )
        @callback
        def handle_temperature_high_received(msg):
            """Handle target temperature high coming via MQTT."""
            handle_temperature_received(
                msg, CONF_TEMP_HIGH_STATE_TEMPLATE, "_target_temp_high"
            )
        add_subscription(
            topics, CONF_TEMP_HIGH_STATE_TOPIC, handle_temperature_high_received
        )
        @callback
        def handle_mode_received(msg, template_name, attr, mode_list):
            """Handle receiving listed mode via MQTT."""
            payload = render_template(msg, template_name)
            if payload not in self._config[mode_list]:
                _LOGGER.error("Invalid %s mode: %s", mode_list, payload)
            else:
                setattr(self, attr, payload)
                self.async_write_ha_state()
        @callback
        def handle_current_mode_received(msg):
            """Handle receiving mode via MQTT."""
            handle_mode_received(
                msg, CONF_MODE_STATE_TEMPLATE, "_current_operation", CONF_MODE_LIST
            )
        add_subscription(topics, CONF_MODE_STATE_TOPIC, handle_current_mode_received)
        @callback
        def handle_fan_mode_received(msg):
            """Handle receiving fan mode via MQTT."""
            handle_mode_received(
                msg,
                CONF_FAN_MODE_STATE_TEMPLATE,
                "_current_fan_mode",
                CONF_FAN_MODE_LIST,
            )
        add_subscription(topics, CONF_FAN_MODE_STATE_TOPIC, handle_fan_mode_received)
        @callback
        def handle_swing_mode_received(msg):
            """Handle receiving swing mode via MQTT."""
            handle_mode_received(
                msg,
                CONF_SWING_MODE_STATE_TEMPLATE,
                "_current_swing_mode",
                CONF_SWING_MODE_LIST,
            )
        add_subscription(
            topics, CONF_SWING_MODE_STATE_TOPIC, handle_swing_mode_received
        )
        @callback
        def handle_onoff_mode_received(msg, template_name, attr):
            """Handle receiving on/off mode via MQTT."""
            payload = render_template(msg, template_name)
            payload_on = self._config[CONF_PAYLOAD_ON]
            payload_off = self._config[CONF_PAYLOAD_OFF]
            # Templates may render Python booleans as the literal strings
            # "True"/"False"; map those back to the configured payloads
            # before comparing.
            if payload == "True":
                payload = payload_on
            elif payload == "False":
                payload = payload_off
            if payload == payload_on:
                setattr(self, attr, True)
            elif payload == payload_off:
                setattr(self, attr, False)
            else:
                _LOGGER.error("Invalid %s mode: %s", attr, payload)
            self.async_write_ha_state()
        @callback
        def handle_away_mode_received(msg):
            """Handle receiving away mode via MQTT."""
            handle_onoff_mode_received(msg, CONF_AWAY_MODE_STATE_TEMPLATE, "_away")
        add_subscription(topics, CONF_AWAY_MODE_STATE_TOPIC, handle_away_mode_received)
        @callback
        def handle_aux_mode_received(msg):
            """Handle receiving aux mode via MQTT."""
            handle_onoff_mode_received(msg, CONF_AUX_STATE_TEMPLATE, "_aux")
        add_subscription(topics, CONF_AUX_STATE_TOPIC, handle_aux_mode_received)
        @callback
        def handle_hold_mode_received(msg):
            """Handle receiving hold mode via MQTT."""
            payload = render_template(msg, CONF_HOLD_STATE_TEMPLATE)
            # "off" is the sentinel for "no hold".
            if payload == "off":
                payload = None
            self._hold = payload
            self.async_write_ha_state()
        add_subscription(topics, CONF_HOLD_STATE_TOPIC, handle_hold_mode_received)
        self._sub_state = await subscription.async_subscribe_topics(
            self.hass, self._sub_state, topics
        )
    async def async_will_remove_from_hass(self):
        """Unsubscribe when removed."""
        self._sub_state = await subscription.async_unsubscribe_topics(
            self.hass, self._sub_state
        )
        await MqttAttributes.async_will_remove_from_hass(self)
        await MqttAvailability.async_will_remove_from_hass(self)
    @property
    def should_poll(self):
        """Return the polling state."""
        return False
    @property
    def name(self):
        """Return the name of the climate device."""
        return self._config[CONF_NAME]
    @property
    def unique_id(self):
        """Return a unique ID."""
        return self._unique_id
    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return self._unit_of_measurement
    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._current_temp
    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._target_temp
    @property
    def target_temperature_low(self):
        """Return the low target temperature we try to reach."""
        return self._target_temp_low
    @property
    def target_temperature_high(self):
        """Return the high target temperature we try to reach."""
        return self._target_temp_high
    @property
    def hvac_action(self):
        """Return the current running hvac operation if supported."""
        return self._action
    @property
    def hvac_mode(self):
        """Return current operation ie. heat, cool, idle."""
        return self._current_operation
    @property
    def hvac_modes(self):
        """Return the list of available operation modes."""
        return self._config[CONF_MODE_LIST]
    @property
    def target_temperature_step(self):
        """Return the supported step of target temperature."""
        return self._config[CONF_TEMP_STEP]
    @property
    def preset_mode(self):
        """Return preset mode."""
        # Hold takes precedence over away when both are active.
        if self._hold:
            return self._hold
        if self._away:
            return PRESET_AWAY
        return None
    @property
    def preset_modes(self):
        """Return preset modes."""
        presets = []
        if (self._topic[CONF_AWAY_MODE_STATE_TOPIC] is not None) or (
            self._topic[CONF_AWAY_MODE_COMMAND_TOPIC] is not None
        ):
            presets.append(PRESET_AWAY)
        presets.extend(self._config[CONF_HOLD_LIST])
        if presets:
            presets.insert(0, PRESET_NONE)
        return presets
    @property
    def is_aux_heat(self):
        """Return true if away mode is on."""
        return self._aux
    @property
    def fan_mode(self):
        """Return the fan setting."""
        return self._current_fan_mode
    @property
    def fan_modes(self):
        """Return the list of available fan modes."""
        return self._config[CONF_FAN_MODE_LIST]
    def _publish(self, topic, payload):
        # Silently skip the publish when no command topic is configured.
        if self._topic[topic] is not None:
            mqtt.async_publish(
                self.hass,
                self._topic[topic],
                payload,
                self._config[CONF_QOS],
                self._config[CONF_RETAIN],
            )
    def _set_temperature(self, temp, cmnd_topic, state_topic, attr):
        if temp is not None:
            if self._topic[state_topic] is None:
                # optimistic mode
                setattr(self, attr, temp)
            if (
                self._config[CONF_SEND_IF_OFF]
                or self._current_operation != HVAC_MODE_OFF
            ):
                self._publish(cmnd_topic, temp)
    async def async_set_temperature(self, **kwargs):
        """Set new target temperatures."""
        if kwargs.get(ATTR_HVAC_MODE) is not None:
            operation_mode = kwargs.get(ATTR_HVAC_MODE)
            await self.async_set_hvac_mode(operation_mode)
        self._set_temperature(
            kwargs.get(ATTR_TEMPERATURE),
            CONF_TEMP_COMMAND_TOPIC,
            CONF_TEMP_STATE_TOPIC,
            "_target_temp",
        )
        self._set_temperature(
            kwargs.get(ATTR_TARGET_TEMP_LOW),
            CONF_TEMP_LOW_COMMAND_TOPIC,
            CONF_TEMP_LOW_STATE_TOPIC,
            "_target_temp_low",
        )
        self._set_temperature(
            kwargs.get(ATTR_TARGET_TEMP_HIGH),
            CONF_TEMP_HIGH_COMMAND_TOPIC,
            CONF_TEMP_HIGH_STATE_TOPIC,
            "_target_temp_high",
        )
        # Always optimistic?
        self.async_write_ha_state()
    async def async_set_swing_mode(self, swing_mode):
        """Set new swing mode."""
        if self._config[CONF_SEND_IF_OFF] or self._current_operation != HVAC_MODE_OFF:
            self._publish(CONF_SWING_MODE_COMMAND_TOPIC, swing_mode)
        if self._topic[CONF_SWING_MODE_STATE_TOPIC] is None:
            self._current_swing_mode = swing_mode
            self.async_write_ha_state()
    async def async_set_fan_mode(self, fan_mode):
        """Set new target temperature."""
        if self._config[CONF_SEND_IF_OFF] or self._current_operation != HVAC_MODE_OFF:
            self._publish(CONF_FAN_MODE_COMMAND_TOPIC, fan_mode)
        if self._topic[CONF_FAN_MODE_STATE_TOPIC] is None:
            self._current_fan_mode = fan_mode
            self.async_write_ha_state()
    async def async_set_hvac_mode(self, hvac_mode) -> None:
        """Set new operation mode."""
        # Publish the power payload only on off<->on transitions.
        if self._current_operation == HVAC_MODE_OFF and hvac_mode != HVAC_MODE_OFF:
            self._publish(CONF_POWER_COMMAND_TOPIC, self._config[CONF_PAYLOAD_ON])
        elif self._current_operation != HVAC_MODE_OFF and hvac_mode == HVAC_MODE_OFF:
            self._publish(CONF_POWER_COMMAND_TOPIC, self._config[CONF_PAYLOAD_OFF])
        self._publish(CONF_MODE_COMMAND_TOPIC, hvac_mode)
        if self._topic[CONF_MODE_STATE_TOPIC] is None:
            self._current_operation = hvac_mode
            self.async_write_ha_state()
    @property
    def swing_mode(self):
        """Return the swing setting."""
        return self._current_swing_mode
    @property
    def swing_modes(self):
        """List of available swing modes."""
        return self._config[CONF_SWING_MODE_LIST]
    async def async_set_preset_mode(self, preset_mode):
        """Set a preset mode."""
        if preset_mode == self.preset_mode:
            return
        # Track if we should optimistic update the state
        optimistic_update = False
        if self._away:
            optimistic_update = optimistic_update or self._set_away_mode(False)
        elif preset_mode == PRESET_AWAY:
            optimistic_update = optimistic_update or self._set_away_mode(True)
        if self._hold:
            optimistic_update = optimistic_update or self._set_hold_mode(None)
        elif preset_mode not in (None, PRESET_AWAY):
            optimistic_update = optimistic_update or self._set_hold_mode(preset_mode)
        if optimistic_update:
            self.async_write_ha_state()
    def _set_away_mode(self, state):
        """Set away mode.
        Returns if we should optimistically write the state.
        """
        self._publish(
            CONF_AWAY_MODE_COMMAND_TOPIC,
            self._config[CONF_PAYLOAD_ON] if state else self._config[CONF_PAYLOAD_OFF],
        )
        if self._topic[CONF_AWAY_MODE_STATE_TOPIC] is not None:
            return False
        self._away = state
        return True
    def _set_hold_mode(self, hold_mode):
        """Set hold mode.
        Returns if we should optimistically write the state.
        """
        self._publish(CONF_HOLD_COMMAND_TOPIC, hold_mode or "off")
        if self._topic[CONF_HOLD_STATE_TOPIC] is not None:
            return False
        self._hold = hold_mode
        return True
    def _set_aux_heat(self, state):
        self._publish(
            CONF_AUX_COMMAND_TOPIC,
            self._config[CONF_PAYLOAD_ON] if state else self._config[CONF_PAYLOAD_OFF],
        )
        if self._topic[CONF_AUX_STATE_TOPIC] is None:
            self._aux = state
            self.async_write_ha_state()
    async def async_turn_aux_heat_on(self):
        """Turn auxiliary heater on."""
        self._set_aux_heat(True)
    async def async_turn_aux_heat_off(self):
        """Turn auxiliary heater off."""
        self._set_aux_heat(False)
    @property
    def supported_features(self):
        """Return the list of supported features."""
        # A feature is supported when either its state or command topic is
        # configured.
        support = 0
        if (self._topic[CONF_TEMP_STATE_TOPIC] is not None) or (
            self._topic[CONF_TEMP_COMMAND_TOPIC] is not None
        ):
            support |= SUPPORT_TARGET_TEMPERATURE
        if (self._topic[CONF_TEMP_LOW_STATE_TOPIC] is not None) or (
            self._topic[CONF_TEMP_LOW_COMMAND_TOPIC] is not None
        ):
            support |= SUPPORT_TARGET_TEMPERATURE_RANGE
        if (self._topic[CONF_TEMP_HIGH_STATE_TOPIC] is not None) or (
            self._topic[CONF_TEMP_HIGH_COMMAND_TOPIC] is not None
        ):
            support |= SUPPORT_TARGET_TEMPERATURE_RANGE
        if (self._topic[CONF_FAN_MODE_STATE_TOPIC] is not None) or (
            self._topic[CONF_FAN_MODE_COMMAND_TOPIC] is not None
        ):
            support |= SUPPORT_FAN_MODE
        if (self._topic[CONF_SWING_MODE_STATE_TOPIC] is not None) or (
            self._topic[CONF_SWING_MODE_COMMAND_TOPIC] is not None
        ):
            support |= SUPPORT_SWING_MODE
        if (
            (self._topic[CONF_AWAY_MODE_STATE_TOPIC] is not None)
            or (self._topic[CONF_AWAY_MODE_COMMAND_TOPIC] is not None)
            or (self._topic[CONF_HOLD_STATE_TOPIC] is not None)
            or (self._topic[CONF_HOLD_COMMAND_TOPIC] is not None)
        ):
            support |= SUPPORT_PRESET_MODE
        if (self._topic[CONF_AUX_STATE_TOPIC] is not None) or (
            self._topic[CONF_AUX_COMMAND_TOPIC] is not None
        ):
            support |= SUPPORT_AUX_HEAT
        return support
    @property
    def min_temp(self):
        """Return the minimum temperature."""
        return self._config[CONF_TEMP_MIN]
    @property
    def max_temp(self):
        """Return the maximum temperature."""
        return self._config[CONF_TEMP_MAX]
    @property
    def precision(self):
        """Return the precision of the system."""
        if self._config.get(CONF_PRECISION) is not None:
            return self._config.get(CONF_PRECISION)
        return super().precision
| {
"content_hash": "500d456602f8ae7f4ad46ea4c05b6eff",
"timestamp": "",
"source": "github",
"line_count": 876,
"max_line_length": 88,
"avg_line_length": 35.60730593607306,
"alnum_prop": 0.6140997691715825,
"repo_name": "qedi-r/home-assistant",
"id": "4b163c523fa6d6ee41b7b1eec2950a035a20b7a4",
"size": "31192",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/mqtt/climate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18564720"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
"""
bottle plugin of template
~~~~~~~~~~~~~~~~
template for bottle.
[DEPRECATED]
    Please use TemplatePlugin instead.
:copyright: 20150904 by raptor.zh@gmail.com.
"""
import sys
# NOTE(review): PY3 appears unused in this module, and comparing the
# sys.version string against "3" is fragile — sys.version_info would be
# the robust check.  Kept as-is to avoid behavior changes.
PY3=sys.version>"3"
import inspect
import bottle
import logging
logger = logging.getLogger(__name__)
# PluginError is defined in bottle >= 0.10
# Back-compat shim: on older bottle releases define it here and attach it
# to the bottle module so callers can always reference bottle.PluginError.
if not hasattr(bottle, 'PluginError'):
    class PluginError(bottle.BottleException):
        pass
    bottle.PluginError = PluginError
class ViewPlugin(object):
    """[DEPRECATED] Bottle plugin that renders a route's dict result
    through a template.

    Routes configured with ``template=...``/``view=...`` get their return
    value passed through ``template(view, result)``.  Prefer TemplatePlugin.
    """

    name = 'view'
    api = 2

    def __init__(self, template=bottle.template):
        # Default renderer; a route can override it via config["template"].
        self.template = template

    def setup(self, app):
        """Refuse to install a second ViewPlugin on the same app."""
        for other in app.plugins:
            if not isinstance(other, ViewPlugin):
                continue
            # BUG FIX: the original raised the bare name ``PluginError``,
            # which only exists at module level when the bottle < 0.10
            # shim ran — on modern bottle it was a NameError.  Use the
            # attribute on the bottle module, which is always present.
            raise bottle.PluginError("Found another ViewPlugin.")

    def apply(self, callback, route):
        """Wrap *callback* with template rendering when configured."""
        # BUG FIX: removed a dead ``inspect.getargspec(route.callback)``
        # call whose result was never used; ``getargspec`` was removed in
        # Python 3.11, so the call crashed every route setup there.
        _template = route.config.get("template", self.template)
        _view = route.config.get("view", "")
        if not _template or not _view:
            return callback

        def wrapper(*args, **kwargs):
            return _template(_view, callback(*args, **kwargs))

        return wrapper
| {
"content_hash": "6f057e135b6d0cd6aabf89775b8653a3",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 63,
"avg_line_length": 21.982142857142858,
"alnum_prop": 0.6190089358245329,
"repo_name": "raptorz/bottle-plugins",
"id": "0ec5b0c6403b5b1beac35d231df2c75df62ab7a4",
"size": "1255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bottle_plugins/view.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12950"
}
],
"symlink_target": ""
} |
import requests
import xmltodict
import sqlite3
# Create the database (file "ordanotkun.db" in the working directory) and
# the table that stores one row per parliamentary speech.
conn = sqlite3.connect('ordanotkun.db')
c = conn.cursor()
# IF NOT EXISTS makes the script safely re-runnable; the original wrapped
# a plain CREATE TABLE in a bare ``except: pass``, which also swallowed
# every other sqlite error.
c.execute('''CREATE TABLE IF NOT EXISTS ordanotkun(
        speech_text text,
        speaker text,
        session text,
        speech_start text,
        speech_end text,
        speech_type text
    )''')

# Parliamentary sessions to scrape (currently only session 151).
sessions = list(range(151, 152))
url = 'http://www.althingi.is/altext/xml/raedulisti/?lthing='
def get_speech(url):
    """Download one speech XML document and return its paragraphs
    concatenated into a single string, each followed by a space."""
    reply = requests.get(url)
    document = xmltodict.parse(reply.text)
    paragraphs = document[u'ræða'][u'ræðutexti'][u'mgr']
    return ''.join(paragraph + ' ' for paragraph in paragraphs)
# Scrape every configured session: fetch its speech list, download each
# speech and persist it.
for session in sessions:
    print("session", str(session))
    try:
        query = url + str(session)
        response = requests.get(query)
        data = xmltodict.parse(response.text)
        for r in data[u'ræðulisti'][u'ræða']:
            try:
                speech = get_speech(r[u'slóðir'][u'xml'])
                speaker = r[u'ræðumaður'][u'nafn']
                speech_start = r[u'ræðahófst']
                speech_end = r[u'ræðulauk']
                speech_type = r[u'tegundræðu']
                values = (speech.lower(), speaker, session, speech_start, speech_end, speech_type)
                c.execute('insert into ordanotkun values(?,?,?,?,?,?)', values)
                conn.commit()
                print(str(session), "Speech saved", speaker)
            except Exception:
                # Best-effort: skip malformed speech records but keep
                # scraping the rest of the session.
                pass
    except Exception as e:
        print(e)
# BUG FIX: conn.close() used to run INSIDE the session loop, so the
# connection was closed after the first session and every later insert
# failed (silently, via the excepts above).  Close once, after the loop.
conn.close()
print(e) | {
"content_hash": "267c0eb78dc21c845bd6b482d9ecaa56",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 86,
"avg_line_length": 24.553571428571427,
"alnum_prop": 0.6727272727272727,
"repo_name": "bjornlevi/5thpower",
"id": "217bfa6502e3e9c7d7847e59677a5d079022e109",
"size": "1421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ordanotkun/ordanotkun.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7304"
},
{
"name": "Python",
"bytes": "68118"
}
],
"symlink_target": ""
} |
'''
================================================================================
This confidential and proprietary software may be used only
as authorized by a licensing agreement from Thumb o'Cat Inc.
In the event of publication, the following notice is applicable:
Copyright (C) 2013 - 2014 Thumb o'Cat
All right reserved.
The entire notice above must be reproduced on all authorized copies.
================================================================================
File : libvpx.py
Author(s) : Luuvish
Version : 2.0
Revision :
1.0 May 21, 2013 first release
2.0 May 12, 2014 Executor classify
================================================================================
'''
__all__ = ('LibVpx', )
__version__ = '2.0.0'
from . import rootpath, ModelExecutor
class LibVpx(ModelExecutor):
    """Executor wrapper around the libvpx ``vpxdec`` reference decoder
    (VP8/VP9), providing decode and MD5-digest actions."""

    model = 'libvpx'
    codecs = ('vp8', 'vp9')
    actions = ('decode', 'digest', 'digest_by_frames', 'compare')

    def __init__(self, codec, **kwargs):
        from os.path import join
        super(LibVpx, self).__init__(codec, **kwargs)

        binary = 'tool/3rd-party/libvpx-1.3.0.bin/dist/bin/vpxdec'
        self._execute = join(rootpath, binary)

        self.defaults['decode'] += ['--codec=' + codec, '--i420']
        self.defaults['digest'] += ['--codec=' + codec, '--i420', '--md5']

    def execute(self):
        """Return the path of the vpxdec binary."""
        return self._execute

    def options(self, source, target):
        """Command-line arguments that decode *source* into *target*."""
        return ['-o', target, source]

    def digest(self, source, target=None):
        """Run vpxdec in MD5 mode on *source*.

        Returns the list of per-frame digests (lower-cased); when *target*
        is given the digests are also written there, one per line.
        Raises if vpxdec produced no digest file.
        """
        from subprocess import call
        from os import remove
        from os.path import exists, splitext, basename

        # Only the stem is needed; the extension is discarded.
        outname, _ = splitext(basename(source))
        output = '.' + outname + '.yuv.md5'

        execute = self.execute()
        options = self.defaults['digest'] + self.options(source, '%s-%%wx%%h-%%4.i420' % outname)

        if target is not None:
            self.mkdir(target)

        # NOTE: the original wrapped this in ``except Exception as e:
        # raise e``, which only mangled the traceback; let errors
        # propagate naturally instead.
        with open(output, 'wt') as f:
            call([execute] + options, stdout=f, stderr=self.stderr)

        if not exists(output):
            raise Exception('digest error: %s' % basename(source))

        with open(output, 'rt') as f:
            lines = [line.rstrip().split()[0].lower() for line in f]
        remove(output)

        if target is not None:
            with open(target, 'wt') as f:
                for line in lines:
                    f.write('%s\n' % line)
        return lines
| {
"content_hash": "2e523855b731cfa34208e4fb5aa78c2e",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 97,
"avg_line_length": 29.227272727272727,
"alnum_prop": 0.5171073094867807,
"repo_name": "luuvish/libvio",
"id": "a330655529ea748e2cf1ca14eab9bfbc1a68bed9",
"size": "2597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "script/test/model/libvpx.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "155188"
},
{
"name": "C++",
"bytes": "959797"
},
{
"name": "JavaScript",
"bytes": "119"
},
{
"name": "Objective-C",
"bytes": "2802"
},
{
"name": "Python",
"bytes": "60299"
},
{
"name": "Shell",
"bytes": "15241"
}
],
"symlink_target": ""
} |
import mimetypes
def guess_kind_from_filename(filepath):
    """Return a document kind (image, audio...) guessed from a filename. If
    no kind can be guessed, returns None."""
    if not filepath:
        return None
    mime, _encoding = mimetypes.guess_type(filepath)
    return guess_kind_from_content_type(mime)
def guess_kind_from_content_type(content_type):
    """Return a document kind (image, audio...) guessed from a content_type. If
    no kind can be guessed, returns None."""
    if not content_type:
        return None
    for kind in ('image', 'video', 'audio', 'text', 'pdf'):
        if kind in content_type:
            return kind
    return None
| {
"content_hash": "834f770c0cb1a6c8e02a4bf8f67362ef",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 79,
"avg_line_length": 35.36842105263158,
"alnum_prop": 0.6502976190476191,
"repo_name": "Lcaracol/ideasbox.lan",
"id": "6626ff35c8c121719cfecf4268770053a9dfe367",
"size": "672",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ideasbox/mediacenter/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "22376"
},
{
"name": "HTML",
"bytes": "48091"
},
{
"name": "JavaScript",
"bytes": "9347"
},
{
"name": "Makefile",
"bytes": "447"
},
{
"name": "Python",
"bytes": "256949"
}
],
"symlink_target": ""
} |
import requests
from .Backup import Backup
from .BackupContainer import BackupContainer
from .Bridge import Bridge
from .BridgeCreate import BridgeCreate
from .BridgeCreateSetting import BridgeCreateSetting
from .CPUInfo import CPUInfo
from .CPUStats import CPUStats
from .CloudInit import CloudInit
from .Cluster import Cluster
from .ClusterCreate import ClusterCreate
from .Container import Container
from .ContainerListItem import ContainerListItem
from .ContainerNIC import ContainerNIC
from .ContainerNICconfig import ContainerNICconfig
from .ContainerUpdate import ContainerUpdate
from .CoreStateResult import CoreStateResult
from .CoreSystem import CoreSystem
from .CreateContainer import CreateContainer
from .CreateSnapshotReqBody import CreateSnapshotReqBody
from .DHCP import DHCP
from .Dashboard import Dashboard
from .DashboardListItem import DashboardListItem
from .DeleteFile import DeleteFile
from .DiskInfo import DiskInfo
from .DiskPartition import DiskPartition
from .EnumBridgeCreateNetworkMode import EnumBridgeCreateNetworkMode
from .EnumBridgeStatus import EnumBridgeStatus
from .EnumClusterCreateClusterType import EnumClusterCreateClusterType
from .EnumClusterCreateDriveType import EnumClusterCreateDriveType
from .EnumClusterCreateMetaDriveType import EnumClusterCreateMetaDriveType
from .EnumClusterDriveType import EnumClusterDriveType
from .EnumClusterStatus import EnumClusterStatus
from .EnumContainerListItemStatus import EnumContainerListItemStatus
from .EnumContainerNICStatus import EnumContainerNICStatus
from .EnumContainerNICType import EnumContainerNICType
from .EnumContainerStatus import EnumContainerStatus
from .EnumDiskInfoType import EnumDiskInfoType
from .EnumGWNICType import EnumGWNICType
from .EnumGetGWStatus import EnumGetGWStatus
from .EnumJobResultName import EnumJobResultName
from .EnumJobResultState import EnumJobResultState
from .EnumNicLinkType import EnumNicLinkType
from .EnumNodeStatus import EnumNodeStatus
from .EnumStoragePoolCreateDataProfile import EnumStoragePoolCreateDataProfile
from .EnumStoragePoolCreateMetadataProfile import EnumStoragePoolCreateMetadataProfile
from .EnumStoragePoolDataProfile import EnumStoragePoolDataProfile
from .EnumStoragePoolDeviceStatus import EnumStoragePoolDeviceStatus
from .EnumStoragePoolListItemStatus import EnumStoragePoolListItemStatus
from .EnumStoragePoolMetadataProfile import EnumStoragePoolMetadataProfile
from .EnumStoragePoolStatus import EnumStoragePoolStatus
from .EnumStorageServerStatus import EnumStorageServerStatus
from .EnumVMListItemStatus import EnumVMListItemStatus
from .EnumVMStatus import EnumVMStatus
from .EnumVdiskCreateType import EnumVdiskCreateType
from .EnumVdiskListItemStatus import EnumVdiskListItemStatus
from .EnumVdiskListItemType import EnumVdiskListItemType
from .EnumVdiskStatus import EnumVdiskStatus
from .EnumVdiskType import EnumVdiskType
from .EnumZerotierListItemType import EnumZerotierListItemType
from .EnumZerotierType import EnumZerotierType
from .EventType import EventType
from .ExportVM import ExportVM
from .FTPUrl import FTPUrl
from .Filesystem import Filesystem
from .FilesystemCreate import FilesystemCreate
from .GW import GW
from .GWCreate import GWCreate
from .GWHost import GWHost
from .GWNIC import GWNIC
from .GWNICconfig import GWNICconfig
from .GetGW import GetGW
from .Graph import Graph
from .GraphUpdate import GraphUpdate
from .HTTPProxy import HTTPProxy
from .HTTPType import HTTPType
from .HealthCheck import HealthCheck
from .IPProtocol import IPProtocol
from .Image import Image
from .ImageImport import ImageImport
from .ImportVM import ImportVM
from .Job import Job
from .JobListItem import JobListItem
from .JobResult import JobResult
from .ListGW import ListGW
from .MemInfo import MemInfo
from .Message import Message
from .MigrateGW import MigrateGW
from .NicInfo import NicInfo
from .NicLink import NicLink
from .Node import Node
from .NodeHealthCheck import NodeHealthCheck
from .NodeMount import NodeMount
from .NodeReboot import NodeReboot
from .OSInfo import OSInfo
from .PortForward import PortForward
from .Process import Process
from .ProcessSignal import ProcessSignal
from .Snapshot import Snapshot
from .StorageClusterHealthCheck import StorageClusterHealthCheck
from .StoragePool import StoragePool
from .StoragePoolCreate import StoragePoolCreate
from .StoragePoolDevice import StoragePoolDevice
from .StoragePoolListItem import StoragePoolListItem
from .StorageServer import StorageServer
from .VDiskLink import VDiskLink
from .VM import VM
from .VMCreate import VMCreate
from .VMDiskInfo import VMDiskInfo
from .VMInfo import VMInfo
from .VMListItem import VMListItem
from .VMMigrate import VMMigrate
from .VMNicInfo import VMNicInfo
from .VMUpdate import VMUpdate
from .Vdisk import Vdisk
from .VdiskCreate import VdiskCreate
from .VdiskListItem import VdiskListItem
from .VdiskResize import VdiskResize
from .VdiskRollback import VdiskRollback
from .VdiskStorage import VdiskStorage
from .Webhook import Webhook
from .WebhookUpdate import WebhookUpdate
from .WriteFile import WriteFile
from .Zerotier import Zerotier
from .ZerotierBridge import ZerotierBridge
from .ZerotierJoin import ZerotierJoin
from .ZerotierListItem import ZerotierListItem
from .ZerotierRoute import ZerotierRoute
from .client import Client as APIClient
from .oauth2_client_itsyouonline import Oauth2ClientItsyouonline
class Client:
    # Thin aggregate exposing the generated REST client (``.api``) and the
    # itsyou.online OAuth2 helper under a single object.
    def __init__(self, base_uri=""):
        """Create the orchestrator client.

        :param base_uri: base URL of the orchestrator REST API
        """
        self.api = APIClient(base_uri)
        self.oauth2_client_itsyouonline = Oauth2ClientItsyouonline()
"content_hash": "467fb52f6311ac826071ed4c6c558a38",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 86,
"avg_line_length": 39.77142857142857,
"alnum_prop": 0.8708692528735632,
"repo_name": "g8os/grid",
"id": "20b8d034f0fdca4d362dc874c1fd1d9f844ae8cf",
"size": "5568",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyclient/zeroos/orchestrator/client/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from sqlalchemy.orm import relationship
from emonitor.extensions import db
from .alarmtype import AlarmType
import yaml
class AlarmSection(db.Model):
    """
    AlarmSection class for sections of alarm type :py:class:`emonitor.modules.alarms.alarmtype.AlarmType`

    A section is one named fragment of an alarm, identified by *key*,
    with an evaluation *method* string ("name;param1;param2;...") and
    free-form *attributes* persisted as YAML text.
    """
    __tablename__ = 'alarmsections'
    __table_args__ = {'extend_existing': True}

    id = db.Column(db.Integer, primary_key=True)
    tid = db.Column(db.Integer, db.ForeignKey('alarmtypes.id'))
    name = db.Column(db.String(32), default="")
    key = db.Column(db.String(32), default="")
    active = db.Column(db.Integer, default=0)
    method = db.Column(db.Text, default="")
    orderpos = db.Column(db.Integer, default=0)
    _attributes = db.Column('attributes', db.Text)

    alarmtype = relationship(AlarmType.__name__, backref="tid", lazy='joined')

    def __init__(self, tid, name, key, active, method, orderpos, attributes=''):
        self.tid = tid
        self.name = name
        self.key = key
        self.active = active
        self.method = method
        self.orderpos = orderpos
        self._attributes = attributes

    def __repr__(self):
        return '{}: {} {}'.format(self.orderpos, self.name.encode('utf-8'), self.key.encode('utf-8'))

    def __cmp__(self, other):
        # Python 2 ordering hook: sections sort by display position.
        if hasattr(other, 'orderpos'):
            return self.orderpos.__cmp__(other.orderpos)

    @property
    def attributes(self):
        # SECURITY FIX: use safe_load instead of yaml.load.  The setter
        # below writes with yaml.safe_dump, so safe_load is symmetric;
        # yaml.load without a Loader can instantiate arbitrary objects and
        # is rejected by PyYAML >= 5.1.
        return yaml.safe_load(self._attributes) or {}

    @attributes.setter
    def attributes(self, values):
        self._attributes = yaml.safe_dump(values, encoding='utf-8')

    def getSectionMethod(self):
        """Return the method name part (text before the first ';')."""
        return self.method.split(';')[0]

    def getSectionMethodParams(self):
        """Return the ';'-separated method parameters joined with ','."""
        if len(self.method.split(';')) > 1:
            return ','.join(self.method.split(';')[1:])
        return ''

    def getSectionMethodConfig(self, attribute=''):
        """
        Get attributes from yaml object

        :param attribute: attribute name of method yaml dict
        :return: value of given attribute or yaml dict
        """
        # safe_load for the same reason as in the ``attributes`` property.
        val = yaml.safe_load(self.method)
        if attribute in val.keys():
            return val[attribute]
        return val

    @staticmethod
    def getSections(id=0, tid=0):
        """
        Get list of sections of current alarmtype

        :param optional id: section id, 0 for all sections
        :param optional tid: type id, 0 for all types
        :return: list of :py:class:`emonitor.modules.alarms.alarmsection.AlarmSection`
        """
        if id != 0:
            return AlarmSection.query.filter_by(id=id).first()
        elif tid != 0:
            return AlarmSection.query.filter_by(tid=int(tid)).order_by('orderpos').all()
        else:
            return AlarmSection.query.order_by('orderpos').all()
| {
"content_hash": "2225a9f696c3433b78c484452b7a10d3",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 105,
"avg_line_length": 34.13414634146341,
"alnum_prop": 0.6120042872454448,
"repo_name": "sambandi/eMonitor",
"id": "92e92094f941c7de5a96742e27912adb2626eca3",
"size": "2799",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "emonitor/modules/alarms/alarmsection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2392"
},
{
"name": "CSS",
"bytes": "73178"
},
{
"name": "HTML",
"bytes": "544937"
},
{
"name": "JavaScript",
"bytes": "509847"
},
{
"name": "Mako",
"bytes": "413"
},
{
"name": "Python",
"bytes": "766528"
}
],
"symlink_target": ""
} |
"""Python client API for CouchDB.
>>> server = Server('http://localhost:5984/')
>>> db = server.create('python-tests')
>>> doc_id = db.create({'type': 'Person', 'name': 'John Doe'})
>>> doc = db[doc_id]
>>> doc['type']
'Person'
>>> doc['name']
'John Doe'
>>> del db[doc.id]
>>> doc.id in db
False
>>> del server['python-tests']
"""
import httplib2
from mimetypes import guess_type
from urllib import quote, urlencode
import re
import socket
try:
import simplejson as json
except ImportError:
import json # Python 2.6
# names exported by ``from couchdb.client import *``
__all__ = ['PreconditionFailed', 'ResourceNotFound', 'ResourceConflict',
           'ServerError', 'Server', 'Database', 'Document', 'ViewResults',
           'Row']
__docformat__ = 'restructuredtext en'
# server location used when ``Server()`` is constructed without a URI
DEFAULT_BASE_URI = 'http://localhost:5984/'
class PreconditionFailed(Exception):
    """Exception raised when a 412 HTTP error is received in response to a
    request, e.g. an update/delete against a stale document revision.
    """
class ResourceNotFound(Exception):
    """Exception raised when a 404 HTTP error is received in response to a
    request (missing database, document, or attachment).
    """
class ResourceConflict(Exception):
    """Exception raised when a 409 HTTP error is received in response to a
    request, e.g. creating a database that already exists.
    """
class ServerError(Exception):
    """Exception raised when an unexpected HTTP error is received in response
    to a request; carries a ``(status_code, error)`` tuple.
    """
class Server(object):
    """Representation of a CouchDB server.
    >>> server = Server('http://localhost:5984/')
    This class behaves like a dictionary of databases. For example, to get a
    list of database names on the server, you can simply iterate over the
    server object.
    New databases can be created using the `create` method:
    >>> db = server.create('python-tests')
    >>> db
    <Database 'python-tests'>
    You can access existing databases using item access, specifying the database
    name as the key:
    >>> db = server['python-tests']
    >>> db.name
    'python-tests'
    Databases can be deleted using a ``del`` statement:
    >>> del server['python-tests']
    """
    def __init__(self, uri=DEFAULT_BASE_URI, cache=None, timeout=None):
        """Initialize the server object.
        :param uri: the URI of the server (for example
                    ``http://localhost:5984/``)
        :param cache: either a cache directory path (as a string) or an object
                      compatible with the ``httplib2.FileCache`` interface. If
                      `None` (the default), no caching is performed.
        :param timeout: socket timeout in number of seconds, or `None` for no
                        timeout
        """
        http = httplib2.Http(cache=cache, timeout=timeout)
        # let transport errors surface as exceptions; Resource._request
        # retries connection resets itself
        http.force_exception_to_status_code = False
        self.resource = Resource(http, uri)
    def __contains__(self, name):
        """Return whether the server contains a database with the specified
        name.
        :param name: the database name
        :return: `True` if a database with the name exists, `False` otherwise
        """
        # HEAD request: existence check without transferring a body
        try:
            self.resource.head(validate_dbname(name))
            return True
        except ResourceNotFound:
            return False
    def __iter__(self):
        """Iterate over the names of all databases."""
        resp, data = self.resource.get('_all_dbs')
        return iter(data)
    def __len__(self):
        """Return the number of databases."""
        resp, data = self.resource.get('_all_dbs')
        return len(data)
    def __nonzero__(self):
        """Return whether the server is available."""
        # py2 truth-value hook (py3 would need __bool__)
        # NOTE(review): the bare except also hides programming errors
        try:
            self.resource.head()
            return True
        except:
            return False
    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self.resource.uri)
    def __delitem__(self, name):
        """Remove the database with the specified name.
        :param name: the name of the database
        :raise ResourceNotFound: if no database with that name exists
        """
        self.resource.delete(validate_dbname(name))
    def __getitem__(self, name):
        """Return a `Database` object representing the database with the
        specified name.
        :param name: the name of the database
        :return: a `Database` object representing the database
        :rtype: `Database`
        :raise ResourceNotFound: if no database with that name exists
        """
        # NOTE(review): no request is made here, so ResourceNotFound would
        # only be raised lazily on first access -- verify against callers
        return Database(uri(self.resource.uri, name), validate_dbname(name),
                        http=self.resource.http)
    def _get_version(self):
        resp, data = self.resource.get()
        return data['version']
    version = property(_get_version, doc="""\
        The version number tuple for the CouchDB server.
        Note that this results in a request being made, and can also be used
        to check for the availability of the server.
        :type: `unicode`
        """)
    def create(self, name):
        """Create a new database with the given name.
        :param name: the name of the database
        :return: a `Database` object representing the created database
        :rtype: `Database`
        :raise ResourceConflict: if a database with that name already exists
        """
        self.resource.put(validate_dbname(name))
        return self[name]
class Database(object):
    """Representation of a database on a CouchDB server.
    >>> server = Server('http://localhost:5984/')
    >>> db = server.create('python-tests')
    New documents can be added to the database using the `create()` method:
    >>> doc_id = db.create({'type': 'Person', 'name': 'John Doe'})
    This class provides a dictionary-like interface to databases: documents are
    retrieved by their ID using item access
    >>> doc = db[doc_id]
    >>> doc #doctest: +ELLIPSIS
    <Document '...'@... {...}>
    Documents are represented as instances of the `Row` class, which is
    basically just a normal dictionary with the additional attributes ``id`` and
    ``rev``:
    >>> doc.id, doc.rev #doctest: +ELLIPSIS
    ('...', ...)
    >>> doc['type']
    'Person'
    >>> doc['name']
    'John Doe'
    To update an existing document, you use item access, too:
    >>> doc['name'] = 'Mary Jane'
    >>> db[doc.id] = doc
    The `create()` method creates a document with an auto-generated ID. If you
    want to explicitly specify the ID, you'd use item access just as with
    updating:
    >>> db['JohnDoe'] = {'type': 'person', 'name': 'John Doe'}
    >>> 'JohnDoe' in db
    True
    >>> len(db)
    2
    >>> del server['python-tests']
    """
    def __init__(self, uri, name=None, http=None):
        self.resource = Resource(http, uri)
        self._name = name
    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self.name)
    def __contains__(self, id):
        """Return whether the database contains a document with the specified
        ID.
        :param id: the document ID
        :return: `True` if a document with the ID exists, `False` otherwise
        """
        # HEAD request: existence check without fetching the document body
        try:
            self.resource.head(id)
            return True
        except ResourceNotFound:
            return False
    def __iter__(self):
        """Return the IDs of all documents in the database."""
        return iter([item.id for item in self.view('_all_docs')])
    def __len__(self):
        """Return the number of documents in the database."""
        resp, data = self.resource.get()
        return data['doc_count']
    def __nonzero__(self):
        """Return whether the database is available."""
        # py2 truth-value hook (py3 would need __bool__)
        try:
            self.resource.head()
            return True
        except:
            return False
    def __delitem__(self, id):
        """Remove the document with the specified ID from the database.
        :param id: the document ID
        """
        # fetch the current revision via the HEAD ETag so the delete always
        # targets the latest revision of the document
        resp, data = self.resource.head(id)
        self.resource.delete(id, rev=resp['etag'].strip('"'))
    def __getitem__(self, id):
        """Return the document with the specified ID.
        :param id: the document ID
        :return: a `Document` object representing the requested document
        :rtype: `Document`
        """
        resp, data = self.resource.get(id)
        return Document(data)
    def __setitem__(self, id, content):
        """Create or update a document with the specified ID.
        :param id: the document ID
        :param content: the document content; either a plain dictionary for
                        new documents, or a `Row` object for existing
                        documents
        """
        resp, data = self.resource.put(id, content=content)
        # write the server-assigned id/revision back into the caller's dict
        content.update({'_id': data['id'], '_rev': data['rev']})
    def _get_name(self):
        # lazily resolved with one server round-trip, then cached
        if self._name is None:
            self._name = self.info()['db_name']
        return self._name
    name = property(_get_name)
    def create(self, data):
        """Create a new document in the database with a generated ID.
        Any keyword arguments are used to populate the fields of the new
        document.
        :param data: the data to store in the document
        :return: the ID of the created document
        :rtype: `unicode`
        """
        resp, data = self.resource.post(content=data)
        return data['id']
    def delete(self, doc):
        """Delete the given document from the database.
        Use this method in preference over ``__del__`` to ensure you're
        deleting the revision that you had previously retrieved. In the case
        the document has been updated since it was retrieved, this method will
        raise a `PreconditionFailed` exception.
        >>> server = Server('http://localhost:5984/')
        >>> db = server.create('python-tests')
        >>> doc = dict(type='Person', name='John Doe')
        >>> db['johndoe'] = doc
        >>> doc2 = db['johndoe']
        >>> doc2['age'] = 42
        >>> db['johndoe'] = doc2
        >>> db.delete(doc)
        Traceback (most recent call last):
          ...
        PreconditionFailed: ('conflict', 'Document update conflict.')
        >>> del server['python-tests']
        :param doc: a dictionary or `Document` object holding the document data
        :raise PreconditionFailed: if the document was updated in the database
        :since: 0.4.1
        """
        self.resource.delete(doc['_id'], rev=doc['_rev'])
    def get(self, id, default=None, **options):
        """Return the document with the specified ID.
        :param id: the document ID
        :param default: the default value to return when the document is not
                        found
        :return: a `Document` object representing the requested document, or
                 the `default` value if no document with the ID was found
        :rtype: `Document`
        """
        try:
            resp, data = self.resource.get(id, **options)
        except ResourceNotFound:
            return default
        else:
            return Document(data)
    def info(self):
        """Return information about the database as a dictionary.
        The returned dictionary exactly corresponds to the JSON response to
        a ``GET`` request on the database URI.
        :return: a dictionary of database properties
        :rtype: ``dict``
        :since: 0.4
        """
        resp, data = self.resource.get()
        return data
    def delete_attachment(self, doc, filename):
        """Delete the specified attachment.
        :param doc: the dictionary or `Document` object representing the
                    document that the attachment belongs to
        :param filename: the name of the attachment file
        :since: 0.4.1
        """
        resp, data = self.resource(doc['_id']).delete(filename, rev=doc['_rev'])
        # keep the caller's dict in sync with the new server-side revision
        doc.update({'_rev': data['rev']})
    def get_attachment(self, id_or_doc, filename, default=None):
        """Return an attachment from the specified doc id and filename.
        :param id_or_doc: either a document ID or a dictionary or `Document`
                          object representing the document that the attachment
                          belongs to
        :param filename: the name of the attachment file
        :param default: default value to return when the document or attachment
                        is not found
        :return: the content of the attachment as a string, or the value of the
                 `default` argument if the attachment is not found
        :since: 0.4.1
        """
        if isinstance(id_or_doc, basestring):
            id = id_or_doc
        else:
            id = id_or_doc['_id']
        try:
            resp, data = self.resource(id).get(filename)
            return data
        except ResourceNotFound:
            return default
    def put_attachment(self, doc, content, filename=None, content_type=None):
        """Create or replace an attachment.
        :param doc: the dictionary or `Document` object representing the
                    document that the attachment should be added to
        :param content: the content to upload, either a file-like object or
                        a string
        :param filename: the name of the attachment file; if omitted, this
                         function tries to get the filename from the file-like
                         object passed as the `content` argument value
        :param content_type: content type of the attachment; if omitted, the
                             MIME type is guessed based on the file name
                             extension
        :since: 0.4.1
        """
        if hasattr(content, 'read'):
            # capture the file's name *before* read() replaces `content`
            # with a plain string; previously the check ran after read(),
            # so the file object's name could never actually be used
            if filename is None:
                filename = getattr(content, 'name', None)
            content = content.read()
        if filename is None:
            raise ValueError('no filename specified for attachment')
        if content_type is None:
            content_type = ';'.join(filter(None, guess_type(filename)))
        resp, data = self.resource(doc['_id']).put(filename, content=content,
                                                   headers={
            'Content-Type': content_type
        }, rev=doc['_rev'])
        doc.update({'_rev': data['rev']})
    def query(self, map_fun, reduce_fun=None, language='javascript',
              wrapper=None, **options):
        """Execute an ad-hoc query (a "temp view") against the database.
        >>> server = Server('http://localhost:5984/')
        >>> db = server.create('python-tests')
        >>> db['johndoe'] = dict(type='Person', name='John Doe')
        >>> db['maryjane'] = dict(type='Person', name='Mary Jane')
        >>> db['gotham'] = dict(type='City', name='Gotham City')
        >>> map_fun = '''function(doc) {
        ...     if (doc.type == 'Person')
        ...         emit(doc.name, null);
        ... }'''
        >>> for row in db.query(map_fun):
        ...     print row.key
        John Doe
        Mary Jane
        >>> for row in db.query(map_fun, descending=True):
        ...     print row.key
        Mary Jane
        John Doe
        >>> for row in db.query(map_fun, key='John Doe'):
        ...     print row.key
        John Doe
        >>> del server['python-tests']
        :param map_fun: the code of the map function
        :param reduce_fun: the code of the reduce function (optional)
        :param language: the language of the functions, to determine which view
                         server to use
        :param wrapper: an optional callable that should be used to wrap the
                        result rows
        :param options: optional query string parameters
        :return: the view reults
        :rtype: `ViewResults`
        """
        return TemporaryView(uri(self.resource.uri, '_temp_view'), map_fun,
                             reduce_fun, language=language, wrapper=wrapper,
                             http=self.resource.http)(**options)
    def update(self, documents):
        """Perform a bulk update or insertion of the given documents using a
        single HTTP request.
        >>> server = Server('http://localhost:5984/')
        >>> db = server.create('python-tests')
        >>> for doc in db.update([
        ...     Document(type='Person', name='John Doe'),
        ...     Document(type='Person', name='Mary Jane'),
        ...     Document(type='City', name='Gotham City')
        ... ]):
        ...     print repr(doc) #doctest: +ELLIPSIS
        <Document '...'@'...' {'type': 'Person', 'name': 'John Doe'}>
        <Document '...'@'...' {'type': 'Person', 'name': 'Mary Jane'}>
        <Document '...'@'...' {'type': 'City', 'name': 'Gotham City'}>
        >>> del server['python-tests']
        If an object in the documents list is not a dictionary, this method
        looks for an ``items()`` method that can be used to convert the object
        to a dictionary. In this case, the returned generator will not update
        and yield the original object, but rather yield a dictionary with
        ``id`` and ``rev`` keys.
        :param documents: a sequence of dictionaries or `Document` objects, or
                          objects providing a ``items()`` method that can be
                          used to convert them to a dictionary
        :return: an iterable over the resulting documents
        :rtype: ``generator``
        :since: version 0.2
        """
        docs = []
        for doc in documents:
            if isinstance(doc, dict):
                docs.append(doc)
            elif hasattr(doc, 'items'):
                docs.append(dict(doc.items()))
            else:
                raise TypeError('expected dict, got %s' % type(doc))
        resp, data = self.resource.post('_bulk_docs', content={'docs': docs})
        assert data['ok'] # FIXME: Should probably raise a proper exception
        def _update():
            # server results come back in the same order as the input, so
            # each revision can be matched to its document by position
            for idx, result in enumerate(data['new_revs']):
                doc = documents[idx]
                if isinstance(doc, dict):
                    doc.update({'_id': result['id'], '_rev': result['rev']})
                    yield doc
                else:
                    yield result
        return _update()
    def view(self, name, wrapper=None, **options):
        """Execute a predefined view.
        >>> server = Server('http://localhost:5984/')
        >>> db = server.create('python-tests')
        >>> db['gotham'] = dict(type='City', name='Gotham City')
        >>> for row in db.view('_all_docs'):
        ...     print row.id
        gotham
        >>> del server['python-tests']
        :param name: the name of the view, including the ``_view/design_docid``
                     prefix for custom views
        :param wrapper: an optional callable that should be used to wrap the
                       result rows
        :param options: optional query string parameters
        :return: the view results
        :rtype: `ViewResults`
        """
        # built-in views such as _all_docs already carry their underscore
        if not name.startswith('_'):
            name = '_view/' + name
        return PermanentView(uri(self.resource.uri, *name.split('/')), name,
                             wrapper=wrapper,
                             http=self.resource.http)(**options)
class Document(dict):
    """A CouchDB document: a plain ``dict`` augmented with read-only
    ``id`` and ``rev`` properties that expose the ``_id`` and ``_rev``
    fields, respectively.
    """
    def __repr__(self):
        # show the user data without the CouchDB bookkeeping fields
        body = dict((key, value) for key, value in self.items()
                    if key not in ('_id', '_rev'))
        return '<%s %r@%r %r>' % (type(self).__name__, self.id, self.rev, body)
    @property
    def id(self):
        """The document ID (the ``_id`` field)."""
        return self['_id']
    @property
    def rev(self):
        """The document revision (the ``_rev`` field)."""
        return self['_rev']
class View(object):
    """Abstract representation of a view or query.
    Subclasses implement `_exec` to actually perform the request; this base
    class handles option binding and query-string encoding.
    """
    def __init__(self, uri, wrapper=None, http=None):
        self.resource = Resource(http, uri)
        self.wrapper = wrapper
    def __call__(self, **options):
        """Bind query options and return a lazy `ViewResults`."""
        return ViewResults(self, options)
    def __iter__(self):
        # iter() must return an *iterator*; ViewResults is only iterable
        # (it defines __iter__ but no next()), so returning self() directly
        # made ``for row in view`` raise TypeError
        return iter(self())
    def _encode_options(self, options):
        # key-like options and non-string values must be JSON-encoded for
        # the query string; plain strings pass through untouched
        retval = {}
        for name, value in options.items():
            if name in ('key', 'startkey', 'endkey') \
                    or not isinstance(value, basestring):
                value = json.dumps(value, allow_nan=False, ensure_ascii=False)
            retval[name] = value
        return retval
    def _exec(self, options):
        raise NotImplementedError
class PermanentView(View):
    """Representation of a permanent view on the server."""
    def __init__(self, uri, name, wrapper=None, http=None):
        View.__init__(self, uri, wrapper=wrapper, http=http)
        self.name = name
    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self.name)
    def _exec(self, options):
        """Run the view and return the decoded response body."""
        # a 'keys' filter must be sent in the POST body, not in the query
        # string; all other options travel as query parameters
        if 'keys' in options:
            options = options.copy()
            keys = {'keys': options.pop('keys')}
            resp, data = self.resource.post(content=keys,
                                            **self._encode_options(options))
        else:
            resp, data = self.resource.get(**self._encode_options(options))
        return data
class TemporaryView(View):
    """Representation of a temporary view."""
    def __init__(self, uri, map_fun=None, reduce_fun=None,
                 language='javascript', wrapper=None, http=None):
        View.__init__(self, uri, wrapper=wrapper, http=http)
        self.map_fun = map_fun
        self.reduce_fun = reduce_fun
        self.language = language
    def __repr__(self):
        return '<%s %r %r>' % (type(self).__name__, self.map_fun,
                               self.reduce_fun)
    def _exec(self, options):
        """POST the map/reduce source to the server and return the decoded
        response body.
        """
        body = {'map': self.map_fun, 'language': self.language}
        if self.reduce_fun:
            body['reduce'] = self.reduce_fun
        if 'keys' in options:
            # a 'keys' filter belongs in the request body, not the query
            options = options.copy()
            body['keys'] = options.pop('keys')
        # serialize manually so the JSON Content-Type header can be forced
        content = json.dumps(body, allow_nan=False,
                             ensure_ascii=False).encode('utf-8')
        resp, data = self.resource.post(content=content, headers={
            'Content-Type': 'application/json'
        }, **self._encode_options(options))
        return data
class ViewResults(object):
    """Representation of a parameterized view (either permanent or temporary)
    and the results it produces.
    This class allows the specification of ``key``, ``startkey``, and
    ``endkey`` options using Python slice notation.
    >>> server = Server('http://localhost:5984/')
    >>> db = server.create('python-tests')
    >>> db['johndoe'] = dict(type='Person', name='John Doe')
    >>> db['maryjane'] = dict(type='Person', name='Mary Jane')
    >>> db['gotham'] = dict(type='City', name='Gotham City')
    >>> map_fun = '''function(doc) {
    ...     emit([doc.type, doc.name], doc.name);
    ... }'''
    >>> results = db.query(map_fun)
    At this point, the view has not actually been accessed yet. It is accessed
    as soon as it is iterated over, its length is requested, or one of its
    `rows`, `total_rows`, or `offset` properties are accessed:
    >>> len(results)
    3
    You can use slices to apply ``startkey`` and/or ``endkey`` options to the
    view:
    >>> people = results[['Person']:['Person','ZZZZ']]
    >>> for person in people:
    ...     print person.value
    John Doe
    Mary Jane
    >>> people.total_rows, people.offset
    (3, 1)
    Use plain indexed notation (without a slice) to apply the ``key`` option.
    Note that as CouchDB makes no claim that keys are unique in a view, this
    can still return multiple rows:
    >>> list(results[['City', 'Gotham City']])
    [<Row id='gotham', key=['City', 'Gotham City'], value='Gotham City'>]
    >>> del server['python-tests']
    """
    def __init__(self, view, options):
        self.view = view
        self.options = options
        # results are fetched lazily; None means "not fetched yet"
        self._rows = self._total_rows = self._offset = None
    def __repr__(self):
        return '<%s %r %r>' % (type(self).__name__, self.view, self.options)
    def __getitem__(self, key):
        # slicing/indexing returns a *new* result set with narrowed options
        # rather than fetching anything itself
        options = self.options.copy()
        if type(key) is slice:
            # slice bounds map onto startkey/endkey
            if key.start is not None:
                options['startkey'] = key.start
            if key.stop is not None:
                options['endkey'] = key.stop
            return ViewResults(self.view, options)
        else:
            # a plain index maps onto the (non-unique) key option
            options['key'] = key
            return ViewResults(self.view, options)
    def __iter__(self):
        wrapper = self.view.wrapper
        for row in self.rows:
            if wrapper is not None:
                yield wrapper(row)
            else:
                yield row
    def __len__(self):
        return len(self.rows)
    def _fetch(self):
        # single round-trip populating rows, total_rows and offset
        data = self.view._exec(self.options)
        self._rows = [Row(row) for row in data['rows']]
        self._total_rows = data.get('total_rows')
        self._offset = data.get('offset', 0)
    def _get_rows(self):
        if self._rows is None:
            self._fetch()
        return self._rows
    rows = property(_get_rows, doc="""\
        The list of rows returned by the view.
        :type: `list`
        """)
    def _get_total_rows(self):
        if self._rows is None:
            self._fetch()
        return self._total_rows
    total_rows = property(_get_total_rows, doc="""\
        The total number of rows in this view.
        This value is `None` for reduce views.
        :type: `int` or ``NoneType`` for reduce views
        """)
    def _get_offset(self):
        if self._rows is None:
            self._fetch()
        return self._offset
    offset = property(_get_offset, doc="""\
        The offset of the results from the first row in the view.
        This value is 0 for reduce views.
        :type: `int`
        """)
class Row(dict):
    """A single result row returned by a database view, represented as a
    ``dict`` with convenience accessors for the common fields.
    """
    def __repr__(self):
        name = type(self).__name__
        if self.id is None:
            # reduce rows carry no document ID
            return '<%s key=%r, value=%r>' % (name, self.key, self.value)
        return '<%s id=%r, key=%r, value=%r>' % (name, self.id, self.key,
                                                 self.value)
    @property
    def id(self):
        """The associated document ID if it exists; `None` when it doesn't
        (reduce results).
        """
        return self.get('id')
    @property
    def key(self):
        """The associated key."""
        return self['key']
    @property
    def value(self):
        """The associated value."""
        return self['value']
    @property
    def doc(self):
        """The associated document for the row. This is only present when
        the view was accessed with ``include_docs=True`` as a query
        parameter, otherwise this property will be `None`.
        """
        doc = self.get('doc')
        if doc:
            return Document(doc)
# Internals
class Resource(object):
    """Thin wrapper around an ``httplib2.Http`` object bound to a base URI,
    providing the HTTP verbs with JSON encoding/decoding and mapping of
    CouchDB error statuses onto this module's exception types.
    """
    def __init__(self, http, uri):
        if http is None:
            http = httplib2.Http()
            http.force_exception_to_status_code = False
        self.http = http
        self.uri = uri
    def __call__(self, path):
        # resource for a sub-path, sharing the same http connection object
        return type(self)(self.http, uri(self.uri, path))
    def delete(self, path=None, headers=None, **params):
        return self._request('DELETE', path, headers=headers, **params)
    def get(self, path=None, headers=None, **params):
        return self._request('GET', path, headers=headers, **params)
    def head(self, path=None, headers=None, **params):
        return self._request('HEAD', path, headers=headers, **params)
    def post(self, path=None, content=None, headers=None, **params):
        return self._request('POST', path, content=content, headers=headers,
                             **params)
    def put(self, path=None, content=None, headers=None, **params):
        return self._request('PUT', path, content=content, headers=headers,
                             **params)
    def _request(self, method, path=None, content=None, headers=None,
                 **params):
        # imported here to avoid a circular import at module load time
        from couchdb import __version__
        headers = headers or {}
        headers.setdefault('Accept', 'application/json')
        headers.setdefault('User-Agent', 'couchdb-python %s' % __version__)
        body = None
        if content is not None:
            if not isinstance(content, basestring):
                # non-string payloads are serialized to JSON
                body = json.dumps(content, allow_nan=False,
                                  ensure_ascii=False).encode('utf-8')
                headers.setdefault('Content-Type', 'application/json')
            else:
                # strings are sent verbatim; the caller sets the content type
                body = content
            headers.setdefault('Content-Length', str(len(body)))
        def _make_request(retry=1):
            try:
                return self.http.request(uri(self.uri, path, **params), method,
                                         body=body, headers=headers)
            # NOTE(review): errno 54 is ECONNRESET on BSD/macOS only; on
            # Linux ECONNRESET is 104, so no retry happens there -- verify
            except socket.error, e:
                if retry > 0 and e.args[0] == 54: # reset by peer
                    return _make_request(retry - 1)
                raise
        resp, data = _make_request()
        status_code = int(resp.status)
        if data and resp.get('content-type') == 'application/json':
            try:
                data = json.loads(data)
            except ValueError:
                # leave payloads that are not valid JSON as raw text
                pass
        if status_code >= 400:
            # map CouchDB error statuses onto the module's exception types
            if type(data) is dict:
                error = (data.get('error'), data.get('reason'))
            else:
                error = data
            if status_code == 404:
                raise ResourceNotFound(error)
            elif status_code == 409:
                raise ResourceConflict(error)
            elif status_code == 412:
                raise PreconditionFailed(error)
            else:
                raise ServerError((status_code, error))
        return resp, data
def uri(base, *path, **query):
    """Assemble a uri based on a base, any number of path segments, and query
    string parameters.
    >>> uri('http://example.org/', '/_all_dbs')
    'http://example.org/_all_dbs'
    """
    # drop a single trailing slash from the base, if present
    base = base[:-1] if base and base.endswith('/') else base
    pieces = [base]
    # quote each non-None segment and join with '/', including a leading '/'
    segments = [unicode_quote(segment.strip('/')) for segment in path
                if segment is not None]
    if segments:
        pieces.append('/' + '/'.join(segments))
    # build the query string: flatten sequences, convert booleans, drop Nones
    params = []
    for name, value in query.items():
        if type(value) in (list, tuple):
            params.extend((name, item) for item in value if item is not None)
        elif value is not None:
            if value is True:
                value = 'true'
            elif value is False:
                value = 'false'
            params.append((name, value))
    if params:
        pieces.append('?' + unicode_urlencode(params))
    return ''.join(pieces)
def unicode_quote(string, safe=''):
    """Percent-encode `string`, transcoding unicode input to UTF-8 first."""
    if isinstance(string, unicode):
        return quote(string.encode('utf-8'), safe)
    return quote(string, safe)
def unicode_urlencode(data):
    """URL-encode a mapping or sequence of pairs, transcoding any unicode
    values to UTF-8 first.
    """
    pairs = data.items() if isinstance(data, dict) else data
    encoded = [(name,
                value.encode('utf-8') if isinstance(value, unicode)
                else value)
               for name, value in pairs]
    return urlencode(encoded)
# Database names may contain lowercase letters, digits, and the special
# characters _, $, (, ), +, -, and /.  The hyphen is placed last in the
# character class so it is literal; the original pattern's bare "+-/"
# formed a range that accidentally accepted "," and "." as well.
VALID_DB_NAME = re.compile(r'^[a-z0-9_$()+/-]+$')
def validate_dbname(name):
    """Return `name` unchanged if it is a valid database name.
    :param name: the database name to check
    :raise ValueError: if the name contains characters CouchDB rejects
    """
    if not VALID_DB_NAME.match(name):
        raise ValueError('Invalid database name')
    return name
| {
"content_hash": "3a36c5bee88a5b6708686cfd1c0dc9e4",
"timestamp": "",
"source": "github",
"line_count": 943,
"max_line_length": 80,
"avg_line_length": 33.787910922587486,
"alnum_prop": 0.5570271797125101,
"repo_name": "kocolosk/couchdb-python",
"id": "b6d3186036689180efd53f190e484b0a61b88a94",
"size": "32082",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "couchdb/client.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "113112"
}
],
"symlink_target": ""
} |
"""Contains a collection of util functions for model construction.
"""
import numpy
import tensorflow as tf
from tensorflow import logging
from tensorflow import flags
import tensorflow.contrib.slim as slim
def SampleRandomSequence(model_input, num_frames, num_samples):
  """Samples a random contiguous sequence of frames of size num_samples.
  Args:
    model_input: A tensor of size batch_size x max_frames x feature_size
    num_frames: A tensor of size batch_size x 1
    num_samples: A scalar
  Returns:
    `model_input`: A tensor of size batch_size x num_samples x feature_size
  """
  batch_size = tf.shape(model_input)[0]
  # offsets 0..num_samples-1, replicated for every example in the batch
  frame_index_offset = tf.tile(
      tf.expand_dims(tf.range(num_samples), 0), [batch_size, 1])
  max_start_frame_index = tf.maximum(num_frames - num_samples, 0)
  # uniform random start index in [0, max_start] per example
  start_frame_index = tf.cast(
      tf.multiply(
          tf.random_uniform([batch_size, 1]),
          tf.cast(max_start_frame_index + 1, tf.float32)), tf.int32)
  # clamp so short videos never index past their last valid frame
  frame_index = tf.minimum(start_frame_index + frame_index_offset,
                           tf.cast(num_frames - 1, tf.int32))
  batch_index = tf.tile(
      tf.expand_dims(tf.range(batch_size), 1), [1, num_samples])
  # (batch, frame) index pairs for gather_nd
  index = tf.stack([batch_index, frame_index], 2)
  return tf.gather_nd(model_input, index)
def SampleRandomFrames(model_input, num_frames, num_samples):
  """Samples a random set of frames of size num_samples.
  Frames are drawn independently and uniformly (with replacement) from the
  first `num_frames` entries of each example.
  Args:
    model_input: A tensor of size batch_size x max_frames x feature_size
    num_frames: A tensor of size batch_size x 1
    num_samples: A scalar
  Returns:
    `model_input`: A tensor of size batch_size x num_samples x feature_size
  """
  num_videos = tf.shape(model_input)[0]
  # scale uniform [0, 1) draws by each video's frame count, then floor via
  # the int32 cast to obtain valid frame indices
  uniform_draws = tf.random_uniform([num_videos, num_samples])
  frame_counts = tf.tile(tf.cast(num_frames, tf.float32), [1, num_samples])
  sampled_frames = tf.cast(tf.multiply(uniform_draws, frame_counts), tf.int32)
  video_indices = tf.tile(
      tf.expand_dims(tf.range(num_videos), 1), [1, num_samples])
  # (batch, frame) index pairs for gather_nd
  gather_indices = tf.stack([video_indices, sampled_frames], 2)
  return tf.gather_nd(model_input, gather_indices)
def rankPool(frames, approximation=True):
    """Performs (approximate) rank pooling over the time axis.
    Each frame t in [0, T) is weighted by the linear ramp
    alpha_t = 2*t - T - 1 and the weighted frames are summed, producing one
    pooled feature vector per example.
    Args:
      frames: A float tensor with shape [batch_size, num_frames, feature_size].
      approximation: If True, use the linear-ramp approximation; the exact
        solver is not implemented.
    Returns:
      A tensor with shape [batch_size, feature_size].
    Raises:
      NotImplementedError: if approximation is False.
    """
    if approximation:
        frame_dims = tf.shape(frames)
        T = frame_dims[1]
        alpha = 2 * tf.range(0, T) - T - 1
        # tf.range produces int32 weights; cast to the frames' dtype, since
        # a mixed int32 * float multiply raises a dtype-mismatch error
        alpha = tf.cast(alpha, frames.dtype)
        alpha = tf.expand_dims(tf.expand_dims(alpha, axis=0), axis=2)
        weighted_frames = alpha * frames
        pooled_frames = tf.reduce_sum(weighted_frames, 1)
    else:
        raise NotImplementedError
    return pooled_frames
def FramePooling(frames, method, **unused_params):
  """Pools over the frames of a video.
  Args:
    frames: A tensor with shape [batch_size, num_frames, feature_size].
    method: "average", "max", "rank", or "none".
  Returns:
    A tensor with shape [batch_size, feature_size] for average, max, or
    rank pooling. A tensor with shape [batch_size*num_frames, feature_size]
    for none pooling.
  Raises:
    ValueError: if method is other than "average", "max", "rank", or
    "none".
  """
  if method == "average":
    return tf.reduce_mean(frames, 1)
  elif method == "max":
    return tf.reduce_max(frames, 1)
  elif method =="rank":
    return rankPool(frames)
  elif method == "none":
    # flatten frames into rows so every frame is an independent example
    feature_size = frames.shape_as_list()[2]
    return tf.reshape(frames, [-1, feature_size])
  else:
    raise ValueError("Unrecognized pooling method: %s" % method)
| {
"content_hash": "4478f08bd13776c341020f91a6b02acc",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 80,
"avg_line_length": 34.583333333333336,
"alnum_prop": 0.6725903614457831,
"repo_name": "mwoodson1/youtube-8m-competition",
"id": "3890c5cc5fff4022b4822ac0ece32a3c3a3de3e1",
"size": "3918",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "128264"
}
],
"symlink_target": ""
} |
import os
from tempfile import TemporaryFile
from dateparser.data.languages_info import language_locale_dict
def to_string(data):
    """Render the language-to-locales mapping as the body of the rst table.
    Each line holds a language code; when the language has locales they are
    quoted, sorted, comma-separated, and aligned at column 18.
    """
    column_width = 18
    lines = []
    for language in sorted(data):
        locales = data[language]
        if locales:
            quoted = ', '.join("'{}'".format(locale)
                               for locale in sorted(locales))
            lines.append(language.ljust(column_width) + quoted)
        else:
            lines.append(language)
    return '\n'.join(lines) + '\n' if lines else ''
def main():
    """Regenerate the locales table in docs/supported_locales.rst.

    Copies the file line by line into a temporary file, skipping the body of
    the table delimited by rst rule lines, appends the freshly generated rows,
    then writes the result back over the original file.
    """
    readme_path = os.path.join(os.path.dirname(__file__), '..', 'docs', 'supported_locales.rst')
    new_data = to_string(language_locale_dict)
    temporary_file = TemporaryFile('w+')
    with open(readme_path) as readme_file:
        # rst rule line marking the boundaries of the locales table.
        delimiter = '============ ================================================================\n'
        delimiters_seen = 0
        is_inside_table = False
        for line in readme_file:
            if line == delimiter:
                delimiters_seen += 1
                # The table body starts after the second rule line (top rule +
                # header rule); a third rule line would flip this back to False.
                is_inside_table = delimiters_seen == 2
            elif is_inside_table:
                # Drop the old table body; it is regenerated below.
                continue
            temporary_file.write(line)
        if is_inside_table:
            # NOTE(review): new rows are appended only when the file ends while
            # still inside the table (no closing rule line after the body) --
            # confirm this matches the actual .rst layout.
            temporary_file.write(new_data)
    temporary_file.seek(0)
    with open(readme_path, 'w') as readme_file:
        readme_file.write(temporary_file.read())
    temporary_file.close()
if __name__ == '__main__':
    # Allow running this module directly as a maintenance script.
    main()
| {
"content_hash": "f7de00572b79c0d48b7966a799056b1e",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 104,
"avg_line_length": 31.565217391304348,
"alnum_prop": 0.5365013774104683,
"repo_name": "scrapinghub/dateparser",
"id": "1b72c850fea07c3271436b8150456003e62ada56",
"size": "1476",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dateparser_scripts/update_supported_languages_and_locales.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1564308"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import codecs
import os
import re
import urllib2
import urlparse
from xml.dom.minidom import Document
# Path to the XML file with signatures. The path is relative, so the script
# must be run from its own directory (enforced in updateMSSQLXML below).
MSSQL_XML = os.path.abspath("../../xml/banner/mssql.xml")
# Url to update Microsoft SQL Server XML versions file from
MSSQL_VERSIONS_URL = "http://www.sqlsecurity.com/FAQs/SQLServerVersionDatabase/tabid/63/Default.aspx"
def updateMSSQLXML():
    """Refreshes MSSQL_XML with version signatures scraped from the web.

    Downloads the sqlsecurity.com version-database page, extracts
    (service pack, build number) pairs per SQL Server release with regular
    expressions, and rewrites the signatures XML file from scratch.
    Python 2 only (print statements, urllib2, xrange).
    """
    if not os.path.exists(MSSQL_XML):
        errMsg = "[ERROR] file '%s' does not exist. Please run the script from its parent directory" % MSSQL_XML
        print errMsg
        return
    infoMsg = "[INFO] retrieving data from '%s'" % MSSQL_VERSIONS_URL
    print infoMsg
    try:
        req = urllib2.Request(MSSQL_VERSIONS_URL)
        f = urllib2.urlopen(req)
        mssqlVersionsHtmlString = f.read()
        f.close()
    except urllib2.URLError:
        __mssqlPath = urlparse.urlsplit(MSSQL_VERSIONS_URL)
        __mssqlHostname = __mssqlPath[1]
        warnMsg = "[WARNING] sqlmap was unable to connect to %s," % __mssqlHostname
        warnMsg += " check your Internet connection and retry"
        print warnMsg
        return
    # One match per "SQL Server <release> Builds" heading on the page.
    releases = re.findall("class=\"BCC_DV_01DarkBlueTitle\">SQL Server\s(.+?)\sBuilds", mssqlVersionsHtmlString, re.I | re.M)
    releasesCount = len(releases)
    # Create the minidom document
    doc = Document()
    # Create the <root> base element
    root = doc.createElement("root")
    doc.appendChild(root)
    for index in xrange(0, releasesCount):
        release = releases[index]
        # Skip Microsoft SQL Server 6.5 because the HTML
        # table is in another format
        if release == "6.5":
            continue
        # Create the <signatures> base element
        signatures = doc.createElement("signatures")
        signatures.setAttribute("release", release)
        root.appendChild(signatures)
        # Slice the raw HTML down to just this release's section.
        startIdx = mssqlVersionsHtmlString.index("SQL Server %s Builds" % releases[index])
        if index == releasesCount - 1:
            stopIdx = len(mssqlVersionsHtmlString)
        else:
            stopIdx = mssqlVersionsHtmlString.index("SQL Server %s Builds" % releases[index + 1])
        mssqlVersionsReleaseString = mssqlVersionsHtmlString[startIdx:stopIdx]
        # NOTE(review): "[7\.0|2000|2005|2008|2008 R2]" is a character class
        # (it matches any single one of those characters, not the alternatives)
        # -- presumably close enough for this page's layout; confirm on change.
        servicepackVersion = re.findall("</td><td>[7\.0|2000|2005|2008|2008 R2]*(.*?)</td><td.*?([\d\.]+)</td>[\r]*\n", mssqlVersionsReleaseString, re.I | re.M)
        for servicePack, version in servicepackVersion:
            # Normalize the scraped service-pack cell down to a short label.
            if servicePack.startswith(" "):
                servicePack = servicePack[1:]
            if "/" in servicePack:
                servicePack = servicePack[:servicePack.index("/")]
            if "(" in servicePack:
                servicePack = servicePack[:servicePack.index("(")]
            if "-" in servicePack:
                servicePack = servicePack[:servicePack.index("-")]
            if "*" in servicePack:
                servicePack = servicePack[:servicePack.index("*")]
            if servicePack.startswith("+"):
                servicePack = "0%s" % servicePack
            servicePack = servicePack.replace("\t", " ")
            servicePack = servicePack.replace("No SP", "0")
            servicePack = servicePack.replace("RTM", "0")
            servicePack = servicePack.replace("TM", "0")
            servicePack = servicePack.replace("SP", "")
            servicePack = servicePack.replace("Service Pack", "")
            servicePack = servicePack.replace("<a href=\"http:", "")
            # NOTE(review): the first argument below was presumably a
            # non-breaking space (&nbsp;) -- verify it was not mangled into a
            # plain space during an encoding round-trip.
            servicePack = servicePack.replace(" ", " ")
            servicePack = servicePack.replace("+ ", "+")
            servicePack = servicePack.replace(" +", "+")
            if servicePack.endswith(" "):
                servicePack = servicePack[:-1]
            if servicePack and version:
                # Create the main <signature> element
                signature = doc.createElement("signature")
                signatures.appendChild(signature)
                # Create a <version> element
                versionElement = doc.createElement("version")
                signature.appendChild(versionElement)
                # Give the <version> element some text
                versionText = doc.createTextNode(version)
                versionElement.appendChild(versionText)
                # Create a <servicepack> element
                servicepackElement = doc.createElement("servicepack")
                signature.appendChild(servicepackElement)
                # Give the <servicepack> element some text
                servicepackText = doc.createTextNode(servicePack)
                servicepackElement.appendChild(servicepackText)
    # Save our newly created XML to the signatures file
    mssqlXml = codecs.open(MSSQL_XML, "w", "utf8")
    doc.writexml(writer=mssqlXml, addindent="    ", newl="\n")
    mssqlXml.close()
    infoMsg = "[INFO] done. retrieved data parsed and saved into '%s'" % MSSQL_XML
    print infoMsg
if __name__ == "__main__":
    # Run the refresh when invoked directly (Python 2 interpreter required).
    updateMSSQLXML()
| {
"content_hash": "bd1c1eaa7f731c5006179688f44f71c1",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 160,
"avg_line_length": 37.77777777777778,
"alnum_prop": 0.6133333333333333,
"repo_name": "JeyZeta/Dangerous",
"id": "65ec692dab3abb24a12cc34f797aafbc490b6b5f",
"size": "5123",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "Dangerous/Golismero/tools/sqlmap/extra/mssqlsig/update.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "13260"
},
{
"name": "C",
"bytes": "12851"
},
{
"name": "C++",
"bytes": "3174"
},
{
"name": "CSS",
"bytes": "267451"
},
{
"name": "HTML",
"bytes": "2686153"
},
{
"name": "JavaScript",
"bytes": "1356956"
},
{
"name": "Lua",
"bytes": "14436"
},
{
"name": "Makefile",
"bytes": "11190"
},
{
"name": "Objective-C",
"bytes": "998"
},
{
"name": "PHP",
"bytes": "619"
},
{
"name": "PLpgSQL",
"bytes": "536"
},
{
"name": "Perl",
"bytes": "263365"
},
{
"name": "Python",
"bytes": "16669102"
},
{
"name": "Roff",
"bytes": "9828"
},
{
"name": "Ruby",
"bytes": "503"
},
{
"name": "Shell",
"bytes": "6691"
}
],
"symlink_target": ""
} |
import ta_synckvstore_declare
from splunktaucclib.rest_handler.endpoint import (
field,
validator,
RestModel,
DataInputModel,
)
from splunktaucclib.rest_handler import admin_external, util
from splunk_aoblib.rest_migration import ConfigMigrationHandler
# Module-level side effect: strip HTTP(S) proxy environment variables before
# any add-on HTTP calls are made.
util.remove_http_proxy_env_vars()
# Declarative schema for the 'kvstore_to_kvstore' modular-input REST endpoint;
# each RestField mirrors one stanza parameter.
fields = [
    # Polling interval: the regex accepts a non-negative integer or a
    # negative integer such as -1 (digits only, no leading zeros on negatives).
    field.RestField(
        'interval',
        required=True,
        encrypted=False,
        default=None,
        validator=validator.Pattern(
            regex=r"""^\-[1-9]\d*$|^\d*$""",
        )
    ),
    # Destination index name, 1-80 characters.
    field.RestField(
        'index',
        required=True,
        encrypted=False,
        default='default',
        validator=validator.String(
            max_len=80,
            min_len=1,
        )
    ),
    # Source Splunk server to pull the KV store data from.
    field.RestField(
        'u_splunkserver',
        required=True,
        encrypted=False,
        default=None,
        validator=validator.String(
            max_len=8192,
            min_len=0,
        )
    ),
    # App on the source server that owns the collection.
    field.RestField(
        'u_srcapp',
        required=True,
        encrypted=False,
        default=None,
        validator=validator.String(
            max_len=8192,
            min_len=0,
        )
    ),
    # Name of the source KV store collection.
    field.RestField(
        'u_srccollection',
        required=True,
        encrypted=False,
        default=None,
        validator=validator.String(
            max_len=8192,
            min_len=0,
        )
    ),
    # Reference to a configured credential account (no extra validation here).
    field.RestField(
        'global_account',
        required=True,
        encrypted=False,
        default=None,
        validator=None
    ),
    # How to apply rows to the destination table; defaults to 'update'.
    field.RestField(
        'u_desttableaction',
        required=True,
        encrypted=False,
        default='update',
        validator=None
    ),
    # App on the destination server that owns the collection.
    field.RestField(
        'u_destapp',
        required=True,
        encrypted=False,
        default=None,
        validator=validator.String(
            max_len=8192,
            min_len=0,
        )
    ),
    # Name of the destination KV store collection.
    field.RestField(
        'u_destcollection',
        required=True,
        encrypted=False,
        default=None,
        validator=validator.String(
            max_len=8192,
            min_len=0,
        )
    ),
    # Standard enable/disable toggle for the input.
    field.RestField(
        'disabled',
        required=False,
        validator=None
    )
]
model = RestModel(fields, name=None)
# Bind the schema to the data-input endpoint for this input type.
endpoint = DataInputModel(
    'kvstore_to_kvstore',
    model,
)
if __name__ == '__main__':
    # Entry point used by splunkd to serve the REST handler.
    admin_external.handle(
        endpoint,
        handler=ConfigMigrationHandler,
    )
| {
"content_hash": "bd4148901f886cd6a821d9a6f520734f",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 63,
"avg_line_length": 20.454545454545453,
"alnum_prop": 0.5252525252525253,
"repo_name": "georgestarcher/TA-SyncKVStore",
"id": "5bb3bddcda51dbce45db9eef16a73bf046743884",
"size": "2476",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/TA_SyncKVStore_rh_kvstore_to_kvstore.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "4504"
},
{
"name": "HTML",
"bytes": "6331"
},
{
"name": "Python",
"bytes": "5668858"
},
{
"name": "Ruby",
"bytes": "32"
}
],
"symlink_target": ""
} |
"""
Try to detect a D compiler from the list of supported compilers::
def options(opt):
opt.load('compiler_d')
def configure(cnf):
cnf.load('compiler_d')
def build(bld):
bld.program(source='main.d', target='app')
Only three D compilers are really present at the moment:
* gdc
* dmd, the ldc compiler having a very similar command-line interface
* ldc2
"""
import re
from waflib import Utils, Logs
d_compiler = {
'default' : ['gdc', 'dmd', 'ldc2']
}
"""
Dict mapping the platform names to lists of names of D compilers to try, in order of preference::
from waflib.Tools.compiler_d import d_compiler
d_compiler['default'] = ['gdc', 'dmd', 'ldc2']
"""
def default_compilers():
    """Return the platform's D compiler candidates as one space-separated string."""
    platform = Utils.unversioned_sys_platform()
    candidates = d_compiler.get(platform, d_compiler['default'])
    return ' '.join(candidates)
def configure(conf):
    """
    Detects a suitable D compiler

    :raises: :py:class:`waflib.Errors.ConfigurationError` when no suitable compiler is found
    """
    # Candidate list comes from --check-d-compiler, or the platform default.
    try:
        test_for_compiler = conf.options.check_d_compiler or default_compilers()
    except AttributeError:
        conf.fatal("Add options(opt): opt.load('compiler_d')")
    for compiler in re.split('[ ,]+', test_for_compiler):
        # Snapshot the environment so a failed probe leaves no traces behind.
        conf.env.stash()
        conf.start_msg('Checking for %r (D compiler)' % compiler)
        try:
            conf.load(compiler)
        except conf.errors.ConfigurationError as e:
            conf.env.revert()
            conf.end_msg(False)
            Logs.debug('compiler_d: %r', e)
        else:
            if conf.env.D:
                # A working compiler was detected: keep the env and stop.
                conf.end_msg(conf.env.get_flat('D'))
                conf.env.COMPILER_D = compiler
                conf.env.commit()
                break
            # The tool loaded but set no D binary: roll back and try the next.
            conf.env.revert()
            conf.end_msg(False)
    else:
        # Loop completed without break: every candidate failed.
        conf.fatal('could not configure a D compiler!')
def options(opt):
    """
    Adds the ``--check-d-compiler`` option so compiler preferences can be
    given on the command-line::

        $ waf configure --check-d-compiler=dmd
    """
    candidates = default_compilers()
    group = opt.add_option_group('Configuration options')
    group.add_option('--check-d-compiler', default=None,
        help='list of D compilers to try [%s]' % candidates, dest='check_d_compiler')
    # Let each candidate tool register its own options as well.
    for tool_name in candidates.split():
        opt.load(tool_name)
| {
"content_hash": "3c898ee2fa0416f07e2c6ab9661f5dcb",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 97,
"avg_line_length": 27,
"alnum_prop": 0.6953703703703704,
"repo_name": "MarekIgnaszak/econ-project-templates",
"id": "43bb1f646ab9b757d5f0fd719c3c5236dcf07119",
"size": "2265",
"binary": false,
"copies": "49",
"ref": "refs/heads/python",
"path": ".mywaflib/waflib/Tools/compiler_d.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "663"
},
{
"name": "Jupyter Notebook",
"bytes": "3572"
},
{
"name": "Python",
"bytes": "1222989"
},
{
"name": "Shell",
"bytes": "1716"
},
{
"name": "TeX",
"bytes": "14224"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from mcfw.rpc import returns, arguments
from rogerthat.bizz.maps import gipod, reports, services
from rogerthat.bizz.maps.poi import map as poi_map
from rogerthat.rpc import users
from rogerthat.rpc.rpc import expose
from rogerthat.to.maps import GetMapResponseTO, GetMapRequestTO, GetMapItemsResponseTO, GetMapItemsRequestTO, \
SaveMapNotificationsResponseTO, SaveMapNotificationsRequestTO, GetMapItemDetailsRequestTO, \
GetMapItemDetailsResponseTO, SaveMapItemVoteResponseTO, SaveMapItemVoteRequestTO, ToggleMapItemResponseTO, \
ToggleMapItemRequestTO, GetSavedMapItemsResponseTO, GetSavedMapItemsRequestTO, ToggleListSectionItemTO, \
GetMapSearchSuggestionsResponseTO, GetMapSearchSuggestionsRequestTO
@expose(('api',))
@returns(GetMapResponseTO)
@arguments(request=GetMapRequestTO)
def getMap(request):
    """Returns the map configuration for the map type selected by the tag."""
    app_user = users.get_current_user()
    handlers = {
        gipod.GIPOD_TAG: gipod.get_map,
        reports.REPORTS_TAG: reports.get_map,
        services.SERVICES_TAG: services.get_map,
        poi_map.POI_TAG: poi_map.get_map,
    }
    handler = handlers.get(request.tag)
    if handler is None:
        raise Exception('incorrect_tag')  # todo maps error message?
    return handler(app_user)
@expose(('api',))
@returns(GetMapSearchSuggestionsResponseTO)
@arguments(request=GetMapSearchSuggestionsRequestTO)
def getMapSearchSuggestions(request):
    """Returns search suggestions for the map type selected by the tag."""
    app_user = users.get_current_user()
    handlers = {
        services.SERVICES_TAG: services.get_map_search_suggestions,
        poi_map.POI_TAG: poi_map.get_map_search_suggestions,
    }
    handler = handlers.get(request.tag)
    if handler is None:
        raise Exception('incorrect_tag')  # todo maps error message?
    return handler(app_user, request)
@expose(('api',))
@returns(GetMapItemsResponseTO)
@arguments(request=GetMapItemsRequestTO)
def getMapItems(request):
    """Lists map items for the map type selected by the tag."""
    app_user = users.get_current_user()
    handlers = {
        gipod.GIPOD_TAG: gipod.get_map_items,
        reports.REPORTS_TAG: reports.get_map_items,
        services.SERVICES_TAG: services.get_map_items,
        poi_map.POI_TAG: poi_map.get_map_items,
    }
    handler = handlers.get(request.tag)
    if handler is None:
        raise Exception('incorrect_tag')  # todo maps error message?
    return handler(app_user, request)
@expose(('api',))
@returns(GetMapItemDetailsResponseTO)
@arguments(request=GetMapItemDetailsRequestTO)
def getMapItemDetails(request):
    """Returns the details of map items for the map type selected by the tag."""
    app_user = users.get_current_user()
    handlers = {
        gipod.GIPOD_TAG: gipod.get_map_item_details,
        reports.REPORTS_TAG: reports.get_map_item_details,
        services.SERVICES_TAG: services.get_map_item_details,
        poi_map.POI_TAG: poi_map.get_map_item_details,
    }
    handler = handlers.get(request.tag)
    if handler is None:
        raise Exception('incorrect_tag')  # todo maps error message?
    return handler(app_user, request)
@expose(('api',))
@returns(SaveMapNotificationsResponseTO)
@arguments(request=SaveMapNotificationsRequestTO)
def saveMapNotifications(request):
    """Saves notification settings; only the gipod map supports this."""
    app_user = users.get_current_user()
    if request.tag != gipod.GIPOD_TAG:
        raise Exception('incorrect_tag')  # todo maps error message?
    return gipod.save_map_notifications(app_user, request)
@expose(('api',))
@returns(ToggleMapItemResponseTO)
@arguments(request=ToggleMapItemRequestTO)
def toggleMapItem(request):
    """Toggles a map item; only saving items on the services map is supported."""
    app_user = users.get_current_user()
    if request.tag != services.SERVICES_TAG:
        raise Exception('incorrect_tag')  # todo maps error message?
    if request.toggle_id != ToggleListSectionItemTO.TOGGLE_ID_SAVE:
        raise Exception('incorrect_toggle_action')
    return services.save_map_item(app_user, request)
@expose(('api',))
@returns(GetSavedMapItemsResponseTO)
@arguments(request=GetSavedMapItemsRequestTO)
def getSavedMapItems(request):
    """Lists the user's saved map items; only the services map supports this."""
    app_user = users.get_current_user()
    if request.tag != services.SERVICES_TAG:
        raise Exception('incorrect_tag')  # todo maps error message?
    return services.get_saved_map_items(app_user, request)
@expose(('api',))
@returns(SaveMapItemVoteResponseTO)
@arguments(request=SaveMapItemVoteRequestTO)
def saveMapItemVote(request):
    """Saves a vote on a map item; only the reports map supports this."""
    app_user = users.get_current_user()
    if request.tag != reports.REPORTS_TAG:
        raise Exception('incorrect_tag')  # todo maps error message?
    return reports.save_map_item_vote(app_user, request)
| {
"content_hash": "022e10280f0f96ff55f65280fb3ab77a",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 112,
"avg_line_length": 39.94736842105263,
"alnum_prop": 0.7378129117259552,
"repo_name": "our-city-app/oca-backend",
"id": "f2439327b86c9e8d53eeab98340f77948452bbb0",
"size": "5194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/rogerthat/api/maps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "166"
},
{
"name": "CSS",
"bytes": "62142"
},
{
"name": "HTML",
"bytes": "697349"
},
{
"name": "JavaScript",
"bytes": "1023951"
},
{
"name": "PostScript",
"bytes": "4694678"
},
{
"name": "Python",
"bytes": "3149982"
},
{
"name": "Shell",
"bytes": "5839"
},
{
"name": "TypeScript",
"bytes": "690248"
}
],
"symlink_target": ""
} |
import random, sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from math import sqrt
# Running tallies for the simulation.
inside = 0  # points that landed inside the unit circle
piresults = {}  # sample index -> pi estimate recorded at that point
errorsize = {}  # sample index -> |pi - estimate|
pi = np.pi
points = []  # every generated (x, y) pair
print ("\n" * 20)
print("****Welcome to Monte Carlo PI Simulator***")
print("Note: Simulating less than 1000 points will show a Cartesian Coordinate System with their locations.")
print("\n")
# Prompt until a usable point count is entered.
# NOTE(review): negative totals are accepted here; they skip the simulation
# loop and then divide by len(piresults) == 0 at the end -- confirm whether
# negatives should be rejected like 0.
while True:
    try:
        total = int(input("How many points to generate: "))
    except ValueError:
        print("Sorry that's not a number.")
        continue
    if total == 0:
        print("Please try again with a number different from 0")
        continue
    if 10**7 <= total < 10**9:
        print("That may take a while!!!")
    elif total >= 10**9:
        print("Try with a smaller number of points")
        continue
    break
def set_window_geometry(x, y, z, k):
    """Move/resize the window of the current matplotlib figure."""
    manager = plt.get_current_fig_manager()
    manager.window.setGeometry(x, y, z, k)
# For small runs, open a scatter figure with the unit-circle outline; the
# simulation loop below adds each sampled point to it.
if total<=10**3:
    plt.figure("Distribution")
    plt.title("Point distribution")
    set_window_geometry(800, 0, 700, 700)
    ax = plt.axes()
    ax.set_aspect(1)  # equal aspect ratio so the circle is not squashed
    theta = np.linspace(-np.pi, np.pi, 200)
    plt.plot(np.sin(theta), np.cos(theta))
    plt.show(block=False)  # non-blocking: the simulation continues below
def check_if_inside(x, y):
    """Return 1 if the point (x, y) lies within the closed unit circle, else 0.

    Comparing x*x + y*y against 1 is equivalent to comparing
    sqrt(x*x + y*y) against 1 (both sides are non-negative), so the sqrt
    call the original made on every sample is unnecessary.
    """
    return 1 if x * x + y * y <= 1 else 0
def estimate_pi(inside, total):
    """Monte-Carlo estimate of pi: 4 * (points inside the circle / total points)."""
    # Scaling by 4 (a power of two) is exact in floating point, so the
    # result is bit-identical to computing 4*inside/total directly.
    return (inside / total) * 4
# Main Monte Carlo loop: sample uniform points in the [-1, 1] square.
for i in range(1,total+1):
    x = random.uniform(-1, 1)
    y = random.uniform(-1, 1)
    inside += check_if_inside(x,y)
    points.append([x,y])
    if total<=10**3:
        # Small runs: plot each point on the "Distribution" figure.
        plt.plot(x,y, 'or')
    if i<= 100 or i%(10**2) == 0:
        # Record the running estimate for the first 100 points, then every 100th.
        piest = estimate_pi(inside,i)
        piresults[i] = piest
        errorsize[i] = abs(pi-piest)
    if i%(2*(10**6)) == 0:
        # Progress output every 2 million points.
        print("Reached point %s"% i)
print("The final calculation for PI is:%s"%estimate_pi(inside,total))
print("The average calculated PI is %s"%(sum(piresults.values())/len(piresults)))
plt.figure("Absolute Difference")
plt.plot(list(errorsize.keys()), list(errorsize.values()))
# NOTE(review): mngr is unused -- set_window_geometry fetches its own manager.
mngr = plt.get_current_fig_manager()
set_window_geometry(0, 0, 700, 700)
plt.title("Abs difference from true PI")
plt.xlabel("Points")
plt.ylabel("Diff")
plt.show()  # blocking: the window must be closed before the script exits
sys.exit() | {
"content_hash": "941042e3bdb3c061244775bd240bcaf5",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 109,
"avg_line_length": 25.415730337078653,
"alnum_prop": 0.6180371352785146,
"repo_name": "eenchev/MontePI",
"id": "7fd4461a476229382be755102c40901f8e75c3ed",
"size": "2286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "estimatepi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2286"
}
],
"symlink_target": ""
} |
"""Execution contexts for the native backend."""
from collections.abc import Sequence
from concurrent import futures
import os
import os.path
import signal
import stat
import subprocess
import sys
import time
from typing import Optional
from absl import logging
import grpc
import lzma # pylint: disable=g-bad-import-order
import portpicker
from tensorflow_federated.python.core.backends.native import compiler
from tensorflow_federated.python.core.backends.native import mergeable_comp_compiler
from tensorflow_federated.python.core.impl.context_stack import context_base
from tensorflow_federated.python.core.impl.context_stack import context_stack_impl
from tensorflow_federated.python.core.impl.execution_contexts import async_execution_context
from tensorflow_federated.python.core.impl.execution_contexts import mergeable_comp_execution_context
from tensorflow_federated.python.core.impl.execution_contexts import sync_execution_context
from tensorflow_federated.python.core.impl.executor_stacks import python_executor_stacks
from tensorflow_federated.python.core.impl.executors import remote_executor
from tensorflow_federated.python.core.impl.executors import remote_executor_grpc_stub
from tensorflow_federated.python.core.impl.types import placements
# NOTE(review): these constants are not referenced in the visible part of this
# module -- presumably used by localhost/remote helpers further down; confirm
# before removing. Names suggest: seconds to wait for a locally spawned server,
# and ~2 GB gRPC message-size limits applied in both directions.
_LOCALHOST_SERVER_WAIT_TIME_SEC = 1.
_GRPC_MAX_MESSAGE_LENGTH_BYTES = 2 * 1000 * 1000 * 1000
_GRPC_CHANNEL_OPTIONS = [
    ('grpc.max_message_length', _GRPC_MAX_MESSAGE_LENGTH_BYTES),
    ('grpc.max_receive_message_length', _GRPC_MAX_MESSAGE_LENGTH_BYTES),
    ('grpc.max_send_message_length', _GRPC_MAX_MESSAGE_LENGTH_BYTES)
]
def _make_basic_python_execution_context(*, executor_fn, compiler_fn,
                                         asynchronous):
  """Wires executor function and compiler into sync or async context."""
  context_cls = (
      async_execution_context.AsyncExecutionContext
      if asynchronous else sync_execution_context.ExecutionContext)
  return context_cls(executor_fn=executor_fn, compiler_fn=compiler_fn)
def create_local_python_execution_context(
    default_num_clients: int = 0,
    max_fanout: int = 100,
    clients_per_thread: int = 1,
    server_tf_device=None,
    client_tf_devices=tuple(),
    reference_resolving_clients=False
) -> sync_execution_context.ExecutionContext:
  """Creates a synchronous execution context running computations locally."""
  # Math is only lowered to TensorFlow when clients cannot resolve references.
  transform_math = not reference_resolving_clients

  def _compile(comp):
    return compiler.transform_to_native_form(
        comp, transform_math_to_tf=transform_math)

  executor_factory = python_executor_stacks.local_executor_factory(
      default_num_clients=default_num_clients,
      max_fanout=max_fanout,
      clients_per_thread=clients_per_thread,
      server_tf_device=server_tf_device,
      client_tf_devices=client_tf_devices,
      reference_resolving_clients=reference_resolving_clients)
  return _make_basic_python_execution_context(
      executor_fn=executor_factory, compiler_fn=_compile, asynchronous=False)
def set_local_python_execution_context(default_num_clients: int = 0,
                                       max_fanout: int = 100,
                                       clients_per_thread: int = 1,
                                       server_tf_device=None,
                                       client_tf_devices=tuple(),
                                       reference_resolving_clients=False):
  """Installs a local synchronous execution context as the default."""
  context_stack_impl.context_stack.set_default_context(
      create_local_python_execution_context(
          default_num_clients=default_num_clients,
          max_fanout=max_fanout,
          clients_per_thread=clients_per_thread,
          server_tf_device=server_tf_device,
          client_tf_devices=client_tf_devices,
          reference_resolving_clients=reference_resolving_clients))
def create_local_async_python_execution_context(
    default_num_clients: int = 0,
    max_fanout: int = 100,
    clients_per_thread: int = 1,
    server_tf_device=None,
    client_tf_devices=tuple(),
    reference_resolving_clients: bool = False
) -> async_execution_context.AsyncExecutionContext:
  """Creates a context executing computations locally as coroutine functions."""
  # Math is only lowered to TensorFlow when clients cannot resolve references.
  transform_math = not reference_resolving_clients

  def _compile(comp):
    return compiler.transform_to_native_form(
        comp, transform_math_to_tf=transform_math)

  executor_factory = python_executor_stacks.local_executor_factory(
      default_num_clients=default_num_clients,
      max_fanout=max_fanout,
      clients_per_thread=clients_per_thread,
      server_tf_device=server_tf_device,
      client_tf_devices=client_tf_devices,
      reference_resolving_clients=reference_resolving_clients)
  return _make_basic_python_execution_context(
      executor_fn=executor_factory, compiler_fn=_compile, asynchronous=True)
def set_local_async_python_execution_context(
    default_num_clients: int = 0,
    max_fanout: int = 100,
    clients_per_thread: int = 1,
    server_tf_device=None,
    client_tf_devices=tuple(),
    reference_resolving_clients: bool = False):
  """Installs a local asynchronous (coroutine) execution context as default."""
  context_stack_impl.context_stack.set_default_context(
      create_local_async_python_execution_context(
          default_num_clients=default_num_clients,
          max_fanout=max_fanout,
          clients_per_thread=clients_per_thread,
          server_tf_device=server_tf_device,
          client_tf_devices=client_tf_devices,
          reference_resolving_clients=reference_resolving_clients))
def create_sizing_execution_context(default_num_clients: int = 0,
                                    max_fanout: int = 100,
                                    clients_per_thread: int = 1):
  """Creates a local execution context backed by the sizing executor stack."""
  return sync_execution_context.ExecutionContext(
      executor_fn=python_executor_stacks.sizing_executor_factory(
          default_num_clients=default_num_clients,
          max_fanout=max_fanout,
          clients_per_thread=clients_per_thread),
      compiler_fn=compiler.transform_to_native_form)
def create_thread_debugging_execution_context(default_num_clients: int = 0,
                                              clients_per_thread=1):
  """Creates a simple local execution context for debugging threads."""

  def _compile_with_tf_math(comp):
    # Matches the original debug compiler: always lower math to TensorFlow.
    return compiler.transform_to_native_form(comp, transform_math_to_tf=True)

  executor_factory = python_executor_stacks.thread_debugging_executor_factory(
      default_num_clients=default_num_clients,
      clients_per_thread=clients_per_thread,
  )
  return sync_execution_context.ExecutionContext(
      executor_fn=executor_factory, compiler_fn=_compile_with_tf_math)
def set_thread_debugging_execution_context(default_num_clients: int = 0,
                                           clients_per_thread=1):
  """Installs the thread-debugging local execution context as the default."""
  context_stack_impl.context_stack.set_default_context(
      create_thread_debugging_execution_context(
          default_num_clients=default_num_clients,
          clients_per_thread=clients_per_thread))
def create_remote_python_execution_context(
    channels,
    thread_pool_executor=None,
    dispose_batch_size=20,
    max_fanout: int = 100,
    default_num_clients: int = 0,
) -> sync_execution_context.ExecutionContext:
  """Creates a context executing computations on workers behind `channels`.

  Args:
    channels: A list of `grpc.Channels` to services that can execute TFF work;
      each channel is assumed to connect to a valid endpoint.
    thread_pool_executor: Optional `concurrent.futures.Executor` used to wait
      for streaming RPC replies; the default executor is used when `None`.
    dispose_batch_size: Number of remote values whose disposal is batched into
      one request; lower values mean more requests but remote values are
      cleaned up sooner, which may lower remote memory usage.
    max_fanout: Maximum fanout at any point in the aggregation hierarchy. When
      `num_clients > max_fanout` the stack aggregates over multiple levels,
      with height on the order of
      `log(default_num_clients) / log(max_fanout)`.
    default_num_clients: Number of clients assumed when the count cannot be
      inferred from computation arguments (e.g. no-argument or non-federated
      computations).

  Returns:
    An instance of `sync_execution_context.ExecutionContext`.
  """
  executor_factory = python_executor_stacks.remote_executor_factory(
      channels=channels,
      thread_pool_executor=thread_pool_executor,
      dispose_batch_size=dispose_batch_size,
      max_fanout=max_fanout,
      default_num_clients=default_num_clients)
  return _make_basic_python_execution_context(
      executor_fn=executor_factory,
      compiler_fn=compiler.transform_to_native_form,
      asynchronous=False)
def set_remote_python_execution_context(
    channels,
    thread_pool_executor=None,
    dispose_batch_size=20,
    max_fanout: int = 100,
    default_num_clients: int = 0,
):
  """Installs a context executing computations on workers behind `channels`.

  Args:
    channels: A list of `grpc.Channels` to services that can execute TFF work;
      each channel is assumed to connect to a valid endpoint.
    thread_pool_executor: Optional `concurrent.futures.Executor` used to wait
      for streaming RPC replies; the default executor is used when `None`.
    dispose_batch_size: Number of remote values whose disposal is batched into
      one request; lower values mean more requests but remote values are
      cleaned up sooner, which may lower remote memory usage.
    max_fanout: Maximum fanout at any point in the aggregation hierarchy. When
      `num_clients > max_fanout` the stack aggregates over multiple levels,
      with height on the order of
      `log(default_num_clients) / log(max_fanout)`.
    default_num_clients: Number of clients assumed when the count cannot be
      inferred from computation arguments (e.g. no-argument or non-federated
      computations).
  """
  context_stack_impl.context_stack.set_default_context(
      create_remote_python_execution_context(
          channels=channels,
          thread_pool_executor=thread_pool_executor,
          dispose_batch_size=dispose_batch_size,
          max_fanout=max_fanout,
          default_num_clients=default_num_clients))
def create_remote_async_python_execution_context(
    channels: list[grpc.Channel],
    thread_pool_executor: Optional[futures.Executor] = None,
    dispose_batch_size: int = 20,
    max_fanout: int = 100,
    default_num_clients: int = 0
) -> async_execution_context.AsyncExecutionContext:
  """Creates an async context executing computations on workers on `channels`.

  Args:
    channels: A list of `grpc.Channels` to services that can execute TFF work;
      each channel is assumed to connect to a valid endpoint.
    thread_pool_executor: Optional `concurrent.futures.Executor` used to wait
      for streaming RPC replies; the default executor is used when `None`.
    dispose_batch_size: Number of remote values whose disposal is batched into
      one request; lower values mean more requests but remote values are
      cleaned up sooner, which may lower remote memory usage.
    max_fanout: Maximum fanout at any point in the aggregation hierarchy. When
      `num_clients > max_fanout` the stack aggregates over multiple levels,
      with height on the order of
      `log(default_num_clients) / log(max_fanout)`.
    default_num_clients: Number of clients assumed when the count cannot be
      inferred from computation arguments (e.g. no-argument or non-federated
      computations).

  Returns:
    An instance of `async_execution_context.AsyncExecutionContext`.
  """
  executor_factory = python_executor_stacks.remote_executor_factory(
      channels=channels,
      thread_pool_executor=thread_pool_executor,
      dispose_batch_size=dispose_batch_size,
      max_fanout=max_fanout,
      default_num_clients=default_num_clients)
  return _make_basic_python_execution_context(
      executor_fn=executor_factory,
      compiler_fn=compiler.transform_to_native_form,
      asynchronous=True)
def set_remote_async_python_execution_context(channels,
                                              thread_pool_executor=None,
                                              dispose_batch_size=20,
                                              max_fanout: int = 100,
                                              default_num_clients: int = 0):
  """Installs an async remote-worker context as the process default.

  Convenience wrapper around `create_remote_async_python_execution_context`
  which also registers the resulting context on the context stack.

  Args:
    channels: A list of `grpc.Channel` objects, each assumed to connect to a
      valid endpoint hosting a service which can execute TFF work.
    thread_pool_executor: An optional `concurrent.futures.Executor` used to
      wait for replies to streaming RPC messages; the default executor is
      used when unspecified.
    dispose_batch_size: How many remote-value dispose requests to batch into a
      single call. Smaller batches mean more requests to the remote worker,
      but values are cleaned up sooner, which may lower worker memory usage.
    max_fanout: Upper bound on the fanout at any point of the aggregation
      hierarchy. When `num_clients > max_fanout`, the executor stack is built
      with multiple aggregator levels, of height on the order of
      `log(default_num_clients) / log(max_fanout)`.
    default_num_clients: Client cardinality to assume for simulations where it
      cannot be inferred from the values passed to a computation (e.g.
      no-argument or non-federated computations).
  """
  new_context = create_remote_async_python_execution_context(
      channels=channels,
      thread_pool_executor=thread_pool_executor,
      dispose_batch_size=dispose_batch_size,
      max_fanout=max_fanout,
      default_num_clients=default_num_clients)
  context_stack_impl.context_stack.set_default_context(new_context)
def create_mergeable_comp_execution_context(
    async_contexts: Sequence[context_base.AsyncContext],
    num_subrounds: Optional[int] = None,
) -> mergeable_comp_execution_context.MergeableCompExecutionContext:
  """Builds a context that compiles to and executes mergeable comp form.

  Args:
    async_contexts: The asynchronous TFF execution contexts across which work
      should be distributed.
    num_subrounds: An optional integer giving the total number of subrounds
      desired; defaults to the number of contexts supplied. Requesting more
      subrounds than contexts sequentializes invocations; requesting fewer
      runs the work in parallel across a subset of `async_contexts`.

  Returns:
    An instance of
    `mergeable_comp_execution_context.MergeableCompExecutionContext` which
    orchestrates work as specified above.
  """
  # TODO(b/204258376): Enable this py-typecheck when possible.
  compile_fn = mergeable_comp_compiler.compile_to_mergeable_comp_form
  return mergeable_comp_execution_context.MergeableCompExecutionContext(
      async_contexts=async_contexts,
      compiler_fn=compile_fn,  # pytype: disable=wrong-arg-types
      num_subrounds=num_subrounds,
  )
def set_mergeable_comp_execution_context(
    async_contexts: Sequence[context_base.AsyncContext],
    num_subrounds: Optional[int] = None,
):
  """Installs a mergeable-comp-form execution context as the default.

  Args:
    async_contexts: The asynchronous TFF execution contexts across which work
      should be distributed.
    num_subrounds: An optional integer giving the total number of subrounds
      desired; defaults to the number of contexts supplied. Requesting more
      subrounds than contexts sequentializes invocations; requesting fewer
      runs the work in parallel across a subset of `async_contexts`.
  """
  context_stack_impl.context_stack.set_default_context(
      create_mergeable_comp_execution_context(
          async_contexts=async_contexts,
          num_subrounds=num_subrounds))
def set_localhost_cpp_execution_context(
    default_num_clients: int = 0,
    max_concurrent_computation_calls: int = 1,
):
  """Installs a localhost TFF-C++ executor as the default context.

  Args:
    default_num_clients: Client cardinality to assume when it cannot be
      inferred from the arguments of a computation.
    max_concurrent_computation_calls: Maximum number of concurrent calls to a
      single computation in the C++ runtime.
  """
  context_stack_impl.context_stack.set_default_context(
      create_localhost_cpp_execution_context(
          default_num_clients=default_num_clients,
          max_concurrent_computation_calls=max_concurrent_computation_calls))
def _decompress_file(compressed_path, output_path):
  """Decompresses an LZMA/XZ-compressed file to the given `output_path`.

  The decompressed contents are streamed to disk in fixed-size chunks, so
  that large payloads (e.g. worker binaries) can be unpacked without holding
  them fully in memory. The output file is then marked executable
  (mode 0o751: rwx for the owner, r-x for the group, execute-only for
  others — note group/other read bits match the original permissions).

  Args:
    compressed_path: Path to an existing LZMA/XZ-compressed file.
    output_path: Path at which to write the decompressed contents.

  Raises:
    FileNotFoundError: If there is no file at `compressed_path`.
  """
  if not os.path.isfile(compressed_path):
    raise FileNotFoundError(
        f'Did not find a compressed file at: {compressed_path}')
  with lzma.open(compressed_path) as compressed_file:
    with open(output_path, 'wb') as binary_file:
      # Stream in 1 MiB chunks rather than materializing the whole payload.
      while chunk := compressed_file.read(1024 * 1024):
        binary_file.write(chunk)
  os.chmod(
      output_path,
      stat.S_IRUSR |
      stat.S_IWUSR |
      stat.S_IXUSR |
      stat.S_IRGRP |
      stat.S_IXGRP |
      stat.S_IXOTH)  # pyformat: disable
def create_localhost_cpp_execution_context(
    default_num_clients: int = 0,
    max_concurrent_computation_calls: int = 0,
) -> sync_execution_context.ExecutionContext:
  """Creates an execution context backed by TFF-C++ runtime.

  This execution context locates the TFF-C++ worker binary shipped with the
  Python package (decompressing the bundled `.xz` archive on first use if
  necessary), lazily starts it as a subprocess serving on an unused local
  port, and constructs a simple (Python) remote execution context to talk to
  this worker. The worker process is restarted if its stub ever reports
  itself not ready.

  Args:
    default_num_clients: The number of clients to use as the default
      cardinality, if thus number cannot be inferred by the arguments of a
      computation.
    max_concurrent_computation_calls: The maximum number of concurrent calls
      to a single computation in the CPP runtime. If `None`, there is no
      limit.

  Returns:
    An instance of `tff.framework.SyncContext` representing the TFF-C++
    runtime.

  Raises:
    RuntimeError: If an internal C++ worker binary can not be found.
  """
  # This path is specified relative to this file because the relative location
  # of the worker binary will remain the same when this function is executed
  # from the Python package and from a Bazel test.
  data_dir = os.path.join(
      os.path.dirname(__file__), '..', '..', '..', '..', 'data')
  binary_name = 'worker_binary'
  binary_path = os.path.join(data_dir, binary_name)
  if not os.path.isfile(binary_path):
    logging.debug('Did not find a worker binary at: %s', binary_path)
    compressed_path = os.path.join(data_dir, f'{binary_name}.xz')
    try:
      _decompress_file(compressed_path, binary_path)
      # BUGFIX: this line is reached only when the compressed binary WAS found
      # and successfully decompressed; the message previously (and wrongly)
      # read 'Did not find a compressed worker binary at: %s'.
      logging.debug('Found a compressed worker binary at: %s',
                    compressed_path)
    except FileNotFoundError as e:
      raise RuntimeError(
          f'Expected either a worker binary at {binary_path} or a compressed '
          f'worker binary at {compressed_path}, found neither.') from e
  else:
    logging.debug('Found a worker binary at: %s', binary_path)

  def start_process() -> tuple[subprocess.Popen[bytes], int]:
    """Starts the worker binary on an unused port; returns (process, port)."""
    port = portpicker.pick_unused_port()
    args = [
        binary_path,
        f'--port={port}',
        f'--max_concurrent_computation_calls={max_concurrent_computation_calls}',
    ]
    logging.debug('Starting TFF C++ server on port: %s', port)
    return subprocess.Popen(args, stdout=sys.stdout, stderr=sys.stderr), port

  class ServiceManager():
    """Class responsible for managing a local TFF executor service."""

    def __init__(self):
      # Both populated lazily by `get_stub`; no process is started until the
      # first computation actually needs one.
      self._stub = None
      self._process = None

    def __del__(self):
      # Best-effort cleanup: interrupt the worker subprocess (SIGINT lets it
      # shut down cleanly) and reap it when the manager is collected.
      if isinstance(self._process, subprocess.Popen):
        os.kill(self._process.pid, signal.SIGINT)
        self._process.wait()

    def get_stub(self) -> remote_executor_grpc_stub.RemoteExecutorGrpcStub:
      """Ensures a TFF service is running.

      Returns stub representing this service.

      This function ensures that the stub it returns is running, and managers
      the state of the process hosting the TFF service. It additionally
      ensures that it runs only one TFF service at a time.

      Returns:
        An TFF remote executor stub which is guaranteed to be running.
      """
      if self._stub is not None:
        if self._stub.is_ready:
          return self._stub
        # Stub is not ready; since we block below, this must imply that the
        # service is down. Kill the process and restart below.
        os.kill(self._process.pid, signal.SIGINT)
        logging.debug('Waiting for existing processes to complete')
        self._process.wait()
      # Start a process and block til the associated stub is ready.
      process, port = start_process()
      target = f'localhost:{port}'
      channel = grpc.insecure_channel(target, _GRPC_CHANNEL_OPTIONS)
      stub = remote_executor_grpc_stub.RemoteExecutorGrpcStub(channel)
      self._process = process
      self._stub = stub
      while not self._stub.is_ready:
        time.sleep(_LOCALHOST_SERVER_WAIT_TIME_SEC)
        logging.debug('TFF service manager sleeping; stub is not ready.')
      return self._stub

  service_manager = ServiceManager()

  def stack_fn(cardinalities):
    """Builds a remote executor stack against the managed local worker."""
    if cardinalities.get(placements.CLIENTS) is None:
      cardinalities[placements.CLIENTS] = default_num_clients
    stub = service_manager.get_stub()
    ex = remote_executor.RemoteExecutor(stub)
    ex.set_cardinalities(cardinalities)
    return ex

  factory = python_executor_stacks.ResourceManagingExecutorFactory(
      executor_stack_fn=stack_fn)
  return _make_basic_python_execution_context(
      executor_fn=factory,
      compiler_fn=compiler.desugar_and_transform_to_native,
      asynchronous=False)
| {
"content_hash": "9141b9ee962e22f499465f643c9b722b",
"timestamp": "",
"source": "github",
"line_count": 551,
"max_line_length": 108,
"avg_line_length": 42.4065335753176,
"alnum_prop": 0.7102199777454421,
"repo_name": "tensorflow/federated",
"id": "52df5baa897a8524a28869f0a313f2e06dc3f7d8",
"size": "23965",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tensorflow_federated/python/core/backends/native/execution_contexts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "729470"
},
{
"name": "Dockerfile",
"bytes": "1983"
},
{
"name": "Python",
"bytes": "6700736"
},
{
"name": "Shell",
"bytes": "7123"
},
{
"name": "Starlark",
"bytes": "387382"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.