| text (stringlengths 6–947k) | repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6–947k) | score (float64, 0–0.34) |
|---|---|---|---|---|---|---|
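Each row pairs a Python source file (`text`) with its repository metadata. A minimal sketch of the row layout, assuming only the column names and value ranges given above (the dataclass and its example comments are illustrative, not part of the dataset):

```python
from dataclasses import dataclass

@dataclass
class CodeRow:
    text: str        # full file contents, 6 to ~947k characters
    repo_name: str   # e.g. "pridkett/pyexiv2"
    path: str        # path within the repository, e.g. "test/rational.py"
    language: str    # a single class: "Python"
    license: str     # one of 15 license identifiers, e.g. "gpl-2.0"
    size: int        # int64 size value, 6 to ~947k
    score: float     # float64 score between 0 and 0.34
```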
# -*- coding: utf-8 -*-
# ******************************************************************************
#
# Copyright (C) 2008-2010 Olivier Tilloy <olivier@tilloy.net>
#
# This file is part of the pyexiv2 distribution.
#
# pyexiv2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# pyexiv2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyexiv2; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, 5th Floor, Boston, MA 02110-1301 USA.
#
# Author: Olivier Tilloy <olivier@tilloy.net>
#
# ******************************************************************************
import unittest
from pyexiv2.utils import Rational
class TestRational(unittest.TestCase):
def test_constructor(self):
r = Rational(2, 1)
self.assertEqual(r.numerator, 2)
self.assertEqual(r.denominator, 1)
self.assertRaises(ZeroDivisionError, Rational, 1, 0)
def test_read_only(self):
r = Rational(3, 4)
try:
r.numerator = 5
except AttributeError:
pass
else:
self.fail('Numerator is not read-only.')
try:
r.denominator = 5
except AttributeError:
pass
else:
self.fail('Denominator is not read-only.')
def test_match_string(self):
self.assertEqual(Rational.match_string('4/3'), (4, 3))
self.assertEqual(Rational.match_string('-4/3'), (-4, 3))
self.assertEqual(Rational.match_string('0/3'), (0, 3))
self.assertEqual(Rational.match_string('0/0'), (0, 0))
self.assertRaises(ValueError, Rational.match_string, '+3/5')
self.assertRaises(ValueError, Rational.match_string, '3 / 5')
self.assertRaises(ValueError, Rational.match_string, '3/-5')
self.assertRaises(ValueError, Rational.match_string, 'invalid')
def test_from_string(self):
self.assertEqual(Rational.from_string('4/3'), Rational(4, 3))
self.assertEqual(Rational.from_string('-4/3'), Rational(-4, 3))
self.assertRaises(ValueError, Rational.from_string, '+3/5')
self.assertRaises(ValueError, Rational.from_string, '3 / 5')
self.assertRaises(ValueError, Rational.from_string, '3/-5')
self.assertRaises(ValueError, Rational.from_string, 'invalid')
self.assertRaises(ZeroDivisionError, Rational.from_string, '1/0')
self.assertRaises(ZeroDivisionError, Rational.from_string, '0/0')
def test_to_string(self):
self.assertEqual(str(Rational(3, 5)), '3/5')
self.assertEqual(str(Rational(-3, 5)), '-3/5')
def test_repr(self):
self.assertEqual(repr(Rational(3, 5)), 'Rational(3, 5)')
self.assertEqual(repr(Rational(-3, 5)), 'Rational(-3, 5)')
self.assertEqual(repr(Rational(0, 3)), 'Rational(0, 3)')
def test_to_float(self):
self.assertEqual(Rational(3, 6).to_float(), 0.5)
self.assertEqual(Rational(11, 11).to_float(), 1.0)
self.assertEqual(Rational(-2, 8).to_float(), -0.25)
self.assertEqual(Rational(0, 3).to_float(), 0.0)
def test_equality(self):
r1 = Rational(2, 1)
r2 = Rational(2, 1)
r3 = Rational(8, 4)
r4 = Rational(3, 2)
self.assertEqual(r1, r2)
self.assertEqual(r1, r3)
self.assertNotEqual(r1, r4)
| pridkett/pyexiv2 | test/rational.py | Python | gpl-2.0 | 3,750 | 0 |
import unittest
import ray
from ray.rllib.agents.a3c import A2CTrainer
from ray.rllib.execution.common import STEPS_SAMPLED_COUNTER, \
STEPS_TRAINED_COUNTER
from ray.rllib.utils.test_utils import framework_iterator
class TestDistributedExecution(unittest.TestCase):
"""General tests for the distributed execution API."""
@classmethod
def setUpClass(cls):
ray.init(num_cpus=4)
@classmethod
def tearDownClass(cls):
ray.shutdown()
def test_exec_plan_stats(self):
for fw in framework_iterator(frameworks=("torch", "tf")):
trainer = A2CTrainer(
env="CartPole-v0",
config={
"min_iter_time_s": 0,
"framework": fw,
})
result = trainer.train()
assert isinstance(result, dict)
assert "info" in result
assert "learner" in result["info"]
assert STEPS_SAMPLED_COUNTER in result["info"]
assert STEPS_TRAINED_COUNTER in result["info"]
assert "timers" in result
assert "learn_time_ms" in result["timers"]
assert "learn_throughput" in result["timers"]
assert "sample_time_ms" in result["timers"]
assert "sample_throughput" in result["timers"]
assert "update_time_ms" in result["timers"]
def test_exec_plan_save_restore(self):
for fw in framework_iterator(frameworks=("torch", "tf")):
trainer = A2CTrainer(
env="CartPole-v0",
config={
"min_iter_time_s": 0,
"framework": fw,
})
res1 = trainer.train()
checkpoint = trainer.save()
for _ in range(2):
res2 = trainer.train()
assert res2["timesteps_total"] > res1["timesteps_total"], \
(res1, res2)
trainer.restore(checkpoint)
# Should restore the timesteps counter to the same as res2.
res3 = trainer.train()
assert res3["timesteps_total"] < res2["timesteps_total"], \
(res2, res3)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| pcmoritz/ray-1 | rllib/tests/test_exec_api.py | Python | apache-2.0 | 2,301 | 0 |
import ssl
import tempfile
import pytest
from ...utils.data import get_pkg_data_filename
from ..hub import SAMPHubServer
from ..integrated_client import SAMPIntegratedClient
from ..errors import SAMPProxyError
# By default, tests should not use the internet.
from .. import conf
from .test_helpers import random_params, Receiver, assert_output, TEST_REPLY
def setup_module(module):
conf.use_internet = False
class TestStandardProfile:
@property
def hub_init_kwargs(self):
return {}
@property
def client_init_kwargs(self):
return {}
@property
def client_connect_kwargs(self):
return {}
def setup_method(self, method):
self.tmpdir = tempfile.mkdtemp()
self.hub = SAMPHubServer(web_profile=False, mode='multiple', pool_size=1,
**self.hub_init_kwargs)
self.hub.start()
self.client1 = SAMPIntegratedClient(**self.client_init_kwargs)
self.client1.connect(hub=self.hub, pool_size=1, **self.client_connect_kwargs)
self.client2 = SAMPIntegratedClient(**self.client_init_kwargs)
self.client2.connect(hub=self.hub, pool_size=1, **self.client_connect_kwargs)
def teardown_method(self, method):
if self.client1.is_connected:
self.client1.disconnect()
if self.client2.is_connected:
self.client2.disconnect()
self.hub.stop()
def test_main(self):
self.client1_id = self.client1.get_public_id()
self.client2_id = self.client2.get_public_id()
self.metadata1 = {"samp.name": "Client 1",
"samp.description.text": "Client 1 Description",
"client.version": "1.1"}
self.metadata2 = {"samp.name": "Client 2",
"samp.description.text": "Client 2 Description",
"client.version": "1.2"}
# Check that the clients are connected
assert self.client1.is_connected
assert self.client2.is_connected
# Check that ping works
self.client1.ping()
self.client2.ping()
# Check that get_registered_clients works as expected.
assert self.client1_id not in self.client1.get_registered_clients()
assert self.client2_id in self.client1.get_registered_clients()
assert self.client1_id in self.client2.get_registered_clients()
assert self.client2_id not in self.client2.get_registered_clients()
# Check that get_metadata works as expected
assert self.client1.get_metadata(self.client1_id) == {}
assert self.client1.get_metadata(self.client2_id) == {}
assert self.client2.get_metadata(self.client1_id) == {}
assert self.client2.get_metadata(self.client2_id) == {}
self.client1.declare_metadata(self.metadata1)
assert self.client1.get_metadata(self.client1_id) == self.metadata1
assert self.client2.get_metadata(self.client1_id) == self.metadata1
assert self.client1.get_metadata(self.client2_id) == {}
assert self.client2.get_metadata(self.client2_id) == {}
self.client2.declare_metadata(self.metadata2)
assert self.client1.get_metadata(self.client1_id) == self.metadata1
assert self.client2.get_metadata(self.client1_id) == self.metadata1
assert self.client1.get_metadata(self.client2_id) == self.metadata2
assert self.client2.get_metadata(self.client2_id) == self.metadata2
# Check that, without subscriptions, sending a notification from one
# client to another raises an error.
message = {}
message['samp.mtype'] = "table.load.votable"
message['samp.params'] = {}
with pytest.raises(SAMPProxyError):
self.client1.notify(self.client2_id, message)
# Check that there are no currently active subscriptions
assert self.client1.get_subscribed_clients('table.load.votable') == {}
assert self.client2.get_subscribed_clients('table.load.votable') == {}
# We now test notifications and calls
rec1 = Receiver(self.client1)
rec2 = Receiver(self.client2)
self.client2.bind_receive_notification('table.load.votable',
rec2.receive_notification)
self.client2.bind_receive_call('table.load.votable',
rec2.receive_call)
self.client1.bind_receive_response('test-tag', rec1.receive_response)
# Check resulting subscriptions
assert self.client1.get_subscribed_clients('table.load.votable') == {self.client2_id: {}}
assert self.client2.get_subscribed_clients('table.load.votable') == {}
assert 'table.load.votable' in self.client1.get_subscriptions(self.client2_id)
assert 'table.load.votable' in self.client2.get_subscriptions(self.client2_id)
# Once we have finished with the calls and notifications, we will
# check the data got across correctly.
# Test notify
params = random_params(self.tmpdir)
self.client1.notify(self.client2.get_public_id(),
{'samp.mtype': 'table.load.votable',
'samp.params': params})
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
params = random_params(self.tmpdir)
self.client1.enotify(self.client2.get_public_id(),
"table.load.votable", **params)
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
# Test notify_all
params = random_params(self.tmpdir)
self.client1.notify_all({'samp.mtype': 'table.load.votable',
'samp.params': params})
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
params = random_params(self.tmpdir)
self.client1.enotify_all("table.load.votable", **params)
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
# Test call
params = random_params(self.tmpdir)
self.client1.call(self.client2.get_public_id(), 'test-tag',
{'samp.mtype': 'table.load.votable',
'samp.params': params})
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
params = random_params(self.tmpdir)
self.client1.ecall(self.client2.get_public_id(), 'test-tag',
"table.load.votable", **params)
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
# Test call_all
params = random_params(self.tmpdir)
self.client1.call_all('tag1',
{'samp.mtype': 'table.load.votable',
'samp.params': params})
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
params = random_params(self.tmpdir)
self.client1.ecall_all('tag2',
"table.load.votable", **params)
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
# Test call_and_wait
params = random_params(self.tmpdir)
result = self.client1.call_and_wait(self.client2.get_public_id(),
{'samp.mtype': 'table.load.votable',
'samp.params': params}, timeout=5)
assert result == TEST_REPLY
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
params = random_params(self.tmpdir)
result = self.client1.ecall_and_wait(self.client2.get_public_id(),
"table.load.votable", timeout=5, **params)
assert result == TEST_REPLY
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
# TODO: check that receive_response received the right data
| funbaker/astropy | astropy/samp/tests/test_standard_profile.py | Python | bsd-3-clause | 8,591 | 0.001048 |
#!/usr/bin/python
#
# author:
#
# date:
# description:
#
'''Trains a memory network on the bAbI dataset.
References:
- Jason Weston, Antoine Bordes, Sumit Chopra, Tomas Mikolov, Alexander M. Rush,
"Towards AI-Complete Question a1ing: A Set of Prerequisite Toy Tasks",
http://arxiv.org/abs/1502.05698
- Sainbayar Sukhbaatar, Arthur Szlam, Jason Weston, Rob Fergus,
"End-To-End Memory Networks",
http://arxiv.org/abs/1503.08895
Reaches 98.6% accuracy on task 'single_supporting_fact_10k' after 120 epochs.
Time per epoch: 3s on CPU (core i7).
'''
from __future__ import print_function
from keras.models import Sequential
from keras.layers.embeddings import Embedding
from keras.layers import Activation, Dense, Merge, Permute, Dropout
from keras.layers import LSTM, SimpleRNN, Input
from keras.layers.core import Flatten
from keras.utils.data_utils import get_file
from functools import reduce
import tarfile
from data import get_stories, vectorize_stories
path = 'babi-tasks-v1-2.tar.gz'
#origin='http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2.tar.gz')
tar = tarfile.open(path)
challenges = {
# QA1 with 10,000 samples
'single_supporting_fact_10k': 'tasks_1-20_v1-2/en-10k/qa1_single-supporting-fact_{}.txt',
# QA2 with 10,000 samples
'two_supporting_facts_10k': 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_{}.txt',
}
challenge_type = 'single_supporting_fact_10k'
challenge = challenges[challenge_type]
print('Extracting stories for the challenge:', challenge_type)
train_stories = get_stories(tar.extractfile(challenge.format('train')))
test_stories = get_stories(tar.extractfile(challenge.format('test')))
vocab = sorted(reduce(lambda x, y: x | y, (set(story + q + [a1]) for story, q, a1 in train_stories + test_stories)))
# Reserve 0 for masking via pad_sequences
vocab_size = len(vocab) + 1
story_maxlen = max(map(len, (x for x, _, _ in train_stories + test_stories)))
query_maxlen = max(map(len, (x for _, x, _ in train_stories + test_stories)))
print('-')
print('Vocab size:', vocab_size, 'unique words')
print('Story max length:', story_maxlen, 'words')
print('Query max length:', query_maxlen, 'words')
print('Number of training stories:', len(train_stories))
print('Number of test stories:', len(test_stories))
print('-')
print('Here\'s what a "story" tuple looks like (input, query, a1):')
print(train_stories[0])
print('-')
print('Vectorizing the word sequences...')
word_idx = dict((c, i + 1) for i, c in enumerate(vocab))
inputs_train, queries_train, a1s_train = vectorize_stories(train_stories, word_idx, story_maxlen, query_maxlen)
inputs_test, queries_test, a1s_test = vectorize_stories(test_stories, word_idx, story_maxlen, query_maxlen)
print('-')
print('inputs: integer tensor of shape (samples, max_length)')
print('inputs_train shape:', inputs_train.shape)
print('inputs_test shape:', inputs_test.shape)
print('-')
print('queries: integer tensor of shape (samples, max_length)')
print('queries_train shape:', queries_train.shape)
print('queries_test shape:', queries_test.shape)
print('-')
print('a1s: binary (1 or 0) tensor of shape (samples, vocab_size)')
print('a1s_train shape:', a1s_train.shape)
print('a1s_test shape:', a1s_test.shape)
print('-')
print('Compiling...')
print(inputs_train.shape)
print(queries_train.shape)
X = Input(shape=(story_maxlen,), dtype="int32")
Q = Input(shape=(query_maxlen,), dtype="int32")
embedding_dim = story_maxlen
# embed the input sequence into a sequence of vectors
m1 = Sequential()
m1.add(Embedding(input_dim=vocab_size,
output_dim=embedding_dim,
input_length=story_maxlen)(X))
# output: (samples, story_maxlen, embedding_dim)
# embed the question into a sequence of vectors
u1 = Sequential()
u1.add(Embedding(input_dim=vocab_size,
output_dim=embedding_dim,
input_length=query_maxlen)(Q))
# output: (samples, query_maxlen, embedding_dim)
# compute a 'match' (stored in w1) between input sequence elements (which are vectors)
# and the question vector sequence
w1 = Sequential()
w1.add(Merge([m1, u1], mode='dot', dot_axes=[2, 2]))
#w1.add(Activation('softmax'))
# output: (samples, story_maxlen, query_maxlen)
# embed the input into a single vector with size = story_maxlen:
c1 = Sequential()
c1.add(Embedding(input_dim=vocab_size,
output_dim=query_maxlen,
input_length=story_maxlen)(X))
# output: (samples, story_maxlen, query_maxlen)
# sum the w1 vector with the input vector:
o1 = Sequential()
o1.add(Merge([w1, c1], mode='sum'))
# output: (samples, story_maxlen, query_maxlen)
o1.add(Permute((2, 1))) # output: (samples, query_maxlen, story_maxlen)
#u2 = Sequential()
#u2.add(Merge([o1, u1], mode='sum'))
#m2 = Sequential()
#m2.add(Embedding(input_dim=vocab_size,
#output_dim=embedding_dim,
#input_length=story_maxlen))
#w2 = Sequential()
#w2.add(Merge([m2, u2], mode='dot', dot_axes=[2, 2]))
#c2 = Sequential()
#c2.add(Embedding(input_dim=vocab_size,
#output_dim=query_maxlen,
#input_length=story_maxlen))
#o2 = Sequential()
#o2.add(Merge([w2, c2], mode='sum'))
#o2.add(Permute((2, 1)))
# concatenate the w1 vector with the question vector,
# and do logistic regression on top
a1 = Sequential()
a1.add(Merge([o1, u1], mode='sum'))
a1.add(Flatten()) # why not in original format?
# one regularization layer -- more would probably be needed.
a1.add(Dense(vocab_size))
# we output a probability distribution over the vocabulary
a1.add(Activation('softmax'))
a1.compile(optimizer='adam', loss='categorical_crossentropy',
metrics=['accuracy'])
# Note: you could use a Graph model to avoid repeating the input twice
a1.fit([inputs_train, queries_train], a1s_train,
batch_size=512,
nb_epoch=10,
validation_data=([inputs_test, queries_test], a1s_test))
from keras.utils.visualize_util import plot
if __name__ == "__main__" and False:
plot(a1, to_file='model.png')
json_model = a1.to_json()
with open("model.json", "w") as fh:
fh.write(json_model)
a1.save_weights("rnn_weights.h5")
| Rene90/dl4nlp | hw6_babi_qa/babi2.py | Python | mit | 6,138 | 0.006191 |
# encoding: utf-8
from yast import import_module
import_module('UI')
from yast import *
class Heading2Client:
def main(self):
UI.OpenDialog(
VBox(
Heading("This Is a Heading."),
Label("This is a Label."),
PushButton("&OK")
)
)
UI.UserInput()
UI.CloseDialog()
Heading2Client().main()
| yast/yast-python-bindings | examples/Heading2.py | Python | gpl-2.0 | 361 | 0.01662 |
# -*- coding: utf-8 -*-
"""Copyright (C) 2013 COLDWELL AG
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import time
from ... import hoster
@hoster.host
class this:
model = hoster.HttpPremiumHoster
name = 'junocloud.me'
patterns = [
hoster.Matcher('https?', '*.junocloud.me', '!/<id>'),
]
max_filesize_free = hoster.GB(2)
max_filesize_premium = hoster.GB(2)
url_template = 'http://junocloud.me/{id}'
login_url = 'http://junocloud.me/login.html'
account_url = 'http://junocloud.me/account.html'
def boot_account(account):
account.set_user_agent()
account.cookies["lang"] = "english"
if account.username is None:
return
data = {
'op': 'login',
'redirect': this.account_url,
'login': account.username,
'password': account.password,
'loginFormSubmit': 'Login',
}
resp = account.post(this.login_url, data=data)
if resp.url != this.account_url:
account.login_failed()
return
return resp
def on_initialize_account(account):
resp = boot_account(account)
if resp:
status = resp.soup.find('div', text=lambda a: 'Status:' in a if a else False).find_next('div').find('strong').text.strip()
if status != 'Premium':
account.premium = False
return
raise NotImplementedError('premium is not implemented')
def check_errors(ctx, resp):
if 'The origin web server timed out responding to this request.' in resp.text:
ctx.maintenance(180)
h1 = resp.soup.find('h1')
if h1:
if 'File Not Found' in h1.text or '404 Not Found' in h1.text:
ctx.set_offline()
def on_check_http(file, resp):
check_errors(file, resp)
name = resp.soup.find('input', attrs={'name': 'fname'}).get('value').strip()
size = resp.soup.find('p', 'request_filesize').text.strip().split(' ', 1)[1].strip()
file.set_infos(name=name, size=size)
def on_download_premium(chunk):
raise NotImplementedError('premium is untested')
def on_download_free(chunk):
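# Flow summary (descriptive comment, not in the original source): fetch the
# landing page, submit the first xfilesharing form, honour any wait / IP-block
# message, then fill in the reCAPTCHA fields and submit the second form; a 302
# response carries the direct download URL in its Location header.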
resp = chunk.account.get(chunk.url, use_cache=True)
check_errors(chunk, resp)
resp = hoster.xfilesharing_download(resp, 1)[0]()
check_errors(chunk, resp)
m = re.search('You have to wait (.*?) till next download', resp.text)
if m:
wait = hoster.parse_seconds2(m.group(1)) + time.time()
if wait > 300:
chunk.ip_blocked(wait)
submit, data = hoster.xfilesharing_download(resp, 2)
wait = resp.soup.find('span', id='uglrto')
if wait:
wait = int(wait.text.strip().rsplit(' ', 1)[1]) + time.time()
for result, challenge in chunk.solve_captcha('recaptcha', parse=resp.text, retries=5):
data['recaptcha_challenge_field'] = challenge
data['recaptcha_response_field'] = result
if wait and wait - time.time() > 0:
chunk.wait(wait - time.time())
resp = submit(allow_redirects=False)
if resp.status_code == 302:
return resp.headers['Location']
check_errors(chunk, resp)
| bdacode/hoster | hoster/junocloud_me.py | Python | gpl-3.0 | 3,651 | 0.003561 |
""" Exit Status 1 is already used in the script.
Zdd returns with exit status 1 when app is not force
deleted either through argument or through prompt.
Exit Status 2 is used for Unknown Exceptions.
"""
class InvalidArgException(Exception):
""" This exception indicates invalid combination of arguments
passed to zdd"""
def __init__(self, msg):
super(InvalidArgException, self).__init__(msg)
self.error = msg
self.zdd_exit_status = 3
class MissingFieldException(Exception):
""" This exception indicates required fields which are missing
in JSON payload passed to zdd"""
def __init__(self, msg, field):
super(MissingFieldException, self).__init__(msg)
self.error = msg
self.missing_field = field
self.zdd_exit_status = 4
class MarathonLbEndpointException(Exception):
""" This excaption indicates issue with one of the marathonlb
endpoints specified as argument to Zdd"""
def __init__(self, msg, url, error):
super(MarathonLbEndpointException, self).__init__(msg)
self.msg = msg
self.url = url
self.error = error
self.zdd_exit_status = 5
class MarathonEndpointException(Exception):
""" This excaption indicates issue with marathon endpoint
specified as argument to Zdd"""
def __init__(self, msg, url, error):
super(MarathonEndpointException, self).__init__(msg)
self.msg = msg
self.url = url
self.error = error
self.zdd_exit_status = 6
class AppCreateException(Exception):
""" This exception indicates there was a error while creating the
new App and hence it was not created."""
def __init__(self, msg, url, payload, error):
super(AppCreateException, self).__init__(msg)
self.msg = msg
self.error = error
self.url = url
self.payload = payload
self.zdd_exit_status = 7
class AppDeleteException(Exception):
""" This exception indicates there was a error while deleting the
old App and hence it was not deleted """
def __init__(self, msg, url, appid, error):
super(AppDeleteException, self).__init__(msg)
self.msg = msg
self.error = error
self.url = url
self.zdd_exit_status = 8
class AppScaleException(Exception):
""" This exception indicated there was a error while either scaling up
new app or while scaling down old app"""
def __init__(self, msg, url, payload, error):
super(AppScaleException, self).__init__(msg)
self.msg = msg
self.error = error
self.url = url
self.payload = payload
self.zdd_exit_status = 9
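# Illustrative usage sketch (not part of the original module): a caller could
# map these exceptions onto the process exit statuses documented above, with
# status 2 reserved for unknown exceptions. `run_zdd` is a hypothetical entry
# point used only for illustration.
#
#     import sys
#     try:
#         run_zdd()
#     except (InvalidArgException, MissingFieldException,
#             MarathonLbEndpointException, MarathonEndpointException,
#             AppCreateException, AppDeleteException,
#             AppScaleException) as e:
#         sys.exit(e.zdd_exit_status)
#     except Exception:
#         sys.exit(2)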
| matt-deboer/marathon-lb | zdd_exceptions.py | Python | apache-2.0 | 2,721 | 0 |
from django.conf import settings
from django.contrib.auth.decorators import user_passes_test
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.shortcuts import render
from django.utils.encoding import iri_to_uri
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.cache import never_cache
from django.utils.encoding import force_text
from django.contrib import messages
from microsofttranslator import Translator, TranslateApiException
from autotranslate.conf import settings as autotranslate_settings
from polib import pofile
from autotranslate.poutil import find_pos, pagination_range, timestamp_with_timezone
from autotranslate.signals import entry_changed, post_save
from autotranslate.storage import get_storage
from autotranslate.access import can_translate, can_translate_language
import json
import re
import autotranslate
import unicodedata
import hashlib
import os
import six
@never_cache
@user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)
def home(request):
"""
Displays a list of messages to be translated
"""
def fix_nls(in_, out_):
"""Fixes submitted translations by filtering carriage returns and pairing
newlines at the begging and end of the translated string with the original
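# Illustrative examples (not in the original source):
#   fix_nls("\nabc\n", "xyz") -> "\nxyz\n"   (newlines restored from the original)
#   fix_nls("abc", "\nxyz\n") -> "xyz"       (stray newlines stripped)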
"""
if 0 == len(in_) or 0 == len(out_):
return out_
if "\r" in out_ and "\r" not in in_:
out_ = out_.replace("\r", '')
if "\n" == in_[0] and "\n" != out_[0]:
out_ = "\n" + out_
elif "\n" != in_[0] and "\n" == out_[0]:
out_ = out_.lstrip()
if 0 == len(out_):
pass
elif "\n" == in_[-1] and "\n" != out_[-1]:
out_ = out_ + "\n"
elif "\n" != in_[-1] and "\n" == out_[-1]:
out_ = out_.rstrip()
return out_
def _request_request(key, default=None):
if key in request.GET:
return request.GET.get(key)
elif key in request.POST:
return request.POST.get(key)
return default
storage = get_storage(request)
query = ''
if storage.has('autotranslate_i18n_fn'):
autotranslate_i18n_fn = storage.get('autotranslate_i18n_fn')
autotranslate_i18n_app = get_app_name(autotranslate_i18n_fn)
autotranslate_i18n_lang_code = storage.get('autotranslate_i18n_lang_code')
autotranslate_i18n_lang_bidi = autotranslate_i18n_lang_code.split('-')[0] in settings.LANGUAGES_BIDI
autotranslate_i18n_write = storage.get('autotranslate_i18n_write', True)
if autotranslate_i18n_write:
autotranslate_i18n_pofile = pofile(autotranslate_i18n_fn, wrapwidth=autotranslate_settings.POFILE_WRAP_WIDTH)
for entry in autotranslate_i18n_pofile:
entry.md5hash = hashlib.md5(
(six.text_type(entry.msgid) +
six.text_type(entry.msgstr) +
six.text_type(entry.msgctxt or "")).encode('utf8')
).hexdigest()
else:
autotranslate_i18n_pofile = storage.get('autotranslate_i18n_pofile')
if 'filter' in request.GET:
if request.GET.get('filter') in ('untranslated', 'translated', 'fuzzy', 'all'):
filter_ = request.GET.get('filter')
storage.set('autotranslate_i18n_filter', filter_)
return HttpResponseRedirect(reverse('autotranslate-home'))
autotranslate_i18n_filter = storage.get('autotranslate_i18n_filter', 'all')
if '_next' in request.POST:
rx = re.compile(r'^m_([0-9a-f]+)')
rx_plural = re.compile(r'^m_([0-9a-f]+)_([0-9]+)')
file_change = False
for key, value in request.POST.items():
md5hash = None
plural_id = None
if rx_plural.match(key):
md5hash = str(rx_plural.match(key).groups()[0])
# polib parses .po files into unicode strings, but
# doesn't bother to convert plural indexes to int,
# so we need unicode here.
plural_id = six.text_type(rx_plural.match(key).groups()[1])
# Above no longer true as of Polib 1.0.4
if plural_id and plural_id.isdigit():
plural_id = int(plural_id)
elif rx.match(key):
md5hash = str(rx.match(key).groups()[0])
if md5hash is not None:
entry = autotranslate_i18n_pofile.find(md5hash, 'md5hash')
# If someone did a makemessage, some entries might
# have been removed, so we need to check.
if entry:
old_msgstr = entry.msgstr
if plural_id is not None:
plural_string = fix_nls(entry.msgid_plural, value)
entry.msgstr_plural[plural_id] = plural_string
else:
entry.msgstr = fix_nls(entry.msgid, value)
is_fuzzy = bool(request.POST.get('f_%s' % md5hash, False))
old_fuzzy = 'fuzzy' in entry.flags
if old_fuzzy and not is_fuzzy:
entry.flags.remove('fuzzy')
elif not old_fuzzy and is_fuzzy:
entry.flags.append('fuzzy')
file_change = True
if old_msgstr != value or old_fuzzy != is_fuzzy:
entry_changed.send(sender=entry,
user=request.user,
old_msgstr=old_msgstr,
old_fuzzy=old_fuzzy,
pofile=autotranslate_i18n_fn,
language_code=autotranslate_i18n_lang_code,
)
else:
storage.set('autotranslate_last_save_error', True)
if file_change and autotranslate_i18n_write:
try:
autotranslate_i18n_pofile.metadata['Last-Translator'] = unicodedata.normalize('NFKD', u"%s %s <%s>" % (
getattr(request.user, 'first_name', 'Anonymous'),
getattr(request.user, 'last_name', 'User'),
getattr(request.user, 'email', 'anonymous@user.tld')
)).encode('ascii', 'ignore')
autotranslate_i18n_pofile.metadata['X-Translated-Using'] = u"dj-translate %s" % autotranslate.get_version(False)
autotranslate_i18n_pofile.metadata['PO-Revision-Date'] = timestamp_with_timezone()
except UnicodeDecodeError:
pass
try:
autotranslate_i18n_pofile.save()
po_filepath, ext = os.path.splitext(autotranslate_i18n_fn)
if autotranslate_settings.AUTO_COMPILE:
save_as_mo_filepath = po_filepath + '.mo'
autotranslate_i18n_pofile.save_as_mofile(save_as_mo_filepath)
post_save.send(sender=None, language_code=autotranslate_i18n_lang_code, request=request)
# Try auto-reloading via the WSGI daemon mode reload mechanism
if autotranslate_settings.WSGI_AUTO_RELOAD and \
'mod_wsgi.process_group' in request.environ and \
request.environ.get('mod_wsgi.process_group', None) and \
'SCRIPT_FILENAME' in request.environ and \
int(request.environ.get('mod_wsgi.script_reloading', '0')):
try:
os.utime(request.environ.get('SCRIPT_FILENAME'), None)
except OSError:
pass
# Try auto-reloading via uwsgi daemon reload mechanism
if autotranslate_settings.UWSGI_AUTO_RELOAD:
try:
import uwsgi
# pretty easy right?
uwsgi.reload()
except:
# we may not be running under uwsgi :P
pass
except Exception as e:
messages.error(request, e)
storage.set('autotranslate_i18n_write', False)
storage.set('autotranslate_i18n_pofile', autotranslate_i18n_pofile)
# Retain query arguments
query_arg = '?_next=1'
if _request_request('query', False):
query_arg += '&query=%s' % _request_request('query')
if 'page' in request.GET:
query_arg += '&page=%d&_next=1' % int(request.GET.get('page'))
return HttpResponseRedirect(reverse('autotranslate-home') + iri_to_uri(query_arg))
autotranslate_i18n_lang_code = storage.get('autotranslate_i18n_lang_code')
if _request_request('query', False) and _request_request('query', '').strip():
query = _request_request('query', '').strip()
rx = re.compile(re.escape(query), re.IGNORECASE)
paginator = Paginator([e_ for e_ in autotranslate_i18n_pofile if not e_.obsolete and rx.search(six.text_type(e_.msgstr) + six.text_type(e_.msgid) + u''.join([o[0] for o in e_.occurrences]))], autotranslate_settings.MESSAGES_PER_PAGE)
else:
if autotranslate_i18n_filter == 'untranslated':
paginator = Paginator(autotranslate_i18n_pofile.untranslated_entries(), autotranslate_settings.MESSAGES_PER_PAGE)
elif autotranslate_i18n_filter == 'translated':
paginator = Paginator(autotranslate_i18n_pofile.translated_entries(), autotranslate_settings.MESSAGES_PER_PAGE)
elif autotranslate_i18n_filter == 'fuzzy':
paginator = Paginator([e_ for e_ in autotranslate_i18n_pofile.fuzzy_entries() if not e_.obsolete], autotranslate_settings.MESSAGES_PER_PAGE)
else:
paginator = Paginator([e_ for e_ in autotranslate_i18n_pofile if not e_.obsolete], autotranslate_settings.MESSAGES_PER_PAGE)
if autotranslate_settings.ENABLE_REFLANG:
ref_lang = storage.get('autotranslate_i18n_ref_lang_code', 'msgid')
ref_pofile = None
if ref_lang != 'msgid':
ref_fn = re.sub('/locale/[a-z]{2}/', '/locale/%s/' % ref_lang, autotranslate_i18n_fn)
try:
ref_pofile = pofile(ref_fn)
except IOError:
# there's a syntax error in the PO file and polib can't open it. Let's just
# do nothing and thus display msgids.
pass
for o in paginator.object_list:
# default
o.ref_txt = o.msgid
if ref_pofile is not None:
ref_entry = ref_pofile.find(o.msgid)
if ref_entry is not None and ref_entry.msgstr:
o.ref_txt = ref_entry.msgstr
LANGUAGES = list(settings.LANGUAGES) + [('msgid', 'MSGID')]
else:
ref_lang = None
LANGUAGES = settings.LANGUAGES
page = 1
if 'page' in request.GET:
try:
get_page = int(request.GET.get('page'))
except ValueError:
page = 1 # fall back to page 1
else:
if 0 < get_page <= paginator.num_pages:
page = get_page
if '_next' in request.GET or '_next' in request.POST:
page += 1
if page > paginator.num_pages:
page = 1
query_arg = '?page=%d' % page
return HttpResponseRedirect(reverse('autotranslate-home') + iri_to_uri(query_arg))
autotranslate_messages = paginator.page(page).object_list
main_language = None
if autotranslate_settings.MAIN_LANGUAGE and autotranslate_settings.MAIN_LANGUAGE != autotranslate_i18n_lang_code:
for language in settings.LANGUAGES:
if language[0] == autotranslate_settings.MAIN_LANGUAGE:
main_language = _(language[1])
break
fl = ("/%s/" % autotranslate_settings.MAIN_LANGUAGE).join(autotranslate_i18n_fn.split("/%s/" % autotranslate_i18n_lang_code))
po = pofile(fl)
for message in autotranslate_messages:
message.main_lang = po.find(message.msgid).msgstr
needs_pagination = paginator.num_pages > 1
if needs_pagination:
if paginator.num_pages >= 10:
page_range = pagination_range(1, paginator.num_pages, page)
else:
page_range = range(1, 1 + paginator.num_pages)
try:
ADMIN_MEDIA_PREFIX = settings.ADMIN_MEDIA_PREFIX
ADMIN_IMAGE_DIR = ADMIN_MEDIA_PREFIX + 'img/admin/'
except AttributeError:
ADMIN_MEDIA_PREFIX = settings.STATIC_URL + 'admin/'
ADMIN_IMAGE_DIR = ADMIN_MEDIA_PREFIX + 'img/'
if storage.has('autotranslate_last_save_error'):
storage.delete('autotranslate_last_save_error')
autotranslate_last_save_error = True
else:
autotranslate_last_save_error = False
try:
autotranslate_i18n_lang_name = force_text(_(storage.get('autotranslate_i18n_lang_name')))
except:
autotranslate_i18n_lang_name = force_text(storage.get('autotranslate_i18n_lang_name'))
return render(request, 'autotranslate/pofile.html', dict(
version=autotranslate.get_version(True),
ADMIN_MEDIA_PREFIX=ADMIN_MEDIA_PREFIX,
ADMIN_IMAGE_DIR=ADMIN_IMAGE_DIR,
ENABLE_REFLANG=autotranslate_settings.ENABLE_REFLANG,
LANGUAGES=LANGUAGES,
autotranslate_settings=autotranslate_settings,
autotranslate_i18n_lang_name=autotranslate_i18n_lang_name,
autotranslate_i18n_lang_code=autotranslate_i18n_lang_code,
autotranslate_i18n_lang_bidi=autotranslate_i18n_lang_bidi,
autotranslate_last_save_error=autotranslate_last_save_error,
autotranslate_i18n_filter=autotranslate_i18n_filter,
autotranslate_i18n_write=autotranslate_i18n_write,
autotranslate_messages=autotranslate_messages,
page_range=needs_pagination and page_range,
needs_pagination=needs_pagination,
main_language=main_language,
autotranslate_i18n_app=autotranslate_i18n_app,
page=page,
query=query,
paginator=paginator,
autotranslate_i18n_pofile=autotranslate_i18n_pofile,
ref_lang=ref_lang,
))
else:
return list_languages(request, do_session_warn=True)
@never_cache
@user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)
def download_file(request):
import zipfile
storage = get_storage(request)
# original filename
autotranslate_i18n_fn = storage.get('autotranslate_i18n_fn', None)
# in-session modified catalog
autotranslate_i18n_pofile = storage.get('autotranslate_i18n_pofile', None)
# language code
autotranslate_i18n_lang_code = storage.get('autotranslate_i18n_lang_code', None)
if not autotranslate_i18n_lang_code or not autotranslate_i18n_pofile or not autotranslate_i18n_fn:
return HttpResponseRedirect(reverse('autotranslate-home'))
try:
if len(autotranslate_i18n_fn.split('/')) >= 5:
offered_fn = '_'.join(autotranslate_i18n_fn.split('/')[-5:])
else:
offered_fn = autotranslate_i18n_fn.split('/')[-1]
po_fn = str(autotranslate_i18n_fn.split('/')[-1])
mo_fn = str(po_fn.replace('.po', '.mo')) # not so smart, huh
zipdata = six.BytesIO()
zipf = zipfile.ZipFile(zipdata, mode="w")
zipf.writestr(po_fn, six.text_type(autotranslate_i18n_pofile).encode("utf8"))
zipf.writestr(mo_fn, autotranslate_i18n_pofile.to_binary())
zipf.close()
zipdata.seek(0)
response = HttpResponse(zipdata.read())
response['Content-Disposition'] = 'attachment; filename=%s.%s.zip' % (offered_fn, autotranslate_i18n_lang_code)
response['Content-Type'] = 'application/x-zip'
return response
except Exception:
return HttpResponseRedirect(reverse('autotranslate-home'))
@never_cache
@user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)
def list_languages(request, do_session_warn=False):
"""
Lists the languages for the current project, the gettext catalog files
that can be translated and their translation progress
"""
storage = get_storage(request)
languages = []
if 'filter' in request.GET:
if request.GET.get('filter') in ('project', 'third-party', 'django', 'all'):
filter_ = request.GET.get('filter')
storage.set('autotranslate_i18n_catalog_filter', filter_)
return HttpResponseRedirect(reverse('autotranslate-pick-file'))
autotranslate_i18n_catalog_filter = storage.get('autotranslate_i18n_catalog_filter', 'project')
third_party_apps = autotranslate_i18n_catalog_filter in ('all', 'third-party')
django_apps = autotranslate_i18n_catalog_filter in ('all', 'django')
project_apps = autotranslate_i18n_catalog_filter in ('all', 'project')
has_pos = False
for language in settings.LANGUAGES:
if not can_translate_language(request.user, language[0]):
continue
pos = find_pos(language[0], project_apps=project_apps, django_apps=django_apps, third_party_apps=third_party_apps)
has_pos = has_pos or len(pos)
languages.append(
(
language[0],
_(language[1]),
sorted([(get_app_name(l), os.path.realpath(l), pofile(l)) for l in pos], key=lambda app: app[0]),
)
)
try:
ADMIN_MEDIA_PREFIX = settings.ADMIN_MEDIA_PREFIX
except AttributeError:
ADMIN_MEDIA_PREFIX = settings.STATIC_URL + 'admin/'
do_session_warn = do_session_warn and 'SessionAutotranslateStorage' in autotranslate_settings.STORAGE_CLASS and 'signed_cookies' in settings.SESSION_ENGINE
return render(request, 'autotranslate/languages.html', dict(
version=autotranslate.get_version(True),
ADMIN_MEDIA_PREFIX=ADMIN_MEDIA_PREFIX,
do_session_warn=do_session_warn,
languages=languages,
has_pos=has_pos,
autotranslate_i18n_catalog_filter=autotranslate_i18n_catalog_filter
))
def get_app_name(path):
app = path.split("/locale")[0].split("/")[-1]
return app
@never_cache
@user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)
def lang_sel(request, langid, idx):
"""
Selects a file to be translated
"""
storage = get_storage(request)
if langid not in [l[0] for l in settings.LANGUAGES] or not can_translate_language(request.user, langid):
raise Http404
else:
autotranslate_i18n_catalog_filter = storage.get('autotranslate_i18n_catalog_filter', 'project')
third_party_apps = autotranslate_i18n_catalog_filter in ('all', 'third-party')
django_apps = autotranslate_i18n_catalog_filter in ('all', 'django')
project_apps = autotranslate_i18n_catalog_filter in ('all', 'project')
file_ = sorted(find_pos(langid, project_apps=project_apps, django_apps=django_apps, third_party_apps=third_party_apps), key=get_app_name)[int(idx)]
storage.set('autotranslate_i18n_lang_code', langid)
storage.set('autotranslate_i18n_lang_name', six.text_type([l[1] for l in settings.LANGUAGES if l[0] == langid][0]))
storage.set('autotranslate_i18n_fn', file_)
po = pofile(file_)
for entry in po:
entry.md5hash = hashlib.new(
'md5',
(six.text_type(entry.msgid) +
six.text_type(entry.msgstr) +
six.text_type(entry.msgctxt or "")).encode('utf8')
).hexdigest()
storage.set('autotranslate_i18n_pofile', po)
try:
os.utime(file_, None)
storage.set('autotranslate_i18n_write', True)
except OSError:
storage.set('autotranslate_i18n_write', False)
return HttpResponseRedirect(reverse('autotranslate-home'))
def ref_sel(request, langid):
storage = get_storage(request)
ALLOWED_LANGUAGES = [l[0] for l in settings.LANGUAGES] + ['msgid']
if langid not in ALLOWED_LANGUAGES:
raise Http404
storage.set('autotranslate_i18n_ref_lang_code', langid)
return HttpResponseRedirect(reverse('autotranslate-home'))
ref_sel = never_cache(ref_sel)
ref_sel = user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)(ref_sel)
@user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)
def translate_text(request):
language_from = request.GET.get('from', None)
language_to = request.GET.get('to', None)
text = request.GET.get('text', None)
if language_from == language_to:
data = {'success': True, 'translation': text}
else:
# run the translation:
AZURE_CLIENT_ID = getattr(settings, 'AZURE_CLIENT_ID', None)
AZURE_CLIENT_SECRET = getattr(settings, 'AZURE_CLIENT_SECRET', None)
translator = Translator(AZURE_CLIENT_ID, AZURE_CLIENT_SECRET)
try:
translated_text = translator.translate(text, language_to, language_from)
data = {'success': True, 'translation': translated_text}
except TranslateApiException as e:
data = {'success': False, 'error': "Translation API Exception: {0}".format(e.message)}
return HttpResponse(json.dumps(data), content_type='application/json')
| dadasoz/dj-translate | autotranslate/views.py | Python | mit | 22,363 | 0.002862 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""contrib module containing volatile or experimental code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Add projects here, they will show up under tf.contrib.
from tensorflow.contrib import bayesflow
from tensorflow.contrib import cloud
from tensorflow.contrib import compiler
from tensorflow.contrib import copy_graph
from tensorflow.contrib import crf
from tensorflow.contrib import cudnn_rnn
from tensorflow.contrib import data
from tensorflow.contrib import deprecated
from tensorflow.contrib import distributions
from tensorflow.contrib import factorization
from tensorflow.contrib import framework
from tensorflow.contrib import graph_editor
from tensorflow.contrib import grid_rnn
from tensorflow.contrib import image
from tensorflow.contrib import input_pipeline
from tensorflow.contrib import integrate
from tensorflow.contrib import keras
from tensorflow.contrib import kernel_methods
from tensorflow.contrib import labeled_tensor
from tensorflow.contrib import layers
from tensorflow.contrib import learn
from tensorflow.contrib import legacy_seq2seq
from tensorflow.contrib import linalg
from tensorflow.contrib import linear_optimizer
from tensorflow.contrib import lookup
from tensorflow.contrib import losses
from tensorflow.contrib import memory_stats
from tensorflow.contrib import metrics
from tensorflow.contrib import nccl
from tensorflow.contrib import nn
from tensorflow.contrib import opt
from tensorflow.contrib import quantization
from tensorflow.contrib import rnn
from tensorflow.contrib import saved_model
from tensorflow.contrib import seq2seq
from tensorflow.contrib import signal
from tensorflow.contrib import slim
from tensorflow.contrib import solvers
from tensorflow.contrib import sparsemax
from tensorflow.contrib import staging
from tensorflow.contrib import stat_summarizer
from tensorflow.contrib import stateless
from tensorflow.contrib import tensor_forest
from tensorflow.contrib import tensorboard
from tensorflow.contrib import testing
from tensorflow.contrib import tfprof
from tensorflow.contrib import training
from tensorflow.contrib import util
from tensorflow.contrib.ndlstm import python as ndlstm
from tensorflow.contrib.specs import python as specs
from tensorflow.python.util.lazy_loader import LazyLoader
ffmpeg = LazyLoader("ffmpeg", globals(),
"tensorflow.contrib.ffmpeg")
del LazyLoader
del absolute_import
del division
del print_function
| sjperkins/tensorflow | tensorflow/contrib/__init__.py | Python | apache-2.0 | 3,181 | 0 |
import json
class AbstractionUtility(object):
@staticmethod
def read_json(json_file):
# read json data
with open(json_file, 'r') as f:
data = json.load(f)
# change json key string to int
converted_data = {}
for key, value in data.iteritems():
converted_data[int(key)] = value
return converted_data
@staticmethod
def write_perabstraction(final_abstraction, log_file, perabstraction_file):
# read log file
with open(log_file, 'r') as f:
logs = f.readlines()
# write logs per abstraction to file
f_perabstraction = open(perabstraction_file, 'w')
for abstraction_id, abstraction in final_abstraction.iteritems():
f_perabstraction.write('Abstraction #' + str(abstraction_id) + ' ' + abstraction['abstraction'] + '\n')
for line_id in abstraction['original_id']:
f_perabstraction.write(str(line_id) + ' ' + logs[line_id])
f_perabstraction.write('\n')
f_perabstraction.close()
@staticmethod
def write_perline(final_abstraction, log_file, perline_file):
# read log file
with open(log_file, 'r') as f:
logs = f.readlines()
# get line id and abstraction id
abstraction_label = {}
for abstraction_id, abstraction in final_abstraction.iteritems():
for line_id in abstraction['original_id']:
abstraction_label[line_id] = abstraction_id
# write log per line with abstraction id
f_perline = open(perline_file, 'w')
for line_id, log in enumerate(logs):
f_perline.write(str(abstraction_label[line_id]) + '; ' + log)
f_perline.close()
@staticmethod
def get_abstractionid_from_groundtruth(logid_abstractionid_file, abstractions):
# read ground truth
abstraction_groundtruth = AbstractionUtility.read_json(logid_abstractionid_file)
groundtruth_length = len(abstraction_groundtruth.keys())
abstractions_edited_id = {}
for abstraction_id, abstraction in abstractions.iteritems():
# if abstraction exist in ground truth, get id from dictionary key
if abstraction['abstraction'] in abstraction_groundtruth.values():
new_id = \
abstraction_groundtruth.keys()[abstraction_groundtruth.values().index(abstraction['abstraction'])]
# if not exist, new id is dictionary length + 1
else:
new_id = groundtruth_length
groundtruth_length += 1
abstractions_edited_id[new_id] = abstraction
return abstractions_edited_id
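# Illustrative usage sketch (not part of the original class); the file names
# below are hypothetical:
#
#     abstractions = ...  # e.g. output of a log-abstraction / clustering step
#     abstractions = AbstractionUtility.get_abstractionid_from_groundtruth(
#         'logid_abstractionid.json', abstractions)
#     AbstractionUtility.write_perabstraction(
#         abstractions, 'auth.log', 'auth.perabstraction.txt')
#     AbstractionUtility.write_perline(
#         abstractions, 'auth.log', 'auth.perline.txt')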
| studiawan/pygraphc | pygraphc/abstraction/AbstractionUtility.py | Python | mit | 2,712 | 0.001475 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from urbansim.abstract_variables.abstract_travel_time_variable_for_non_interaction_dataset import abstract_travel_time_variable_for_non_interaction_dataset
class SSS_travel_time_to_DDD(abstract_travel_time_variable_for_non_interaction_dataset):
"""Travel time by mode SSS to the zone whose ID is the DDD.
"""
default_value = 999
origin_zone_id = 'zone.zone_id'
def __init__(self, mode, number):
self.travel_data_attribute = "travel_data.%s" % mode
self.destination_zone_id = "destination_zone_id=%s+0*zone.zone_id" % number
abstract_travel_time_variable_for_non_interaction_dataset.__init__(self)
from opus_core.tests import opus_unittest
from numpy import array, arange
from opus_core.tests.utils.variable_tester import VariableTester
class Tests(opus_unittest.OpusTestCase):
def do(self,sss, ddd, should_be):
tester = VariableTester(
__file__,
package_order=['urbansim'],
test_data={
"zone":{
"zone_id":array([1,3])},
"travel_data":{
"from_zone_id":array([3,3,1,1]),
"to_zone_id":array([1,3,1,3]),
sss:array([1.1, 2.2, 3.3, 4.4])}
}
)
instance_name = "sanfrancisco.zone.%s_travel_time_to_%s" % (sss, ddd)
tester.test_is_close_for_family_variable(self, should_be, instance_name)
def test_to_1(self):
should_be = array([3.3, 1.1])
self.do('hwy', 1, should_be)
def test_to_3(self):
should_be = array([4.4, 2.2])
self.do('bart', 3, should_be)
if __name__=='__main__':
opus_unittest.main()
| christianurich/VIBe2UrbanSim | 3rdparty/opus/src/sanfrancisco/zone/SSS_travel_time_to_DDD.py | Python | gpl-2.0 | 1,871 | 0.017103 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_low
----------------------------------
Tests for the `SetType` low() method
"""
import unittest
from finitio.types import SetType, BuiltinType, Type
builtin_string = BuiltinType(str)
class TestSetTypeLow(unittest.TestCase):
class HighType(Type):
def low(self):
return builtin_string
subject = SetType(HighType(""))
def test_equals_itself(self):
expected = SetType(builtin_string)
self.assertEqual(self.subject.low(), expected)
if __name__ == '__main__':
import sys
sys.exit(unittest.main())
| llambeau/finitio.py | tests/type/set_type/test_low.py | Python | isc | 619 | 0.001616 |
from djpcms import sites
if sites.settings.CMS_ORM == 'django':
from djpcms.core.cmsmodels._django import *
elif sites.settings.CMS_ORM == 'stdnet':
from djpcms.core.cmsmodels._stdnet import *
else:
raise NotImplementedError('Object Relational Mapper {0} is not available for CMS models'.format(sites.settings.CMS_ORM))
| strogo/djpcms | djpcms/models.py | Python | bsd-3-clause | 354 | 0.019774 |
import time
import arcpy
from arcpy import env
from arcpy.sa import *
# Set environment settings
env.workspace = "" # set your workspace
arcpy.env.overwriteOutput = True
# Check out the ArcGIS Spatial Analyst extension license
arcpy.CheckOutExtension("Spatial")
tic = time.clock()
a_file = "random_a.tif"
b_file = "random_b.tif"
c_file = "random_c.tif"
out_file = "output.tif"
a = Raster(a_file)
b = Raster(b_file)
c = Raster(c_file)
out = 3 * a + b * c
out.save(out_file)
| ahhz/raster | benchmarks/benchmark_3_layers_arcpy.py | Python | mit | 478 | 0.004184 |
# -*- coding: utf-8 -*-
"""
pygments.styles.vs
~~~~~~~~~~~~~~~~~~
Simple style with MS Visual Studio colors.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Operator, Generic
class VisualStudioStyle(Style):
background_color = "#ffffff"
default_style = ""
styles = {
Comment: "#008000",
Comment.Preproc: "#0000ff",
Keyword: "#0000ff",
Operator.Word: "#0000ff",
Keyword.Type: "#2b91af",
Name.Class: "#2b91af",
String: "#a31515",
Generic.Heading: "bold",
Generic.Subheading: "bold",
Generic.Emph: "italic",
Generic.Strong: "bold",
Generic.Prompt: "bold",
Error: "border:#FF0000"
}
| emineKoc/WiseWit | wisewit_front_end/node_modules/pygmentize-bundled/vendor/pygments/pygments/styles/vs.py | Python | gpl-3.0 | 1,073 | 0 |
# -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
from org.o3project.odenos.core.component.network.flow.basic.flow_action\
import FlowAction
import unittest
class FlowActionTest(unittest.TestCase):
Type = "FlowActionOutput"
def setUp(self):
self.target = FlowAction(self.Type)
def tearDown(self):
self.target = None
def test_constructor(self):
self.assertEqual(self.target._body[self.target.TYPE], self.Type)
def test_type(self):
self.assertEqual(self.target.type, self.Type)
if __name__ == '__main__':
unittest.main()
| haizawa/odenos | src/test/python/org/o3project/odenos/core/component/network/flow/basic/test_flow_action.py | Python | apache-2.0 | 1,561 | 0.001281 |
# -*- coding: utf-8 -*-
# © 2013-Today Odoo SA
# © 2016 Chafique DELLI @ Akretion
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import api, models, _, fields
from openerp.exceptions import Warning as UserError
class PurchaseOrder(models.Model):
_inherit = 'purchase.order'
invoice_method = fields.Selection(
selection_add=[('intercompany', 'Based on intercompany invoice')])
@api.multi
def wkf_confirm_order(self):
""" Generate inter company sale order base on conditions."""
res = super(PurchaseOrder, self).wkf_confirm_order()
for purchase_order in self:
# get the company from partner then trigger action of
# intercompany relation
dest_company = self.env['res.company']._find_company_from_partner(
purchase_order.partner_id.id)
if dest_company:
purchase_order.sudo().\
with_context(force_company=dest_company.id).\
_inter_company_create_sale_order(dest_company.id)
return res
@api.multi
def _get_user_domain(self, dest_company):
self.ensure_one()
group_purchase_user = self.env.ref('purchase.group_purchase_user')
return [
('id', '!=', 1),
('company_id', '=', dest_company.id),
('id', 'in', group_purchase_user.users.ids),
]
@api.multi
def _check_intercompany_product(self, dest_company):
domain = self._get_user_domain(dest_company)
dest_user = self.env['res.users'].search(domain, limit=1)
if dest_user:
for purchase_line in self.order_line:
try:
purchase_line.product_id.sudo(dest_user).read(
['default_code'])
except:
raise UserError(_(
"You cannot create SO from PO because product '%s' "
"is not intercompany") % purchase_line.product_id.name)
@api.multi
def _inter_company_create_sale_order(self, dest_company_id):
""" Create a Sale Order from the current PO (self)
Note: this method should be called with sudo and the proper
destination company in the context
:param company : the company of the created SO
:rtype company : res.company record
"""
self.ensure_one()
dest_company = self.env['res.company'].browse(dest_company_id)
# check intercompany product
self._check_intercompany_product(dest_company)
# Accessing to selling partner with selling user, so data like
# property_account_position can be retrieved
company_partner = self.company_id.partner_id
# check pricelist currency should be same with PO/SO document
if self.pricelist_id.currency_id.id != (
company_partner.property_product_pricelist.currency_id.id):
raise UserError(_(
'You cannot create SO from PO because '
'sale price list currency is different from '
'purchase price list currency.'))
# create the SO and generate its lines from the PO lines
sale_order_data = self._prepare_sale_order_data(
self.name, company_partner, dest_company,
self.dest_address_id and self.dest_address_id.id or False)
sale_order = self.env['sale.order'].create(sale_order_data)
for purchase_line in self.order_line:
sale_line_data = self._prepare_sale_order_line_data(
purchase_line, dest_company, sale_order)
self.env['sale.order.line'].create(sale_line_data)
# write supplier reference field on PO
if not self.partner_ref:
self.partner_ref = sale_order.name
# write invoice method field on PO
if self.invoice_method != 'intercompany':
self.invoice_method = 'intercompany'
# Validation of sale order
if dest_company.sale_auto_validation:
sale_order.signal_workflow('order_confirm')
@api.multi
def _prepare_sale_order_data(self, name, partner, dest_company,
direct_delivery_address):
""" Generate the Sale Order values from the PO
:param name : the origin client reference
:rtype name : string
:param partner : the partner representing the company
:rtype partner : res.partner record
:param company : the company of the created SO
:rtype company : res.company record
:param direct_delivery_address : the address of the SO
:rtype direct_delivery_address : res.partner record
"""
self.ensure_one()
partner_addr = partner.address_get(['default',
'invoice',
'delivery',
'contact'])
# find location and warehouse, pick warehouse from company object
warehouse = (
dest_company.warehouse_id and
dest_company.warehouse_id.company_id.id == dest_company.id and
dest_company.warehouse_id or False)
if not warehouse:
raise UserError(_(
'Configure correct warehouse for company (%s) in '
'Menu: Settings/companies/companies' % (dest_company.name)))
partner_shipping_id = (
self.picking_type_id.warehouse_id and
self.picking_type_id.warehouse_id.partner_id and
self.picking_type_id.warehouse_id.partner_id.id or False)
return {
'name': (
self.env['ir.sequence'].next_by_code('sale.order') or '/'
),
'company_id': dest_company.id,
'client_order_ref': name,
'partner_id': partner.id,
'warehouse_id': warehouse.id,
'pricelist_id': partner.property_product_pricelist.id,
'partner_invoice_id': partner_addr['invoice'],
'date_order': self.date_order,
'fiscal_position': (partner.property_account_position and
partner.property_account_position.id or False),
'user_id': False,
'auto_purchase_order_id': self.id,
'partner_shipping_id': (direct_delivery_address or
partner_shipping_id or
partner_addr['delivery']),
'note': self.notes
}
@api.model
def _prepare_sale_order_line_data(
self, purchase_line, dest_company, sale_order):
""" Generate the Sale Order Line values from the PO line
            :param purchase_line : the origin Purchase Order Line
            :rtype purchase_line : purchase.order.line record
            :param dest_company : the company of the created SO
            :rtype dest_company : res.company record
            :param sale_order : the Sale Order
            :rtype sale_order : sale.order record
"""
context = self._context.copy()
context['company_id'] = dest_company.id
# get sale line data from product onchange
sale_line_obj = self.env['sale.order.line'].browse(False)
sale_line_data = sale_line_obj.with_context(
context).product_id_change_with_wh(
pricelist=sale_order.pricelist_id.id,
product=(purchase_line.product_id and
purchase_line.product_id.id or False),
qty=purchase_line.product_qty,
uom=(purchase_line.product_id and
purchase_line.product_id.uom_id.id or False),
qty_uos=0,
uos=False,
name='',
partner_id=sale_order.partner_id.id,
lang=False,
update_tax=True,
date_order=sale_order.date_order,
packaging=False,
fiscal_position=sale_order.fiscal_position.id,
flag=False,
warehouse_id=sale_order.warehouse_id.id)
sale_line_data['value']['product_id'] = (
purchase_line.product_id and purchase_line.product_id.id or
False)
sale_line_data['value']['order_id'] = sale_order.id
sale_line_data['value']['delay'] = (purchase_line.product_id and
purchase_line.product_id.
sale_delay or 0.0)
sale_line_data['value']['company_id'] = dest_company.id
sale_line_data['value']['product_uom_qty'] = (purchase_line.
product_qty)
sale_line_data['value']['product_uom'] = (
purchase_line.product_id and
purchase_line.product_id.uom_id.id or
purchase_line.product_uom.id)
if sale_line_data['value'].get('tax_id'):
sale_line_data['value']['tax_id'] = ([
[6, 0, sale_line_data['value']['tax_id']]])
sale_line_data['value']['auto_purchase_line_id'] = purchase_line.id
return sale_line_data['value']
@api.multi
def action_cancel(self):
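        """Cancel the linked intercompany Sale Orders before cancelling the
        PO, then reset the intercompany markers set at creation time."""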
for purchase in self:
for sale_order in self.env['sale.order'].sudo().search([
('auto_purchase_order_id', '=', purchase.id)]):
sale_order.action_cancel()
res = super(PurchaseOrder, purchase).action_cancel()
if purchase.invoice_method == 'intercompany':
purchase.invoice_method = 'order'
if purchase.partner_ref:
purchase.partner_ref = ''
return res
|
acsone/multi-company
|
purchase_sale_inter_company/models/purchase_order.py
|
Python
|
agpl-3.0
| 9,730
| 0.000103
|
#from: http://stackoverflow.com/questions/10361820/simple-twisted-echo-client
#and
#from: http://stackoverflow.com/questions/510357/python-read-a-single-character-from-the-user
from twisted.internet.threads import deferToThread as _deferToThread
from twisted.internet import reactor
class ConsoleInput(object):
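    """
    Polls single keypresses on a background thread (via deferToThread) and
    calls the supplied stop callback on 'q' or the reconnect callback on 'r',
    without blocking the Twisted reactor.
    """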
def __init__(self, stopFunction, reconnectFunction):
self.stopFunction = stopFunction
self.reconnectFunction = reconnectFunction
def start(self):
self.terminator = 'q'
self.restart = 'r'
self.getKey = _Getch()
self.startReceiving()
def startReceiving(self, s = ''):
if s == self.terminator:
self.stopFunction()
elif s == self.restart:
self.reconnectFunction()
_deferToThread(self.getKey).addCallback(self.startReceiving)
else:
_deferToThread(self.getKey).addCallback(self.startReceiving)
class _Getch:
"""
Gets a single character from standard input. Does not echo to the screen.
"""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
self.impl = _GetchUnix()
def __call__(self): return self.impl()
class _GetchUnix:
def __init__(self):
import tty, sys
def __call__(self):
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows:
def __init__(self):
import msvcrt
def __call__(self):
import msvcrt
return msvcrt.getch()
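# Minimal usage sketch (illustrative only; the stop/reconnect callables below
# are hypothetical placeholders, not part of df_everywhere):
if __name__ == '__main__':
    def _stop():
        # 'q' pressed: shut the reactor down.
        reactor.stop()
    def _reconnect():
        # 'r' pressed: a real client would re-establish its connection here.
        pass
    keys = ConsoleInput(_stop, _reconnect)
    reactor.callWhenRunning(keys.start)
    reactor.run()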
|
tpainter/df_everywhere
|
df_everywhere/util/consoleInput.py
|
Python
|
gpl-2.0
| 1,794
| 0.007246
|
# -*- coding: utf-8 -*-
import mock
from rest_framework import serializers
from waffle.testutils import override_switch
from olympia.amo.tests import (
BaseTestCase, addon_factory, collection_factory, TestCase, user_factory)
from olympia.bandwagon.models import CollectionAddon
from olympia.bandwagon.serializers import (
CollectionAddonSerializer, CollectionAkismetSpamValidator,
CollectionSerializer, CollectionWithAddonsSerializer)
from olympia.lib.akismet.models import AkismetReport
class TestCollectionAkismetSpamValidator(TestCase):
def setUp(self):
self.validator = CollectionAkismetSpamValidator(
('name', 'description'))
serializer = mock.Mock()
serializer.instance = collection_factory(
name='name', description='Big Cheese')
request = mock.Mock()
request.user = user_factory()
request.META = {}
serializer.context = {'request': request}
self.validator.set_context(serializer)
self.data = {
'name': {'en-US': 'Collection', 'fr': u'Collection'},
'description': {'en-US': 'Big Cheese', 'fr': u'une gránd fromagé'},
'random_data': {'en-US': 'to ignore'},
'slug': 'cheese'}
@override_switch('akismet-spam-check', active=False)
@mock.patch('olympia.lib.akismet.models.AkismetReport.comment_check')
def test_waffle_off(self, comment_check_mock):
self.validator(self.data)
# No Akismet checks
assert AkismetReport.objects.count() == 0
comment_check_mock.assert_not_called()
@override_switch('akismet-spam-check', active=True)
@mock.patch('olympia.lib.akismet.models.AkismetReport.comment_check')
def test_ham(self, comment_check_mock):
comment_check_mock.return_value = AkismetReport.HAM
self.validator(self.data)
# Akismet check is there
assert AkismetReport.objects.count() == 2
name_report = AkismetReport.objects.first()
# name will only be there once because it's duplicated.
assert name_report.comment_type == 'collection-name'
assert name_report.comment == self.data['name']['en-US']
summary_report = AkismetReport.objects.last()
# en-US description won't be there because it's an existing description
assert summary_report.comment_type == 'collection-description'
assert summary_report.comment == self.data['description']['fr']
assert comment_check_mock.call_count == 2
@override_switch('akismet-spam-check', active=True)
@mock.patch('olympia.lib.akismet.models.AkismetReport.comment_check')
def test_spam(self, comment_check_mock):
comment_check_mock.return_value = AkismetReport.MAYBE_SPAM
with self.assertRaises(serializers.ValidationError):
self.validator(self.data)
# Akismet check is there
assert AkismetReport.objects.count() == 2
name_report = AkismetReport.objects.first()
# name will only be there once because it's duplicated.
assert name_report.comment_type == 'collection-name'
assert name_report.comment == self.data['name']['en-US']
summary_report = AkismetReport.objects.last()
# en-US description won't be there because it's an existing description
assert summary_report.comment_type == 'collection-description'
assert summary_report.comment == self.data['description']['fr']
        # After the first comment_check was spam, additional ones are skipped.
assert comment_check_mock.call_count == 1
class TestCollectionSerializer(BaseTestCase):
serializer = CollectionSerializer
def setUp(self):
super(TestCollectionSerializer, self).setUp()
self.user = user_factory()
self.collection = collection_factory()
self.collection.update(author=self.user)
def serialize(self):
return self.serializer(self.collection).data
def test_basic(self):
data = self.serialize()
assert data['id'] == self.collection.id
assert data['uuid'] == self.collection.uuid
assert data['name'] == {'en-US': self.collection.name}
assert data['description'] == {'en-US': self.collection.description}
assert data['url'] == self.collection.get_abs_url()
assert data['addon_count'] == self.collection.addon_count
assert data['modified'] == (
self.collection.modified.replace(microsecond=0).isoformat() + 'Z')
assert data['author']['id'] == self.user.id
assert data['slug'] == self.collection.slug
assert data['public'] == self.collection.listed
assert data['default_locale'] == self.collection.default_locale
class TestCollectionAddonSerializer(BaseTestCase):
def setUp(self):
self.collection = collection_factory()
self.addon = addon_factory()
self.collection.add_addon(self.addon)
self.item = CollectionAddon.objects.get(addon=self.addon,
collection=self.collection)
self.item.comments = u'Dis is nice'
self.item.save()
def serialize(self):
return CollectionAddonSerializer(self.item).data
def test_basic(self):
data = self.serialize()
assert data['addon']['id'] == self.collection.addons.all()[0].id
assert data['notes'] == {'en-US': self.item.comments}
class TestCollectionWithAddonsSerializer(TestCollectionSerializer):
serializer = CollectionWithAddonsSerializer
def setUp(self):
super(TestCollectionWithAddonsSerializer, self).setUp()
self.addon = addon_factory()
self.collection.add_addon(self.addon)
def serialize(self):
mock_viewset = mock.MagicMock()
collection_addons = CollectionAddon.objects.filter(
addon=self.addon, collection=self.collection)
mock_viewset.get_addons_queryset.return_value = collection_addons
return self.serializer(
self.collection, context={'view': mock_viewset}).data
def test_basic(self):
super(TestCollectionWithAddonsSerializer, self).test_basic()
collection_addon = CollectionAddon.objects.get(
addon=self.addon, collection=self.collection)
data = self.serialize()
assert data['addons'] == [
CollectionAddonSerializer(collection_addon).data
]
assert data['addons'][0]['addon']['id'] == self.addon.id
|
atiqueahmedziad/addons-server
|
src/olympia/bandwagon/tests/test_serializers.py
|
Python
|
bsd-3-clause
| 6,482
| 0
|
from django.conf.urls import include, url
urlpatterns = [
url(r'^avatar/', include('avatar.urls')),
]
|
MachineandMagic/django-avatar
|
tests/urls.py
|
Python
|
bsd-3-clause
| 108
| 0
|
import setuptools
setuptools.setup(
name="sirius",
version="0.5",
author="",
author_email="simon.pintarelli@cscs.ch",
description="pySIRIUS",
url="https://github.com/electronic_structure/SIRIUS",
packages=['sirius'],
install_requires=['mpi4py', 'voluptuous', 'numpy', 'h5py', 'scipy', 'PyYAML'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
electronic-structure/sirius
|
python_module/setup.py
|
Python
|
bsd-2-clause
| 499
| 0.002004
|
from csacompendium.locations.models import Precipitation
from csacompendium.utils.pagination import APILimitOffsetPagination
from csacompendium.utils.permissions import IsOwnerOrReadOnly
from csacompendium.utils.viewsutils import DetailViewUpdateDelete, CreateAPIViewHook
from rest_framework.filters import DjangoFilterBackend
from rest_framework.generics import ListAPIView
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from .filters import PrecipitationListFilter
from csacompendium.locations.api.precipitation.precipitationserializers import precipitation_serializers
def precipitation_views():
"""
Precipitation views
:return: All precipitation views
    :rtype: dict
"""
precipitation_serializer = precipitation_serializers()
class PrecipitationCreateAPIView(CreateAPIViewHook):
"""
Creates a single record.
"""
queryset = Precipitation.objects.all()
serializer_class = precipitation_serializer['PrecipitationDetailSerializer']
permission_classes = [IsAuthenticated]
class PrecipitationListAPIView(ListAPIView):
"""
        API list view. Gets all records.
"""
queryset = Precipitation.objects.all()
serializer_class = precipitation_serializer['PrecipitationListSerializer']
filter_backends = (DjangoFilterBackend,)
filter_class = PrecipitationListFilter
pagination_class = APILimitOffsetPagination
class PrecipitationDetailAPIView(DetailViewUpdateDelete):
"""
        Retrieves, updates or deletes a single record.
"""
queryset = Precipitation.objects.all()
serializer_class = precipitation_serializer['PrecipitationDetailSerializer']
permission_classes = [IsAuthenticated, IsAdminUser]
lookup_field = 'pk'
return {
'PrecipitationListAPIView': PrecipitationListAPIView,
'PrecipitationDetailAPIView': PrecipitationDetailAPIView,
'PrecipitationCreateAPIView': PrecipitationCreateAPIView
}
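# Illustrative wiring only (URL names and patterns below are assumptions, not
# part of this module); the dict returned above would typically be consumed
# from a urls.py roughly like:
#     from django.conf.urls import url
#     views = precipitation_views()
#     urlpatterns = [
#         url(r'^$', views['PrecipitationListAPIView'].as_view(),
#             name='precipitation_list'),
#         url(r'^create/$', views['PrecipitationCreateAPIView'].as_view(),
#             name='precipitation_create'),
#         url(r'^(?P<pk>[\w-]+)/$',
#             views['PrecipitationDetailAPIView'].as_view(),
#             name='precipitation_detail'),
#     ]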
|
nkoech/csacompendium
|
csacompendium/locations/api/precipitation/precipitationviews.py
|
Python
|
mit
| 2,016
| 0.002976
|
#!/usr/bin/env python
import os.path
import sys
import argparse
from poretools.Fast5File import *
#logger
import logging
logger = logging.getLogger('poreminion')
# poreminion imports
import poreminion.version
def run_subtool(parser, args):
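    """Import only the submodule implementing the chosen sub-command, then
    delegate to its run() function."""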
if args.command == 'uncalled':
import findUncalled as submodule
elif args.command == 'timetest':
import findTimeErrors as submodule
elif args.command == 'fragstats':
import fragstats as submodule
elif args.command == 'fragsummary':
import fragsummary as submodule
elif args.command == 'fragrobust':
import robust as submodule
elif args.command == 'nx':
import nX as submodule
elif args.command == 'pct2d':
import pct2D as submodule
elif args.command == 'has2d':
import has2D as submodule
elif args.command == 'numevents':
import numevents as submodule
elif args.command == 'events':
import get_events as submodule
elif args.command == 'staypos':
import staypos as submodule
elif args.command == 'info':
import info as submodule
elif args.command == 'g4' or args.command == 'regex':
import quadparsersuite as submodule
elif args.command == 'seqlen':
import seqlen as submodule
elif args.command == 'dataconc':
import dataconc as submodule
elif args.command == 'qualpos':
import qual_v_pos as submodule
elif args.command == 'kmer':
import kmer as submodule
elif args.command == 'kmerplot':
import kmerplot as submodule
elif args.command == 'kmerdiff':
import kmerdiff as submodule
## elif args.command == 'align':
## import align as submodule
elif args.command == 'winner':
import winner as submodule
elif args.command == 'qualdist':
import qualdist as submodule
# run the chosen submodule.
submodule.run(parser, args)
class ArgumentParserWithDefaults(argparse.ArgumentParser):
def __init__(self, *args, **kwargs):
super(ArgumentParserWithDefaults, self).__init__(*args, **kwargs)
self.add_argument("-q", "--quiet", help="Do not output warnings to stderr",
action="store_true",
dest="quiet")
def main():
logging.basicConfig()
#########################################
# create the top-level parser
#########################################
parser = argparse.ArgumentParser(prog='poreminion', description=""" Poreminion - additional tools for analyzing nanopore sequencing data.""", formatter_class=argparse.RawTextHelpFormatter)#ArgumentDefaultsHelpFormatter)
parser.add_argument("-v", "--version", help="Installed poreminion version",
action="version",
version="%(prog)s " + str(poreminion.version.__version__))
subparsers = parser.add_subparsers(title='[sub-commands]', dest='command', parser_class=ArgumentParserWithDefaults)
#########################################
# create the individual tool parsers
#########################################
##########
# find uncalled (not basecalled) files
##########
parser_uncalled = subparsers.add_parser('uncalled',
help='Find Fast5 files that were not base-called.')
parser_uncalled.add_argument('files', metavar='FILES', nargs='+',
help='The input FAST5 files.')
parser_uncalled.add_argument('--outprefix', "-o",
type=str, required=True,
help='Uses this as basename for the following output files: (1) list of files not basecalled because template events not found, (2) list of files not basecalled because too few events found, (3) list of files not basecalled because too many events found. (4) event stats on each.')
parser_uncalled.add_argument('--move', "-m",
action='store_true', default=False,
                            help='''If specified, will move each non-basecalled file type to an appropriately labeled dir
inside same dir that has the dir reads with reads in it (e.g. downloads --> pass,
downloads --> fail, downloads --> "notemplate", etc).
Still writes out stats file.''')
parser_uncalled.set_defaults(func=run_subtool)
##########
# findTimeErrors
##########
parser_timetest = subparsers.add_parser('timetest',
                              help='Find Fast5 files whose event times are earlier than those of preceding events, suggesting a malfunction/erroneous read.')
parser_timetest.add_argument('files', metavar='FILES', nargs='+',
help='The input FAST5 files.')
parser_timetest.add_argument('--outprefix', "-o",
type=str, default=False,
help='Uses this as basename for file containing list of files with time errors.')
parser_timetest.add_argument('--move', "-m",
action='store_true', default=False,
help='''If specified, will move files with time error dir labeled time_errors
inside same dir that has the dir with reads in it (e.g. downloads --> pass,
downloads --> fail, downloads --> "time_errors", etc).
Still writes out list file above.''')
parser_timetest.add_argument('--verbose', "-v",
action='store_true', default=False,
help='''Will print to stderr info about how far along it is in process.''')
parser_timetest.set_defaults(func=run_subtool)
##########
# fragstats
##########
parser_fragstats = subparsers.add_parser('fragstats',
help='''Run this on set of base-called fast5 files.
Returns tab-delimited table with columns:
1 = readname,
2 = estimated molecule/fragment size,
3 = number input events,
4 = if complement detected,
5 = if 2D detected,
6 = num template events,
7 = num complement events,
8 = length of 2D sequence,
9 = length of template sequence,
10 = length of complement sequence,
11 = mean qscore of 2D sequence,
12 = mean qscore of template sequence,
13 = mean qscore of complement,
14 = ratio of number template events to number complement events,
15 = channel number molecule traversed
16 = heat sink temperature while molecule traversed
17 = num called template events (after events pruned during base-calling)
18 = num called complement events (after events pruned during base-calling)
19 = num skips in template (is actually number 0 moves found in extensive analysis)
20 = num skips in complement (is actually number 0 moves found in extensive analysis)
21 = num stays in template (is actually number 2 moves found in extensive analysis, any 3,4,5 moves not counted here)
22 = num stays in complement (is actually number 2 moves found in extensive analysis, any 3,4,5 moves not counted here)
23 = strand score template
24 = strand score complement
25 = num stutters in template
26 = num stutters in complement
If --extensive used:
27 = starttime,
28 = endtime,
29 = slope across all events,
30 = mean duration across all events,
31 = median duration across all events,
32 = sd of all event durations,
33 = min event duration,
34 = max event duration,
35-40 = num temp events with 0,1,2,3,4,5 moves from base-caller,
41-46 = num comp events with 0,1,2,3,4,5 moves from base caller.
If -g4/--quadruplex used:
Final+1 = number of G4 motifs in 2D read: '([gG]{3,}\w{1,7}){3,}[gG]{3,}'
Final+2 = number of G4 motifs in template read
Final+3 = number of G4 motifs in complement read
Final+4 = number of G4 complement motifs in 2D reads: '([cC]{3,}\w{1,7}){3,}[cC]{3,}'
Final+5 = number of G4 complement motifs in template read (i.e. inferred complement strand count given template read)
Final+6 = number of G4 complement motifs in complement read (i.e. inferred template strand count given complement read)
If --checktime used:
Final column (after even G4 info) = 0 or 1 for no/yes there is a time error present.
Estimates molecule/fragment size in the following way.
If has 2D, molecule size is the length of 2D read.
If template only, molecule size is the length of template read.
If template and complement, but no 2D, molecule size is length of the longer read between template and complement.
Molecule size allows calculation of total non-redundant data.
This is the sum of unique molecule lengths rather than summing all read types from each molecule.
                              From the molecule sizes, the "Molecule N50" can be computed using the nx subcommand on the fragstats file and specifying column 2.
''')
parser_fragstats.add_argument('--extensive', "-e",
action="store_true", required=False, default=False,
help='''This tacks a number of fields on at the end of the regular frag stats that requires much much more computation time.
The additional fields are: 16=starttime of all events, 17=endtime of all events, 18=slope of all events,
19=mean duration across all events, 20=median duration across all events, 21=sd of all event durations, 22=min event duration, 23=max event duration,
                              24-29=num temp events with 0,1,2,3,4,5 moves from base-caller, 30-35=num comp events with 0,1,2,3,4,5 moves from base caller.
''')
parser_fragstats.add_argument('--quadruplex', "-g4",
action="store_true", required=False, default=False,
help='''This tacks on info to end (but before checktime if used) about G4 motifs for available read types in each file.
When a read type not available "-" is given.
Analyzing the 2D read likely gives best estimate of counts in template and complement strands.
Analyzing the template strand also gives an inferred count of complement strand given the template sequence and vice versa.
                              Similar counts between inferred complement (given template) and complement (or inferred template vs template) are only possible when they are similar lengths.
The G4 regular expression is pretty robust to indels and mismatches, especially in the loops/spacer parts of motif.
The poreminion g4 subcommand allows a lot more flexibility in the motif.
For example, one can raise the sensitivity by lowering the minimum poly-G tract length from 3 to 2 and/or raising the maximum loop length from 7 to 15.
''')
parser_fragstats.add_argument('--g4motif', "-g4m",
type=str, required=False, default="3,7",
help='''If specifying -g4, this optional flag (-g4m) allows more flexibility in the G4 motif used.
Use: -g4m minG,maxN -- default: -g4m 3,7.
MinG is minimum number of Gs allowed in poly-G tracts of G4 motifs.
MaxN is maximum number of nucleotides allowed in spacer/loop parts of G4 motif.
Default parameters (3,7) give the standard G4 motif (and its complement): '([gG]{3,}\w{1,7}){3,}[gG]{3,}'.
One can raise the sensitivity (while lowering the specificity), for example, by lowering the minimum poly-G tract length from 3 to 2 and/or raising the maximum loop length from 7 to 15.
''')
parser_fragstats.add_argument('--checktime', "-t",
action="store_true", required=False, default=False,
help='''This tacks on timetest info (search for time errors in start times) as the last field
--> 0 or 1 for no/yes there is a time error present. Adds considerable computation time.
If used with --extensive, will take even more time than that alone.''')
## parser_fragstats.add_argument('--parallel', "-p",
## type=int, required=False, default=1,
## help='''Parallelize (New) - provide integer. Default: 1. Notes: No need to go higher than 1 for small jobs. Higher than 1 may not work on regular mac book pros, but does work on Oscar, Brown University' super computing cluster..''')
parser_fragstats.add_argument('files', metavar='FILES', nargs='+',
help='The input FAST5 files.')
parser_fragstats.set_defaults(func=run_subtool)
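    # Illustrative restatement of the molecule-size rule described in the help
    # text above (the actual logic lives in the fragstats submodule):
    #     if has_2d:            size = len(seq_2d)
    #     elif has_complement:  size = max(len(seq_template), len(seq_complement))
    #     else:                 size = len(seq_template)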
##########
# fragsummary
##########
parser_fragsummary = subparsers.add_parser('fragsummary',
help='''To summarize fragstats, use this with a tab-delimited, fragstats table file (output of fragstats subcommand).''')
parser_fragsummary.add_argument('--fragfile', "-f",
type=str, required=True,
help='''Specify path to the fragstats table file (output of fragstats subcommand).
''')
parser_fragsummary.add_argument('--extensive', "-e",
action="store_true", required=False, default=False,
help='''Use this flag if the fragstats file was generated with -e/--extensive option.
''')
parser_fragsummary.add_argument('--quadruplex', "-g4",
action="store_true", required=False, default=False,
help='''Use this flag if the fragstats file was generated with -g4/--quadruplex option.
''')
parser_fragsummary.add_argument('--checktime', "-t",
action="store_true", required=False, default=False,
help='''Use this flag if the fragstats file was generated with -t/--checktime option.''')
parser_fragsummary.set_defaults(func=run_subtool)
##########
# fragsort/plot
##########
##########
# nX
##########
parser_nx = subparsers.add_parser('nx',
help='Computes N50 or NX values on columns of a file or from comma-separated list.')
parser_nx_input = parser_nx.add_mutually_exclusive_group(required=True)
parser_nx_input.add_argument('-i', "--inputfile",
type= str, default=False,
help='''Input file.''')
parser_nx_input.add_argument('--cmdline', '-c',
type= str, default=False,
help='''Input list of numbers on cmd line (comma-separated) -- e.g. -c 3,5,10,30,11 ''')
parser_nx.add_argument('-k', "--colnum",
type=int, default=1,
help='''Column number (1-based) to compute n50 on from Input file. Default is first column.''')
parser_nx.add_argument('-x', "--x",
type=str, default="25,50,75",
help='''Give comma-separated X values for NX function -- i.e. 50 for N50. Default=25,50,75''')
## parser_nx.add_argument('-pctdatagtx',
## type=str, default=False,
## help='''Instead of NX values, return pct of data from lengths greater than X. Provide X with this flag.''')
## parser_nx.add_argument('-pctreadsgtx',
## type=str, default=False,
## help='''Instead of NX values, return pct of items (reads, contigs, etc) in list greater than X. Provide X with this flag.''')
parser_nx.set_defaults(func=run_subtool)
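    # Illustrative sketch of the NX idea (the actual implementation lives in
    # the nX submodule): sort lengths descending, accumulate, and report the
    # length at which the running total first reaches X% of the overall sum.
    #     def nx_value(lengths, x=50):
    #         total = float(sum(lengths))
    #         running = 0
    #         for length in sorted(lengths, reverse=True):
    #             running += length
    #             if running >= total * x / 100.0:
    #                 return length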
##########
# robust
##########
parser_robust = subparsers.add_parser('fragrobust',
help='''Looks at fragsizes in fragstats. Sees what percent of fragsizes are "robust" to all sequence lengths from same molecule.''')
parser_robust.add_argument('--fragfile', "-f",
type=str, required=False, default=False,
help='''Specify path to the fragstats table file (output of fragstats subcommand).
''')
parser_robust.add_argument('--message', "-m",
action="store_true", required=False, default=False,
                          help='''Use this flag to print a caution message on this metric. If used with -f, it is printed as a header message.
''')
parser_robust.set_defaults(func=run_subtool)
##########
# pct 2D
##########
parser_pct2d = subparsers.add_parser('pct2d',
help='Get the proportion of reads that have a 2D read')
parser_pct2d.add_argument('files', metavar='FILES', nargs='+',
help='The input FAST5 files.')
parser_pct2d.set_defaults(func=run_subtool)
##########
# has 2D
##########
parser_has2d = subparsers.add_parser('has2d',
help='Prints 2 columns: filename, has2D = True/False')
parser_has2d.add_argument('files', metavar='FILES', nargs='+',
help='The input FAST5 files.')
parser_has2d_filter = parser_has2d.add_mutually_exclusive_group()
parser_has2d_filter.add_argument('--only2d', "-2",
action='store_true', default=False,
help='''If specified, will only print out files that have 2D -- no True/False column.''')
parser_has2d_filter.add_argument('--no2d', "-0",
action='store_true', default=False,
help='''If specified, will only print out files that do not have 2D -- no True/False column.''')
parser_has2d.set_defaults(func=run_subtool)
##########
# get num events
##########
parser_numevents = subparsers.add_parser('numevents',
help='Print 2 column list of file and number of input events in file.')
parser_numevents.add_argument('files', metavar='FILES', nargs='+',
help='The input FAST5 files.')
parser_numevents.set_defaults(func=run_subtool)
##########
# get_events
##########
parser_get_events = subparsers.add_parser('events',
help='''Look at events inside raw and basecalled fast5 files. ''')
parser_get_events.add_argument("-f5", '--fast5', type=str, default=None, help=''' Path to fast5 file.''')
parser_get_events_filetype = parser_get_events.add_mutually_exclusive_group(required=False)
parser_get_events_filetype.add_argument('-r', '--raw', action='store_true', default=False)
parser_get_events_filetype.add_argument('-b', '--basecalled', action='store_true', default=False)
parser_get_events.add_argument("-t", "--type", choices=['input', 'template', 'complement'], default="input",
help='''What events should be returned? Specify: input, template, complement. Default: input.
Template and complement events can only be specified from basecalled fast5 files.''')
parser_get_events.add_argument("-H", "--header", action="store_true", default=False,
help='''Adds header line to top with a "#" at beginning of line.''')
parser_get_events.set_defaults(func=run_subtool)
##########
# staypositions
##########
parser_staypos= subparsers.add_parser('staypos',
help='''Get BED output of stay positions in read(s). ''')
parser_staypos.add_argument('files', metavar='FILES', nargs='+',
help='The input FAST5 files.')
parser_staypos.set_defaults(func=run_subtool)
##########
# info
##########
parser_info = subparsers.add_parser('info',
help='''Get info about run and, if file is basecalled, basecalling. ''')
parser_info.add_argument("-f5", '--fast5', type=str, default=None, help=''' Path to fast5 file.''')
parser_info.add_argument("-b", '--basic', action="store_true", default=False, help='''Some basic info.''')
parser_info.add_argument("-a", '--all', action="store_true", default=False, help='''All info.''')
parser_info.set_defaults(func=run_subtool)
##########
# G4 - quadparsersuite - G4
##########
parser_g4 = subparsers.add_parser('g4',
help='''Use quadparser suite (for identifying G4 motifs) on set of fast5 files (or in a FASTA/FASTQ file) and get a BED file with info for each match.
The default parameters search for '([gG]{3,}\w{1,7}){3,}[gG]{3,}' and its complement '([cC]{3,}\w{1,7}){3,}[cC]{3,}'.
See: http://en.wikipedia.org/wiki/G-quadruplex#Quadruplex_prediction_techniques
                        This automates the regex sub-command to search for G4s with given parameters.
See regex for more info on output and searching for any regular expression.
''')
parser_g4_file_type = parser_g4.add_mutually_exclusive_group(required=True)
parser_g4_file_type.add_argument('--fast5', '-f5', type=str,
help='''Path to the directory with input FAST5 files.
This, like most poreminion tools, just requires the path to the dir with all fast5 files.
However, unlike most poreminion tools, it requires the -f5 flag to be specified.''')
parser_g4_file_type.add_argument('--fasta', '-fa', type=str,
help='''Path to the single input FASTA file containing one or more sequences.
FASTA files can be piped in to stdin by using "-". e.g. poretools fasta fast5dir/ | poreminion g4 -fa -''')
parser_g4_file_type.add_argument('--fastq', '-fq', type=str,
help='''Path to the single input FASTQ file containing one or more sequences.
FASTQ files can be piped in to stdin by using "-". e.g. poretools fastq fast5dir/ | poreminion g4 -fq -''')
parser_g4.add_argument('--minG', '-g',
type= int,
help='''minG is the minimum number of Gs in a G tract.
A G4 is typically defined as: ([gG]{3}\w{1,7}){3,}[gG]{3}
As such, the default minG value is 3.
This is typically the shortest allowable G-tract, but 2 is used in some cases to increase sensitivity.
Requiring longer G-tracts has more specificity, but lower sensitivity.
''',
default=3)
parser_g4.add_argument('--maxN', '-n',
type= int,
                        help='''maxN is the maximum number of Ns in the loops between G tracts.
A G4 is typically defined as: ([gG]{3,}\w{1,7}){3,}[gG]{3,}
As such, the default maxN value is 7.
Recently people have also often used maxN=15 -- i.e. ([gG]{3,}\w{1,15}){3,}[gG]{3,}
                        In general, allowing longer loops gives more sensitivity, but lower specificity.
Some literature suggests that the probability of forming a G4 decreases with length.
''',
default=7)
parser_g4.add_argument('--noreverse',
action= 'store_true',
help='''Do not search the complement G4 regular expression (e.g. ([cC]{3,}\w{1,7}){3,}[cC]{3,} ) in the given sequences.
In each sequence, search only for G4s on the given strand using the G4 regex -- e.g. ([gG]{3,}\w{1,7}){3,}[gG]{3,}.
Note: this does NOT mean to search only template reads for G4s and it does NOT mean complement reads are ignored.
It means for all reads, only pay attention to the read sequence, not the inferred reverse complement of that sequence.
''')
parser_g4.add_argument('--reportseq', '-s',
action= 'store_true', default=False,
help='''Report sequence of reg exp match in output.
''')
parser_g4.add_argument('--outformat', '-o',
type=str, default='name,start,end,strand',
                        help='''Provide comma-separated list of desired output information.
Options are name (sequence name), start (start of match), end (end of match),
strand (strand of match +/-), seq (sequence of match).
                           Default = 'name,start,end,strand'. --reportseq/-s option changes default to: 'name,start,end,strand,seq'
Any other combination can be provided.
When using --counts, defaults to name,pos,neg
''')
parser_g4.add_argument('--numtracts', '-t',
action= 'store_true', default=False,
help='''For each G4 location, also report number of poly-G tracts inside G4 motif (and poly-C tracts in G4 complement motif).
''')
parser_g4.add_argument('--counts', '-c',
action= 'store_true', default=False,
                        help='''Report count for number of matches in each sequence instead of individually reporting all occurrences in the sequence.
''')
parser_g4.add_argument('--type',
dest='type',
metavar='STRING',
choices=['all', 'fwd', 'rev', '2D', 'fwd,rev'],
default='all',
help='Only relevant with -f5. Which type of reads should be analyzed? Default: all. choices=[all, fwd, rev, 2D, fwd,rev]')
parser_g4.set_defaults(func=run_subtool)
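    # Illustrative note on how --minG/--maxN map onto the motif (an assumed
    # construction mirroring the help text; the g4/regex submodules hold the
    # real code): with minG=g and maxN=n the forward pattern would be roughly
    #     '([gG]{%d,}\w{1,%d}){3,}[gG]{%d,}' % (g, n, g)
    # and the complement pattern swaps gG for cC, e.g. with the defaults:
    #     ([gG]{3,}\w{1,7}){3,}[gG]{3,}    and    ([cC]{3,}\w{1,7}){3,}[cC]{3,}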
##########
# regex
##########
parser_regex = subparsers.add_parser('regex', description="""Regular Expressions. See following site for help in constructing a useful regex: https://docs.python.org/2/library/re.html""",
help='''Search sequences in set of fast5 files (or in a FASTA/FASTQ file) for a regular expression.
Output BED file has default columns:
1. Name of sequence \n
2. Start of the match \n
3. End of the match
                        4. Strand (+/- relative to sequence given, NOT to be confused with template/complement reads.)
5. Optional Matched sequence (--reportseq/-s)
These can be changed with --outformat/-o which allows you to report name,start,end,strand,seq in any order.
If --counts is used, default columns are:
1. name
2. pos strand count
3. neg strand count
4. total count
This script will write out all positive strand entries of a given sequence followed by all negative strand entries.
If name,start,end are used as first 3 columns, sortBed from BEDtools (or unix sort) can sort the BED file based on coordinates if needed.
''')
parser_regex_file_type = parser_regex.add_mutually_exclusive_group(required=True)
parser_regex_file_type.add_argument('--fast5', '-f5', type=str,
help='''Path to the directory with input FAST5 files.
This, like most poreminion tools, just requires the path to the dir with all fast5 files.
However, unlike most poreminion tools, it requires the -f5 flag to be specified.''')
parser_regex_file_type.add_argument('--fasta', '-fa', type=str,
help='''Path to the single input FASTA file containing one or more sequences.
FASTA files can be piped in to stdin by using "-". e.g. poretools fasta fast5dir/ | poreminion regex -fa - -r "regex"''')
parser_regex_file_type.add_argument('--fastq', '-fq', type=str,
help='''Path to the single input FASTQ file containing one or more sequences.
FASTQ files can be piped in to stdin by using "-". e.g. poretools fastq fast5dir/ | poreminion regex -fq - -r "regex"''')
parser_regex.add_argument('--regex', '-r',
type= str, default=None, required=True,
help='''Required: Regex to be searched in the fasta input.
                        Matches to this regex will have + strand. This string is passed to python
re.compile().
''')
parser_regex.add_argument('--regexrev', '-R',
type= str, default=None, required=False,
help='''The second regex to be searched in fasta input.
Matches to this regex will have - strand.
By default (None), --regexrev will be --regex complemented by replacing
'actguACTGU' with 'tgacaTGACA'.
''')
parser_regex.add_argument('--noreverse',
action= 'store_true',
help='''Do not search for any complement regular expression in the given sequences.
In each sequence, search only for regex given on the given strand.
Note: this does NOT mean to search only template reads for regex and it does NOT mean complement reads are ignored.
It means for all reads, only pay attention to the read sequence, not the inferred reverse complement of that sequence.
''')
parser_regex.add_argument('--reportseq', '-s',
action= 'store_true', default=False,
help='''Report sequence of reg exp match in output.
''')
parser_regex.add_argument('--outformat', '-o',
type=str, default='name,start,end,strand',
                        help='''Provide comma-separated list of desired output information.
Options are name (sequence name), start (start of match), end (end of match),
strand (strand of match +/-), seq (sequence of match).
                           Default = 'name,start,end,strand'. --reportseq/-s option changes default to: 'name,start,end,strand,seq'
Any other combination can be provided.
When using --counts, defaults to name,pos,neg
''')
parser_regex.add_argument('--counts', '-c',
action= 'store_true', default=False,
                        help='''Report count for number of matches in each sequence instead of individually reporting all occurrences in the sequence.
''')
parser_regex.add_argument('--type',
dest='type',
metavar='STRING',
choices=['all', 'fwd', 'rev', '2D', 'fwd,rev'],
default='all',
help='Only relevant with -f5. Which type of reads should be analyzed? Default: all. choices=[all, fwd, rev, 2D, fwd,rev]')
parser_regex.set_defaults(func=run_subtool)
##########
# data_conc (data concentration plot)
##########
parser_dataconc = subparsers.add_parser('dataconc',
help='''Plot sum of read lengths in each bin for a given set of bins for a set of FAST5 files.
This is the type of plot seen in MinKNOW while sequencing.''')
parser_dataconc.add_argument('files', metavar='FILES', nargs='+',
help='The input FAST5 files.')
parser_dataconc.add_argument('--min-length',
dest='min_length',
default=0,
type=int,
help=('Minimum read length to be included in analysis.'))
parser_dataconc.add_argument('--max-length',
dest='max_length',
default=1000000000,
type=int,
help=('Maximum read length to be included in analysis.'))
parser_dataconc.add_argument('--bin-width',
dest='bin_width',
default=500,
type=int,
help=('The width of bins (default: 500 bp).'))
parser_dataconc.add_argument('--saveas',
dest='saveas',
metavar='STRING',
help='''Save the plot to a file named filename.extension (e.g. pdf, jpg)''',
default=None)
parser_dataconc.add_argument('--cumulative',
action="store_true",
help='''For cumulative plot.''',
default=False)
parser_dataconc.add_argument('--percent',
action="store_true",
                          help='''Plot as percentage of all data.''',
default=False)
parser_dataconc.add_argument('--simulate',
action="store_true",
                          help='''This will randomly sample N read lengths in the size range from min to max (or according to parameters set by --parameters),
where N is the number of reads in the fast5 dir (or N specified with --parameters).
Then it will plot the simulation lengths. INFO about parameters used is printed so that
it can be reproduced with --parameters in the future (much faster).''',
default=False)
parser_dataconc.add_argument('--parameters',
type=str,
help='''--simulate by default will use N=readcount, range=min-to-max. Override this with --parameters N,min,max. e.g. --parameters 350,500,48502''',
default=False)
#
parser_dataconc.add_argument('--start',
dest='start_time',
default=None,
type=int,
help='Only analyze reads from after start timestamp')
parser_dataconc.add_argument('--end',
dest='end_time',
default=None,
type=int,
help='Only analyze reads from before end timestamp')
parser_dataconc.add_argument('--high-quality',
dest='high_quality',
default=False,
action='store_true',
help='Only analyze reads with more complement events than template. Can be used with --type or --one-read-per-molecule to select a specific read type from high quality reads.')
parser_dataconc_readfilter = parser_dataconc.add_mutually_exclusive_group()
parser_dataconc_readfilter.add_argument('--type',
dest='type',
metavar='STRING',
choices=['all', 'fwd', 'rev', '2D', 'fwd,rev'],
default='all',
help='Which type of reads should be analyzed? Def.=all, choices=[all, fwd, rev, 2D, fwd,rev]. Is mutually exclusive with --one-read-per-molecule.')
parser_dataconc_readfilter.add_argument('-1', '--one-read-per-molecule',
dest='single_read',
default=False,
action='store_true',
help='''Only analyze one read per molecule in priority order: 2D -> template -> complement.
                            That is, if there is a 2D read use that. If not, then try to use template, etc.
Is mutually exclusive with --type.''')
parser_dataconc.set_defaults(func=run_subtool)
##########
# qual vs. position
##########
parser_qualpos = subparsers.add_parser('qualpos',
help='Get the qual score distribution over positions in reads')
parser_qualpos.add_argument('files', metavar='FILES', nargs='+',
help='The input FAST5 files.')
parser_qualpos.add_argument('--min-length',
dest='min_length',
default=0,
type=int,
help=('Minimum read length to be included in analysis.'))
parser_qualpos.add_argument('--max-length',
dest='max_length',
default=1000000000,
type=int,
help=('Maximum read length to be included in analysis.'))
parser_qualpos.add_argument('--bin-width',
dest='bin_width',
default=1000,
type=int,
help=('The width of bins (default: 1000 bp).'))
parser_qualpos.add_argument('--type',
dest='type',
metavar='STRING',
choices=['all', 'fwd', 'rev', '2D', 'fwd,rev'],
default='all',
help='Which type of reads should be analyzed? Def.=all, choices=[all, fwd, rev, 2D, fwd,rev]')
parser_qualpos.add_argument('--start',
dest='start_time',
default=None,
type=int,
help='Only analyze reads from after start timestamp')
parser_qualpos.add_argument('--end',
dest='end_time',
default=None,
type=int,
help='Only analyze reads from before end timestamp')
parser_qualpos.add_argument('--high-quality',
dest='high_quality',
default=False,
action='store_true',
help='Only analyze reads with more complement events than template.')
parser_qualpos.add_argument('--zscore',
default=False,
action='store_true',
help='For each read, normalize each bucket score to the mean and stdDev of all scores in read. Z = (bucketScore-mean)/stdDev')
parser_qualpos.add_argument('--qualVsLen',
default=False,
action='store_true',
help='Scatter plot mean score (y-axis) vs. read length (x-axis)')
parser_qualpos.add_argument('--saveas',
dest='saveas',
metavar='STRING',
help='''Save the plot to a file named filename.extension (e.g. pdf, jpg)''',
default=None)
parser_qualpos.set_defaults(func=run_subtool)
##########
# qualdist
##########
parser_qualdist = subparsers.add_parser('qualdist',
help='''Get the qual score composition of a set of FAST5 files.
This tool is from poretools, but poreminion allows you to select the type of read.''')
parser_qualdist.add_argument('files', metavar='FILES', nargs='+',
help='The input FAST5 files.')
parser_qualdist.add_argument('--type',
dest='type',
metavar='STRING',
choices=['all', 'fwd', 'rev', '2D', 'fwd,rev'],
default='all',
                        help='Which type of reads should be analyzed? Def.=all, choices=[all, fwd, rev, 2D, fwd,rev].')
parser_qualdist.set_defaults(func=run_subtool)
##########
# kmerCounting
##########
parser_kmer = subparsers.add_parser('kmer',
help='Count kmers in reads or reference.')
parser_kmer.add_argument('files', metavar='FILES', nargs='+',
help='The input FAST5 files.')
parser_kmer.add_argument('-k', '--kmersize',
dest='k',
default=5,
type=int,
help=('Kmer size. Default = 5. Sizes 1-7 work well with kmerplot on regular Mac OS. Up to 10 is possible. After that it might require too much memory for kmerplot on regular Mac OS.'))
parser_kmer.add_argument('--fasta',
dest='fasta',
default=None,
type=str,
help=('''Specify "--fasta file.fa" for analyzing a fasta file instead of fast5dir/.
While min and max length arguments remain meaningful for fasta files, the following arguments do not: start time, end time, high quality, type, single read per molecule.'''))
parser_kmer.add_argument('--fastq',
dest='fastq',
default=None,
type=str,
                              help=('''Specify "--fastq file.fq" for analyzing a fastq file instead of fast5dir/.
While min and max length arguments remain meaningful for fastq files, the following arguments do not: start time, end time, high quality, type, single read per molecule.'''))
parser_kmer.add_argument('--rev-comp',
dest='rev_comp',
default=False,
action="store_true",
help='''Created to be used with --fasta and --fastq options.
When creating kmer counts, it counts both the fwd and reverse complement kmers.
For now, it does nothing when used with fast5 dirs (minION data files).''')
## parser_kmer_output = parser_kmer.add_mutually_exclusive_group()
## parser_kmer_output.add_argument('-t', '--table',
## dest='table',
## default=True,
## action='store_true',
## help=('''Output option: report tab-delimited table of kmer, count, and proportion of all kmers seen.
## Default = True (to stdout). Use --saveas to specify file to save to.'''))
## parser_kmer_output.add_argument('-p', '--plot',
## dest='plot',
## default=False,
## action='store_true',
## help=('''Output option: show or write out plot.
## Default = False (to stdout). Use --saveas to specify file to save to.'''))
parser_kmer.add_argument('--min-length',
dest='min_length',
default=0,
type=int,
help=('Minimum read length to be included in analysis.'))
parser_kmer.add_argument('--max-length',
dest='max_length',
default=1000000000,
type=int,
help=('Maximum read length to be included in analysis.'))
parser_kmer.add_argument('--start',
dest='start_time',
default=None,
type=int,
help='Only analyze reads from after start timestamp')
parser_kmer.add_argument('--end',
dest='end_time',
default=None,
type=int,
help='Only analyze reads from before end timestamp')
parser_kmer.add_argument('--high-quality',
dest='high_quality',
default=False,
action='store_true',
help='Only analyze reads with more complement events than template.')
parser_kmer.add_argument('--saveas',
dest='saveas',
metavar='STRING',
help='''Save tab-delimited kmer + counts to file.''',
default=None)
parser_kmer_readfilter = parser_kmer.add_mutually_exclusive_group()
parser_kmer_readfilter.add_argument('--type',
dest='type',
metavar='STRING',
choices=['all', 'fwd', 'rev', '2D', 'fwd,rev'],
default='all',
help='Which type of reads should be analyzed? Def.=all, choices=[all, fwd, rev, 2D, fwd,rev]. Is mutually exclusive with --one-read-per-molecule.')
parser_kmer_readfilter.add_argument('-1', '--one-read-per-molecule',
dest='single_read',
default=False,
action='store_true',
help='''Only analyze one read per molecule in priority order: 2D -> template -> complement.
                            That is, if there is a 2D read use that. If not, then try to use template, etc.
Is mutually exclusive with --type.''')
parser_kmer.set_defaults(func=run_subtool)
##########
# kmerplotting
##########
parser_kmerplot = subparsers.add_parser('kmerplot',
help='Plot kmer counts in reads or reference.')
## parser_kmerplot.add_argument('files', metavar='FILES', nargs='+',
## help='The input FAST5 files.')
parser_kmerplot.add_argument('-t1', '--kmer-count-in-reads',
dest='table1',
type=str,
help='''Provide path to file with kmer count table from reads (or any kmer count table).
This argument is required and when used alone, just generates a bar plot of kmer counts.''',
default=None)
parser_kmerplot.add_argument('-t2', '--kmer-count-in-reference',
dest='table2',
type=str,
help='''Provide path to file with kmer count table from reference sequence (or any second kmer count table).
This argument is not required and if used, results in a scatterplot of the 2 kmer count tables.''',
default=None)
parser_kmerplot.add_argument('--matplotlib',
dest='mpl',
action='store_true',
help='''Temp option: plot in matplotlib''',
default=False)
parser_kmerplot.add_argument('--ggplot2',
dest='gg',
action='store_true',
help='''Temp option: plot in ggplot2''',
default=False)
parser_kmerplot.add_argument('--saveas',
dest='saveas',
metavar='STRING',
help='''Save to file. e.g. --saveas "filename.extension" where extension can be only pdf and jpg for now.''',
default=None)
parser_kmerplot.set_defaults(func=run_subtool)
##########
# kmer diff abundance
##########
parser_kmerdiff = subparsers.add_parser('kmerdiff',
help='Get fold-enrichment values of kmers in reads vs reference.')
parser_kmerdiff.add_argument('-t1', '--kmer-count-in-reads',
dest='table1',
type=str,
help='''Provide path to file with kmer count table from reads (or any kmer count table).
This argument is required and when used alone, just generates a bar plot of kmer counts.''',
default=None)
parser_kmerdiff.add_argument('-t2', '--kmer-count-in-reference',
dest='table2',
type=str,
help='''Provide path to file with kmer count table from reference sequence (or any second kmer count table).
This argument is not required and if used, results in a scatterplot of the 2 kmer count tables.''',
default=None)
parser_kmerdiff.add_argument('--saveas',
dest='saveas',
metavar='STRING',
help='''Save to file. e.g. --saveas "filename.extension" where extension can be only pdf and jpg for now.''',
default=None)
parser_kmerdiff.add_argument('-bcv', '--square-root-dispersion',
dest='bcv',
type=float,
help='''When there are no replicates in edgeR, dispersion must be determined by the user.
The default is 0.2. Other values to try could be 0.01-0.4 (or any).
p-values will be sensitive to choice of bcv. Fold change will not.''',
default=0.2)
parser_kmerdiff.add_argument('--volcano',
dest='volcano',
type=str,
help='''If you want the analysis to generate a volcano plot,
(log(fold change) vs. -log10(pvalue)), then use this flag
and provide the name and extension of volcano plot file (e.g. volcano.jpg).''',
default=None)
parser_kmerdiff.add_argument('--smear',
dest='smear',
type=str,
help='''If you want the analysis to generate a smear plot,
(log(fold change) vs. log(CPM)), then use this flag
and provide the name and extension of smear plot file (e.g. smear.jpg).''',
default=None)
parser_kmerdiff.add_argument('--nt-content',
dest='nt_content',
type=str,
help='''If you want the analysis to generate a table analyzing the nucleotide content
of kmers >= abs(fold change) and pval <= p, then use this flag with those values as in these
examples: (a) --nt-content fc:2.5,p:0.001 (b) --nt-content fc:2,fdr:0.1)''',
default=None)
parser_kmerdiff.set_defaults(func=run_subtool)
##########
# winner -- adds "each" and "details" functionalities to poretools winner
##########
parser_winner = subparsers.add_parser('winner',
help='''Get the longest read from a set of FAST5 files.
                            Similar to poretools winner, but also allows type=each and offers a details-only option.''')
parser_winner.add_argument('files', metavar='FILES', nargs='+',
help='The input FAST5 files.')
parser_winner.add_argument('--type',
dest='type',
metavar='STRING',
choices=['all', 'fwd', 'rev', '2D', 'fwd,rev', 'each'],
default='all',
help='''Which type of FASTA entries should be reported? Def.=all.
Choices: 'all', 'fwd', 'rev', '2D', 'fwd,rev', 'each'.
'each' will give longest for each 2D, fwd, rev.''')
parser_winner.add_argument('--details',
action="store_true",
default=False,
help='If set, it will only print details: readname, length')
parser_winner.set_defaults(func=run_subtool)
##########
# seqlen
##########
parser_seqlen = subparsers.add_parser('seqlen',
help='''Get sequence lengths from set of FAST5 files.
By default it will attempt to give read lengths for template, complement, and 2d.
Use optional flags to exclude any of these read types. ''')
parser_seqlen.add_argument('files', metavar='FILES', nargs='+',
help='The input FAST5 files.')
parser_seqlen.add_argument('--not2d',
action="store_true",
default=False,
help='Exclude 2d read lengths.')
parser_seqlen.add_argument('--nottemp',
action="store_true",
default=False,
help='Exclude template read lengths.')
parser_seqlen.add_argument('--notcomp',
action="store_true",
default=False,
help='Exclude complement read lengths.')
parser_seqlen.set_defaults(func=run_subtool)
##########
## # viz time
## ##########
## parser_numevents = subparsers.add_parser('viztime',
## help='Visualize the relative start time across events - e.g. to visualize a time error or lack thereof.')
## parser_numevents.add_argument('files', metavar='FILES', nargs='+',
## help='The input FAST5 files.')
## parser_numevents.set_defaults(func=run_subtool)
##########
# alignment
##########
## parser_align = subparsers.add_parser('align',
## help='NEW FEATURE -- NOT YET STABLE/FINISHED. Performs alignments -- returns alignments, stats, plots')
## align_subparsers = parser_align.add_subparsers(title='[align-commands]', dest='align_command', parser_class=ArgumentParserWithDefaults)
## parser_align_fitting = align_subparsers.add_parser('fitting', help="fitting of 1 DNA seq to another")
## parser_align_blasr = align_subparsers.add_parser('blasr', help="BLASR")
## parser_align_blastn = align_subparsers.add_parser('blastn', help="BLASTN")
## parser_align_last = align_subparsers.add_parser('last', help="LAST")
## ## BLASR
## parser_align_blasr.add_argument("--plot")
## parser_align_blasr.add_argument("--blasr_file", type=str,default=None)
## parser_align.add_argument('--sequence',
## dest='sequence',
## type=str,
## help='''Provide a sequence < min read length. Default is the KanRR fragment: CGTACGCTGCAGGTCG''',
## default='CGTACGCTGCAGGTCG') ##shortest version of KanR fragment that maximized scores on some pilot reads
## parser_align.add_argument('-m', '--multiple-sequences',
## dest='multiple_sequences',
## type=str, default=None,
## help='''Provide a path to a file with 1 sequence per line.
## For each read in the fastx file, it will report the fitting alignment for the
## sequence in this file with the best fitting aln.
## Each time it encounters a score that ties the current max score, it exchanges the older fiting aln
## info for the new fitting aln info with a 50%% probability.
## This way there is a random assignment of the best barcode.
## Use --all-scores instead to get an output with all max scores and barcodes returned.''')
## parser_align.add_argument('-w', '--with-read-names',
## dest='with_read_names', action="store_true",
## default=False,
## help='''If set, will print "readname, startPosInRead, fitAlnScore, fitAlnScore/queryLen";
## else just "startPosInRead,fitAlnScore, fitAlnScore/queryLen".
## Start position is in pythonese (0-based).''')
## parser_align.add_argument('-e', '--with-edit-distances',
## dest='with_edit_distances', action="store_true",
## default=False,
## help='''If set, edit dist will be incl in output''')
## parser_align.add_argument('-a', '--with-aln-seqs',
## dest='with_aln_seqs', action="store_true",
## default=False,
## help='''If set, the aligned versions of sequences 1 (read) and 2 (provided) will be printed.''')
## parser_align.add_argument('-r', '--random-sequence',
## dest='random_sequence', type=int,
## default=False,
## help='''Provide integer for random sequence length. This option overrides --sequence.''')
## parser_align_seqtransform = parser_align.add_mutually_exclusive_group()
## parser_align_seqtransform.add_argument("-c", "--complement", action="store_true", default=False,
## help=''' Use complement of provided sequence -- right now only works on single seq.
## e.g. AACC -> TTGG''')
## parser_align_seqtransform.add_argument("-rc", "--reverse_complement", action="store_true", default=False,
## help=''' Use reverse complement of provided sequence -- right now only works on single seq.
## e.g. AACC -> GGTT''')
## parser_align_seqtransform.add_argument("-rs", "--reverse_sequence", action="store_true", default=False,
## help=''' Use reverse sequence of provided sequence -- right now only works on single seq.
## e.g. AACC -> CCAA''')
##
## parser_align.set_defaults(func=run_subtool)
##
#######################################################
# parse the args and call the selected function
#######################################################
args = parser.parse_args()
if args.quiet:
logger.setLevel(logging.ERROR)
try:
args.func(parser, args)
except IOError, e:
if e.errno != 32: # ignore SIGPIPE
raise
if __name__ == "__main__":
main()
## Would have to parallelize from here....
## from joblib import Parallel, delayed
## import time
## from glob import glob
## folder = "del"
## files = glob('{}/*.txt'.format(folder))
## def shit(f):
## print f
## time.sleep(0.001)
## ## for f in files:
## ## shit(f) #args.parallel
## Parallel(n_jobs=2)(delayed(shit)(f) for f in files)
|
JohnUrban/poreminion
|
poreminion/poreminion_main.py
|
Python
|
mit
| 59,570
| 0.012641
|
# Copyright (c) 2014 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import json
import os.path
import time
from oslo_log import log as logging
from trove.common import exception
from trove.common.i18n import _
from trove.common import utils
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.couchbase import service
from trove.guestagent.datastore.experimental.couchbase import system
from trove.guestagent.strategies.restore import base
LOG = logging.getLogger(__name__)
class CbBackup(base.RestoreRunner):
"""
Implementation of Restore Strategy for Couchbase.
"""
__strategy_name__ = 'cbbackup'
base_restore_cmd = 'sudo tar xpPf -'
def __init__(self, *args, **kwargs):
super(CbBackup, self).__init__(*args, **kwargs)
def pre_restore(self):
try:
operating_system.remove(system.COUCHBASE_DUMP_DIR, force=True)
except exception.ProcessExecutionError:
LOG.exception(_("Error during pre-restore phase."))
raise
def post_restore(self):
try:
# Root enabled for the backup
pwd_file = system.COUCHBASE_DUMP_DIR + system.SECRET_KEY
if os.path.exists(pwd_file):
with open(pwd_file, "r") as f:
pw = f.read().rstrip("\n")
root = service.CouchbaseRootAccess()
root.set_password(pw)
# Get current root password
root = service.CouchbaseRootAccess()
root_pwd = root.get_password()
# Iterate through each bucket config
buckets_json = system.COUCHBASE_DUMP_DIR + system.BUCKETS_JSON
with open(buckets_json, "r") as f:
out = f.read()
if out == "[]":
# No buckets or data to restore. Done.
return
d = json.loads(out)
for i in range(len(d)):
bucket_name = d[i]["name"]
bucket_type = d[i]["bucketType"]
if bucket_type == "membase":
bucket_type = "couchbase"
ram = int(utils.to_mb(d[i]["quota"]["ram"]))
auth_type = d[i]["authType"]
password = d[i]["saslPassword"]
port = d[i]["proxyPort"]
replica_number = d[i]["replicaNumber"]
replica_index = 1 if d[i]["replicaIndex"] else 0
threads = d[i]["threadsNumber"]
flush = 1 if "flush" in d[i]["controllers"] else 0
# cbrestore requires you to manually create dest buckets
create_bucket_cmd = ('curl -X POST -u root:' + root_pwd +
' -d name="' +
bucket_name + '"' +
' -d bucketType="' +
bucket_type + '"' +
' -d ramQuotaMB="' +
str(ram) + '"' +
' -d authType="' +
auth_type + '"' +
' -d saslPassword="' +
password + '"' +
' -d proxyPort="' +
str(port) + '"' +
' -d replicaNumber="' +
str(replica_number) + '"' +
' -d replicaIndex="' +
str(replica_index) + '"' +
' -d threadsNumber="' +
str(threads) + '"' +
' -d flushEnabled="' +
str(flush) + '" ' +
system.COUCHBASE_REST_API +
'/pools/default/buckets')
utils.execute_with_timeout(create_bucket_cmd,
shell=True, timeout=300)
if bucket_type == "memcached":
continue
# Wait for couchbase (membase) bucket creation to complete
# (follows same logic as --wait for couchbase-cli)
timeout_in_seconds = 120
start = time.time()
bucket_exist = False
while ((time.time() - start) <= timeout_in_seconds and
not bucket_exist):
url = (system.COUCHBASE_REST_API +
'/pools/default/buckets/')
outfile = system.COUCHBASE_DUMP_DIR + '/buckets.all'
utils.execute_with_timeout('curl -u root:' + root_pwd +
' ' + url + ' > ' + outfile,
shell=True, timeout=300)
with open(outfile, "r") as file:
out = file.read()
buckets = json.loads(out)
for bucket in buckets:
if bucket["name"] == bucket_name:
bucket_exist = True
break
if not bucket_exist:
time.sleep(2)
if not bucket_exist:
raise base.RestoreError("Failed to create bucket '%s' "
"within %s seconds"
% (bucket_name,
timeout_in_seconds))
# Query status
# (follows same logic as --wait for couchbase-cli)
healthy = False
while ((time.time() - start) <= timeout_in_seconds):
url = (system.COUCHBASE_REST_API +
'/pools/default/buckets/' +
bucket_name)
outfile = system.COUCHBASE_DUMP_DIR + '/' + bucket_name
utils.execute_with_timeout('curl -u root:' + root_pwd +
' ' + url + ' > ' + outfile,
shell=True, timeout=300)
all_node_ready = True
with open(outfile, "r") as file:
out = file.read()
bucket = json.loads(out)
for node in bucket["nodes"]:
if node["status"] != "healthy":
all_node_ready = False
break
if not all_node_ready:
time.sleep(2)
else:
healthy = True
break
if not healthy:
raise base.RestoreError("Bucket '%s' is created but "
"not ready to use within %s "
"seconds"
% (bucket_name,
timeout_in_seconds))
# Restore
restore_cmd = ('/opt/couchbase/bin/cbrestore ' +
system.COUCHBASE_DUMP_DIR + ' ' +
system.COUCHBASE_REST_API +
' --bucket-source=' + bucket_name +
' --bucket-destination=' + bucket_name +
' -u root' + ' -p ' + root_pwd)
try:
utils.execute_with_timeout(restore_cmd,
shell=True,
timeout=300)
except exception.ProcessExecutionError:
# cbrestore fails or hangs at times:
# http://www.couchbase.com/issues/browse/MB-10832
# Retrying typically works
LOG.exception(_("cbrestore failed. Retrying..."))
utils.execute_with_timeout(restore_cmd,
shell=True,
timeout=300)
except exception.ProcessExecutionError as p:
LOG.error(p)
raise base.RestoreError("Couchbase restore failed.")
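# --- Illustrative sketch (not part of the module) ---------------------------
# Shape of a single entry in the buckets JSON that post_restore() iterates
# over. The field names mirror the lookups above; the values are invented
# examples, and the authoritative schema is whatever Couchbase's
# /pools/default/buckets endpoint returns.
_example_bucket_config = {
    "name": "default",
    "bucketType": "membase",              # remapped to "couchbase" above
    "quota": {"ram": 268435456},          # bytes, converted via utils.to_mb()
    "authType": "sasl",
    "saslPassword": "",
    "proxyPort": 11211,
    "replicaNumber": 1,
    "replicaIndex": True,
    "threadsNumber": 3,
    "controllers": {"flush": "/pools/default/buckets/default/controller/doFlush"},
}
del _example_bucket_config                # keep the module namespace unchanged
# -----------------------------------------------------------------------------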
|
zhangg/trove
|
trove/guestagent/strategies/restore/experimental/couchbase_impl.py
|
Python
|
apache-2.0
| 9,593
| 0
|
from __future__ import division
import numpy as np
from .._shared.utils import assert_nD
from . import _hoghistogram
def hog(image, orientations=9, pixels_per_cell=(8, 8),
cells_per_block=(3, 3), visualise=False, normalise=False):
"""Extract Histogram of Oriented Gradients (HOG) for a given image.
Compute a Histogram of Oriented Gradients (HOG) by
1. (optional) global image normalisation
2. computing the gradient image in x and y
3. computing gradient histograms
4. normalising across blocks
5. flattening into a feature vector
Parameters
----------
image : (M, N) ndarray
Input image (greyscale).
orientations : int
Number of orientation bins.
pixels_per_cell : 2 tuple (int, int)
Size (in pixels) of a cell.
cells_per_block : 2 tuple (int,int)
Number of cells in each block.
visualise : bool, optional
Also return an image of the HOG.
normalise : bool, optional
Apply power law compression to normalise the image before
processing.
Returns
-------
newarr : ndarray
HOG for the image as a 1D (flattened) array.
hog_image : ndarray (if visualise=True)
A visualisation of the HOG image.
References
----------
* http://en.wikipedia.org/wiki/Histogram_of_oriented_gradients
* Dalal, N and Triggs, B, Histograms of Oriented Gradients for
Human Detection, IEEE Computer Society Conference on Computer
Vision and Pattern Recognition 2005 San Diego, CA, USA
"""
image = np.atleast_2d(image)
"""
The first stage applies an optional global image normalisation
equalisation that is designed to reduce the influence of illumination
effects. In practice we use gamma (power law) compression, either
computing the square root or the log of each colour channel.
Image texture strength is typically proportional to the local surface
illumination so this compression helps to reduce the effects of local
shadowing and illumination variations.
"""
assert_nD(image, 2)
if normalise:
image = np.sqrt(image)
"""
The second stage computes first order image gradients. These capture
contour, silhouette and some texture information, while providing
further resistance to illumination variations. The locally dominant
colour channel is used, which provides colour invariance to a large
extent. Variant methods may also include second order image derivatives,
which act as primitive bar detectors - a useful feature for capturing,
e.g. bar like structures in bicycles and limbs in humans.
"""
if image.dtype.kind == 'u':
# convert uint image to float
# to avoid problems with subtracting unsigned numbers in np.diff()
image = image.astype('float')
gx = np.empty(image.shape, dtype=np.double)
gx[:, 0] = 0
gx[:, -1] = 0
gx[:, 1:-1] = image[:, 2:] - image[:, :-2]
gy = np.empty(image.shape, dtype=np.double)
gy[0, :] = 0
gy[-1, :] = 0
gy[1:-1, :] = image[2:, :] - image[:-2, :]
"""
The third stage aims to produce an encoding that is sensitive to
local image content while remaining resistant to small changes in
pose or appearance. The adopted method pools gradient orientation
information locally in the same way as the SIFT [Lowe 2004]
feature. The image window is divided into small spatial regions,
called "cells". For each cell we accumulate a local 1-D histogram
of gradient or edge orientations over all the pixels in the
cell. This combined cell-level 1-D histogram forms the basic
"orientation histogram" representation. Each orientation histogram
divides the gradient angle range into a fixed number of
predetermined bins. The gradient magnitudes of the pixels in the
cell are used to vote into the orientation histogram.
"""
sy, sx = image.shape
cx, cy = pixels_per_cell
bx, by = cells_per_block
n_cellsx = int(np.floor(sx // cx)) # number of cells in x
n_cellsy = int(np.floor(sy // cy)) # number of cells in y
# compute orientations integral images
orientation_histogram = np.zeros((n_cellsy, n_cellsx, orientations))
_hoghistogram.hog_histograms(gx, gy, cx, cy, sx, sy, n_cellsx, n_cellsy,
orientations, orientation_histogram)
# now for each cell, compute the histogram
hog_image = None
if visualise:
from .. import draw
radius = min(cx, cy) // 2 - 1
orientations_arr = np.arange(orientations)
dx_arr = radius * np.cos(orientations_arr / orientations * np.pi)
dy_arr = radius * np.sin(orientations_arr / orientations * np.pi)
cr2 = cy + cy
cc2 = cx + cx
hog_image = np.zeros((sy, sx), dtype=float)
for x in range(n_cellsx):
for y in range(n_cellsy):
for o, dx, dy in zip(orientations_arr, dx_arr, dy_arr):
centre = tuple([y * cr2 // 2, x * cc2 // 2])
rr, cc = draw.line(int(centre[0] - dx),
int(centre[1] + dy),
int(centre[0] + dx),
int(centre[1] - dy))
hog_image[rr, cc] += orientation_histogram[y, x, o]
"""
The fourth stage computes normalisation, which takes local groups of
cells and contrast normalises their overall responses before passing
to next stage. Normalisation introduces better invariance to illumination,
shadowing, and edge contrast. It is performed by accumulating a measure
of local histogram "energy" over local groups of cells that we call
"blocks". The result is used to normalise each cell in the block.
Typically each individual cell is shared between several blocks, but
its normalisations are block dependent and thus different. The cell
thus appears several times in the final output vector with different
normalisations. This may seem redundant but it improves the performance.
We refer to the normalised block descriptors as Histogram of Oriented
Gradient (HOG) descriptors.
"""
n_blocksx = (n_cellsx - bx) + 1
n_blocksy = (n_cellsy - by) + 1
normalised_blocks = np.zeros((n_blocksy, n_blocksx,
by, bx, orientations))
for x in range(n_blocksx):
for y in range(n_blocksy):
block = orientation_histogram[y:y + by, x:x + bx, :]
eps = 1e-5
normalised_blocks[y, x, :] = block / np.sqrt(block.sum() ** 2 + eps)
"""
The final step collects the HOG descriptors from all blocks of a dense
overlapping grid of blocks covering the detection window into a combined
feature vector for use in the window classifier.
"""
if visualise:
return normalised_blocks.ravel(), hog_image
else:
return normalised_blocks.ravel()
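# --- Illustrative sketch (not part of the module) ---------------------------
# Calls hog() above on a synthetic 64x64 greyscale image. With 8x8-pixel
# cells and 3x3-cell blocks this gives an 8x8 cell grid, (8 - 3 + 1)**2 = 36
# blocks, and a 36 * 3 * 3 * 9 = 2916-element descriptor. The image content
# is random and only meant to exercise the code path.
_demo_image = np.random.rand(64, 64)
_demo_descriptor = hog(_demo_image, orientations=9, pixels_per_cell=(8, 8),
                       cells_per_block=(3, 3))
assert _demo_descriptor.shape == (2916,)
del _demo_image, _demo_descriptor
# -----------------------------------------------------------------------------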
|
michaelpacer/scikit-image
|
skimage/feature/_hog.py
|
Python
|
bsd-3-clause
| 7,022
| 0.000142
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""oslo.i18n integration module.
See http://docs.openstack.org/developer/oslo.i18n/usage.html
"""
import oslo_i18n
_translators = oslo_i18n.TranslatorFactory(domain='murano')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
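# --- Illustrative sketch (not part of the module) ---------------------------
# How the markers defined above are typically used elsewhere in the code
# base (see the oslo.i18n usage guidelines linked in the docstring). The
# helper below is an example only and is not called anywhere.
def _example_usage(name):
    import logging
    log = logging.getLogger(__name__)
    log.info(_LI("Creating environment %s"), name)
    log.warning(_LW("Environment %s already exists"), name)
    raise RuntimeError(_("Unable to create environment %s") % name)
# -----------------------------------------------------------------------------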
|
olivierlemasle/murano
|
murano/common/i18n.py
|
Python
|
apache-2.0
| 1,149
| 0
|
# -*- coding: utf-8 -*-
" WebSite models "
import os
import shutil
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.db import connection
from django.db import models
from django.db.utils import IntegrityError
from django.utils.translation import ugettext_lazy as _
from django.template.loader import get_template
from ionyweb.file_manager.models import FileManager, Directory
# Sites
class WebSite(models.Model):
''' WebSite
New contract of WebSite.
Everything is linked to an instance of this model.
(Pages, Files, ...)
'''
slug = models.SlugField(_(u"url"),
max_length=100,
unique=True)
title = models.CharField(_(u"title"),
max_length=50)
logo = models.ImageField(_(u"Logo"),
upload_to='media_root',
# TEMP -> pbs with PIL...
blank=True)
ndds = models.ManyToManyField(Site,
related_name="website")
owners = models.ManyToManyField(User,
through='WebSiteOwner')
domain = models.ForeignKey(Site,
related_name="website_set",
unique=True,
on_delete=models.PROTECT,
help_text=_(u"Represents the main domain of the "
"website."))
analytics_key = models.CharField(_("Analytics key"), max_length=20, blank=True, null=True,
#regex=r'UA-[0-9]{7}-[0-9]{1}',
help_text=u'e.g. "UA-2456069-3"')
main_menu_levels = models.PositiveIntegerField(_("Main menu levels"), default=1)
meta_keywords = models.CharField(_(u"META Keywords"),
                                     max_length=255, blank=True)
meta_description = models.TextField(_(u"META Description"), blank=True)
theme = models.CharField(_(u'Theme slug'),
max_length=100)
default_template = models.CharField(_(u'Default template'),
max_length=100, blank=True)
default_layout = models.CharField(_(u'Default layout'),
max_length=100)
# Warning, please use directory() to access the Files Library object
files_library = models.ForeignKey(FileManager,
related_name="website",
blank=True,
null=True,
help_text=_(u"Files Library"))
in_maintenance = models.BooleanField(_(u'Maintenance mode'), default=False, blank=True)
class Meta:
verbose_name = _(u"website")
verbose_name_plural = _(u'websites')
def __unicode__(self):
return u'%s' % self.title
def delete(self, *args, **kwargs):
""" Delete this domain names linked to it and the files """
for ndd in self.ndds.all():
if ndd != self.domain:
ndd.delete()
save_ndd = self.domain
#shutil.rmtree(self.media_root())
super(WebSite, self).delete(*args, **kwargs)
# The domain name is protected until the website is deleted successfully
save_ndd.delete()
def get_theme(self):
if len(self.theme.split('/')) <= 1:
return "%s/default" % self.theme
return self.theme
def file_manager(self):
if self.files_library:
return self.files_library
else:
# Create root directory
root = Directory.objects.create(name=self.slug)
self.files_library = FileManager.objects.create(root=root)
self.save()
try:
os.makedirs(self.media_root())
except OSError:
pass
# Create
try:
os.makedirs(os.path.join(self.media_root(), 'storage'))
except OSError:
pass
return self.files_library
def media_root(self):
"Get the filemanager site root"
return os.path.join('websites', self.slug, 'storage')
def get_size(self):
"Give the size used for quota in bytes"
return folder_size(self.media_root())
def get_screenshot(self):
"Return the url of the screenshot or None for the default image"
return None
def get_absolute_url(self):
if getattr(settings, 'SERVER_PORT', 80) != 80:
return u'http://%s:%d' % (self.domain.domain,
settings.SERVER_PORT)
else:
return u'http://%s' % self.domain.domain
def get_medias(self):
# medias_list = []
# # Add css file of the template
# medias_list.append(
# u'<link href="http://%s%s" type="text/css" media="all" rel="stylesheet" />' % (
# self.domain.domain, self.skin.template.css_file ))
# # Add css file of the skin
# medias_list.append(
# u'<link href="http://%s%s" type="text/css" media="all" rel="stylesheet" />' % (
# self.domain.domain, self.skin.css_file ))
# return u"\n".join(medias_list)
return ""
medias = property(get_medias)
def _get_layout(self, layout_name=None):
if layout_name is not None:
return 'layouts/%s' % layout_name
else:
return ''
def get_default_layout(self):
return self._get_layout(self.default_layout)
layout = property(get_default_layout)
# def get_header_layout(self):
# return self._get_layout(self.header_layout)
# def get_footer_layout(self):
# return self._get_layout(self.footer_layout)
# def render_header(self, request):
# """
# Returns the header rendering of website.
# """
# return render_plugins_header_or_footer(
# request,
# plugins_list=self.header_plugins.order_by('plugin_order'),
# layout=self.get_header_layout())
# def render_footer(self, request):
# """
# Returns the footer rendering of website.
# """
# return render_plugins_header_or_footer(
# request,
# plugins_list=self.footer_plugins.order_by('plugin_order'),
# layout=self.get_footer_layout())
def get_url_home_page(self):
return u'/'
class WebSiteOwner(models.Model):
website = models.ForeignKey(WebSite, related_name='websites_owned')
user = models.ForeignKey(User, related_name='websites_owned')
is_superuser = models.BooleanField(_('superuser status'),
default=False,
help_text=_("Designates that this user "
"has all permissions without "
"explicitly assigning them."))
def __unicode__(self):
return u'%s owns %d (%s)' % (self.user, self.website.id, self.is_superuser)
def delete(self, *args, **kwargs):
number_of_owners = self.website.websites_owned.filter(is_superuser=True).count()
if number_of_owners <= 1 and self.is_superuser:
raise IntegrityError('This user is the only superuser of this website')
else:
super(WebSiteOwner, self).delete(*args, **kwargs)
# SIGNALS
def catch_wrong_deletion_of_user(sender, instance, **kwargs):
    ''' Verify that deleting this user does not leave any website
    without a superuser (no orphan websites)
'''
cursor = connection.cursor()
cursor.execute("""
SELECT ws.title, COUNT(*) as owners FROM website_website ws
INNER JOIN website_websiteowner wso
ON ws.id = wso.website_id
AND wso.is_superuser = TRUE
AND ws.id IN (SELECT website_id
FROM website_websiteowner
WHERE user_id = %s) GROUP BY ws.title
""", [instance.id])
websites_owned = cursor.fetchall()
websites_alone = []
for website_title, owner_count in websites_owned:
if website_title is not None and owner_count <= 1:
websites_alone.append(website_title)
if len(websites_alone) > 0:
raise IntegrityError(
'This user is the only owner of the website(s) : %s' % (
', '.join(websites_alone)))
models.signals.pre_delete.connect(catch_wrong_deletion_of_user, sender=User)
def create_filemanager_media_site_root(sender, instance, **kwargs):
"""Create the filemanager when creating a WebSite"""
try:
os.mkdir(instance.media_root())
return True
except OSError:
return False
models.signals.post_save.connect(create_filemanager_media_site_root, sender=WebSite)
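# --- Illustrative sketch (not part of the module) ---------------------------
# The ownership rule enforced by WebSiteOwner.delete() and
# catch_wrong_deletion_of_user() above. The helper is never called here, the
# field values are placeholders, and running it requires a configured Django
# project with these models installed.
def _example_ownership_rule(user):
    site = Site.objects.create(domain="example.com", name="example.com")
    website = WebSite.objects.create(slug="demo", title="Demo", domain=site,
                                     theme="default", default_layout="100")
    owner = WebSiteOwner.objects.create(website=website, user=user,
                                        is_superuser=True)
    # Deleting the only superuser raises IntegrityError instead of leaving
    # the website without one.
    owner.delete()
# -----------------------------------------------------------------------------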
|
makinacorpus/ionyweb
|
ionyweb/website/models.py
|
Python
|
bsd-3-clause
| 9,120
| 0.004715
|
"""
Utility classes and functions to handle connection to a libvirt host system
The entire contents of callables in this module (minus the names defined in
NOCLOSE below), will become methods of the Virsh and VirshPersistent classes.
A Closure class is used to wrap the module functions, lambda does not
properly store instance state in this implementation.
Because none of the methods have a 'self' parameter defined, the classes
are defined to be dict-like, and get passed in to the methods as a the
special ``**dargs`` parameter. All virsh module functions _MUST_ include a
special ``**dargs`` (variable keyword arguments) to accept non-default
keyword arguments.
The standard set of keyword arguments to all functions/modules is declared
in the VirshBase class. Only the 'virsh_exec' key is guaranteed to always
be present, the remainder may or may not be provided. Therefore, virsh
functions/methods should use the dict.get() method to retrieve with a default
for non-existent keys.
:copyright: 2012 Red Hat Inc.
"""
import signal
import logging
import re
import weakref
import time
import select
import locale
import base64
import aexpect
from avocado.utils import path
from avocado.utils import process
from six.moves import urllib
from virttest import propcan
from virttest import remote
from virttest import utils_misc
# list of symbol names NOT to wrap as Virsh class methods
# Everything else from globals() will become a method of Virsh class
NOCLOSE = list(globals().keys()) + [
'NOCLOSE', 'SCREENSHOT_ERROR_COUNT', 'VIRSH_COMMAND_CACHE',
'VIRSH_EXEC', 'VirshBase', 'VirshClosure', 'VirshSession', 'Virsh',
'VirshPersistent', 'VirshConnectBack', 'VIRSH_COMMAND_GROUP_CACHE',
'VIRSH_COMMAND_GROUP_CACHE_NO_DETAIL',
]
# Needs to be in-scope for Virsh* class screenshot method and module function
SCREENSHOT_ERROR_COUNT = 0
# Cache of virsh commands, used by help_command_group() and help_command_only()
# TODO: Make the cache into a class attribute on VirshBase class.
VIRSH_COMMAND_CACHE = None
VIRSH_COMMAND_GROUP_CACHE = None
VIRSH_COMMAND_GROUP_CACHE_NO_DETAIL = False
# This is used both inside and outside classes
try:
VIRSH_EXEC = path.find_command("virsh")
except path.CmdNotFoundError:
logging.warning("Virsh executable not set or found on path, "
"virsh module will not function normally")
VIRSH_EXEC = '/bin/true'
class VirshBase(propcan.PropCanBase):
"""
Base Class storing libvirt Connection & state to a host
"""
__slots__ = ('uri', 'ignore_status', 'debug', 'virsh_exec', 'readonly')
def __init__(self, *args, **dargs):
"""
Initialize instance with virsh_exec always set to something
"""
init_dict = dict(*args, **dargs)
init_dict['virsh_exec'] = init_dict.get('virsh_exec', VIRSH_EXEC)
init_dict['uri'] = init_dict.get('uri', None)
init_dict['debug'] = init_dict.get('debug', False)
init_dict['ignore_status'] = init_dict.get('ignore_status', False)
init_dict['readonly'] = init_dict.get('readonly', False)
super(VirshBase, self).__init__(init_dict)
def get_uri(self):
"""
Accessor method for 'uri' property that must exist
"""
        # self.get() would call get_uri() recursively
try:
return self.__dict_get__('uri')
except KeyError:
return None
class VirshSession(aexpect.ShellSession):
"""
A virsh shell session, used with Virsh instances.
"""
# No way to get virsh sub-command "exit" status
# Check output against list of known error-status strings
    ERROR_REGEX_LIST = [r'error:\s*.+$', r'.*failed.*']
def __init__(self, virsh_exec=None, uri=None, a_id=None,
prompt=r"virsh\s*[\#\>]\s*", remote_ip=None,
remote_user=None, remote_pwd=None,
ssh_remote_auth=False, readonly=False,
unprivileged_user=None,
auto_close=False, check_libvirtd=True):
"""
Initialize virsh session server, or client if id set.
:param virsh_exec: path to virsh executable
:param uri: uri of libvirt instance to connect to
        :param a_id: ID of an already running server, if accessing a running
                     server, or None if starting a new one.
:param prompt: Regular expression describing the shell's prompt line.
:param remote_ip: Hostname/IP of remote system to ssh into (if any)
:param remote_user: Username to ssh in as (if any)
:param remote_pwd: Password to use, or None for host/pubkey
:param auto_close: Param to init ShellSession.
:param ssh_remote_auth: ssh to remote first.(VirshConnectBack).
Then execute virsh commands.
        Because VirshSession is designed for use by VirshPersistent, the
        default value of auto_close is False, and references to a
        VirshSession are managed manually in VirshPersistent with
        counter_increase and counter_decrease. If you really want to use it
        directly instead of through VirshPersistent, initialize it with
        auto_close=True so the session is closed in __del__, e.g.:
        * session = VirshSession(virsh.VIRSH_EXEC, auto_close=True)
"""
self.uri = uri
self.remote_ip = remote_ip
self.remote_user = remote_user
self.remote_pwd = remote_pwd
# Special handling if setting up a remote session
if ssh_remote_auth: # remote to remote
if remote_pwd:
pref_auth = "-o PreferredAuthentications=password"
else:
pref_auth = "-o PreferredAuthentications=hostbased,publickey"
# ssh_cmd is not None flags this as remote session
ssh_cmd = ("ssh -o UserKnownHostsFile=/dev/null %s -p %s %s@%s"
% (pref_auth, 22, self.remote_user, self.remote_ip))
if uri:
self.virsh_exec = ("%s \"%s -c '%s'\""
% (ssh_cmd, virsh_exec, self.uri))
else:
self.virsh_exec = ("%s \"%s\"" % (ssh_cmd, virsh_exec))
else: # setting up a local session or re-using a session
self.virsh_exec = virsh_exec
if self.uri:
self.virsh_exec += " -c '%s'" % self.uri
ssh_cmd = None # flags not-remote session
if readonly:
self.virsh_exec += " -r"
if unprivileged_user:
self.virsh_exec = "su - %s -c '%s'" % (unprivileged_user,
self.virsh_exec)
# aexpect tries to auto close session because no clients connected yet
aexpect.ShellSession.__init__(self, self.virsh_exec, a_id,
prompt=prompt, auto_close=auto_close)
# Handle remote session prompts:
# 1.remote to remote with ssh
# 2.local to remote with "virsh -c uri"
if ssh_remote_auth or self.uri:
# Handle ssh / password prompts
remote.handle_prompts(self, self.remote_user, self.remote_pwd,
prompt, debug=True)
# fail if libvirtd is not running
if check_libvirtd:
if self.cmd_status('list', timeout=60) != 0:
logging.debug("Persistent virsh session is not responding, "
"libvirtd may be dead.")
self.auto_close = True
raise aexpect.ShellStatusError(virsh_exec, 'list')
def cmd_status_output(self, cmd, timeout=60, internal_timeout=None,
print_func=None, safe=False):
"""
Send a virsh command and return its exit status and output.
:param cmd: virsh command to send (must not contain newline characters)
:param timeout: The duration (in seconds) to wait for the prompt to
return
:param internal_timeout: The timeout to pass to read_nonblocking
:param print_func: A function to be used to print the data being read
(should take a string parameter)
:param safe: Whether using safe mode when execute cmd.
In serial sessions, frequently the kernel might print debug or
error messages that make read_up_to_prompt to timeout. Let's
try to be a little more robust and send a carriage return, to
see if we can get to the prompt when safe=True.
:return: A tuple (status, output) where status is the exit status and
output is the output of cmd
:raise ShellTimeoutError: Raised if timeout expires
:raise ShellProcessTerminatedError: Raised if the shell process
terminates while waiting for output
:raise ShellStatusError: Raised if the exit status cannot be obtained
:raise ShellError: Raised if an unknown error occurs
"""
out = self.cmd_output(cmd, timeout, internal_timeout, print_func, safe)
for line in out.splitlines():
if self.match_patterns(line, self.ERROR_REGEX_LIST) is not None:
return 1, out
return 0, out
def cmd_result(self, cmd, ignore_status=False, debug=False, timeout=60):
"""Mimic process.run()"""
exit_status, stdout = self.cmd_status_output(cmd, timeout=timeout)
stderr = '' # no way to retrieve this separately
result = process.CmdResult(cmd, stdout, stderr, exit_status)
if not ignore_status and exit_status:
raise process.CmdError(cmd, result,
"Virsh Command returned non-zero exit status")
if debug:
logging.debug(result)
return result
def read_until_output_matches(self, patterns, filter_func=lambda x: x,
timeout=60, internal_timeout=None,
print_func=None, match_func=None):
"""
Read from child using read_nonblocking until a pattern matches.
Read using read_nonblocking until a match is found using match_patterns,
or until timeout expires. Before attempting to search for a match, the
data is filtered using the filter_func function provided.
:param patterns: List of strings (regular expression patterns)
:param filter_func: Function to apply to the data read from the child before
attempting to match it against the patterns (should take and
return a string)
:param timeout: The duration (in seconds) to wait until a match is
found
:param internal_timeout: The timeout to pass to read_nonblocking
:param print_func: A function to be used to print the data being read
(should take a string parameter)
:param match_func: Function to compare the output and patterns.
:return: Tuple containing the match index and the data read so far
:raise ExpectTimeoutError: Raised if timeout expires
:raise ExpectProcessTerminatedError: Raised if the child process
terminates while waiting for output
:raise ExpectError: Raised if an unknown error occurs
"""
if not match_func:
match_func = self.match_patterns
fd = self._get_fd("expect")
o = ""
end_time = time.time() + timeout
while True:
try:
r, w, x = select.select([fd], [], [],
max(0, end_time - time.time()))
except (select.error, TypeError):
break
if not r:
raise aexpect.ExpectTimeoutError(patterns, o)
# Read data from child
data = self.read_nonblocking(internal_timeout,
end_time - time.time())
if not data:
break
# Print it if necessary
if print_func:
for line in data.splitlines():
print_func(line)
# Look for patterns
o += data
out = ''
match = match_func(filter_func(o), patterns)
if match is not None:
output = o.splitlines()
# Find the second match in output reverse list, only return
# the content between the last match and the second last match.
# read_nonblocking might include output of last command or help
# info when session initiated,
# e.g.
                # When VirshPersistent initiates a virsh session, a list
                # command is sent in to test libvirtd status, and the first
                # command output will look like:
# Welcome to virsh, the virtualization interactive terminal.
#
# Type: 'help' for help with commands
# 'quit' to quit
#
# virsh # Id Name State
# ----------------------------------------------------
#
# virsh #
# the session help info is included, and the exact output
# should be the content start after first virsh # prompt.
# The list command did no harm here with help info included,
# but sometime other commands get list command output included,
# e.g.
# Running virsh command: net-list --all
# Sending command: net-list --all
# Id Name State
# ----------------------------------------------------
#
# virsh # Name State Autostart Persistent
# ----------------------------------------------------------
# default active yes yes
#
# virsh #
# The list command output is mixed in the net-list command
# output, this will fail to extract network name if use set
# number 2 in list of output splitlines like in function
# virsh.net_state_dict.
for i in reversed(list(range(len(output) - 1))):
if match_func(output[i].strip(), patterns) is not None:
if re.split(patterns[match], output[i])[-1]:
output[i] = re.split(patterns[match],
output[i])[-1]
output_slice = output[i:]
else:
output_slice = output[i + 1:]
for j in range(len(output_slice) - 1):
output_slice[j] = output_slice[j] + '\n'
for k in range(len(output_slice)):
out += output_slice[k]
return match, out
return match, o
# Check if the child has terminated
if utils_misc.wait_for(lambda: not self.is_alive(), 5, 0, 0.1):
raise aexpect.ExpectProcessTerminatedError(patterns,
self.get_status(), o)
else:
# This shouldn't happen
raise aexpect.ExpectError(patterns, o)
# Work around for inconsistent builtin closure local reference problem
# across different versions of python
class VirshClosure(object):
"""
Callable with weak ref. to override ``**dargs`` when calling reference_function
"""
def __init__(self, reference_function, dict_like_instance):
"""
Callable reference_function with weak ref dict_like_instance
"""
if not issubclass(dict_like_instance.__class__, dict):
raise ValueError("dict_like_instance %s must be dict or subclass"
% dict_like_instance.__class__.__name__)
self.reference_function = reference_function
self.dict_like_weakref = weakref.ref(dict_like_instance)
def __call__(self, *args, **dargs):
"""
Call reference_function with dict_like_instance augmented by **dargs
:param args: Passthrough to reference_function
:param dargs: Updates dict_like_instance copy before call
"""
new_dargs = self.dict_like_weakref()
if new_dargs is None:
new_dargs = {}
for key in list(new_dargs.keys()):
if key not in list(dargs.keys()):
dargs[key] = new_dargs[key]
return self.reference_function(*args, **dargs)
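# --- Illustrative sketch (not part of the module) ---------------------------
# Demonstrates how a VirshClosure fills in missing keyword arguments from its
# dict-like instance. The names below are hypothetical and are deleted again
# so the NOCLOSE/globals() machinery above is left untouched.
class _DemoState(dict):
    pass

def _demo_func(**dargs):
    return dargs.get('uri'), dargs.get('debug', False)

_demo_state = _DemoState(uri='qemu:///system')
_demo_closure = VirshClosure(_demo_func, _demo_state)
assert _demo_closure(debug=True) == ('qemu:///system', True)
del _DemoState, _demo_func, _demo_state, _demo_closure
# -----------------------------------------------------------------------------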
class Virsh(VirshBase):
"""
Execute libvirt operations, using a new virsh shell each time.
"""
__slots__ = []
def __init__(self, *args, **dargs):
"""
Initialize Virsh instance with persistent options
:param args: Initial property keys/values
:param dargs: Initial property keys/values
"""
super(Virsh, self).__init__(*args, **dargs)
# Define the instance callables from the contents of this module
# to avoid using class methods and hand-written aliases
for sym, ref in list(globals().items()):
if sym not in NOCLOSE and callable(ref):
# Adding methods, not properties, so avoid special __slots__
# handling. __getattribute__ will still find these.
self.__super_set__(sym, VirshClosure(ref, self))
class VirshPersistent(Virsh):
"""
Execute libvirt operations using persistent virsh session.
"""
__slots__ = ('session_id', 'remote_pwd', 'remote_user', 'uri',
'remote_ip', 'ssh_remote_auth', 'unprivileged_user',
'readonly')
# B/c the auto_close of VirshSession is False, we
# need to manage the ref-count of it manually.
COUNTERS = {}
def __init__(self, *args, **dargs):
super(VirshPersistent, self).__init__(*args, **dargs)
if self.get('session_id') is None:
# set_uri does not call when INITIALIZED = False
# and no session_id passed to super __init__
self.new_session()
# increase the counter of session_id in COUNTERS.
self.counter_increase()
def __del__(self):
"""
Clean up any leftover sessions
"""
self.close_session()
def counter_increase(self):
"""
Method to increase the counter to self.a_id in COUNTERS.
"""
session_id = self.__dict_get__("session_id")
try:
counter = self.__class__.COUNTERS[session_id]
except KeyError as e:
VirshPersistent.COUNTERS[session_id] = 1
return
# increase the counter of session_id.
VirshPersistent.COUNTERS[session_id] += 1
def counter_decrease(self):
"""
Method to decrease the counter to self.a_id in COUNTERS.
If the counter is less than 1, it means there is no more
VirshSession instance referring to the session. So close
this session, and return True.
Else, decrease the counter in COUNTERS and return False.
"""
session_id = self.__dict_get__("session_id")
self.__class__.COUNTERS[session_id] -= 1
counter = self.__class__.COUNTERS[session_id]
if counter <= 0:
# The last reference to this session. Closing it.
session = VirshSession(a_id=session_id)
# try nicely first
session.close()
if session.is_alive():
# Be mean, in case it's hung
session.close(sig=signal.SIGTERM)
del self.__class__.COUNTERS[session_id]
return True
else:
return False
def close_session(self):
"""
If a persistent session exists, close it down.
"""
try:
session_id = self.__dict_get__('session_id')
if session_id:
try:
existing = VirshSession(a_id=session_id)
if existing.is_alive():
self.counter_decrease()
except (aexpect.ShellStatusError,
aexpect.ShellProcessTerminatedError):
# session was already closed
pass # don't check is_alive or update counter
self.__dict_del__("session_id")
except KeyError:
# Allow other exceptions to be raised
pass # session was closed already
def new_session(self):
"""
Open new session, closing any existing
"""
# Accessors may call this method, avoid recursion
# Must exist, can't be None
virsh_exec = self.__dict_get__('virsh_exec')
uri = self.__dict_get__('uri') # Must exist, can be None
readonly = self.__dict_get__('readonly')
try:
remote_user = self.__dict_get__('remote_user')
except KeyError:
remote_user = "root"
try:
remote_pwd = self.__dict_get__('remote_pwd')
except KeyError:
remote_pwd = None
try:
remote_ip = self.__dict_get__('remote_ip')
except KeyError:
remote_ip = None
try:
ssh_remote_auth = self.__dict_get__('ssh_remote_auth')
except KeyError:
ssh_remote_auth = False
try:
unprivileged_user = self.__dict_get__('unprivileged_user')
except KeyError:
unprivileged_user = None
self.close_session()
# Always create new session
new_session = VirshSession(virsh_exec, uri, a_id=None,
remote_ip=remote_ip,
remote_user=remote_user,
remote_pwd=remote_pwd,
ssh_remote_auth=ssh_remote_auth,
unprivileged_user=unprivileged_user,
readonly=readonly)
session_id = new_session.get_id()
self.__dict_set__('session_id', session_id)
def set_uri(self, uri):
"""
Accessor method for 'uri' property, create new session on change
"""
if not self.INITIALIZED:
# Allow __init__ to call new_session
self.__dict_set__('uri', uri)
else:
# If the uri is changing
if self.__dict_get__('uri') != uri:
self.__dict_set__('uri', uri)
self.new_session()
# otherwise do nothing
class VirshConnectBack(VirshPersistent):
"""
Persistent virsh session connected back from a remote host
"""
__slots__ = ('remote_ip', )
def new_session(self):
"""
Open new remote session, closing any existing
"""
# Accessors may call this method, avoid recursion
# Must exist, can't be None
virsh_exec = self.__dict_get__('virsh_exec')
uri = self.__dict_get__('uri') # Must exist, can be None
remote_ip = self.__dict_get__('remote_ip')
try:
remote_user = self.__dict_get__('remote_user')
except KeyError:
remote_user = 'root'
try:
remote_pwd = self.__dict_get__('remote_pwd')
except KeyError:
remote_pwd = None
super(VirshConnectBack, self).close_session()
new_session = VirshSession(virsh_exec, uri, a_id=None,
remote_ip=remote_ip,
remote_user=remote_user,
remote_pwd=remote_pwd,
ssh_remote_auth=True)
session_id = new_session.get_id()
self.__dict_set__('session_id', session_id)
@staticmethod
def kosher_args(remote_ip, uri):
"""
Convenience static method to help validate argument sanity before use
:param remote_ip: ip/hostname of remote libvirt helper-system
:param uri: fully qualified libvirt uri of local system, from remote.
:return: True/False if checks pass or not
"""
if remote_ip is None or uri is None:
return False
all_false = [
# remote_ip checks
bool(remote_ip.count("EXAMPLE.COM")),
bool(remote_ip.count("localhost")),
bool(remote_ip.count("127.")),
# uri checks
uri is None,
uri == "",
bool(uri.count("default")),
bool(uri.count(':///')),
bool(uri.count("localhost")),
bool(uri.count("127."))
]
return True not in all_false
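# --- Illustrative sketch (not part of the module) ---------------------------
# kosher_args() above rejects placeholder and loop-back values before a
# connect-back session is attempted; the addresses here are example values.
assert VirshConnectBack.kosher_args("192.168.122.10",
                                    "qemu+ssh://192.168.122.1/system")
assert not VirshConnectBack.kosher_args("localhost", "qemu:///system")
# -----------------------------------------------------------------------------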
# virsh module functions follow (See module docstring for API) #####
def command(cmd, **dargs):
"""
Interface to cmd function as 'cmd' symbol is polluted.
:param cmd: Command line to append to virsh command
:param dargs: standardized virsh function API keywords
:return: CmdResult object
:raise: CmdError if non-zero exit status and ignore_status=False
"""
virsh_exec = dargs.get('virsh_exec', VIRSH_EXEC)
uri = dargs.get('uri', None)
virsh_opt = dargs.get('virsh_opt', '')
debug = dargs.get('debug', False)
# Caller deals with errors
ignore_status = dargs.get('ignore_status', True)
session_id = dargs.get('session_id', None)
readonly = dargs.get('readonly', False)
quiet = dargs.get('quiet', False)
unprivileged_user = dargs.get('unprivileged_user', None)
timeout = dargs.get('timeout', None)
allow_output_check = dargs.get('allow_output_check', None)
# Check if this is a VirshPersistent method call
if session_id:
# Retrieve existing session
session = VirshSession(a_id=session_id)
else:
session = None
if debug:
logging.debug("Running virsh command: %s", cmd)
if timeout:
try:
timeout = int(timeout)
except ValueError:
logging.error("Ignore the invalid timeout value: %s", timeout)
timeout = None
if session:
        # Utilize persistent virsh session, not suitable for readonly mode
if readonly:
logging.debug("Ignore readonly flag for this virsh session")
if timeout is None:
timeout = 60
ret = session.cmd_result(cmd, ignore_status=ignore_status,
debug=debug, timeout=timeout)
# Mark return value with session it came from
ret.from_session_id = session_id
else:
# Normal call to run virsh command
# Readonly mode
if readonly:
cmd = " -r " + cmd
if quiet:
cmd = " -q " + cmd
if uri:
# uri argument IS being used
uri_arg = " -c '%s' " % uri
else:
uri_arg = " " # No uri argument being used
cmd = "%s%s%s%s" % (virsh_exec, virsh_opt, uri_arg, cmd)
if unprivileged_user:
# Run cmd as unprivileged user
cmd = "su - %s -c '%s'" % (unprivileged_user, cmd)
# Raise exception if ignore_status is False
ret = process.run(cmd, timeout=timeout, verbose=debug,
ignore_status=ignore_status,
allow_output_check=allow_output_check,
shell=True)
# Mark return as not coming from persistent virsh session
ret.from_session_id = None
# Always log debug info, if persistent session or not
if debug:
logging.debug("status: %s", ret.exit_status)
logging.debug("stdout: %s", ret.stdout_text.strip())
logging.debug("stderr: %s", ret.stderr_text.strip())
# Return CmdResult instance when ignore_status is True
return ret
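# --- Illustrative sketch (not part of the module) ---------------------------
# The standardized dargs keywords accepted by command() above. The helper is
# never called here, the uri is a placeholder, and actually running it needs
# a reachable libvirt host; it is deleted again so the Virsh class machinery
# above does not pick it up as a method.
def _example_command_usage():
    result = command("list --all", uri="qemu:///system",
                     debug=True, ignore_status=True)
    return result.exit_status, result.stdout_text
del _example_command_usage
# -----------------------------------------------------------------------------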
def domname(dom_id_or_uuid, **dargs):
"""
Convert a domain id or UUID to domain name
:param dom_id_or_uuid: a domain id or UUID.
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("domname --domain %s" % dom_id_or_uuid, **dargs)
def qemu_monitor_command(name, cmd, options="", **dargs):
"""
This helps to execute the qemu monitor command through virsh command.
:param name: Name of monitor domain
:param cmd: monitor command to execute
:param options: extra options
:param dargs: standardized virsh function API keywords
"""
cmd_str = "qemu-monitor-command %s %s --cmd \'%s\'" % (name, options, cmd)
return command(cmd_str, **dargs)
def qemu_agent_command(name, cmd, options="", **dargs):
"""
This helps to execute the qemu agent command through virsh command.
:param name: Name of monitor domain
:param cmd: agent command to execute
:param options: extra options
:param dargs: standardized virsh function API keywords
"""
cmd_str = "qemu-agent-command %s %s --cmd \'%s\'" % (name, options, cmd)
return command(cmd_str, **dargs)
def qemu_attach(pid, extra="", **dargs):
"""
This helps to execute the qemu-attach command through virsh command.
:param pid: pid of qemu process
:param extra: extra options
:param dargs: standardized virsh function API keywords
"""
cmd_str = "qemu-attach --pid %s %s" % (pid, extra)
return command(cmd_str, **dargs)
def setvcpus(name, count, extra="", **dargs):
"""
Change the number of virtual CPUs in the guest domain.
:param name: name of vm to affect
:param count: value for vcpu parameter
    :param extra: any extra command options.
:param dargs: standardized virsh function API keywords
:return: CmdResult object from command
"""
cmd = "setvcpus %s %s %s" % (name, count, extra)
return command(cmd, **dargs)
def setvcpu(name, cpulist, extra="", **dargs):
"""
attach/detach vcpu or groups of threads
:param name: name of vm to affect
:param cpulist: group of vcpu numbers
    :param extra: any extra command options.
:param dargs: standardized virsh function API keywords
:return: CmdResult object from command
"""
cmd = "setvcpu %s %s %s" % (name, cpulist, extra)
return command(cmd, **dargs)
def guestvcpus(name, cpu_list=None, options=None, **dargs):
"""
Query or modify state of vcpu in the guest (via agent)
:param name: name of domain
:param cpu_list: list of cpus to enable or disable
:param options: --enable, --disable
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "guestvcpus --domain %s" % name
if cpu_list:
cmd += " --cpulist %s" % cpu_list
if options:
cmd += " %s" % options
return command(cmd, **dargs)
def vcpupin(name, vcpu=None, cpu_list=None, options=None, **dargs):
"""
Changes the cpu affinity for respective vcpu.
:param name: name of domain
:param vcpu: virtual CPU to modify
:param cpu_list: physical CPU specification (string)
:param dargs: standardized virsh function API keywords
:param options: --live, --current or --config.
:return: CmdResult object.
"""
cmd = "vcpupin --domain %s" % name
if vcpu is not None:
cmd += " --vcpu %s" % vcpu
if cpu_list is not None:
cmd += " --cpulist %s" % cpu_list
if options is not None:
cmd += " %s" % options
return command(cmd, **dargs)
def vcpuinfo(name, **dargs):
"""
:param name: name of domain
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("vcpuinfo %s" % name, **dargs)
def freecell(cellno=None, options="", **dargs):
"""
Prints the available amount of memory on the machine or within a NUMA cell.
:param cellno: number of cell to show.
:param options: extra argument string to pass to command
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "freecell "
if cellno:
cmd = "%s --cellno %s " % (cmd, cellno)
cmd = "%s %s" % (cmd, options)
return command(cmd, **dargs)
def nodeinfo(extra="", **dargs):
"""
    Returns basic information about the node, like number and type of CPU,
and size of the physical memory.
:param extra: extra argument string to pass to command
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd_nodeinfo = "nodeinfo %s" % extra
return command(cmd_nodeinfo, **dargs)
def nodecpumap(extra="", **dargs):
"""
Displays the node's total number of CPUs, the number of online
CPUs and the list of online CPUs.
:param extra: extra argument string to pass to command
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "nodecpumap %s" % extra
return command(cmd, **dargs)
def nodesuspend(target, duration, extra='', **dargs):
"""
Suspend the host node for a given time duration.
:param target: Suspend target mem/disk/hybrid.
mem(Suspend-to-RAM)
disk(Suspend-to-Disk)
hybrid(Hybrid-Suspend)
:param duration: Suspend duration in seconds, at least 60.
:param extra: extra argument string to pass to command
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "nodesuspend %s %s" % (target, duration)
if extra:
cmd += " %s" % extra
return command(cmd, **dargs)
def canonical_uri(option='', **dargs):
"""
Return the hypervisor canonical URI.
:param option: additional option string to pass
:param dargs: standardized virsh function API keywords
:return: standard output from command
"""
result = command("uri %s" % option, **dargs)
return result.stdout_text.strip()
def hostname(option='', **dargs):
"""
Return the hypervisor hostname.
:param option: additional option string to pass
:param dargs: standardized virsh function API keywords
:return: standard output from command
"""
result = command("hostname %s" % option, **dargs)
return result.stdout_text.strip()
def version(option='', **dargs):
"""
Return the major version info about what this built from.
:param option: additional option string to pass
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("version %s" % option, **dargs)
def maxvcpus(option='', **dargs):
"""
Return the connection vcpu maximum number.
:param: option: additional option string to pass
:param: dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "maxvcpus %s" % option
return command(cmd, **dargs)
def dom_list(options="", **dargs):
"""
Return the list of domains.
:param options: options to pass to list command
:return: CmdResult object
"""
return command("list %s" % options, **dargs)
def reboot(name, options="", **dargs):
"""
Run a reboot command in the target domain.
:param name: Name of domain.
:param options: options to pass to reboot command
:return: CmdResult object
"""
return command("reboot --domain %s %s" % (name, options), **dargs)
def managedsave(name, options="", **dargs):
"""
Managed save of a domain state.
:param name: Name of domain to save
:param options: options to pass to list command
:return: CmdResult object
"""
return command("managedsave --domain %s %s" % (name, options), **dargs)
def managedsave_remove(name, **dargs):
"""
Remove managed save of a domain
:param name: name of managed-saved domain to remove
:return: CmdResult object
"""
return command("managedsave-remove --domain %s" % name, **dargs)
def managedsave_dumpxml(name, options="", **dargs):
"""
Dump XML of domain information for a managed save state file.
:param name: Name of domain to dump
:param options: options to pass to list command
:return: CmdResult object
"""
return command("managedsave-dumpxml --domain %s %s" % (name, options), **dargs)
def managedsave_edit(name, options="", **dargs):
"""
Edit the domain XML associated with the managed save state file.
:param name: Name of domain to edit
:param options: options to pass to list command
:return: CmdResult object
"""
return command("managedsave-edit --domain %s %s" % (name, options), **dargs)
def managedsave_define(name, xml_path, options="", **dargs):
"""
Replace the domain XML associated with a managed save state file.
:param name: Name of domain to define
:param xml_path: Path of xml file to be defined
:param options: options to pass to list command
:return: CmdResult object
"""
return command("managedsave-define --domain %s %s %s" % (name, xml_path, options), **dargs)
def driver(**dargs):
"""
Return the driver by asking libvirt
:param dargs: standardized virsh function API keywords
:return: VM driver name
"""
    # libvirt scheme is composed of driver[+transport]
# ref: http://libvirt.org/uri.html
scheme = urllib.parse.urlsplit(canonical_uri(**dargs))[0]
# extract just the driver, whether or not there is a '+'
return scheme.split('+', 2)[0]
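# --- Illustrative sketch (not part of the module) ---------------------------
# How driver() above reduces a libvirt URI to the bare driver name; purely
# local string handling, and the URI is an example value.
_example_scheme = urllib.parse.urlsplit("qemu+ssh://root@remote.example/system")[0]
assert _example_scheme.split('+', 2)[0] == "qemu"
del _example_scheme
# -----------------------------------------------------------------------------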
def domstate(name, extra="", **dargs):
"""
Return the state about a running domain.
:param name: VM name
:param extra: command options
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("domstate %s %s" % (name, extra), **dargs)
def domid(name_or_uuid, **dargs):
"""
Return VM's ID.
:param name_or_uuid: VM name or uuid
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
return command("domid %s" % (name_or_uuid), **dargs)
def dominfo(name, **dargs):
"""
Return the VM information.
:param name: VM's name or id,uuid.
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
return command("dominfo %s" % (name), **dargs)
def domfsinfo(name, **dargs):
"""
    Return information about the domain's mounted filesystems
:param name: VM's name or uuid.
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
return command("domfsinfo %s" % (name), **dargs)
def domuuid(name_or_id, **dargs):
"""
Return the Converted domain name or id to the domain UUID.
:param name_or_id: VM name or id
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
return command("domuuid %s" % name_or_id, **dargs)
def screenshot(name, filename, **dargs):
"""
Capture a screenshot of VM's console and store it in file on host
:param name: VM name
:param filename: name of host file
:param dargs: standardized virsh function API keywords
:return: filename
"""
# Don't take screenshots of shut-off domains
if is_dead(name, **dargs):
return None
global SCREENSHOT_ERROR_COUNT
dargs['ignore_status'] = False
try:
command("screenshot %s %s" % (name, filename), **dargs)
except process.CmdError as detail:
if SCREENSHOT_ERROR_COUNT < 1:
logging.error("Error taking VM %s screenshot. You might have to "
"set take_regular_screendumps=no on your "
"tests.cfg config file \n%s. This will be the "
"only logged error message.", name, detail)
SCREENSHOT_ERROR_COUNT += 1
return filename
def screenshot_test(name, filename="", options="", **dargs):
"""
Capture a screenshot of VM's console and store it in file on host
:param name: VM name or id
:param filename: name of host file
:param options: command options
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
return command("screenshot %s %s %s" % (name, filename, options), **dargs)
def domblkstat(name, device, option, **dargs):
"""
    Get device block stats for a running domain.
:param name: VM's name.
:param device: VM's device.
:param option: command domblkstat option.
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
return command("domblkstat %s %s %s" % (name, device, option), **dargs)
def domblkthreshold(name, device, threshold, option="", **dargs):
"""
    Set the threshold for the block-threshold event for a given block device or its backing chain element.
:param name: VM's name.
:param device: VM's device.
:param threshold: threshold value with unit such as 100M.
:param option: command domblkthreshold option.
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
return command("domblkthreshold %s %s %s %s" % (name, device, threshold, option), **dargs)
def dumpxml(name, extra="", to_file="", **dargs):
"""
Return the domain information as an XML dump.
:param name: VM name
:param to_file: optional file to write XML output to
:param dargs: standardized virsh function API keywords
:return: CmdResult object.
"""
cmd = "dumpxml %s %s" % (name, extra)
result = command(cmd, **dargs)
if to_file:
result_file = open(to_file, 'w')
result_file.write(result.stdout_text.strip())
result_file.close()
return result
def domifstat(name, interface, **dargs):
"""
Get network interface stats for a running domain.
:param name: Name of domain
:param interface: interface device
:return: CmdResult object
"""
return command("domifstat %s %s" % (name, interface), **dargs)
def domjobinfo(name, **dargs):
"""
Get domain job information.
:param name: VM name
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
return command("domjobinfo %s" % name, **dargs)
def edit(options, **dargs):
"""
Edit the XML configuration for a domain.
:param options: virsh edit options string.
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("edit %s" % options, **dargs)
def dompmsuspend(name, target, duration=0, **dargs):
"""
Suspends a running domain using guest OS's power management.
:param name: VM name
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "dompmsuspend %s %s --duration %s" % (name, target, duration)
return command(cmd, **dargs)
def dompmwakeup(name, **dargs):
"""
Wakeup a domain that was previously suspended by power management.
:param name: VM name
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("dompmwakeup %s" % name, **dargs)
def domjobabort(name, **dargs):
"""
Aborts the currently running domain job.
:param name: VM's name, id or uuid.
:param dargs: standardized virsh function API keywords
:return: result from command
"""
return command("domjobabort %s" % name, **dargs)
def domxml_from_native(info_format, native_file, options=None, **dargs):
"""
Convert native guest configuration format to domain XML format.
    :param info_format: native guest configuration format, for example: qemu-argv.
    :param native_file: native configuration file.
    :param options: extra parameters.
:param dargs: standardized virsh function API keywords.
:return: result from command
"""
cmd = "domxml-from-native %s %s %s" % (info_format, native_file, options)
return command(cmd, **dargs)
def domxml_to_native(info_format, name, options, **dargs):
"""
Convert existing domain or its XML config to a native guest configuration format.
:param info_format:The command's options. For example: `qemu-argv`.
:param name: XML file or domain name/UUID.
:param options: --xml or --domain
:param dargs: standardized virsh function API keywords
:return: result from command
"""
cmd = "domxml-to-native %s %s %s" % (info_format, options, name)
return command(cmd, **dargs)
def vncdisplay(name, **dargs):
"""
Output the IP address and port number for the VNC display.
:param name: VM's name or id,uuid.
:param dargs: standardized virsh function API keywords.
:return: result from command
"""
return command("vncdisplay %s" % name, **dargs)
def is_alive(name, **dargs):
"""
Return True if the domain is started/alive.
:param name: VM name
:param dargs: standardized virsh function API keywords
:return: True operation was successful
"""
return not is_dead(name, **dargs)
def is_dead(name, **dargs):
"""
Return True if the domain is undefined or not started/dead.
:param name: VM name
:param dargs: standardized virsh function API keywords
:return: True operation was successful
"""
dargs['ignore_status'] = False
try:
state = domstate(name, **dargs).stdout_text.strip()
except process.CmdError:
return True
if state not in ('running', 'idle', 'paused', 'in shutdown', 'shut off',
'crashed', 'pmsuspended', 'no state'):
logging.debug("State '%s' not known", state)
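# Of the known states, only these indicate the domain is not running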
if state in ('shut off', 'crashed', 'no state'):
return True
return False
def suspend(name, **dargs):
"""
True on successful suspend of VM - kept in memory and not scheduled.
:param name: VM name
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("suspend %s" % (name), **dargs)
def resume(name, **dargs):
"""
True on successful moving domain out of suspend
:param name: VM name
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("resume %s" % (name), **dargs)
def dommemstat(name, extra="", **dargs):
"""
Get memory statistics for a running domain.
:param name: VM name
:param extra: extra options to pass to command
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
return command("dommemstat %s %s" % (name, extra), **dargs)
def dump(name, path, option="", **dargs):
"""
Dump the core of a domain to a file for analysis.
:param name: VM name
:param path: absolute path to state file
:param option: command's option.
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
return command("dump %s %s %s" % (name, path, option), **dargs)
def save(name, path, options="", **dargs):
"""
Store state of VM into named file.
:param name: VM's name, id or uuid.
:param path: absolute path to state file
:param options: command's options.
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
return command("save %s %s %s" % (name, path, options), **dargs)
def restore(path, options="", **dargs):
"""
Load state of VM from named file and remove file.
:param path: absolute path to state file.
:param options: options for virsh restore.
:param dargs: standardized virsh function API keywords
"""
return command("restore %s %s" % (path, options), **dargs)
def start(name, options="", **dargs):
"""
True on successful start of (previously defined) inactive domain.
:param name: VM name
:param dargs: standardized virsh function API keywords
:return: CmdResult object.
"""
return command("start %s %s" % (name, options), **dargs)
def shutdown(name, options="", **dargs):
"""
True on successful domain shutdown.
:param name: VM name
:param options: options for virsh shutdown.
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("shutdown %s %s" % (name, options), **dargs)
def destroy(name, options="", **dargs):
"""
True on successful domain destruction
:param name: VM name
:param options: options for virsh destroy
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("destroy %s %s" % (name, options), **dargs)
def define(xml_path, options=None, **dargs):
"""
Return cmd result of domain define.
:param xml_path: XML file path
:param options: options for virsh define
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "define --file %s" % xml_path
if options is not None:
cmd += " %s" % options
logging.debug("Define VM from %s", xml_path)
return command(cmd, **dargs)
def undefine(name, options=None, **dargs):
"""
Return cmd result of domain undefine (after shutdown/destroy).
:param name: VM name
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "undefine %s" % name
if options is not None:
cmd += " %s" % options
logging.debug("Undefine VM %s", name)
return command(cmd, **dargs)
def remove_domain(name, options=None, **dargs):
"""
Return True after forcefully removing a domain if it exists.
:param name: VM name
:param dargs: standardized virsh function API keywords
:return: True if the operation was successful
"""
if domain_exists(name, **dargs):
if is_alive(name, **dargs):
destroy(name, **dargs)
try:
dargs['ignore_status'] = False
undefine(name, options, **dargs)
except process.CmdError as detail:
logging.error("Undefine VM %s failed:\n%s", name, detail)
return False
return True
def domain_exists(name, **dargs):
"""
Return True if a domain exists.
:param name: VM name
:param dargs: standardized virsh function API keywords
:return: True if the domain exists
"""
dargs['ignore_status'] = False
try:
command("domstate %s" % name, **dargs)
return True
except process.CmdError as detail:
logging.warning("VM %s does not exist", name)
if dargs.get('debug', False):
logging.warning(str(detail))
return False
def migrate_postcopy(name, **dargs):
"""
Trigger postcopy migration
:param name: VM name
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "migrate-postcopy %s" % name
return command(cmd, **dargs)
def migrate(name="", dest_uri="", option="", extra="", **dargs):
"""
Migrate a guest to another host.
:param name: name of guest on uri.
:param dest_uri: libvirt uri to send guest to
:param option: Free-form string of options to virsh migrate
:param extra: Free-form string of options to follow <domain> <desturi>
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "migrate"
if option:
cmd += " %s" % option
if name:
cmd += " --domain %s" % name
if dest_uri:
cmd += " --desturi %s" % dest_uri
if extra:
cmd += " %s" % extra
return command(cmd, **dargs)
def migrate_setspeed(domain, bandwidth, extra=None, **dargs):
"""
Set the maximum migration bandwidth (in MiB/s) for
a domain which is being migrated to another host.
:param domain: name/uuid/id of guest
:param bandwidth: migration bandwidth limit in MiB/s
:param dargs: standardized virsh function API keywords
"""
cmd = "migrate-setspeed %s %s" % (domain, bandwidth)
if extra is not None:
cmd += " %s" % extra
return command(cmd, **dargs)
def migrate_getspeed(domain, extra="", **dargs):
"""
Get the maximum migration bandwidth (in MiB/s) for
a domain.
:param domain: name/uuid/id of guest
:param extra: extra options to migrate-getspeed
:param dargs: standardized virsh function API keywords
:return: standard output from command
"""
cmd = "migrate-getspeed %s" % domain
if extra:
cmd += " %s" % extra
return command(cmd, **dargs)
def migrate_setmaxdowntime(domain, downtime, extra=None, **dargs):
"""
Set maximum tolerable downtime of a domain (in ms)
which is being live-migrated to another host.
:param domain: name/uuid/id of guest
:param downtime: downtime number of live migration
"""
cmd = "migrate-setmaxdowntime %s %s" % (domain, downtime)
if extra is not None:
cmd += " %s" % extra
return command(cmd, **dargs)
def migrate_getmaxdowntime(domain, **dargs):
"""
Get maximum tolerable downtime of a domain.
:param domain: name/uuid/id of guest
:param dargs: standardized virsh function API keywords
:return: standard output from command
"""
cmd = "migrate-getmaxdowntime %s" % domain
return command(cmd, **dargs)
def migrate_compcache(domain, size=None, **dargs):
"""
Get/set compression cache size for migration.
:param domain: name/uuid/id of guest
:param size: compression cache size to be set.
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = 'migrate-compcache %s' % domain
if size is not None:
cmd += ' --size %s' % size
return command(cmd, **dargs)
def _adu_device(action, domainarg=None, filearg=None,
domain_opt=None, file_opt=None,
flagstr=None, **dargs):
"""
Private helper for attach, detach, update device commands
"""
# N/B: Parameter order is significant: RH BZ 1018369
cmd = action
if domain_opt is not None:
cmd += " --domain %s" % domain_opt
if domainarg is not None:
cmd += " %s" % domainarg
if file_opt is not None:
cmd += " --file %s" % file_opt
if filearg is not None:
cmd += " %s" % filearg
if flagstr is not None:
cmd += " %s" % flagstr
return command(cmd, **dargs)
def attach_device(domainarg=None, filearg=None,
domain_opt=None, file_opt=None,
flagstr=None, **dargs):
"""
Attach a device using full parameter/argument set.
:param domainarg: Domain name (first pos. parameter)
:param filearg: File name (second pos. parameter)
:param domain_opt: Option to --domain parameter
:param file_opt: Option to --file parameter
:param flagstr: string of "--force, --persistent, etc."
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
return _adu_device("attach-device", domainarg=domainarg, filearg=filearg,
domain_opt=domain_opt, file_opt=file_opt,
flagstr=flagstr, **dargs)
def detach_device(domainarg=None, filearg=None,
domain_opt=None, file_opt=None,
flagstr=None, wait_remove_event=False, event_timeout=7, **dargs):
"""
Detach a device using full parameter/argument set.
:param domainarg: Domain name (first pos. parameter)
:param filearg: File name (second pos. parameter)
:param domain_opt: Option to --domain parameter
:param file_opt: Option to --file parameter
:param flagstr: string of "--force, --persistent, etc."
:param wait_remove_event: wait until device_remove event comes
:param event_timeout: timeout for virsh event command
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
detach_cmd_rv = _adu_device("detach-device", domainarg=domainarg, filearg=filearg,
domain_opt=domain_opt, file_opt=file_opt,
flagstr=flagstr, **dargs)
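# Optionally block until libvirt reports the device-removed event for the domain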
if wait_remove_event:
event(domain=domainarg, event='device-removed', event_timeout=event_timeout, **dargs)
return detach_cmd_rv
def update_device(domainarg=None, filearg=None,
domain_opt=None, file_opt=None,
flagstr="", **dargs):
"""
Update device from an XML <file>.
:param domainarg: Domain name (first pos. parameter)
:param filearg: File name (second pos. parameter)
:param domain_opt: Option to --domain parameter
:param file_opt: Option to --file parameter
:param flagstr: string of "--force, --persistent, etc."
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
return _adu_device("update-device", domainarg=domainarg, filearg=filearg,
domain_opt=domain_opt, file_opt=file_opt,
flagstr=flagstr, **dargs)
def attach_disk(name, source, target, extra="", **dargs):
"""
Attach a disk to VM.
:param name: name of guest
:param source: source of disk device
:param target: target of disk device
:param extra: additional arguments to command
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "attach-disk --domain %s --source %s --target %s %s"\
% (name, source, target, extra)
return command(cmd, **dargs)
def detach_disk(name, target, extra="", wait_remove_event=False, event_timeout=7, **dargs):
"""
Detach a disk from VM.
:param name: name of guest
:param target: target of disk device
:param wait_remove_event: wait until device_remove event comes
:param event_timeout: timeout for virsh event command
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
detach_cmd = "detach-disk --domain %s --target %s %s" % (name, target, extra)
detach_cmd_rv = command(detach_cmd, **dargs)
if wait_remove_event:
event(domain=name, event='device-removed', event_timeout=event_timeout, **dargs)
return detach_cmd_rv
def detach_device_alias(name, alias, extra="", wait_remove_event=False, event_timeout=7, **dargs):
"""
Detach a device with alias
:param name: name of guest
:param alias: alias of device
:param extra: additional arguments to command
:param wait_remove_event: wait until device_remove event comes
:param event_timeout: timeout for virsh event command
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
detach_cmd = "detach-device-alias --domain %s --alias %s %s" % (name, alias, extra)
detach_cmd_rv = command(detach_cmd, **dargs)
if wait_remove_event:
event(domain=name, event='device-removed', event_timeout=event_timeout, **dargs)
return detach_cmd_rv
def attach_interface(name, option="", **dargs):
"""
Attach a NIC to VM.
:param name: name of guest
:param option: options to pass to command
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "attach-interface "
if name:
cmd += "--domain %s" % name
if option:
cmd += " %s" % option
return command(cmd, **dargs)
def detach_interface(name, option="", wait_remove_event=False, event_timeout=7, **dargs):
"""
Detach a NIC from VM.
:param name: name of guest
:param option: options to pass to command
:param wait_remove_event: wait until device_remove event comes
:param event_timeout: timeout for virsh event command
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
detach_cmd = "detach-interface --domain %s %s" % (name, option)
detach_cmd_rv = command(detach_cmd, **dargs)
if wait_remove_event:
event(domain=name, event='device-removed', event_timeout=event_timeout, **dargs)
return detach_cmd_rv
def net_dumpxml(name, extra="", to_file="", **dargs):
"""
Dump XML from network named param name.
:param name: Name of a network
:param extra: Extra parameters to pass to command
:param to_file: Send result to a file
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "net-dumpxml %s %s" % (name, extra)
result = command(cmd, **dargs)
if to_file:
result_file = open(to_file, 'w')
result_file.write(result.stdout_text.strip())
result_file.close()
return result
def net_create(xml_file, extra="", **dargs):
"""
Create a _transient_ network from an XML file.
:param xml_file: xml defining network
:param extra: extra parameters to pass to command
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("net-create %s %s" % (xml_file, extra), **dargs)
def net_define(xml_file, extra="", **dargs):
"""
Define a network from an XML file, without starting it
:param xml_file: xml defining network
:param extra: extra parameters to pass to command
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("net-define %s %s" % (xml_file, extra), **dargs)
def net_list(options, extra="", **dargs):
"""
List networks on host.
:param options: options to pass to command
:param extra: extra parameters to pass to command
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("net-list %s %s" % (options, extra), **dargs)
def net_state_dict(only_names=False, virsh_instance=None, **dargs):
"""
Return network name to state/autostart/persistent mapping
:param only_names: When true, return network names as keys and None values
:param virsh_instance: Call net_list() on this instance instead of module
:param dargs: standardized virsh function API keywords
:return: dictionary
"""
# Using multiple virsh commands in different ways
dargs['ignore_status'] = False # force problem detection
if virsh_instance is not None:
net_list_result = virsh_instance.net_list("--all", **dargs)
else:
net_list_result = net_list("--all", **dargs)
# If command failed, exception would be raised here
netlist = net_list_result.stdout_text.strip().splitlines()
# First two lines contain table header followed by entries
# for each network on the host, such as:
#
# Name State Autostart Persistent
# ----------------------------------------------------------
# default active yes yes
#
# TODO: Double-check first-two lines really are header
netlist = netlist[2:]
result = {}
for line in netlist:
# Split on whitespace into at most 4 columns
linesplit = line.split(None, 3)
name = linesplit[0]
# Several callers in libvirt_xml only require defined names
if only_names:
result[name] = None
continue
# Keep search fast & avoid first-letter capital problems
active = not bool(linesplit[1].count("nactive"))
autostart = bool(linesplit[2].count("es"))
if len(linesplit) == 4:
persistent = bool(linesplit[3].count("es"))
else:
# There is no representation of persistent status in output
# in older libvirt. When libvirt older than 0.10.2 no longer
# supported, this block can be safely removed.
try:
# Rely on net_autostart raising an error if the network is not persistent
if autostart: # Enabled, try enabling again
# dargs['ignore_status'] already False
if virsh_instance is not None:
virsh_instance.net_autostart(name, **dargs)
else:
net_autostart(name, **dargs)
else: # Disabled, try disabling again
if virsh_instance is not None:
virsh_instance.net_autostart(
name, "--disable", **dargs)
else:
net_autostart(name, "--disable", **dargs)
# no exception raised, must be persistent
persistent = True
except process.CmdError as detail:
# Exception thrown, could be transient or real problem
if bool(str(detail.result).count("ransient")):
persistent = False
else: # An unexpected problem happened, re-raise it.
raise
# Warning: These key names are used by libvirt_xml and test modules!
result[name] = {'active': active,
'autostart': autostart,
'persistent': persistent}
return result
def net_start(network, extra="", **dargs):
"""
Start network on host.
:param network: name/parameter for network option/argument
:param extra: extra parameters to pass to command
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("net-start %s %s" % (network, extra), **dargs)
def net_destroy(network, extra="", **dargs):
"""
Destroy (stop) an activated network on host.
:param network: name/parameter for network option/argument
:param extra: extra string to pass to command
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("net-destroy %s %s" % (network, extra), **dargs)
def net_undefine(network, extra="", **dargs):
"""
Undefine a defined network on host.
:param network: name/parameter for network option/argument
:param extra: extra string to pass to command
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("net-undefine %s %s" % (network, extra), **dargs)
def net_name(uuid, extra="", **dargs):
"""
Get network name on host.
:param uuid: network UUID.
:param extra: extra parameters to pass to command.
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("net-name %s %s" % (uuid, extra), **dargs)
def net_uuid(network, extra="", **dargs):
"""
Get network UUID on host.
:param network: name/parameter for network option/argument
:param extra: extra parameters to pass to command.
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("net-uuid %s %s" % (network, extra), **dargs)
def net_autostart(network, extra="", **dargs):
"""
Set/unset a network to autostart on host boot
:param network: name/parameter for network option/argument
:param extra: extra parameters to pass to command (e.g. --disable)
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("net-autostart %s %s" % (network, extra), **dargs)
def net_info(network, extra="", **dargs):
"""
Get network information
:param network: name/parameter for network option/argument
:param extra: extra parameters to pass to command.
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
return command("net-info %s %s" % (network, extra), **dargs)
def net_update(network, update_cmd, section, xml, extra="", **dargs):
"""
Update parts of an existing network's configuration
:param network: network name or uuid
:param update_cmd: type of update (add-first, add-last, delete, or modify)
:param section: which section of network configuration to update
:param xml: name of file containing xml
:param extra: extra parameters to pass to command.
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
cmd = "net-update %s %s %s %s %s" \
% (network, update_cmd, section, xml, extra)
return command(cmd, **dargs)
def _pool_type_check(pool_type):
"""
check if the pool_type is supported or not
:param pool_type: pool type
:return: valid pool type or None
"""
valid_types = ['dir', 'fs', 'netfs', 'disk', 'iscsi', 'logical',
'gluster', 'rbd', 'scsi', 'iscsi-direct']
if pool_type and pool_type not in valid_types:
logging.error("Specified pool type '%s' not in '%s'",
pool_type, valid_types)
pool_type = None
elif not pool_type:
# take the first element as default pool_type
pool_type = valid_types[0]
return pool_type
def pool_info(name, **dargs):
"""
Returns basic information about the storage pool.
:param name: name of pool
:param dargs: standardized virsh function API keywords
"""
cmd = "pool-info %s" % name
return command(cmd, **dargs)
def pool_destroy(name, **dargs):
"""
Forcefully stop a given pool.
:param name: name of pool
:param dargs: standardized virsh function API keywords
"""
cmd = "pool-destroy %s" % name
dargs['ignore_status'] = False
try:
command(cmd, **dargs)
return True
except process.CmdError as detail:
logging.error("Failed to destroy pool: %s.", detail)
return False
def pool_create(xml_file, extra="", **dargs):
"""
Create a pool from an xml file.
:param xml_file: file containing an XML pool description
:param extra: extra parameters to pass to command
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("pool-create %s %s" % (extra, xml_file), **dargs)
def pool_create_as(name, pool_type, target, extra="", **dargs):
"""
Create a pool from a set of args.
:param name: name of pool
:param pool_type: storage pool type such as 'dir'
:param target: target path of the pool
:param extra: Free-form string of options
:param dargs: standardized virsh function API keywords
:return: True if pool creation command was successful
"""
if not name:
logging.error("Please give a pool name")
pool_type = _pool_type_check(pool_type)
if pool_type is None:
return False
logging.info("Create %s type pool %s", pool_type, name)
cmd = "pool-create-as --name %s --type %s --target %s %s" \
% (name, pool_type, target, extra)
dargs['ignore_status'] = False
try:
command(cmd, **dargs)
return True
except process.CmdError as detail:
logging.error("Failed to create pool: %s.", detail)
return False
def pool_list(option="", extra="", **dargs):
"""
Prints the pool information of the host.
:param option: options given to command
all
gives all pool details, including inactive
inactive
gives only inactive pool details
details
Gives the complete details about the pools
:param extra: extra options to pass (may be used to inject invalid options for negative tests)
"""
return command("pool-list %s %s" % (option, extra), **dargs)
def pool_uuid(name, **dargs):
"""
Convert a pool name to pool UUID
:param name: Name of the pool
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("pool-uuid %s" % name, **dargs)
def pool_name(uuid, **dargs):
"""
Convert a pool UUID to pool name
:param uuid: UUID of the pool
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("pool-name %s" % uuid, **dargs)
def pool_refresh(name, **dargs):
"""
Refresh a pool
:param name: Name of the pool
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("pool-refresh %s" % name, **dargs)
def pool_delete(name, **dargs):
"""
Delete the resources used by a given pool object
:param name: Name of the pool
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("pool-delete %s" % name, **dargs)
def pool_state_dict(only_names=False, **dargs):
"""
Return pool name to state/autostart mapping
:param only_names: When true, return pool names as keys and None values
:param dargs: standardized virsh function API keywords
:return: dictionary
"""
# Using multiple virsh commands in different ways
dargs['ignore_status'] = False # force problem detection
pool_list_result = pool_list("--all", **dargs)
# If command failed, exception would be raised here
poollist = pool_list_result.stdout_text.strip().splitlines()
# First two lines contain table header followed by entries
# for each pool on the host, such as:
#
# Name State Autostart
# -------------------------------------------
# default active yes
# iscsi-net-pool active yes
#
# TODO: Double-check first-two lines really are header
poollist = poollist[2:]
result = {}
for line in poollist:
# Split on whitespace, assume 3 columns
linesplit = line.split(None, 3)
name = linesplit[0]
# Several callers in libvirt_xml only require defined names
# TODO: Copied from net_state_dict where this is true, but
# as of writing only caller is virsh_pool_create test
# which doesn't use this 'feature'.
if only_names:
result[name] = None
continue
# Keep search fast & avoid first-letter capital problems
active = not bool(linesplit[1].count("nactive"))
autostart = bool(linesplit[2].count("es"))
# Warning: These key names are used by libvirt_xml and test modules!
result[name] = {'active': active,
'autostart': autostart}
return result
def pool_define_as(name, pool_type, target="", extra="", **dargs):
"""
Define the pool from the arguments
:param name: Name of the pool to be defined
:param pool_type: Type of the pool to be defined
dir
file system directory
disk
Physical Disk Device
fs
Pre-formatted Block Device
netfs
Network Exported Directory
iscsi
iSCSI Target
logical
LVM Volume Group
mpath
Multipath Device Enumerator
scsi
SCSI Host Adapter
rbd
Rados Block Device
:param target: target path of the pool
:param extra: Free-form string of options
:param dargs: standardized virsh function API keywords
:return: CmdResult object, or False if the pool type is invalid
"""
pool_type = _pool_type_check(pool_type)
if pool_type is None:
return False
logging.debug("Try to define %s type pool %s", pool_type, name)
cmd = "pool-define-as --name %s --type %s %s" \
% (name, pool_type, extra)
# Target is not a must
if target:
cmd += " --target %s" % target
return command(cmd, **dargs)
def pool_start(name, extra="", **dargs):
"""
Start the defined pool
:param name: Name of the pool to be started
:param extra: Free-form string of options
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("pool-start %s %s" % (name, extra), **dargs)
def pool_autostart(name, extra="", **dargs):
"""
Mark for autostart of a pool
:param name: Name of the pool to be mark for autostart
:param extra: Free-form string of options
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("pool-autostart %s %s" % (name, extra), **dargs)
def pool_edit(name, **dargs):
"""
Edit XML configuration for a storage pool.
:param name: pool name or uuid
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "pool-edit %s" % name
return command(cmd, **dargs)
def pool_undefine(name, extra="", **dargs):
"""
Undefine the given pool
:param name: Name of the pool to be undefined
:param extra: Free-form string of options
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("pool-undefine %s %s" % (name, extra), **dargs)
def pool_build(name, options="", **dargs):
"""
Build pool.
:param name: Name of the pool to be built
:param options: options for pool-build
"""
return command("pool-build %s %s" % (name, options), **dargs)
def find_storage_pool_sources_as(source_type, options="", **dargs):
"""
Find potential storage pool sources
:param source_type: type of storage pool sources to find
:param options: cmd options
:param dargs: standardized virsh function API keywords
:return: returns the output of the command
"""
return command("find-storage-pool-sources-as %s %s"
% (source_type, options), **dargs)
def find_storage_pool_sources(source_type, srcSpec, **dargs):
"""
Find potential storage pool sources
:param source_type: type of storage pool sources to find
:param srcSpec: file of source xml to query for pools
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("find-storage-pool-sources %s %s"
% (source_type, srcSpec), **dargs)
def pool_dumpxml(name, extra="", to_file="", **dargs):
"""
Return the pool information as an XML dump.
:param name: name of the pool
:param extra: extra parameters to pass to command
:param to_file: optional file to write XML output to
:param dargs: standardized virsh function API keywords
:return: standard output from command
"""
dargs['ignore_status'] = True
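# Status is ignored above so any XML produced can still be written to file;
# a non-zero exit status is re-raised as CmdError below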
cmd = "pool-dumpxml %s %s" % (name, extra)
result = command(cmd, **dargs)
if to_file:
result_file = open(to_file, 'w')
result_file.write(result.stdout_text.strip())
result_file.close()
if result.exit_status:
raise process.CmdError(cmd, result,
"Virsh dumpxml returned non-zero exit status")
return result.stdout_text.strip()
def pool_define(xml_path, **dargs):
"""
Define (but do not start) a pool from an XML file.
:param xml_path: XML file path
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "pool-define --file %s" % xml_path
return command(cmd, **dargs)
def vol_create(pool_name, xml_file, extra="", **dargs):
"""
Create a volume from an XML file.
:param pool_name: Name of the pool to be used
:param xml_file: file containing an XML vol description
:param extra: string of extra options
:return: CmdResult object
"""
cmd = "vol-create --pool %s --file %s %s" % (pool_name, xml_file, extra)
return command(cmd, **dargs)
def vol_create_as(volume_name, pool_name, capacity,
allocation, frmt, extra="", **dargs):
"""
Create a volume in the specified pool
:param volume_name: Name of the volume to be created
:param pool_name: Name of the pool to be used
:param capacity: Size of the volume
:param allocation: Size of the volume to be pre-allocated
:param frmt: volume format (e.g. raw, qed, qcow2)
:param extra: Free-form string of options
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "vol-create-as --pool %s" % pool_name
cmd += " %s --capacity %s" % (volume_name, capacity)
if allocation:
cmd += " --allocation %s" % (allocation)
if frmt:
cmd += " --format %s" % (frmt)
if extra:
cmd += " %s" % (extra)
return command(cmd, **dargs)
def vol_create_from(pool_name, vol_file, input_vol, input_pool, extra="",
**dargs):
"""
Create a vol, using another volume as input
:param pool_name: Name of the pool to create the volume in
:param vol_file: XML <file> with the volume definition
:param input_vol: Name of the source volume
:param input_pool: Name of the pool the source volume is in
:param extra: Free-form string of options
:return: CmdResult object
"""
cmd = ("vol-create-from --pool %s --file %s --vol %s --inputpool %s" %
(pool_name, vol_file, input_vol, input_pool))
if extra:
cmd += " %s" % (extra)
return command(cmd, **dargs)
def vol_list(pool_name, extra="", **dargs):
"""
List the volumes for a given pool
:param pool_name: Name of the pool
:param extra: Free-form string options
:param dargs: standardized virsh function API keywords
:return: returns the output of the command
"""
return command("vol-list %s %s" % (pool_name, extra), **dargs)
def vol_delete(volume_name, pool_name, extra="", **dargs):
"""
Delete a given volume
:param volume_name: Name of the volume
:param pool_name: Name of the pool
:param extra: Free-form string options
:param dargs: standardized virsh function API keywords
:return: returns the output of the command
"""
return command("vol-delete %s %s %s" %
(volume_name, pool_name, extra), **dargs)
def vol_key(volume_name, pool_name, extra="", **dargs):
"""
Prints the key of the given volume
:param volume_name: Name of the volume
:param pool_name: Name of the pool
:param extra: Free-form string options
:param dargs: standardized virsh function API keywords
:return: returns the output of the command
"""
return command("vol-key --vol %s --pool %s %s" %
(volume_name, pool_name, extra), **dargs)
def vol_info(volume_name, pool_name, extra="", **dargs):
"""
Prints the given volume info
:param volume_name: Name of the volume
:param pool_name: Name of the pool
:param extra: Free-form string options
:param dargs: standardized virsh function API keywords
:return: returns the output of the command
"""
cmd = "vol-info --vol %s" % volume_name
if pool_name:
cmd += " --pool %s" % pool_name
if extra:
cmd += " %s" % extra
return command(cmd, **dargs)
def vol_name(volume_key, extra="", **dargs):
"""
Prints the name of the volume identified by the given key
:param volume_key: Key of the volume
:param extra: Free-form string options
:param dargs: standardized virsh function API keywords
:return: returns the output of the command
"""
return command("vol-name --vol %s %s" % (volume_key, extra), **dargs)
def vol_path(volume_name, pool_name, extra="", **dargs):
"""
Prints the given volume path
:param volume_name: Name of the volume
:param pool_name: Name of the pool
:param extra: Free-form string options
:param dargs: standardized virsh function API keywords
:return: returns the output of the command
"""
return command("vol-path --vol %s --pool %s %s" %
(volume_name, pool_name, extra), **dargs)
def vol_dumpxml(volume_name, pool_name, to_file=None, options="", **dargs):
"""
Dumps volume details in xml
:param volume_name: Name of the volume
:param pool_name: Name of the pool
:param to_file: path of the file to store the output
:param options: Free-form string options
:param dargs: standardized virsh function API keywords
:return: returns the output of the command
"""
cmd = ('vol-dumpxml --vol %s --pool %s %s' %
(volume_name, pool_name, options))
result = command(cmd, **dargs)
if to_file is not None:
result_file = open(to_file, 'w')
result_file.write(result.stdout_text.strip())
result_file.close()
return result
def vol_pool(volume_name, extra="", **dargs):
"""
Returns pool name for a given vol-key
:param volume_name: Volume key or path
:param extra: Free-form string options
:param dargs: standardized virsh function API keywords
:return: returns the output of the command
"""
return command("vol-pool %s %s" % (volume_name, extra), **dargs)
def vol_clone(volume_name, new_name, pool_name="", extra="", **dargs):
"""
Clone an existing volume.
:param volume_name: Name of the original volume
:param new_name: Clone name
:param pool_name: Name of the pool
:param extra: Free-form string options
:param dargs: Standardized virsh function API keywords
:return: Returns the output of the command
"""
cmd = "vol-clone --vol %s --newname %s %s" % (volume_name, new_name, extra)
if pool_name:
cmd += " --pool %s" % pool_name
return command(cmd, **dargs)
def vol_wipe(volume_name, pool_name="", alg="", **dargs):
"""
Ensure data previously on a volume is not accessible to future reads.
:param volume_name: Name of the volume
:param pool_name: Name of the pool
:param alg: Perform selected wiping algorithm
:param dargs: Standardized virsh function API keywords
:return: Returns the output of the command
"""
cmd = "vol-wipe --vol %s" % volume_name
if pool_name:
cmd += " --pool %s" % pool_name
if alg:
cmd += " --algorithm %s" % alg
return command(cmd, **dargs)
def vol_resize(volume_name, capacity, pool_name="", extra="", **dargs):
"""
Resizes a storage volume.
:param volume_name: Name of the volume
:param capacity: New capacity for the volume (default bytes)
:param pool_name: Name of the pool
:param extra: Free-form string options
:param dargs: Standardized virsh function API keywords
:return: Returns the output of the command
"""
cmd = "vol-resize --vol %s --capacity %s " % (volume_name, capacity)
if pool_name:
cmd += " --pool %s " % pool_name
if extra:
cmd += extra
return command(cmd, **dargs)
def capabilities(option='', to_file=None, **dargs):
"""
Return output from virsh capabilities command
:param option: additional options (takes none)
:param to_file: optional file to write the output to
:param dargs: standardized virsh function API keywords
"""
cmd_result = command('capabilities %s' % option, **dargs)
if to_file is not None:
result_file = open(to_file, 'w')
result_file.write(cmd_result.stdout_text.strip())
result_file.close()
return cmd_result.stdout_text.strip()
def nodecpustats(option='', **dargs):
"""
Returns basic information about the node CPU statistics
:param option: additional options (takes none)
:param dargs: standardized virsh function API keywords
"""
cmd_nodecpustat = "nodecpustats %s" % option
return command(cmd_nodecpustat, **dargs)
def nodememstats(option='', **dargs):
"""
Returns basic information about the node Memory statistics
:param option: additional options (takes none)
:param dargs: standardized virsh function API keywords
"""
return command('nodememstats %s' % option, **dargs)
def memtune_set(name, options, **dargs):
"""
Set the memory controller parameters
:param name: VM Name
:param options: contains the values limit, state and value
"""
return command("memtune %s %s" % (name, options), **dargs)
def memtune_list(name, **dargs):
"""
List the memory controller value of a given domain
:param name: VM Name
"""
return command("memtune %s" % (name), **dargs)
def memtune_get(name, key):
"""
Get the specific memory controller value
:param name: VM Name
:param key: memory controller limit for which the value needed
:return: the memory value of a key in Kbs
"""
memtune_output = memtune_list(name).stdout_text.strip()
logging.info("memtune output is %s", memtune_output)
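# Each memtune output line looks like '<key> : <value>'; extract the value for the requested key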
memtune_value = re.findall(r"%s\s*:\s+(\S+)" % key, str(memtune_output))
if memtune_value:
return int(memtune_value[0] if memtune_value[0] != "unlimited" else -1)
else:
return -1
def help_command(options='', cache=False, **dargs):
"""
Return list of commands and groups in help command output
:param options: additional options to pass to help command
:param cache: Return cached result if True, or refreshed cache if False
:param dargs: standardized virsh function API keywords
:return: List of command and group names
"""
# Combine virsh command list and virsh group list.
virsh_command_list = help_command_only(options, cache, **dargs)
virsh_group_list = help_command_group(options, cache, **dargs)
virsh_command_group = virsh_command_list + virsh_group_list
return virsh_command_group
def help_command_only(options='', cache=False, **dargs):
"""
Return list of commands in help command output
:param options: additional options to pass to help command
:param cache: Return cached result if True, or refreshed cache if False
:param dargs: standardized virsh function API keywords
:return: List of command names
"""
# global needed to support this function's use in Virsh method closure
global VIRSH_COMMAND_CACHE
if not VIRSH_COMMAND_CACHE or cache is False:
VIRSH_COMMAND_CACHE = []
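# Command names in the help listing are lower-case, possibly hyphenated words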
regx_command_word = re.compile(r"\s+([a-z0-9-]+)\s+")
result = help(options, **dargs)
for line in result.stdout_text.strip().splitlines():
# Get rid of 'keyword' line
if line.find("keyword") != -1:
continue
mobj_command_word = regx_command_word.search(line)
if mobj_command_word:
VIRSH_COMMAND_CACHE.append(mobj_command_word.group(1))
# Prevent accidental modification of cache itself
return list(VIRSH_COMMAND_CACHE)
def help_command_group(options='', cache=False, **dargs):
"""
Return list of groups in help command output
:param options: additional options to pass to help command
:param cache: Return cached result if True, or refreshed cache if False
:param dargs: standardized virsh function API keywords
:return: List of group names
"""
# global needed to support this function's use in Virsh method closure
global VIRSH_COMMAND_GROUP_CACHE, VIRSH_COMMAND_GROUP_CACHE_NO_DETAIL
if VIRSH_COMMAND_GROUP_CACHE_NO_DETAIL:
return []
if not VIRSH_COMMAND_GROUP_CACHE or cache is False:
VIRSH_COMMAND_GROUP_CACHE = []
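# Group names are the quoted words on the lines that contain 'keyword'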
regx_group_word = re.compile(r"[\']([a-zA-Z0-9]+)[\']")
result = help(options, **dargs)
for line in result.stdout_text.strip().splitlines():
# 'keyword' only exists in group line.
if line.find("keyword") != -1:
mojb_group_word = regx_group_word.search(line)
if mojb_group_word:
VIRSH_COMMAND_GROUP_CACHE.append(mojb_group_word.group(1))
if len(list(VIRSH_COMMAND_GROUP_CACHE)) == 0:
VIRSH_COMMAND_GROUP_CACHE_NO_DETAIL = True
# Prevent accidental modification of cache itself
return list(VIRSH_COMMAND_GROUP_CACHE)
def has_help_command(virsh_cmd, options='', **dargs):
"""
String match on virsh command in help output command list
:param virsh_cmd: Name of virsh command or group to look for
:param options: Additional options to send to help command
:param dargs: standardized virsh function API keywords
:return: True/False
"""
return bool(help_command_only(options, cache=True,
**dargs).count(virsh_cmd))
def has_command_help_match(virsh_cmd, regex, **dargs):
"""
Regex search on subcommand help output
:param virsh_cmd: Name of virsh command or group to match help output
:param regex: regular expression string to match
:param dargs: standardized virsh function API keywords
:return: re match object
"""
result = help(virsh_cmd, **dargs)
command_help_output = result.stdout_text.strip()
return re.search(regex, command_help_output)
def help(virsh_cmd='', **dargs):
"""
Prints global help, command specific help, or help for a
group of related commands
:param virsh_cmd: Name of virsh command or group
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
return command("help %s" % virsh_cmd, **dargs)
def schedinfo(domain, options="", **dargs):
"""
Show/Set scheduler parameters.
:param domain: VM's name, id or uuid.
:param options: additional options.
:param dargs: standardized virsh function API keywords
"""
cmd = "schedinfo %s %s" % (domain, options)
return command(cmd, **dargs)
def setmem(domainarg=None, sizearg=None, domain=None,
size=None, use_kilobytes=False, flagstr="", **dargs):
"""
Change the current memory allocation in the guest domain.
:param domainarg: Domain name (first pos. parameter)
:param sizearg: Memory size in KiB (second pos. parameter)
:param domain: Option to --domain parameter
:param size: Option to --size or --kilobytes parameter
:param use_kilobytes: True for --kilobytes, False for --size
:param dargs: standardized virsh function API keywords
:param flagstr: string of "--config, --live, --current, etc."
:return: CmdResult instance
:raise: process.CmdError: if libvirtd is not running
"""
cmd = "setmem"
if domainarg is not None: # Allow testing of ""
cmd += " %s" % domainarg
if domain is not None: # Allow testing of --domain ""
cmd += " --domain %s" % domain
if sizearg is not None: # Allow testing of 0 and ""
cmd += " %s" % sizearg
if size is not None: # Allow testing of --size "" or --size 0
if use_kilobytes:
cmd += " --kilobytes %s" % size
else:
cmd += " --size %s" % size
if len(flagstr) > 0:
cmd += " %s" % flagstr
return command(cmd, **dargs)
def setmaxmem(domainarg=None, sizearg=None, domain=None,
size=None, use_kilobytes=False, flagstr="", **dargs):
"""
Change the maximum memory allocation for the guest domain.
:param domainarg: Domain name (first pos. parameter)
:param sizearg: Memory size in KiB (second pos. parameter)
:param domain: Option to --domain parameter
:param size: Option to --size or --kilobytes parameter
:param use_kilobytes: True for --kilobytes, False for --size
:param flagstr: string of "--config, --live, --current, etc."
:return: CmdResult instance
:raise: process.CmdError: if libvirtd is not running.
"""
cmd = "setmaxmem"
if domainarg is not None: # Allow testing of ""
cmd += " %s" % domainarg
if sizearg is not None: # Allow testing of 0 and ""
cmd += " %s" % sizearg
if domain is not None: # Allow testing of --domain ""
cmd += " --domain %s" % domain
if size is not None: # Allow testing of --size "" or --size 0
if use_kilobytes:
cmd += " --kilobytes %s" % size
else:
cmd += " --size %s" % size
if len(flagstr) > 0:
cmd += " %s" % flagstr
return command(cmd, **dargs)
def set_user_password(domain=None, user=None, password=None,
encrypted=False, option=True, **dargs):
"""
Set the user password inside the domain
:param domain: Option to --domain parameter
:param user: Option to --user parameter
:param password: Option to --password
:param encrypted: True for --encrypted
:param option: True for --domain/user/password
:return: CmdResult instance
"""
cmd = "set-user-password"
if option:
if domain:
cmd += " --domain %s" % domain
if user:
cmd += " --user %s" % user
if password:
cmd += " --password %s" % password
else:
if domain:
cmd += " %s" % domain
if user:
cmd += " %s" % user
if password:
cmd += " %s" % password
if encrypted:
cmd += " --encrypted"
return command(cmd, **dargs)
def snapshot_create(name, options="", **dargs):
"""
Create snapshot of domain.
:param name: name of domain
:param dargs: standardized virsh function API keywords
:return: name of snapshot
"""
cmd = "snapshot-create %s %s" % (name, options)
return command(cmd, **dargs)
def snapshot_edit(name, options="", **dargs):
"""
Edit snapshot xml
:param name: name of domain
:param options: options of snapshot-edit command
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "snapshot-edit %s %s" % (name, options)
return command(cmd, **dargs)
def snapshot_create_as(name, options="", **dargs):
"""
Create snapshot of domain with options.
:param name: name of domain
:param options: options of snapshot-create-as
:param dargs: standardized virsh function API keywords
:return: name of snapshot
"""
cmd = "snapshot-create-as %s" % name
if options is not None:
cmd += " %s" % options
return command(cmd, **dargs)
def snapshot_parent(name, options, **dargs):
"""
Get name of snapshot parent
:param name: name of domain
:param options: options of snapshot-parent
:param dargs: standardized virsh function API keywords
:return: name of snapshot
"""
cmd = "snapshot-parent %s %s" % (name, options)
return command(cmd, **dargs)
def snapshot_current(name, options="--name", **dargs):
"""
Get name or xml of current snapshot.
:param name: name of domain
:param options: options of snapshot-current, default is --name
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
cmd = "snapshot-current %s" % name
if options is not None:
cmd += " %s" % options
return command(cmd, **dargs)
def snapshot_list(name, options=None, **dargs):
"""
Get list of snapshots of domain.
:param name: name of domain
:param options: options of snapshot_list
:param dargs: standardized virsh function API keywords
:return: list of snapshot names
"""
# CmdResult is handled here, force ignore_status
dargs['ignore_status'] = True
ret = []
cmd = "snapshot-list %s" % name
if options is not None:
cmd += " %s" % options
sc_output = command(cmd, **dargs)
if sc_output.exit_status != 0:
raise process.CmdError(
cmd, sc_output, "Failed to get list of snapshots")
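# Each snapshot row looks like '<name> <YYYY-MM-DD> <hh:mm:ss> <offset> <state>'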
data = re.findall(r"\S* *\d*-\d*-\d* \d*:\d*:\d* [+-]\d* \w*",
sc_output.stdout_text)
for rec in data:
if not rec:
continue
ret.append(re.match(r"\S*", rec).group())
return ret
def snapshot_dumpxml(name, snapshot, options=None, to_file=None, **dargs):
"""
Get dumpxml of snapshot
:param name: name of domain
:param snapshot: name of snapshot
:param options: options of snapshot_list
:param to_file: optional file to write XML output to
:param dargs: standardized virsh function API keywords
:return: standard output from command
"""
cmd = "snapshot-dumpxml %s %s" % (name, snapshot)
if options is not None:
cmd += " %s" % options
result = command(cmd, **dargs)
if to_file is not None:
result_file = open(to_file, 'w')
result_file.write(result.stdout_text.strip())
result_file.close()
return result
def snapshot_info(name, snapshot, **dargs):
"""
Check snapshot information.
:param name: name of domain
:param snapshot: name of snapshot to verify
:param dargs: standardized virsh function API keywords
:return: snapshot information dictionary
"""
# CmdResult is handled here, force ignore_status
dargs['ignore_status'] = True
ret = {}
values = ["Name", "Domain", "Current", "State", "Parent",
"Children", "Descendants", "Metadata"]
cmd = "snapshot-info %s %s" % (name, snapshot)
sc_output = command(cmd, **dargs)
if sc_output.exit_status != 0:
raise process.CmdError(cmd, sc_output, "Failed to get snapshot info")
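# Pull out each 'Field:   value' pair from the snapshot-info output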
for val in values:
data = re.search(r"(?<=%s:) *(\w.*|\w*)" % val,
sc_output.stdout_text)
if data is None:
continue
ret[val] = data.group(0).strip()
if ret.get("Parent") == "":
ret["Parent"] = None
return ret
def snapshot_revert(name, snapshot, options="", **dargs):
"""
Revert domain state to saved snapshot.
:param name: name of domain
:param dargs: standardized virsh function API keywords
:param snapshot: snapshot to revert to
:return: CmdResult instance
"""
cmd = "snapshot-revert %s %s %s" % (name, snapshot, options)
return command(cmd, **dargs)
def snapshot_delete(name, snapshot, options='', **dargs):
"""
Remove domain snapshot
:param name: name of domain
:param dargs: standardized virsh function API keywords
:param snapshot: snapshot to delete
:return: CmdResult instance
"""
cmd = "snapshot-delete %s %s %s" % (name, snapshot, options)
return command(cmd, **dargs)
def blockcommit(name, path, options="", **dargs):
"""
Start a block commit operation.
:param name: name of domain
:param path: fully-qualified path or target of the disk
:param options: options of blockcommit
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
cmd = "blockcommit %s %s" % (name, path)
if options is not None:
cmd += " %s" % options
return command(cmd, **dargs)
def blockpull(name, path, options="", **dargs):
"""
Start a block pull operation.
:param name: name of domain
:param path: fully-qualified path or target of the disk
:param options: options of blockpull
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
cmd = "blockpull %s %s" % (name, path)
if options is not None:
cmd += " %s" % options
return command(cmd, **dargs)
def blockresize(name, path, size, **dargs):
"""
Resize block device of domain.
:param name: name of domain
:param path: path of block device
:param size: new size of the block device
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
return command("blockresize %s %s %s" % (name, path, size), **dargs)
def domblkinfo(name, device, **dargs):
"""
Get block device size info for a domain.
:param name: VM's name or id,uuid.
:param device: device of VM.
:param dargs: standardized virsh function API keywords.
:return: CmdResult object.
"""
return command("domblkinfo %s %s" % (name, device), **dargs)
def domblklist(name, options=None, **dargs):
"""
Get domain devices.
:param name: name of domain
:param options: options of domblklist.
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
cmd = "domblklist %s" % name
if options:
cmd += " %s" % options
return command(cmd, **dargs)
def domiflist(name, options='', extra='', **dargs):
"""
Get the domain network devices
:param name: name of domain
:param options: options of domiflist
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
return command('domiflist %s %s %s' % (name, options, extra), **dargs)
def cpu_stats(name, options, **dargs):
"""
Display per-CPU and total statistics about domain's CPUs
:param name: name of domain
:param options: options of cpu_stats
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
cmd = "cpu-stats %s" % name
if options:
cmd += " %s" % options
return command(cmd, **dargs)
def change_media(name, device, options, **dargs):
"""
Change media of CD or floppy drive.
:param name: VM's name.
:param device: Fully-qualified path or target of disk device
:param options: command change_media options.
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
cmd = "change-media %s %s " % (name, device)
if options:
cmd += " %s " % options
return command(cmd, **dargs)
def cpu_compare(xml_file, **dargs):
"""
Compare host CPU with a CPU described by an XML file
:param xml_file: file containing an XML CPU description.
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
return command("cpu-compare %s" % xml_file, **dargs)
def hypervisor_cpu_compare(xml_file, options="", **dargs):
"""
Compare CPU provided by hypervisor on the host with a CPU described by an XML file
:param xml_file: file containing an XML CPU description
:param options: extra options passed to virsh command
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
return command("hypervisor-cpu-compare %s %s" % (xml_file, options), **dargs)
def cpu_baseline(xml_file, **dargs):
"""
Compute baseline CPU for a set of given CPUs.
:param xml_file: file containing an XML CPU description.
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
return command("cpu-baseline %s" % xml_file, **dargs)
def numatune(name, mode=None, nodeset=None, options=None, **dargs):
"""
Set or get a domain's numa parameters
:param name: name of domain
:param mode: numatune mode (e.g. strict, preferred, interleave)
:param nodeset: NUMA node selection (e.g. 0-3,^2)
:param options: options may be live, config and current
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
cmd = "numatune %s" % name
if options:
cmd += " --%s" % options
if mode:
cmd += " --mode %s" % mode
if nodeset:
cmd += " --nodeset %s" % nodeset
return command(cmd, **dargs)
def nodedev_reset(name, options="", **dargs):
"""
Trigger a device reset for device node.
:param name: device node name to be reset.
:param options: additional options passed to virsh command
:param dargs: standardized virsh function API keywords
:return: cmdresult object.
"""
cmd = ("nodedev-reset --device %s %s" % (name, options))
return command(cmd, **dargs)
def ttyconsole(name, **dargs):
"""
Print tty console device.
:param name: name, uuid or id of domain
:return: CmdResult instance
"""
return command("ttyconsole %s" % name, **dargs)
def nodedev_dumpxml(name, options="", to_file=None, **dargs):
"""
Do dumpxml for node device.
:param name: the name of device.
:param options: extra options to nodedev-dumpxml cmd.
:param to_file: optional file to write XML output to.
:return: Cmdobject of virsh nodedev-dumpxml.
"""
cmd = ('nodedev-dumpxml %s %s' % (name, options))
result = command(cmd, **dargs)
if to_file is not None:
result_file = open(to_file, 'w')
result_file.write(result.stdout_text.strip())
result_file.close()
return result
def connect(connect_uri="", options="", **dargs):
"""
Run a connect command to the uri.
:param connect_uri: target uri connect to.
:param options: options to pass to connect command
:return: CmdResult object.
"""
return command("connect %s %s" % (connect_uri, options), **dargs)
def domif_setlink(name, interface, state, options=None, **dargs):
"""
Set the link state of a domain's virtual interface.
:param name: Name of domain
:param interface: interface device
:param state: new state of the device up or down
:param options: command options.
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "domif-setlink %s %s %s " % (name, interface, state)
if options:
cmd += " %s" % options
return command(cmd, **dargs)
def domif_getlink(name, interface, options=None, **dargs):
"""
Get the link state of a domain's virtual interface.
:param name: Name of domain
:param interface: interface device
:param options: command options.
:param dargs: standardized virsh function API keywords
:return: domif state
"""
cmd = "domif-getlink %s %s " % (name, interface)
if options:
cmd += " %s" % options
return command(cmd, **dargs)
def nodedev_list(tree=False, cap="", options="", **dargs):
"""
List the node devices.
:param tree: list devices in a tree
:param cap: capability names, separated by comma
:param options: extra command options.
:param dargs: standardized virsh function API keywords
:return: CmdResult object.
"""
cmd = "nodedev-list"
if tree:
cmd += " --tree"
if cap:
cmd += " --cap %s" % cap
if options:
cmd += " %s" % options
return command(cmd, **dargs)
def nodedev_detach(name, options="", **dargs):
"""
Detach node device from host.
:param name: name of the node device
:param options: extra options to pass to the command
:return: CmdResult object.
"""
cmd = ("nodedev-detach --device %s %s" % (name, options))
return command(cmd, **dargs)
def nodedev_dettach(name, options="", **dargs):
"""
Detach node device from host.
:return: result of nodedev_detach(name, options, **dargs).
"""
return nodedev_detach(name, options, **dargs)
def nodedev_reattach(name, options="", **dargs):
"""
If node device is detached, this action will
reattach it to its device driver.
:return: cmdresult object.
"""
cmd = ("nodedev-reattach --device %s %s" % (name, options))
return command(cmd, **dargs)
def vcpucount(name, options="", **dargs):
"""
Get the vcpu count of guest.
:param name: name of domain.
:param options: options for vcpucount command.
:return: CmdResult object.
"""
cmd = "vcpucount %s %s" % (name, options)
return command(cmd, **dargs)
def blockcopy(name, path, dest, options="", **dargs):
"""
Start a block copy operation.
:param name: name of domain.
:param path: fully-qualified path or target of disk.
:param dest: path of the copy to create.
:param options: options of blockcopy.
:param dargs: standardized virsh function API keywords.
:return: CmdResult instance.
"""
cmd = "blockcopy %s %s %s %s" % (name, path, dest, options)
return command(cmd, **dargs)
def blockjob(name, path, options="", **dargs):
"""
Manage active block operations.
:param name: name of domain.
:param path: fully-qualified path or target of disk.
:param options: options of blockjob.
:param dargs: standardized virsh function API keywords.
:return: CmdResult instance.
"""
cmd = "blockjob %s %s %s" % (name, path, options)
return command(cmd, **dargs)
def domiftune(name, interface, options=None, inbound=None,
outbound=None, **dargs):
"""
Set/get parameters of a virtual interface.
:param name: name of domain.
:param interface: interface device (MAC Address).
    :param inbound: control the domain's incoming traffic.
    :param outbound: control the domain's outgoing traffic.
:param options: options may be live, config and current.
:param dargs: standardized virsh function API keywords.
:return: CmdResult instance.
"""
cmd = "domiftune %s %s" % (name, interface)
if inbound:
cmd += " --inbound %s" % inbound
if outbound:
cmd += " --outbound %s" % outbound
if options:
cmd += " --%s" % options
return command(cmd, **dargs)
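# Illustrative usage sketch (not part of the original module): tuning inbound
# bandwidth on an interface. The "average,peak,burst" value format is an
# assumption about virsh domiftune, not something this wrapper enforces, and
# note that options="config" is expanded to "--config" by the code above.
#
#   result = domiftune("vm1", "52:54:00:aa:bb:cc",
#                      options="config", inbound="1000,2000,1024")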
def desc(name, options, desc_str, **dargs):
"""
Show or modify description or title of a domain.
:param name: name of domain.
:param options: options for desc command.
:param desc_str: new desc message.
:param dargs: standardized virsh function API keywords.
:return: CmdResult object.
"""
if desc_str:
options = options + " \"%s\"" % desc_str
cmd = "desc %s %s" % (name, options)
return command(cmd, **dargs)
def autostart(name, options, **dargs):
"""
    Configure a domain to be automatically started at boot.
    :param name: name of domain.
    :param options: options for autostart command (e.g. --disable).
    :return: CmdResult object.
"""
cmd = ("autostart %s %s" % (name, options))
return command(cmd, **dargs)
def node_memtune(shm_pages_to_scan=None, shm_sleep_millisecs=None,
shm_merge_across_nodes=None, options=None, **dargs):
"""
Get or set node memory parameters.
:param options: Extra options to virsh.
    :param shm_pages_to_scan: Pages to scan.
    :param shm_sleep_millisecs: Sleep time (ms).
    :param shm_merge_across_nodes: Merge across nodes.
:param dargs: Standardized virsh function API keywords.
:return: CmdResult instance
"""
cmd = "node-memory-tune"
if shm_pages_to_scan:
cmd += " --shm-pages-to-scan %s" % shm_pages_to_scan
if shm_sleep_millisecs:
cmd += " --shm-sleep-millisecs %s" % shm_sleep_millisecs
if shm_merge_across_nodes:
cmd += " --shm-merge-across-nodes %s" % shm_merge_across_nodes
if options:
cmd += " --%s" % options
return command(cmd, **dargs)
def iface_list(extra="", **dargs):
"""
List physical host interfaces.
:param extra: Free-form string of options
    :param dargs: Standardized virsh function API keywords
:return: CmdResult object
"""
return command("iface-list %s" % extra, **dargs)
def iface_define(xml_path, **dargs):
"""
Define (but don't start) a physical host interface from an XML file.
:param xml_path: XML file path
:param dargs: Standardized virsh function API keywords
:return: CmdResult object
"""
return command("iface-define --file %s" % xml_path, **dargs)
def iface_start(iface, **dargs):
"""
Start a physical host interface.
:param iface: Interface name or MAC address
:param dargs: Standardized virsh function API keywords
:return: CmdResult object
"""
return command("iface-start %s" % iface, **dargs)
def iface_destroy(iface, **dargs):
"""
Destroy a physical host interface.
:param iface: Interface name or MAC address
:param dargs: Standardized virsh function API keywords
:return: CmdResult object
"""
return command("iface-destroy %s" % iface, **dargs)
def iface_undefine(iface, **dargs):
"""
Undefine a physical host interface (remove it from configuration).
:param iface: Interface name or MAC address
:param dargs: Standardized virsh function API keywords
:return: CmdResult object
"""
return command("iface-undefine %s" % iface, **dargs)
def iface_dumpxml(iface, extra="", to_file="", **dargs):
"""
Interface information in XML.
:param iface: Interface name or MAC address
:param extra: Free-form string of options
:param to_file: Optional file to write xml
:param dargs: standardized virsh function API keywords
:return: standard output from command
"""
dargs['ignore_status'] = True
cmd = "iface-dumpxml %s %s" % (iface, extra)
result = command(cmd, **dargs)
if to_file:
result_file = open(to_file, 'w')
result_file.write(result.stdout.strip())
result_file.close()
if result.exit_status:
raise process.CmdError(cmd, result,
"Dumpxml returned non-zero exit status")
return result.stdout_text.strip()
def iface_name(mac, **dargs):
"""
Convert an interface MAC address to interface name.
:param mac: Interface MAC address
:param dargs: Standardized virsh function API keywords
:return: CmdResult object
"""
return command("iface-name %s" % mac, **dargs)
def iface_mac(name, **dargs):
"""
Convert an interface name to interface MAC address.
:param name: Interface name
:param dargs: Standardized virsh function API keywords
:return: CmdResult object
"""
return command("iface-mac %s" % name, **dargs)
def iface_edit(iface, **dargs):
"""
Edit XML configuration for a physical host interface.
:param iface: Interface name or MAC address
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
return command("iface-edit %s" % iface, **dargs)
def iface_bridge(iface, bridge, extra="", **dargs):
"""
Create a bridge device and attach an existing network device to it.
:param iface: Interface name or MAC address
:param bridge: New bridge device name
:param extra: Free-form string of options
    :param dargs: Standardized virsh function API keywords
:return: CmdResult object
"""
return command("iface-bridge %s %s %s" % (iface, bridge, extra), **dargs)
def iface_unbridge(bridge, extra="", **dargs):
"""
Undefine a bridge device after detaching its slave device.
:param bridge: Current bridge device name
:param extra: Free-form string of options
    :param dargs: Standardized virsh function API keywords
:return: CmdResult object
"""
return command("iface-unbridge %s %s" % (bridge, extra), **dargs)
def iface_begin(**dargs):
"""
    Create a snapshot of current interface settings
:param: dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
return command("iface-begin", **dargs)
def iface_commit(**dargs):
"""
Commit changes made since iface-begin and free restore point
:param: dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
return command("iface-commit", **dargs)
def iface_rollback(**dargs):
"""
    Roll back to the previous configuration saved via iface-begin
:param: dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
return command("iface-rollback", **dargs)
def emulatorpin(name, cpulist=None, options=None, **dargs):
"""
Control or query domain emulator affinity
:param name: name of domain
:param cpulist: a list of physical CPU numbers
:param options: options may be live, config and current
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
cmd = "emulatorpin %s" % name
if options:
cmd += " --%s" % options
if cpulist:
cmd += " --cpulist %s" % cpulist
return command(cmd, **dargs)
def secret_list(options="", **dargs):
"""
    Get the list of secrets.
    :param options: extra options, e.g. '--ephemeral'
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
"""
cmd = "secret-list %s" % options
return command(cmd, **dargs)
def secret_define(xml_file, options=None, **dargs):
"""
Return cmd result of secret define.
:param xml_file: secret XML file
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "secret-define --file %s" % xml_file
if options is not None:
cmd += " %s" % options
logging.debug("Define secret from %s", xml_file)
return command(cmd, **dargs)
def secret_undefine(uuid, options=None, **dargs):
"""
Return cmd result of secret undefine.
:param uuid: secret UUID
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "secret-undefine %s" % uuid
if options is not None:
cmd += " %s" % options
logging.debug("Undefine secret %s", uuid)
return command(cmd, **dargs)
def secret_dumpxml(uuid, to_file="", options=None, **dargs):
"""
Return the secret information as an XML dump.
:param uuid: secret UUID
:param to_file: optional file to write XML output to
:param dargs: standardized virsh function API keywords
    :return: CmdResult object
"""
dargs['ignore_status'] = True
cmd = "secret-dumpxml %s" % uuid
if options is not None:
cmd += " %s" % options
result = command(cmd, **dargs)
if to_file:
result_file = open(to_file, 'w')
result_file.write(result.stdout.strip())
result_file.close()
if result.exit_status:
        raise process.CmdError(cmd, result,
                               "Virsh secret-dumpxml returned "
                               "non-zero exit status")
return result
def secret_get_value(uuid, options=None, **dargs):
"""
Get a secret value
:param uuid: secret UUID
:return: CmdResult object.
"""
cmd = "secret-get-value --secret %s" % uuid
if options:
cmd += " --%s" % options
return command(cmd, **dargs)
def secret_set_value(uuid, password, options=None, encode=False, **dargs):
"""
Set a secret value
:param uuid: secret UUID
:param password: secret value
    :param encode: if False, the password is assumed to be already
                   base64-encoded; if True, it will be base64-encoded
                   before use.
:return: CmdResult object.
"""
cmd = "secret-set-value --secret %s" % uuid
if password:
if encode:
encoding = locale.getpreferredencoding()
cmd += (" --base64 %s"
% base64.b64encode(password.encode(encoding)).decode(encoding))
else:
cmd += " --base64 %s" % password
if options:
cmd += " --%s" % options
return command(cmd, **dargs)
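# Illustrative usage sketch (not part of the original module): with
# encode=True the plain-text value below is base64-encoded by the wrapper
# before being handed to virsh secret-set-value; the UUID is a made-up
# placeholder.
#
#   result = secret_set_value("0f9a6f3c-1111-2222-3333-444444444444",
#                             "redhat", encode=True)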
def nodedev_create(xml_file, options=None, **dargs):
"""
    Return cmd result of creating a device from an XML file
:param xml_file: device XML file
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "nodedev-create %s" % xml_file
if options is not None:
cmd += " %s" % options
logging.debug("Create the device from %s", xml_file)
return command(cmd, **dargs)
def nodedev_destroy(dev_name, options=None, **dargs):
"""
    Return cmd result of destroying a device on the node
:param dev_name: name of the device
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "nodedev-destroy %s" % dev_name
if options is not None:
cmd += " %s" % options
logging.debug("Destroy the device %s on the node", dev_name)
return command(cmd, **dargs)
def domfstrim(name, minimum=None, mountpoint=None, options="", **dargs):
"""
Do fstrim on domain's mounted filesystems
:param name: name of domain
    :param minimum: value for the --minimum option
    :param mountpoint: value for the --mountpoint option
    :param options: extra options to domfstrim cmd.
:return: CmdResult object
"""
cmd = "domfstrim %s" % name
if minimum is not None:
cmd += " --minimum %s" % minimum
if mountpoint is not None:
cmd += " --mountpoint %s" % mountpoint
cmd += " %s" % options
return command(cmd, **dargs)
def domfsfreeze(name, mountpoint=None, options="", **dargs):
"""
Freeze domain's mounted filesystems
:param name: name of domain
:param mountpoint: specific mountpoints to be frozen
:param options: extra options to domfsfreeze cmd.
:return: CmdResult object
"""
cmd = "domfsfreeze %s" % name
if mountpoint is not None:
cmd += " --mountpoint %s" % mountpoint
cmd += " %s" % options
return command(cmd, **dargs)
def domfsthaw(name, mountpoint=None, options="", **dargs):
"""
Thaw domain's mounted filesystems
:param name: name of domain
:param mountpoint: specific mountpoints to be thawed
    :param options: extra options to domfsthaw cmd.
:return: CmdResult object
"""
cmd = "domfsthaw %s" % name
if mountpoint is not None:
cmd += " --mountpoint %s" % mountpoint
cmd += " %s" % options
return command(cmd, **dargs)
def domtime(name, now=False, pretty=False, sync=False, time=None,
options="", **dargs):
"""
Get/Set domain's time
:param name: name of domain
:param now: set to the time of the host running virsh
:param pretty: print domain's time in human readable form
:param sync: instead of setting given time, synchronize from domain's RTC
:param time: integer time to set
:return: CmdResult object
"""
cmd = "domtime %s" % name
if now:
cmd += " --now"
if pretty:
cmd += " --pretty"
if sync:
cmd += " --sync"
if time is not None:
cmd += " --time %s" % time
cmd += " %s" % options
return command(cmd, **dargs)
def nwfilter_dumpxml(name, options="", to_file=None, **dargs):
"""
Do dumpxml for network filter.
:param name: the name or uuid of filter.
:param options: extra options to nwfilter-dumpxml cmd.
:param to_file: optional file to write XML output to.
:param dargs: standardized virsh function API keywords
    :return: CmdResult object of virsh nwfilter-dumpxml.
"""
cmd = ('nwfilter-dumpxml %s %s' % (name, options))
result = command(cmd, **dargs)
if to_file is not None:
result_file = open(to_file, 'w')
result_file.write(result.stdout.strip())
result_file.close()
return result
def nwfilter_define(xml_file, options="", **dargs):
"""
Return cmd result of network filter define.
:param xml_file: network filter XML file
:param options: extra options to nwfilter-define cmd.
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "nwfilter-define --file %s %s" % (xml_file, options)
return command(cmd, **dargs)
def nwfilter_undefine(name, options="", **dargs):
"""
Return cmd result of network filter undefine.
:param name: network filter name or uuid
:param options: extra options to nwfilter-undefine cmd.
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "nwfilter-undefine %s %s" % (name, options)
return command(cmd, **dargs)
def nwfilter_list(options="", **dargs):
"""
Get list of network filters.
:param options: extra options
:param dargs: standardized virsh function API keywords
:return: list of network filters
"""
cmd = "nwfilter-list %s" % options
return command(cmd, **dargs)
def nwfilter_edit(name, options="", **dargs):
"""
Edit the XML configuration for a network filter.
:param name: network filter name or uuid.
:param options: extra options to nwfilter-edit cmd.
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "nwfilter-edit %s %s" % (name, options)
return command(cmd, **dargs)
def nwfilter_binding_create(name, options="", **dargs):
"""
Associate a network port with a network filter.
The network filter backend will immediately
attempt to instantiate the filter rules on the
port.
    :param name: binding XML file name
    :param options: extra options to nwfilter-binding-create cmd.
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "nwfilter-binding-create %s %s" % (name, options)
return command(cmd, **dargs)
def nwfilter_binding_list(options="", **dargs):
"""
List all of the network ports which have filters
associated with them
:param options: extra options for nwfilter_binding_list
:param dargs: standardized virsh function API keywords
"""
cmd = "nwfilter-binding-list %s" % options
return command(cmd, **dargs)
def nwfilter_binding_dumpxml(portdev_name, options="", to_file="", **dargs):
"""
    Output the network filter binding XML for the network device with the
    given port name.
    :param portdev_name: port device name for nwfilter_binding_dumpxml
    :param options: extra options for nwfilter_binding_dumpxml
    :param to_file: optional file to write XML output to
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
"""
cmd = "nwfilter-binding-dumpxml %s %s" % (portdev_name, options)
result = command(cmd, **dargs)
if to_file:
result_file = open(to_file, 'w')
result_file.write(result.stdout.strip())
result_file.close()
return result
def nwfilter_binding_delete(portdev_name, option="", **dargs):
"""
Disassociate a network port from a network filter.
The network filter backend will immediately
tear down the filter rules that exist on the port
:param portdev_name: port device name for nwfilter_binding_delete
    :param option: extra option for nwfilter_binding_delete
    :param dargs: standardized virsh function API keywords
    :return: CmdResult object
"""
cmd = "nwfilter-binding-delete %s %s" % (portdev_name, option)
return command(cmd, **dargs)
def cd(dir_path, options="", **dargs):
"""
Run cd command in virsh interactive session.
:param dir_path: dir path string
:param options: extra options
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "cd --dir %s %s" % (dir_path, options)
return command(cmd, **dargs)
def pwd(options="", **dargs):
"""
Run pwd command in virsh session.
:param options: extra options
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "pwd %s" % options
return command(cmd, **dargs)
def echo(echo_str, options="", **dargs):
"""
Run echo command in virsh session.
:param echo_str: the echo string
:param options: extra options
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "echo %s %s" % (echo_str, options)
return command(cmd, **dargs)
def exit(**dargs):
"""
Run exit command in virsh session.
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "exit"
return command(cmd, **dargs)
def quit(**dargs):
"""
Run quit command in virsh session.
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "quit"
return command(cmd, **dargs)
def sendkey(name, keycode, codeset="", holdtime="", **dargs):
"""
Send keycodes to the guest
:param name: name of domain
:param keycode: the key code
:param codeset: the codeset of keycodes
:param holdtime: milliseconds for each keystroke to be held
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "send-key %s" % name
if codeset:
cmd += " --codeset %s" % codeset
if holdtime:
cmd += " --holdtime %s" % holdtime
cmd += " %s" % keycode
return command(cmd, **dargs)
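# Illustrative usage sketch (not part of the original module): sending a
# ctrl-alt-del key combination. The "linux" codeset and the KEY_* names are
# assumptions about valid virsh send-key arguments.
#
#   result = sendkey("vm1", "KEY_LEFTCTRL KEY_LEFTALT KEY_DELETE",
#                    codeset="linux", holdtime="100")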
def create(xmlfile, options="", **dargs):
"""
Create guest from xml
:param xmlfile: domain xml file
:param options: --paused
:return: CmdResult object
"""
cmd = "create %s %s" % (xmlfile, options)
return command(cmd, **dargs)
def sysinfo(options="", **dargs):
"""
Return the hypervisor sysinfo xml.
:param options: extra options
:return: CmdResult object
"""
cmd = "sysinfo %s" % options
return command(cmd, **dargs)
def reset(name, **dargs):
"""
Reset a domain
:param name: name of domain
:return: CmdResult object
"""
cmd = "reset %s" % name
return command(cmd, **dargs)
def domdisplay(name, options="", **dargs):
"""
Get domain display connection URI
:param name: name of domain
:param options: options of domdisplay
:return: CmdResult object
"""
cmd = "domdisplay %s %s" % (name, options)
return command(cmd, **dargs)
def domblkerror(name, **dargs):
"""
Show errors on block devices
:param name: name of domain
:return: CmdResult object
"""
return command("domblkerror %s" % name, **dargs)
def domcontrol(name, options="", **dargs):
"""
Return domain control interface state.
:param name: name of domain
:param options: extra options
:return: CmdResult object
"""
cmd = "domcontrol %s %s" % (name, options)
return command(cmd, **dargs)
def save_image_dumpxml(state_file, options="", to_file="", **dargs):
"""
Dump xml from saved state file
:param state_file: saved state file to read
:param options: extra options
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "save-image-dumpxml %s %s" % (state_file, options)
result = command(cmd, **dargs)
if to_file:
result_file = open(to_file, 'w')
result_file.write(result.stdout.strip())
result_file.close()
return result
def save_image_define(state_file, xmlfile, options="", **dargs):
"""
Redefine the XML for a domain's saved state file
:param state_file: saved state file to modify
:param xmlfile: filename containing updated XML for the target
:param options: extra options
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "save-image-define %s %s %s" % (state_file, xmlfile, options)
return command(cmd, **dargs)
def inject_nmi(name, options="", **dargs):
"""
Inject NMI to the guest
:param name: domain name
:param options: extra options
"""
cmd = "inject-nmi %s %s" % (name, options)
return command(cmd, **dargs)
def vol_download(name, dfile, options="", **dargs):
"""
Download volume contents to a file
:param name: name of volume
    :param dfile: file path to download the volume contents to
:param options: pool name, offset and length
:return: CmdResult object
"""
cmd = "vol-download %s %s %s" % (name, dfile, options)
return command(cmd, **dargs)
def vol_upload(name, dfile, options="", **dargs):
"""
Upload file contents to a volume
:param name: name of volume
    :param dfile: file path to upload the volume contents from
:param options: pool name, offset and length
:return: CmdResult object
"""
cmd = "vol-upload %s %s %s" % (name, dfile, options)
return command(cmd, **dargs)
def blkiotune(name, weight=None, device_weights=None, options=None, **dargs):
"""
Set or get a domain's blkio parameters
:param name: name of domain
:param options: options may be live, config and current
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
cmd = "blkiotune %s" % name
if options:
cmd += " --%s" % options
if weight:
cmd += " --weight %s" % weight
if device_weights:
cmd += " --device-weights %s" % device_weights
return command(cmd, **dargs)
def blkdeviotune(name, device=None, options=None, params=None, **dargs):
"""
    Set or get a domain's block device I/O tuning parameters
:param name: name of domain
:param device: device name may be vda, vdb and so on
:param options: options may be live, config and current
:param params: parameters for blkdeviotune
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
cmd = "blkdeviotune %s" % name
if options:
cmd += " %s" % options
if device:
cmd += " --device %s" % device
if params:
if params.get("total_iops_sec"):
cmd += " --total-iops-sec %s" % params.get("total_iops_sec")
if params.get("read_iops_sec"):
cmd += " --read-iops-sec %s" % params.get("read_iops_sec")
if params.get("write_iops_sec"):
cmd += " --write-iops-sec %s" % params.get("write_iops_sec")
if params.get("total_iops_sec_max"):
cmd += " --total-iops-sec-max %s" % params.get("total_iops_sec_max")
if params.get("read_iops_sec_max"):
cmd += " --read-iops-sec-max %s" % params.get("read_iops_sec_max")
if params.get("write_iops_sec_max"):
cmd += " --write-iops-sec-max %s" % params.get("write_iops_sec_max")
if params.get("total_iops_sec_max_length"):
cmd += " --total-iops-sec-max-length %s" % params.get("total_iops_sec_max_length")
if params.get("read_iops_sec_max_length"):
cmd += " --read-iops-sec-max-length %s" % params.get("read_iops_sec_max_length")
if params.get("write_iops_sec_max_length"):
cmd += " --write-iops-sec-max-length %s" % params.get("write_iops_sec_max_length")
if params.get("total_bytes_sec"):
cmd += " --total-bytes-sec %s" % params.get("total_bytes_sec")
if params.get("read_bytes_sec"):
cmd += " --read-bytes-sec %s" % params.get("read_bytes_sec")
if params.get("write_bytes_sec"):
cmd += " --write-bytes-sec %s" % params.get("write_bytes_sec")
if params.get("total_bytes_sec_max"):
cmd += " --total-bytes-sec-max %s" % params.get("total_bytes_sec_max")
if params.get("read_bytes_sec_max"):
cmd += " --read-bytes-sec-max %s" % params.get("read_bytes_sec_max")
if params.get("write_bytes_sec_max"):
cmd += " --write-bytes-sec-max %s" % params.get("write_bytes_sec_max")
if params.get("total_bytes_sec_max_length"):
cmd += " --total-bytes-sec-max %s" % params.get("total_bytes_sec_max_length")
if params.get("read_bytes_sec_max_length"):
cmd += " --read-bytes-sec-max-length %s" % params.get("read_bytes_sec_max_length")
if params.get("write_bytes_sec_max_length"):
cmd += " --write-bytes-sec-max-length %s" % params.get("write_bytes_sec_max_length")
if params.get("size_iops_sec"):
cmd += " --size-iops-sec %s" % params.get("size_iops_sec")
if params.get("group_name"):
cmd += " --group-name %s" % params.get("group_name")
return command(cmd, **dargs)
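# Illustrative usage sketch (not part of the original module): only the keys
# present in the params dict end up on the command line. The domain name,
# device name and limit values are placeholders.
#
#   io_params = {"total_bytes_sec": "10485760", "total_iops_sec": "1000"}
#   result = blkdeviotune("vm1", device="vda", options="--live",
#                         params=io_params)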
def perf(domain, options="", events="", other_opt="", **dargs):
"""
Enable or disable perf events
:param domain: Domain name, id
:param options: --enable | --disable
    :param events: perf event names separated by comma
:param other_opt: --config | --live | --current
:param dargs: Standardized virsh function API keywords
:return: CmdResult instance
"""
cmd = "perf %s %s %s %s" % (domain, options, events, other_opt)
return command(cmd, **dargs)
def domstats(domains="", options="", **dargs):
"""
Get statistics about one or multiple domains
:param domains: List of domains
:param options: Extra options
:param dargs: Standardized virsh function API keywords
:return: CmdResult instance
"""
cmd = "domstats %s %s" % (domains, options)
return command(cmd, **dargs)
def freepages(cellno=None, pagesize=None, options="", **dargs):
"""
Display available free pages for the NUMA cell
:param cellno: NUMA cell number
:param pagesize: Page size (in kibibytes)
:param options: Extra options
:param dargs: Standardized virsh function API keywords
:return: CmdResult instance
"""
cmd = "freepages %s" % options
if cellno is not None:
cmd += " --cellno %s" % cellno
if pagesize is not None:
cmd += " --pagesize %s" % pagesize
return command(cmd, **dargs)
def domcapabilities(virttype=None, emulatorbin=None, arch=None, machine=None,
options="", **dargs):
"""
Capabilities of emulator with respect to host and libvirt
:param virttype: Virtualization type (/domain/@type)
:param emulatorbin: Path to emulator binary (/domain/devices/emulator)
:param arch: Domain architecture (/domain/os/type/@arch)
:param machine: machine type (/domain/os/type/@machine)
:param options: Extra options
:param dargs: Standardized virsh function API keywords
:return: CmdResult instance
"""
cmd = "domcapabilities %s" % options
if virttype:
cmd += " --virttype %s" % virttype
if emulatorbin:
cmd += " --emulatorbin %s" % emulatorbin
if arch:
cmd += " --arch %s" % arch
if machine:
cmd += " --machine %s" % machine
return command(cmd, **dargs)
def metadata(name, uri, options="", key=None, new_metadata=None, **dargs):
"""
Show or set domain's custom XML Metadata
:param name: Domain name, id or uuid
:param uri: URI of the namespace
:param options: options may be live, config and current
:param key: Key to be used as a namespace identifier
:param new_metadata: new metadata to set
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
cmd = "metadata --domain %s --uri %s %s" % (name, uri, options)
if key:
cmd += " --key %s" % key
if new_metadata:
cmd += " --set '%s'" % new_metadata.replace("\'", "\"")
return command(cmd, **dargs)
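# Illustrative usage sketch (not part of the original module): setting a small
# custom metadata snippet. The URI, key and XML payload are placeholders, and
# single quotes in new_metadata are rewritten to double quotes by the wrapper.
#
#   result = metadata("vm1", "http://example.com/ns/1.0", options="--config",
#                     key="example", new_metadata="<note>test</note>")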
def cpu_models(arch, options="", **dargs):
"""
Get the CPU models for an arch.
:param arch: Architecture
:param options: Extra options
:param dargs: Standardized virsh function API keywords
:return: CmdResult instance
"""
cmd = "cpu-models %s %s" % (arch, options)
return command(cmd, **dargs)
def net_dhcp_leases(network, mac=None, options="", **dargs):
"""
Print lease info for a given network
:param network: Network name or uuid
:param mac: Mac address
:param options: Extra options
:param dargs: Standardized virsh function API keywords
:return: CmdResult instance
"""
cmd = "net-dhcp-leases %s %s" % (network, options)
if mac:
cmd += " --mac %s" % mac
return command(cmd, **dargs)
def qemu_monitor_event(domain=None, event=None, event_timeout=None,
options="", **dargs):
"""
Listen for QEMU Monitor Events
:param domain: Domain name, id or UUID
:param event: Event type name
:param event_timeout: Timeout seconds
:param options: Extra options
:param dargs: Standardized virsh function API keywords
:return: CmdResult instance
"""
cmd = "qemu-monitor-event %s" % options
if domain:
cmd += " --domain %s" % domain
if event:
cmd += " --event %s" % event
if event_timeout:
cmd += " --timeout %s" % event_timeout
return command(cmd, **dargs)
def net_event(network=None, event=None, event_timeout=None, options="",
**dargs):
"""
List event types, or wait for network events to occur
:param network: Network name or uuid
:param event: Event type to wait for
:param event_timeout: Timeout seconds
:param options: Extra options
:param dargs: Standardized virsh function API keywords
:return: CmdResult instance
"""
cmd = "net-event %s" % options
if network:
cmd += " --network %s" % network
if event:
cmd += " --event %s" % event
if event_timeout:
cmd += " --timeout %s" % event_timeout
return command(cmd, **dargs)
def event(domain=None, event=None, event_timeout=None, options="", **dargs):
"""
List event types, or wait for domain events to occur
:param domain: Domain name, id or UUID
:param event: Event type name
:param event_timeout: Timeout seconds
:param options: Extra options
:param dargs: Standardized virsh function API keywords
:return: CmdResult instance
"""
cmd = "event %s" % options
if domain:
cmd += " --domain %s" % domain
if event:
cmd += " --event %s" % event
if event_timeout:
cmd += " --timeout %s" % event_timeout
return command(cmd, **dargs)
def move_mouse(name, coordinate, **dargs):
"""
Move VM mouse.
:param name: domain name
    :param coordinate: (x, y) tuple of mouse coordinates
"""
cmd = "mouse_move %s %s" % coordinate
qemu_monitor_command(name=name, cmd=cmd, options='--hmp', **dargs)
# Sleep 1 sec to make sure VM received mouse move event
time.sleep(1)
def click_button(name, left_button=True, **dargs):
"""
Click left/right button of VM mouse.
:param name: domain name
:param left_button: Click left or right button
"""
state = 1
if not left_button:
state = 4
cmd = "mouse_button %s" % state
qemu_monitor_command(name=name, cmd=cmd, options='--hmp', **dargs)
# Sleep 1 sec to make sure VM received mouse button event,
# then release button(state=0)
time.sleep(1)
cmd = "mouse_button 0"
qemu_monitor_command(name=name, cmd=cmd, options='--hmp', **dargs)
time.sleep(1)
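# Illustrative usage sketch (not part of the original module): moving the
# pointer through the QEMU human monitor and then clicking. The coordinate is
# an (x, y) offset tuple and the domain name is a placeholder.
#
#   move_mouse("vm1", (100, 50))
#   click_button("vm1", left_button=True)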
def iothreadadd(name, thread_id, options=None, **dargs):
"""
Add an IOThread to the guest domain.
:param name: domain name
:param thread_id: domain iothread ID
:param options: options may be live, config and current
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
cmd = "iothreadadd %s %s" % (name, thread_id)
if options:
cmd += " %s" % options
return command(cmd, **dargs)
def iothreaddel(name, thread_id, options=None, **dargs):
"""
Delete an IOThread from the guest domain.
:param name: domain name
:param thread_id: domain iothread ID
:param options: options may be live, config and current
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
cmd = "iothreaddel %s %s" % (name, thread_id)
if options:
cmd += " %s" % options
return command(cmd, **dargs)
def iothreadinfo(name, options=None, **dargs):
"""
View domain IOThreads.
:param name: domain name
:param options: options may be live, config and current
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
cmd = "iothreadinfo %s" % name
if options:
cmd += " %s" % options
return command(cmd, **dargs)
def iothreadpin(name, thread_id, cpuset, options=None, **dargs):
"""
Control domain IOThread affinity.
:param name: domain name
:param thread_id: domain iothread ID
:param cpuset: host cpu number(s) to set
:param options: options may be live, config and current
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
cmd = "iothreadpin %s %s %s" % (name, thread_id, cpuset)
if options:
cmd += " %s" % options
return command(cmd, **dargs)
def iothreadset(name, thread_id, values, options="", **dargs):
"""
Modifies an existing iothread of the domain using the specified iothread_id
:param name: domain name
:param thread_id: domain iothread ID
:param values: the values to be set
:param options: options may be live and current
:param dargs: standardized virsh function API keywords
:return: CmdResult instance
"""
cmd = "iothreadset %s %s %s %s" % (name, thread_id, values, options)
return command(cmd, **dargs)
def domrename(domain, new_name, options="", **dargs):
"""
Rename an inactive domain.
    :param domain: domain name, id or uuid.
    :param new_name: new domain name.
    :param options: extra param.
:param dargs: standardized virsh function API keywords
:return: result from command
"""
cmd = "domrename %s %s %s" % (domain, new_name, options)
return command(cmd, **dargs)
def nodedev_event(event=None, event_timeout=None, options="", **dargs):
"""
List event types, or wait for nodedevice events to occur
:param event: Event type to wait for
:param event_timeout: Timeout seconds
:param options: Extra options
:param dargs: Standardized virsh function API keywords
:return: CmdResult instance
"""
cmd = "nodedev-event %s" % options
if event:
cmd += " --event %s" % event
if event_timeout:
cmd += " --timeout %s" % event_timeout
return command(cmd, **dargs)
def backup_begin(name, options="", **dargs):
"""
Begin domain backup
:param name: name of domain
:param options: options of backup-begin command
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "backup-begin %s %s" % (name, options)
return command(cmd, **dargs)
def backup_dumpxml(name, **dargs):
"""
Dump domain backup xml
:param name: name of domain
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "backup-dumpxml %s" % name
return command(cmd, **dargs)
def checkpoint_create(name, options="", **dargs):
"""
Create domain checkpoint (with xml input)
:param name: name of domain
:param options: options of checkpoint-create command
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "checkpoint-create %s %s" % (name, options)
return command(cmd, **dargs)
def checkpoint_create_as(name, options="", **dargs):
"""
Create domain checkpoint (with options)
:param name: name of domain
:param options: options of checkpoint-create-as command
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "checkpoint-create-as %s %s" % (name, options)
return command(cmd, **dargs)
def checkpoint_edit(name, checkpoint, **dargs):
"""
Edit domain checkpoint
:param name: name of domain
:param checkpoint: name of checkpoint
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "checkpoint-edit %s %s" % (name, checkpoint)
return command(cmd, **dargs)
def checkpoint_info(name, checkpoint, **dargs):
"""
Output basic information about the checkpoint
:param name: name of domain
:param checkpoint: name of checkpoint
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "checkpoint-info %s %s" % (name, checkpoint)
return command(cmd, **dargs)
def checkpoint_list(name, options="", **dargs):
"""
List domain's checkpoint(s)
:param name: name of domain
:param options: options of checkpoint-list command
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "checkpoint-list %s %s" % (name, options)
return command(cmd, **dargs)
def checkpoint_dumpxml(name, checkpoint, options="", **dargs):
"""
Dump domain checkpoint xml
:param name: name of domain
:param checkpoint: name of checkpoint
:param options: options of checkpoint-dumpxml command
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "checkpoint-dumpxml %s %s %s" % (name, checkpoint, options)
return command(cmd, **dargs)
def checkpoint_parent(name, checkpoint, **dargs):
"""
Output the name of the parent checkpoint
:param name: name of domain
:param checkpoint: name of checkpoint
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "checkpoint-parent %s %s" % (name, checkpoint)
return command(cmd, **dargs)
def checkpoint_delete(name, checkpoint, options="", **dargs):
"""
Delete domain checkpoint
:param name: name of domain
:param checkpoint: name of checkpoint
:param options: options of checkpoint-delete command
:param dargs: standardized virsh function API keywords
:return: CmdResult object
"""
cmd = "checkpoint-delete %s %s %s" % (name, checkpoint, options)
return command(cmd, **dargs)
|
clebergnu/avocado-vt
|
virttest/virsh.py
|
Python
|
gpl-2.0
| 152,004
| 0.000329
|
# -*- coding: utf-8 -*-
#
# Author: François Rossigneux <francois.rossigneux@inria.fr>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from climate import tests
class DBUtilsTestCase(tests.TestCase):
"""Test case for DB Utils."""
pass
|
frossigneux/blazar
|
climate/tests/db/test_utils.py
|
Python
|
apache-2.0
| 741
| 0
|
#!/home/bolt/.python_compiled/bin/python3
import math
from PIL import Image
def complex_wrapper(func, scale_factor=1):
"""
    Wraps a complex-valued function of a complex argument so that it takes
    a (real, imag) pair and returns a (real, imag) pair; the input is
    divided by scale_factor before the function is applied.
"""
def inner(real, imag):
complex_num=complex(real, imag)
return_value=func(complex_num/scale_factor)
return return_value.real, return_value.imag
return inner
def decorate_atan(func):
"""
    A decorator that shifts the range of atan2 from (-pi, pi] to [0, 2*pi).
"""
def inner(y, x):
return_val=func(y, x)
if return_val>=0:
return return_val
else:
return 2*math.pi+return_val
return inner
atan=decorate_atan(math.atan2)
def assign_color_shade(position):
"""
This function assigns a unique color shade to each angle in [0, 2*pi)
"""
x,y=position
if (x,y)==(0,0):
return (255, 255, 255)
angle=atan(y,x)
mod_angle=angle%(2/3*math.pi)
mixing=mod_angle/(2/3*math.pi)*255
if angle<=2/3*math.pi:
return (255-mixing, mixing, 0)
elif 2/3*math.pi<angle<=4/3*math.pi:
return (0, 255-mixing, mixing)
else:
return (mixing, 0, 255-mixing)
def color_intensity(position, radius, gradient):
"""
This function assigns an intensity based on the radial distance and the gradient
"""
x,y=position
shade_tuple=assign_color_shade(position)
if x**2+y**2<radius**2:
r,b,g=shade_tuple
ratio=((x**2+y**2)/(radius**2))**gradient
r_new,b_new,g_new=255-ratio*(255-r),255-ratio*(255-b),255-ratio*(255-g)
return r_new,b_new,g_new
else:
ratio=((radius**2)/(x**2+y**2))**gradient
r,b,g=shade_tuple
return r*ratio,b*ratio,g*ratio
def colorize_point(position, radius, gradient=1):
"""
This function combines the last 2 functions and returns the shade of each point
"""
r,b,g=color_intensity(position, radius, gradient)
return round(r), round(b), round(g)
def generate_plane_image(x_size, y_size, radius, gradient):
"""
This function generates the domain plane
"""
image=Image.new('RGB', (x_size, y_size))
x_c,y_c=x_size//2, y_size//2
for x in range(x_size):
for y in range(y_size):
image.putpixel((x,y), colorize_point((x-x_c, y-y_c), radius, gradient))
return image
def map_function(plane_image, func, radius, gradient):
"""
This function maps the function on the domain plane
"""
image=Image.new('RGB', plane_image.size)
x_size,y_size=plane_image.size
x_c,y_c=x_size//2, y_size//2
for x in range(x_size):
for y in range(y_size):
x_new,y_new=func(x-x_c, y-y_c)
try:
new_colors=plane_image.getpixel((x_new+x_c, y_new+y_c))
except IndexError:
new_colors=colorize_point((x_new, y_new), radius, gradient)
image.putpixel((x,y), new_colors)
return image
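# Illustrative usage sketch (not part of the original script): composing the
# helpers above to render a domain-coloured plane and its image under the map
# z -> z**2 / 200. The image size, radius, gradient, scaling and output file
# names are arbitrary placeholder choices.
if __name__ == '__main__':
    plane = generate_plane_image(400, 400, 150, 0.5)
    mapped = map_function(plane, complex_wrapper(lambda z: z * z / 200),
                          150, 0.5)
    plane.save('plane.png')
    mapped.save('z_squared.png')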
|
Bolt64/my_code
|
Code Snippets/domain_coloring.py
|
Python
|
mit
| 3,030
| 0.022112
|
#!/usr/bin/env python
# -*- mode: python; sh-basic-offset: 4; indent-tabs-mode: nil; coding: utf-8 -*-
# vim: tabstop=4 softtabstop=4 expandtab shiftwidth=4 fileencoding=utf-8
#
# Shell command
# Copyright 2010, Jeremy Grosser <synack@digg.com>
import argparse
import os
import sys
import clusto
from clusto import script_helper
class Console(script_helper.Script):
'''
Use clusto's hardware port mappings to console to a remote server
using the serial console.
'''
def __init__(self):
script_helper.Script.__init__(self)
def _add_arguments(self, parser):
user = os.environ.get('USER')
parser.add_argument('--user', '-u', default=user,
                            help='SSH User (you can also set this in clusto.conf '
                                 'in console.user: --user > clusto.conf:console.user > "%s")' % user)
parser.add_argument('server', nargs=1,
help='Object to console to (IP or name)')
def add_subparser(self, subparsers):
parser = self._setup_subparser(subparsers)
self._add_arguments(parser)
def run(self, args):
try:
server = clusto.get(args.server[0])
if not server:
raise LookupError('Object "%s" does not exist' % args.server)
except Exception as e:
self.debug(e)
self.error('No object like "%s" was found' % args.server)
return 1
server = server[0]
if not hasattr(server, 'console'):
self.error('The object %s lacks a console method' % server.name)
return 2
user = os.environ.get('USER')
if args.user:
self.debug('Grabbing user from parameter')
user = args.user
else:
self.debug('Grabbing user from config file or default')
user = self.get_conf('console.user', user)
self.debug('User is "%s"' % user)
return(server.console(ssh_user=user))
def main():
console, args = script_helper.init_arguments(Console)
return(console.run(args))
if __name__ == '__main__':
sys.exit(main())
|
sanyaade-mobiledev/clusto
|
src/clusto/commands/console.py
|
Python
|
bsd-3-clause
| 2,107
| 0.003322
|
from datetime import datetime
#####################
# Account Test Data #
#####################
account = {
'name': 'Test Account Name',
'type': 'Checking',
'bank_name': 'Bank of Catonsville',
'account_num': '1234567890'
}
account_put = {
'name': 'Savings Account',
'type': 'Savings'
}
db_account = {
'id': 'acct_testaccountname',
'name': 'Test Account Name',
'type': 'Checking',
'bank_name': 'Bank of Catonsville',
'account_num': '1234567890',
'bal_uncleared': 2635.63,
'bal_cleared': -40.92,
'bal_reconciled': 1021.61,
'budget_monitored': True
}
db_account_2 = {
'id': 'acct_toaccountname',
'name': 'To Account Name',
'type': 'Savings',
'bank_name': 'Bank of Catonsville',
'account_num': '0987654321',
'bal_uncleared': 100.00,
'bal_cleared': 100.00,
'bal_reconciled': 200.00,
'budget_monitored': False
}
db_account_3 = {
'id': 'acct_to2accountname',
'name': 'To 2 Account Name',
'type': 'Savings',
'bank_name': 'Bank of Catonsville',
'account_num': '0987654320',
'bal_uncleared': 500.00,
'bal_cleared': 500.00,
'bal_reconciled': 600.00,
'budget_monitored': False
}
#########################
# Transaction Test Data #
#########################
transaction = {
'date': '2014-08-10',
'type': 'EFT',
'payee': 'Giant',
# need: category/account, split -> consider fields.Nested
'reconciled': '',
'amount': -52.08,
'memo': ''
}
transaction_transfer = {
'date': '2014-08-10',
'type': 'XFER',
'payee': 'Move to Savings',
'reconciled': '',
'amount': -100.00,
'memo': '',
'cat_or_acct_id': 'acct_toaccountname'
}
transaction_put_amount = { # id = 53f69e77137a001e344259cb (Amazon.com)
'amount': -14.01,
'memo': 'Birthday present'
}
transaction_put_reconciled = { # id = 53f69e77137a001e344259cb (Amazon.com)
'reconciled': 'C'
}
transaction_put_amountreconciled = { # id = 53f69e77137a001e344259cb (Amazon.com)
'amount': -14.01,
'reconciled': 'C'
}
db_transactions= [
{
'id': '53f69e77137a001e344259c7',
'date': datetime(2014,7,31),
'type': 'DEP',
'payee': 'Sandy Spring Bank',
'reconciled': 'R',
'amount': 1145.06,
'memo': 'Sandy\'s Salary',
'cat_or_acct_id': '1'
},
{
'id': '53f69e77137a001e344259c8',
'date': datetime(2014,8,1),
'type': 'EFT',
'payee': 'Costco',
'reconciled': 'R',
'amount': -123.45,
'memo': 'Test transaction memo',
'cat_or_acct_id': '2'
},
{
'id': '53f69e77137a001e344259c9',
'date': datetime(2014,8,6),
'type': 'EFT',
'payee': 'Exxon',
'reconciled': 'C',
'amount': -40.92,
'memo': '',
'cat_or_acct_id': '2'
},
{
'id': '53f69e77137a001e344259ca',
'date': datetime(2014,8,18),
'type': 'DEP',
'payee': 'U.S. Government',
'reconciled': '',
'amount': 2649.52,
'memo': 'Kyle\'s Salary',
'cat_or_acct_id': '1'
},
{
'id': '53f69e77137a001e344259cb',
'date': datetime(2014,8,12),
'type': 'EFT',
'payee': 'Amazon.com',
'reconciled': '',
'amount': -13.89,
'memo': '',
'cat_or_acct_id': '2'
}
]
db_transfer_transactions_fromAcct= [
{
'id': '53f69e77137a001e344259c7',
'date': datetime(2014,7,31),
'type': 'XFER',
'payee': 'To Savings',
'reconciled': 'C',
'amount': -100.00,
'memo': '',
'cat_or_acct_id': 'acct_toaccountname'
},
{
'id': '53f69e77137a001e344259c8',
'date': datetime(2014,7,31),
'type': 'XFER',
'payee': 'To Savings',
'reconciled': 'C',
'amount': -100.00,
'memo': '',
'cat_or_acct_id': 'somecategoryidstring'
}
]
db_transfer_transactions_toAcct= [
{
'id': '53f69e77137a001e344259c7',
'date': datetime(2014,7,31),
'type': 'XFER',
'payee': 'To Savings',
'reconciled': 'R',
'amount': 100.00,
'memo': '',
'cat_or_acct_id': 'acct_testaccountname'
}
]
###################
# Payee Test Data #
###################
payee = { 'name': 'Costco' }
payee_put = { 'name': 'Newegg.com' }
db_payees = [
{
'id': '53f69e77137a001e344259f1',
'name': 'Costco'
},
{
'id': '53f69e77137a001e344259f2',
'name': 'Amazon.com'
},
{
'id': '53f69e77137a001e344259f3',
'name': 'U.S. Government'
},
{
'id': '53f69e77137a001e344259f4',
'name': 'Exxon'
},
{
'id': '53f69e77137a001e344259f5',
'name': 'Sandy Spring Bank'
}
]
######################
# Category Test Data #
######################
category_1 = {
'name': 'Tithe',
'parent_id': None
}
category_2 = {
'name': 'Gas & Electric',
'parent_id': '1234567890'
}
category_put = { 'parent_id': '1234567890' }
db_categories = [
{
'id': '53f69e77137a001e344259f1',
'name': 'Auto',
'budget_tracked': False,
'parent_id': None
},
{
'id': '53f69e77137a001e344259fa',
'name': 'Gas',
'budget_tracked': True,
'parent_id': '53f69e77137a001e344259f1' # Parent = Auto
},
{
'id': '53f69e77137a001e344259fb',
'name': 'Service',
'budget_tracked': True,
'parent_id': '53f69e77137a001e344259f1' # Parent = Auto
},
{
'id': '53f69e77137a001e344259f2',
'name': 'Dining & Entertainment',
'budget_tracked': True,
'parent_id': None
},
{
'id': '53f69e77137a001e344259f3',
'name': 'Tithe',
'budget_tracked': True,
'parent_id': None
}
]
|
kschoelz/abacuspb
|
test/test_data.py
|
Python
|
gpl-2.0
| 5,915
| 0.005748
|
# fabfile.py
# TODO - Description.
#
###########################################################################
##
## Copyright (c) 2014 Adobe Systems Incorporated. All rights reserved.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###########################################################################
from fabric.api import *
from textwrap import dedent, wrap
import io
import re
import pickle
import sys
import os
import yaml
script_dir = os.path.dirname(__file__)
with open(script_dir+"/config.yaml", "r") as f:
config = yaml.load(f)
if os.path.isfile('config.yaml'):
with open('config.yaml', 'r') as f:
config.update(yaml.load(f))
else:
print("Error: Current directory must have local application config.")
sys.exit(-1)
env.roledefs['master'] = config['master']
env.roledefs['workers'] = config['workers']
env.roledefs['all'] = config['all']
env.use_ssh_config = True
@task
def assembly():
local("sbt assembly &> assembly.log")
@task
def sync():
# put(config['local_jar_dir'] + '/' + config['jar'], config['remote_jar_dir'])
for server in config['all']:
local("rsync -azrv --progress {}/{} {}:/{}".format(
config['local_jar_dir'],
config['jar'],
server,
config['remote_jar_dir']
))
@task
@roles('master')
def start():
outIO = io.BytesIO(); errIO = io.BytesIO()
sudo(' '.join([
config['remote_spark_dir'] + '/bin/spark-submit ',
'--class', config['main_class'], '--master', config['spark_master'],
'--deploy-mode', 'cluster', config['remote_jar_dir'] + '/' + config['jar']
]), stdout=outIO, stderr=errIO)
outIO.seek(0); errIO.seek(0)
outStr = outIO.read()
driverRe = re.search("State of (driver-\d*-\d*) is (\S*)", outStr)
driverId = driverRe.group(1)
status = driverRe.group(2)
print(" DriverID: " + driverId)
print(" Status: " + status)
if status == "ERROR":
msg = """
The error state occurs when the Spark Master rejects the job,
which is likely due to a misconfiguration in the Spark context
of your application.
            After checking your Spark context for accuracy, ssh into the node
            that failed and go to the Spark work directory, which contains
            the output for Spark applications and drivers.
Check stderr and stdout in the driver and application directories.
"""
print(dedent(msg))
elif status == "RUNNING":
driverServerRe = re.search("Driver running on (\S*):\d* ", outStr)
driverServer = driverServerRe.group(1)
print(" DriverServer: " + driverServer)
with open('lastJobStarted.pickle', 'wb') as f:
pickle.dump({
'driverId': driverId,
'driverServer': driverServer
}, f)
else:
print(status)
@task
@roles('master')
def kill(driverId=None):
if not driverId:
try:
with open('lastJobStarted.pickle', 'rb') as f:
m = pickle.load(f)
        except IOError as e:
            print("Unable to open lastJobStarted.pickle")
            sys.exit(-1)
        driverId = m['driverId']
sudo(' '.join([
config['remote_spark_dir'] + '/bin/spark-class ',
"org.apache.spark.deploy.Client kill",
config['spark_master'],
driverId
]))
@task
def getOutput(driverId=None,driverServer=None):
if not driverId:
try:
with open('lastJobStarted.pickle', 'rb') as f:
m = pickle.load(f)
except IOError as e:
print("Unable to open lastJobStarted.pickle")
sys.exit(-1)
driverId = m['driverId']
driverServer = m['driverServer']
local("scp " +
driverServer + ":" + config['spark_work'] + "/" + driverId +
"/stdout " + "stdout.txt")
local("scp " +
driverServer + ":" + config['spark_work'] + "/" + driverId +
"/stderr " + "stderr.txt")
|
adobe-research/spark-cluster-deployment
|
application-deployment-fabfile.py
|
Python
|
apache-2.0
| 4,180
| 0.013636
|
# Copyright (C) 2012-2020 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Contains data about certain markup, like HTML tags and external links.
When updating this file, please also update the C tokenizer version:
- mwparserfromhell/parser/ctokenizer/definitions.c
- mwparserfromhell/parser/ctokenizer/definitions.h
"""
__all__ = [
"get_html_tag",
"is_parsable",
"is_visible",
"is_single",
"is_single_only",
"is_scheme",
]
URI_SCHEMES = {
# [wikimedia/mediawiki.git]/includes/DefaultSettings.php @ 5c660de5d0
"bitcoin": False,
"ftp": True,
"ftps": True,
"geo": False,
"git": True,
"gopher": True,
"http": True,
"https": True,
"irc": True,
"ircs": True,
"magnet": False,
"mailto": False,
"mms": True,
"news": False,
"nntp": True,
"redis": True,
"sftp": True,
"sip": False,
"sips": False,
"sms": False,
"ssh": True,
"svn": True,
"tel": False,
"telnet": True,
"urn": False,
"worldwind": True,
"xmpp": False,
}
PARSER_BLACKLIST = [
# https://www.mediawiki.org/wiki/Parser_extension_tags @ 2020-12-21
"categorytree",
"ce",
"chem",
"gallery",
"graph",
"hiero",
"imagemap",
"inputbox",
"math",
"nowiki",
"pre",
"score",
"section",
"source",
"syntaxhighlight",
"templatedata",
"timeline",
]
INVISIBLE_TAGS = [
# https://www.mediawiki.org/wiki/Parser_extension_tags @ 2020-12-21
"categorytree",
"gallery",
"graph",
"imagemap",
"inputbox",
"math",
"score",
"section",
"templatedata",
"timeline",
]
# [wikimedia/mediawiki.git]/includes/parser/Sanitizer.php @ 95e17ee645
SINGLE_ONLY = ["br", "wbr", "hr", "meta", "link", "img"]
SINGLE = SINGLE_ONLY + ["li", "dt", "dd", "th", "td", "tr"]
MARKUP_TO_HTML = {
"#": "li",
"*": "li",
";": "dt",
":": "dd",
}
def get_html_tag(markup):
"""Return the HTML tag associated with the given wiki-markup."""
return MARKUP_TO_HTML[markup]
def is_parsable(tag):
"""Return if the given *tag*'s contents should be passed to the parser."""
return tag.lower() not in PARSER_BLACKLIST
def is_visible(tag):
"""Return whether or not the given *tag* contains visible text."""
return tag.lower() not in INVISIBLE_TAGS
def is_single(tag):
"""Return whether or not the given *tag* can exist without a close tag."""
return tag.lower() in SINGLE
def is_single_only(tag):
"""Return whether or not the given *tag* must exist without a close tag."""
return tag.lower() in SINGLE_ONLY
def is_scheme(scheme, slashes=True):
"""Return whether *scheme* is valid for external links."""
scheme = scheme.lower()
if slashes:
return scheme in URI_SCHEMES
return scheme in URI_SCHEMES and not URI_SCHEMES[scheme]
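# Illustrative doctest-style sketch (not part of the original module) showing
# the lookup helpers above:
#
#   >>> get_html_tag("*")
#   'li'
#   >>> is_single_only("br")
#   True
#   >>> is_scheme("http")
#   True
#   >>> is_scheme("javascript")
#   False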
|
earwig/mwparserfromhell
|
src/mwparserfromhell/definitions.py
|
Python
|
mit
| 3,915
| 0
|
if __name__ == '__main__':
# We want to call _enable_attach inside an import to make sure that it works properly that way.
import _debugger_case_wait_for_attach_impl
|
fabioz/PyDev.Debugger
|
tests_python/resources/_debugger_case_wait_for_attach.py
|
Python
|
epl-1.0
| 174
| 0.005747
|
# -*- coding: utf-8 -*-
def bubble_sort(to_sort):
index = 0
while index < len(to_sort):
offset = index
while offset > 0 and to_sort[offset - 1] > to_sort[offset]:
temp = to_sort[offset]
to_sort[offset] = to_sort[offset - 1]
to_sort[offset - 1] = temp
offset -= 1
index += 1
return to_sort
def quick_sort(to_sort):
result = []
if to_sort:
eq = to_sort[0]
lt, gt = _split_by(to_sort, eq)
for e in quick_sort(lt):
result.append(e)
result.append(eq)
for e in quick_sort(gt):
result.append(e)
return result
def _split_by(to_sort, eq):
lt = []
gt = []
for e in to_sort[1:]:
if e < eq:
lt.append(e)
if e > eq:
gt.append(e)
return (lt, gt)
import unittest
class BubbleSortTest(unittest.TestCase):
def test_sorts_empty_list(self):
self.assertEqual([], bubble_sort([]))
def test_sorts_single_element_list(self):
self.assertEqual([1], bubble_sort([1]))
def test_sorts_two_elements_sorted_list(self):
self.assertEqual([1, 2], bubble_sort([1, 2]))
def test_sorts_two_elements_unsorted_list(self):
self.assertEqual([1, 2], bubble_sort([2, 1]))
def test_sorts_three_elements_sorted_list(self):
self.assertEqual([1, 2, 3], bubble_sort([1, 2, 3]))
def test_sorts_2_1_3_list(self):
self.assertEqual([1, 2, 3], bubble_sort([2, 1, 3]))
def test_sorts_1_3_2_list(self):
self.assertEqual([1, 2, 3], bubble_sort([1, 3, 2]))
def test_sorts_3_2_1_list(self):
self.assertEqual([1, 2, 3], bubble_sort([3, 2, 1]))
class QuickSortTest(unittest.TestCase):
def test_sorts_an_empty_list(self):
self.assertEqual([], quick_sort([]))
def test_sorts_single_element_list(self):
self.assertEqual([1], quick_sort([1]))
def test_sorts_two_elements_sorted_list(self):
self.assertEqual([1, 2], quick_sort([1, 2]))
def test_sorts_two_elements_unsorted_list(self):
self.assertEqual([1, 2], quick_sort([2, 1]))
def test_sorts_three_elements_sorted_list(self):
self.assertEqual([1, 2, 3], quick_sort([1, 2, 3]))
def test_sorts_2_1_3_list(self):
self.assertEqual([1, 2, 3], quick_sort([2, 1, 3]))
def test_sorts_1_3_2_list(self):
self.assertEqual([1, 2, 3], quick_sort([1, 3, 2]))
def test_sorts_3_2_1_list(self):
self.assertEqual([1, 2, 3], quick_sort([3, 2, 1]))
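# Minimal runner added for illustration (not in the original kata file); it is
# assumed the intent is to execute the test cases above when the file is run
# directly.
if __name__ == '__main__':
    unittest.main()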
|
Alex-Diez/python-tdd-katas
|
old-katas/sort-kata/day-1.py
|
Python
|
mit
| 2,549
| 0.002354
|
# -*- coding: utf-8 -*-
# -*- Channel PeliculasySeries -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
import base64
from channelselector import get_thumb
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from lib import jsunpack
from core.item import Item
from channels import filtertools
from channels import autoplay
from platformcode import config, logger
IDIOMAS = {'la': 'Latino', 'lat':'Latino', 'cas':'Castellano','es': 'Castellano', 'vs': 'VOSE', 'vos':'VOSE', 'vo':'VO',
'ori':'VO', 'so':'VOS', 'sor':'VOS'}
list_language = IDIOMAS.values()
list_quality = ['TS','Screener','DVDRip','HDRip', 'HDTV', 'micro720', 'micro1080']
list_servers = ['openload', 'rapidvideo', 'powvideo', 'gamovideo', 'streamplay', 'flashx', 'clipwatching', 'vidoza',
'thevideome']
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'peliculasyseries')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'peliculasyseries')
host = 'https://peliculasyseries.org/'
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel, title='Peliculas', action='menu_movies',
thumbnail= get_thumb('movies', auto=True)))
itemlist.append(Item(channel=item.channel, title='Series', url=host+'series', action='list_all', type='tvshows',
thumbnail= get_thumb('tvshows', auto=True)))
itemlist.append(
item.clone(title="Buscar", action="search", url=host + 'buscar/q/', thumbnail=get_thumb("search", auto=True),
extra='movie'))
itemlist = filtertools.show_option(itemlist, item.channel, list_language, list_quality)
autoplay.show_option(item.channel, itemlist)
return itemlist
def menu_movies(item):
logger.info()
itemlist=[]
itemlist.append(Item(channel=item.channel, title='Todas', url=host + 'movies', action='list_all',
thumbnail=get_thumb('all', auto=True), type='movies'))
itemlist.append(Item(channel=item.channel, title='Genero', action='section',
thumbnail=get_thumb('genres', auto=True), type='movies'))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t| |<br>|\s{2,}', "", data)
return data
def get_language(lang_data):
logger.info()
language = []
lang_data = lang_data.replace('language-ES', '').replace('medium', '').replace('serie', '').replace('-','')
if 'class' in lang_data:
lang_list = scrapertools.find_multiple_matches(lang_data, 'class=" ([^"]+)"')
else:
return lang_data.strip()
    for lang in lang_list:
        if lang not in IDIOMAS:
            # unknown tokens fall back to the 'vos' key so the lookup below cannot fail
            lang = 'vos'
        if IDIOMAS[lang] not in language:
            language.append(IDIOMAS[lang])
return language
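# Illustrative example (hypothetical markup, not taken from the live site):
# lang_data = '<i class=" la"></i><i class=" vs"></i>' would return
# ['Latino', 'VOSE'], while a value with no 'class' attribute, e.g. ' la ',
# is simply returned stripped ('la').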
def section(item):
logger.info()
itemlist=[]
duplicados=[]
data = get_source(host)
data = scrapertools.find_single_match(data, 'data-toggle="dropdown">Géneros.*?multi-column-dropdown">.*?"clearfix"')
if 'Genero' in item.title:
patron = '<li><a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle
if title not in duplicados:
itemlist.append(Item(channel=item.channel, url=scrapedurl, title=title, action='list_all',
type=item.type))
duplicados.append(title)
return itemlist
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
if item.type == 'movies':
patron = '<div class="col-md-2 w3l-movie-gride-agile"><a href="([^"]+)" class=".*?">'
patron += '<img src="([^"]+)" title="([^"]+)" class="img-responsive".*?'
patron += '<div class="calidad" >([^<]+)</div> <div class="audio-info">'
patron += '(.*?)<div class="w3l-action-icon">.*?<p>([^<]+)</p>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, quality, lang_data, year in matches:
title = '%s [%s] [%s]' % (scrapedtitle, year, quality)
if 'screener' in quality.lower():
quality = 'Screener'
contentTitle = scrapedtitle
thumbnail = scrapedthumbnail
url = scrapedurl
language = get_language(lang_data)
itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentTitle=contentTitle,
language=language,
quality=quality,
infoLabels={'year':year}))
elif item.type == 'tvshows':
patron = '<div class="col-md-2 w3l-movie-gride-agile"><a href="([^"]+)" class=".*?">'
patron += '<img src="([^"]+)" title="([^"]+)" class="img-responsive".*?<p>([^<]+)</p>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
title = scrapedtitle
contentSerieName = scrapedtitle
thumbnail = scrapedthumbnail
url = scrapedurl
itemlist.append(item.clone(action='seasons',
title=title,
url=url,
thumbnail=thumbnail,
contentSerieName=contentSerieName,
context=filtertools.context(item, list_language, list_quality),
infoLabels={'year':year}))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
    # Pagination
url_next_page = scrapertools.find_single_match(data,"<a class='last' href='([^']+)'>»</a>")
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
return itemlist
def seasons(item):
logger.info()
itemlist=[]
data=get_source(item.url)
patron='<a href="([^"]+)"><img class="thumb-item" src="([^"]+)" alt="[^"]+" >'
patron += '<div class="season-item">Temporada (\d+)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedurl, scrapedthumbnail, season in matches:
infoLabels['season']=season
title = 'Temporada %s' % season
itemlist.append(Item(channel=item.channel, title=title, url=scrapedurl, action='episodesxseasons',
thumbnail=scrapedthumbnail, infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseasons(tempitem)
return itemlist
def episodesxseasons(item):
logger.info()
itemlist = []
data=get_source(item.url)
patron ='class="row-serie-item"><a href="([^"]+)">.*?<img class="episode-thumb-item" src="([^"]+)" alt="([^"]+)" >'
patron += '<divclass="audio-info-series">(.*?)<div class="episode-item">%s+x(\d+)</div>' % item.infoLabels['season']
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedurl, scrapedthumbnail, scrapedtitle, lang_data, scrapedepisode in matches:
infoLabels['episode'] = scrapedepisode
url = scrapedurl
language = get_language(lang_data)
title = '%sx%s - %s %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle, language)
itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos',
thumbnail=scrapedthumbnail, language=language, infoLabels=infoLabels))
itemlist = filtertools.get_links(itemlist, item, list_language)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def findvideos(item):
logger.info()
from lib import generictools
itemlist = []
data = get_source(item.url)
patron = '<div class="available-source" ><div class="([^"]+)">.*?'
patron += 'data-data="([^"]+)".*?<span class="quality-text">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for lang_data, scrapedurl, quality in matches:
lang = get_language(lang_data)
        if 'screener' in quality.lower():
            quality = 'Screener'
title = '%s [%s] [%s]'
url = base64.b64decode(scrapedurl[1:])
itemlist.append(
Item(channel=item.channel, url=url, title=title, action='play', quality=quality, language=IDIOMAS[lang],
infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.quality, x.language))
    # Required for link checking
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
    # Required for FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language, list_quality)
    # Required for AutoPlay
    autoplay.start(itemlist, item)
itemlist = sorted(itemlist, key=lambda it: it.language)
if item.contentType != 'episode':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return search_results(item)
else:
return []
def search_results(item):
logger.info()
itemlist=[]
data=get_source(item.url)
patron = '<li class="search-results-item media-item" .*?<a href="([^"]+)" title="([^"]+)">.*?'
patron += '<img class="content" src="([^"]+)" .*?>(Pelicula|Serie) del año([^<]+)</p>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumb, content_type, year in matches:
title = scrapedtitle
if len(year)==0:
year = '-'
url = scrapedurl
thumbnail = scrapedthumb
        if '/serie' not in url:
action = 'findvideos'
else:
action = 'seasons'
new_item=Item(channel=item.channel, title=title, url=url, thumbnail=thumbnail, action=action,
infoLabels={'year':year})
if new_item.action == 'findvideos':
new_item.contentTitle = new_item.title
else:
new_item.contentSerieName = new_item.title
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas']:
item.url = host + 'movies'
elif categoria == 'infantiles':
item.url = host + 'genero/animation/'
item.type='movies'
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
|
alfa-jor/addon
|
plugin.video.alfa/channels/peliculasyseries.py
|
Python
|
gpl-3.0
| 12,489
| 0.008091
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# This script is used to capture the content of config.status-generated
# files and subsequently restore their timestamp if they haven't changed.
import argparse
import errno
import itertools
import os
import re
import subprocess
import sys
import pickle
import mozpack.path as mozpath
class Pool(object):
def __new__(cls, size):
try:
import multiprocessing
size = min(size, multiprocessing.cpu_count())
return multiprocessing.Pool(size)
except:
return super(Pool, cls).__new__(cls)
def imap_unordered(self, fn, iterable):
return itertools.imap(fn, iterable)
def close(self):
pass
def join(self):
pass
class File(object):
def __init__(self, path):
self._path = path
self._content = open(path, 'rb').read()
stat = os.stat(path)
self._times = (stat.st_atime, stat.st_mtime)
@property
def path(self):
return self._path
@property
def mtime(self):
return self._times[1]
@property
def modified(self):
'''Returns whether the file was modified since the instance was
created. Result is memoized.'''
if hasattr(self, '_modified'):
return self._modified
modified = True
if os.path.exists(self._path):
if open(self._path, 'rb').read() == self._content:
modified = False
self._modified = modified
return modified
def update_time(self):
'''If the file hasn't changed since the instance was created,
restore its old modification time.'''
if not self.modified:
os.utime(self._path, self._times)
# As defined in the various sub-configures in the tree
PRECIOUS_VARS = set([
'build_alias',
'host_alias',
'target_alias',
'CC',
'CFLAGS',
'LDFLAGS',
'LIBS',
'CPPFLAGS',
'CPP',
'CCC',
'CXXFLAGS',
'CXX',
'CCASFLAGS',
'CCAS',
])
CONFIGURE_DATA = 'configure.pkl'
# Autoconf, in some of the sub-configures used in the tree, likes to error
# out when "precious" variables change in value. The solution it gives to
# straighten things is to either run make distclean or remove config.cache.
# There's no reason not to do the latter automatically instead of failing,
# doing the cleanup (which, on buildbots means a full clobber), and
# restarting from scratch.
def maybe_clear_cache(data):
env = dict(data['env'])
for kind in ('target', 'host', 'build'):
arg = data[kind]
if arg is not None:
env['%s_alias' % kind] = arg
# configure can take variables assignments in its arguments, and that
# overrides whatever is in the environment.
for arg in data['args']:
if arg[:1] != '-' and '=' in arg:
key, value = arg.split('=', 1)
env[key] = value
comment = re.compile(r'^\s+#')
cache = {}
with open(data['cache-file']) as f:
for line in f:
if not comment.match(line) and '=' in line:
key, value = line.rstrip(os.linesep).split('=', 1)
# If the value is quoted, unquote it
if value[:1] == "'":
value = value[1:-1].replace("'\\''", "'")
cache[key] = value
for precious in PRECIOUS_VARS:
# If there is no entry at all for that precious variable, then
# its value is not precious for that particular configure.
if 'ac_cv_env_%s_set' % precious not in cache:
continue
is_set = cache.get('ac_cv_env_%s_set' % precious) == 'set'
value = cache.get('ac_cv_env_%s_value' % precious) if is_set else None
if value != env.get(precious):
print 'Removing %s because of %s value change from:' \
% (data['cache-file'], precious)
print ' %s' % (value if value is not None else 'undefined')
print 'to:'
print ' %s' % env.get(precious, 'undefined')
os.remove(data['cache-file'])
return True
return False
def split_template(s):
"""Given a "file:template" string, returns "file", "template". If the string
is of the form "file" (without a template), returns "file", "file.in"."""
if ':' in s:
return s.split(':', 1)
return s, '%s.in' % s
def get_config_files(data):
config_status = mozpath.join(data['objdir'], 'config.status')
if not os.path.exists(config_status):
return [], []
configure = mozpath.join(data['srcdir'], 'configure')
config_files = []
command_files = []
# Scan the config.status output for information about configuration files
# it generates.
config_status_output = subprocess.check_output(
[data['shell'], '-c', '%s --help' % config_status],
stderr=subprocess.STDOUT).splitlines()
state = None
for line in config_status_output:
if line.startswith('Configuration') and line.endswith(':'):
if line.endswith('commands:'):
state = 'commands'
else:
state = 'config'
elif not line.strip():
state = None
elif state:
for f, t in (split_template(couple) for couple in line.split()):
f = mozpath.join(data['objdir'], f)
t = mozpath.join(data['srcdir'], t)
if state == 'commands':
command_files.append(f)
else:
config_files.append((f, t))
return config_files, command_files
def prepare(srcdir, objdir, shell, args):
parser = argparse.ArgumentParser()
parser.add_argument('--target', type=str)
parser.add_argument('--host', type=str)
parser.add_argument('--build', type=str)
parser.add_argument('--cache-file', type=str)
    # The --srcdir argument is simply ignored. It's a useless autoconf feature
    # that we don't support well anyway. Parsing it here strips it from
    # `others` and lets us skip setting it when calling the subconfigure
    # (configure will take it from the configure path anyway).
parser.add_argument('--srcdir', type=str)
data_file = os.path.join(objdir, CONFIGURE_DATA)
previous_args = None
if os.path.exists(data_file):
with open(data_file, 'rb') as f:
data = pickle.load(f)
previous_args = data['args']
# Msys likes to break environment variables and command line arguments,
# so read those from stdin, as they are passed from the configure script
# when necessary (on windows).
    # However, for some reason, $PATH is not handled like other environment
    # variables, and msys remangles it even when the value it is given is
    # already an msys $PATH. Fortunately, the mangling/demangling is just fine
    # for $PATH, so we can just take the value from the environment. Msys will
    # convert it back properly when calling subconfigure.
input = sys.stdin.read()
if input:
data = {a: b for [a, b] in eval(input)}
environ = {a: b for a, b in data['env']}
environ['PATH'] = os.environ['PATH']
args = data['args']
else:
environ = os.environ
args, others = parser.parse_known_args(args)
data = {
'target': args.target,
'host': args.host,
'build': args.build,
'args': others,
'shell': shell,
'srcdir': srcdir,
'env': environ,
}
if args.cache_file:
data['cache-file'] = mozpath.normpath(mozpath.join(os.getcwd(),
args.cache_file))
else:
data['cache-file'] = mozpath.join(objdir, 'config.cache')
if previous_args is not None:
data['previous-args'] = previous_args
try:
os.makedirs(objdir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
with open(data_file, 'wb') as f:
pickle.dump(data, f)
def prefix_lines(text, prefix):
return ''.join('%s> %s' % (prefix, line) for line in text.splitlines(True))
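# For example, prefix_lines('line1\nline2\n', 'objdir') returns
# 'objdir> line1\nobjdir> line2\n', which is how the configure output below
# gets tagged with its object directory.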
def run(objdir):
ret = 0
output = ''
with open(os.path.join(objdir, CONFIGURE_DATA), 'rb') as f:
data = pickle.load(f)
data['objdir'] = objdir
cache_file = data['cache-file']
cleared_cache = True
if os.path.exists(cache_file):
cleared_cache = maybe_clear_cache(data)
config_files, command_files = get_config_files(data)
contents = []
for f, t in config_files:
contents.append(File(f))
# AC_CONFIG_COMMANDS actually only registers tags, not file names
# but most commands are tagged with the file name they create.
# However, a few don't, or are tagged with a directory name (and their
# command is just to create that directory)
for f in command_files:
if os.path.isfile(f):
contents.append(File(f))
# Only run configure if one of the following is true:
# - config.status doesn't exist
# - config.status is older than configure
# - the configure arguments changed
# - the environment changed in a way that requires a cache clear.
configure = mozpath.join(data['srcdir'], 'configure')
config_status_path = mozpath.join(objdir, 'config.status')
skip_configure = True
if not os.path.exists(config_status_path):
skip_configure = False
config_status = None
else:
config_status = File(config_status_path)
if config_status.mtime < os.path.getmtime(configure) or \
data.get('previous-args', data['args']) != data['args'] or \
cleared_cache:
skip_configure = False
relobjdir = os.path.relpath(objdir, os.getcwd())
if not skip_configure:
command = [data['shell'], configure]
for kind in ('target', 'build', 'host'):
if data.get(kind) is not None:
command += ['--%s=%s' % (kind, data[kind])]
command += data['args']
command += ['--cache-file=%s' % cache_file]
# Pass --no-create to configure so that it doesn't run config.status.
# We're going to run it ourselves.
command += ['--no-create']
print prefix_lines('configuring', relobjdir)
print prefix_lines('running %s' % ' '.join(command[:-1]), relobjdir)
sys.stdout.flush()
try:
output += subprocess.check_output(command,
stderr=subprocess.STDOUT, cwd=objdir, env=data['env'])
except subprocess.CalledProcessError as e:
return relobjdir, e.returncode, e.output
# Leave config.status with a new timestamp if configure is newer than
# its original mtime.
if config_status and os.path.getmtime(configure) <= config_status.mtime:
config_status.update_time()
# Only run config.status if one of the following is true:
# - config.status changed or did not exist
# - one of the templates for config files is newer than the corresponding
# config file.
skip_config_status = True
if not config_status or config_status.modified:
# If config.status doesn't exist after configure (because it's not
# an autoconf configure), skip it.
if os.path.exists(config_status_path):
skip_config_status = False
else:
# config.status changed or was created, so we need to update the
# list of config and command files.
config_files, command_files = get_config_files(data)
for f, t in config_files:
if not os.path.exists(t) or \
os.path.getmtime(f) < os.path.getmtime(t):
skip_config_status = False
if not skip_config_status:
if skip_configure:
print prefix_lines('running config.status', relobjdir)
sys.stdout.flush()
try:
output += subprocess.check_output([data['shell'], '-c',
'./config.status'], stderr=subprocess.STDOUT, cwd=objdir,
env=data['env'])
except subprocess.CalledProcessError as e:
ret = e.returncode
output += e.output
for f in contents:
f.update_time()
return relobjdir, ret, output
def subconfigure(args):
parser = argparse.ArgumentParser()
parser.add_argument('--list', type=str,
help='File containing a list of subconfigures to run')
parser.add_argument('--skip', type=str,
                        help='File containing a list of subconfigures to skip')
parser.add_argument('subconfigures', type=str, nargs='*',
help='Subconfigures to run if no list file is given')
args, others = parser.parse_known_args(args)
subconfigures = args.subconfigures
if args.list:
subconfigures.extend(open(args.list, 'rb').read().splitlines())
if args.skip:
skips = set(open(args.skip, 'rb').read().splitlines())
subconfigures = [s for s in subconfigures if s not in skips]
if not subconfigures:
return 0
ret = 0
# One would think using a ThreadPool would be faster, considering
# everything happens in subprocesses anyways, but no, it's actually
# slower on Windows. (20s difference overall!)
pool = Pool(len(subconfigures))
for relobjdir, returncode, output in \
pool.imap_unordered(run, subconfigures):
print prefix_lines(output, relobjdir)
sys.stdout.flush()
ret = max(returncode, ret)
if ret:
break
pool.close()
pool.join()
return ret
def main(args):
if args[0] != '--prepare':
return subconfigure(args)
topsrcdir = os.path.abspath(args[1])
subdir = args[2]
# subdir can be of the form srcdir:objdir
if ':' in subdir:
srcdir, subdir = subdir.split(':', 1)
else:
srcdir = subdir
srcdir = os.path.join(topsrcdir, srcdir)
objdir = os.path.abspath(subdir)
return prepare(srcdir, objdir, args[3], args[4:])
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
eventql/eventql
|
deps/3rdparty/spidermonkey/mozjs/build/subconfigure.py
|
Python
|
agpl-3.0
| 14,193
| 0.000705
|
#!/usr/bin/env python
import glob
import os
import sys
import unittest
import common
if len(sys.argv) > 1:
builddir = sys.argv[1]
no_import_hooks = True
else:
builddir = '..'
no_import_hooks = False
common.run_import_tests(builddir, no_import_hooks)
SKIP_FILES = ['common', 'runtests']
dir = os.path.split(os.path.abspath(__file__))[0]
os.chdir(dir)
def gettestnames():
files = [fname[:-3] for fname in glob.glob('test*.py')
if fname not in SKIP_FILES]
return files
suite = unittest.TestSuite()
loader = unittest.TestLoader()
for name in gettestnames():
suite.addTest(loader.loadTestsFromName(name))
testRunner = unittest.TextTestRunner()
testRunner.run(suite)
|
mate-desktop/python-mate-desktop
|
tests/runtests.py
|
Python
|
lgpl-2.1
| 722
| 0.00554
|
# Kevin Nash (kjn33)
# EECS 293
# Assignment 12
from entity import Entity
from random import randint
class Passenger(Entity):
""" Entities that need to be checked in following queueing """
def __init__(self):
"""
Passengers follow Entity initialization,
are randomly given special parameters
"""
super(Passenger, self).__init__()
# 50% chance of being a frequent flyer
self.frequent = randint(1, 2) % 2 == 0
# 10% chance of having a given special condition
self.oversize = randint(1, 10) % 10 == 0
self.rerouted = randint(1, 10) % 10 == 0
self.overbook = randint(1, 10) % 10 == 0
self.time = 2
self.calc_time()
def __str__(self):
""" Represent Passenger by name, ID, and flyer type """
flyer_type = "regular"
if self.frequent:
flyer_type = "frequent"
return "%s %d (%s)" % (self.__class__.__name__, self.id, flyer_type)
def calc_time(self):
""" Set the time required for check in based on special parameters """
if self.oversize:
self.time += 2
if self.rerouted:
self.time += 2
if self.overbook:
self.time += 2
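# For example, a passenger whose oversize and rerouted checks both come up True
# ends with time = 2 (base) + 2 + 2 = 6, while a passenger with no special
# conditions keeps the base time of 2.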
|
Fullbiter/EECS-293
|
pa12-13/airville/src/passenger.py
|
Python
|
gpl-3.0
| 1,260
| 0.000794
|
import pygame
pygame.init()
screen = pygame.display.set_mode((400, 300))
done = False
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
pygame.display.flip()
|
stivosaurus/rpi-snippets
|
reference_scripts/basic_pygame.py
|
Python
|
unlicense
| 266
| 0.015038
|
from twisted.internet import protocol, reactor
class Echo(protocol.Protocol):
def dataReceived(self, data):
self.transport.write(data)
class EchoFactory(protocol.Factory):
def buildProtocol(self, addr):
return Echo()
reactor.listenTCP(1234, EchoFactory())
reactor.run()
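# Quick manual check (assuming the server is running locally):
#   $ telnet localhost 1234    # every line typed is echoed straight back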
|
walterfan/snippets
|
python/exam/EchoServer.py
|
Python
|
apache-2.0
| 296
| 0.013514
|
#!/usr/bin/env python
'''em_dict_basic.py - Basic benchmark for external memory dictionary.'''
__author__ = 'huku <huku@grhack.net>'
import sys
import shutil
import random
import time
import util
import pyrsistence
def main(argv):
# Initialize new external memory dictionary.
util.msg('Populating external memory dictionary')
t1 = time.time()
dirname = util.make_temp_name('em_dict')
em_dict = pyrsistence.EMDict(dirname)
for i in util.xrange(0x1000000):
em_dict[i] = i
t2 = time.time()
util.msg('Done in %d sec.' % (t2 - t1))
# Close and remove external memory dictionary from disk.
em_dict.close()
shutil.rmtree(dirname)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
# EOF
|
huku-/pyrsistence
|
tests/em_dict_basic.py
|
Python
|
bsd-2-clause
| 766
| 0
|
# -*- coding: utf-8 -*-
# $Id$
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Traditional Chinese language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
'attention (translation required)': 'attention',
'caution (translation required)': 'caution',
'code (translation required)': 'code',
'danger (translation required)': 'danger',
'error (translation required)': 'error',
'hint (translation required)': 'hint',
'important (translation required)': 'important',
'note (translation required)': 'note',
'tip (translation required)': 'tip',
'warning (translation required)': 'warning',
'admonition (translation required)': 'admonition',
'sidebar (translation required)': 'sidebar',
'topic (translation required)': 'topic',
'line-block (translation required)': 'line-block',
'parsed-literal (translation required)': 'parsed-literal',
'rubric (translation required)': 'rubric',
'epigraph (translation required)': 'epigraph',
'highlights (translation required)': 'highlights',
'pull-quote (translation required)': 'pull-quote',
'compound (translation required)': 'compound',
u'container (translation required)': 'container',
#'questions (translation required)': 'questions',
'table (translation required)': 'table',
'csv-table (translation required)': 'csv-table',
'list-table (translation required)': 'list-table',
#'qa (translation required)': 'questions',
#'faq (translation required)': 'questions',
'meta (translation required)': 'meta',
'math (translation required)': 'math',
#'imagemap (translation required)': 'imagemap',
'image (translation required)': 'image',
'figure (translation required)': 'figure',
'include (translation required)': 'include',
'raw (translation required)': 'raw',
'replace (translation required)': 'replace',
'unicode (translation required)': 'unicode',
u'日期': 'date',
'class (translation required)': 'class',
'role (translation required)': 'role',
u'default-role (translation required)': 'default-role',
u'title (translation required)': 'title',
'contents (translation required)': 'contents',
'sectnum (translation required)': 'sectnum',
'section-numbering (translation required)': 'sectnum',
u'header (translation required)': 'header',
u'footer (translation required)': 'footer',
#'footnotes (translation required)': 'footnotes',
#'citations (translation required)': 'citations',
'target-notes (translation required)': 'target-notes',
'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Traditional Chinese name to registered (in directives/__init__.py)
directive name mapping."""
roles = {
# language-dependent: fixed
'abbreviation (translation required)': 'abbreviation',
'ab (translation required)': 'abbreviation',
'acronym (translation required)': 'acronym',
'ac (translation required)': 'acronym',
u'code (translation required)': 'code',
'index (translation required)': 'index',
'i (translation required)': 'index',
'subscript (translation required)': 'subscript',
'sub (translation required)': 'subscript',
'superscript (translation required)': 'superscript',
'sup (translation required)': 'superscript',
'title-reference (translation required)': 'title-reference',
'title (translation required)': 'title-reference',
't (translation required)': 'title-reference',
'pep-reference (translation required)': 'pep-reference',
'pep (translation required)': 'pep-reference',
'rfc-reference (translation required)': 'rfc-reference',
'rfc (translation required)': 'rfc-reference',
'emphasis (translation required)': 'emphasis',
'strong (translation required)': 'strong',
'literal (translation required)': 'literal',
'math (translation required)': 'math',
'named-reference (translation required)': 'named-reference',
'anonymous-reference (translation required)': 'anonymous-reference',
'footnote-reference (translation required)': 'footnote-reference',
'citation-reference (translation required)': 'citation-reference',
'substitution-reference (translation required)': 'substitution-reference',
'target (translation required)': 'target',
'uri-reference (translation required)': 'uri-reference',
'uri (translation required)': 'uri-reference',
'url (translation required)': 'uri-reference',
'raw (translation required)': 'raw',}
"""Mapping of Traditional Chinese role names to canonical role names for
interpreted text."""
|
Lyleo/OmniMarkupPreviewer
|
OmniMarkupLib/Renderers/libs/python2/docutils/parsers/rst/languages/zh_tw.py
|
Python
|
mit
| 5,129
| 0.001366
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding M2M table for field last_read_by on 'Message'
m2m_table_name = db.shorten_name(u'digiapproval_message_last_read_by')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('message', models.ForeignKey(orm[u'digiapproval.message'], null=False)),
('user', models.ForeignKey(orm[u'auth.user'], null=False))
))
db.create_unique(m2m_table_name, ['message_id', 'user_id'])
def backwards(self, orm):
# Removing M2M table for field last_read_by on 'Message'
db.delete_table(db.shorten_name(u'digiapproval_message_last_read_by'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'digiapproval.customeraccount': {
'Meta': {'object_name': 'CustomerAccount'},
'account_type': ('django.db.models.fields.CharField', [], {'default': "'CUSTOMER'", 'max_length': '16'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent_accounts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'sub_accounts'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['digiapproval.CustomerAccount']"}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'digiapproval.message': {
'Meta': {'object_name': 'Message'},
'_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_read_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'last_read'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'message': ('django.db.models.fields.TextField', [], {}),
'posted': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['digiapproval.Workflow']"})
},
u'digiapproval.task': {
'Meta': {'object_name': 'Task'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'task': ('jsonfield.fields.JSONField', [], {}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': "'36'"}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['digiapproval.Workflow']"})
},
u'digiapproval.userfile': {
'Meta': {'object_name': 'UserFile'},
'_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'virus_status': ('django.db.models.fields.CharField', [], {'default': "'UNSCANNED'", 'max_length': '16'})
},
u'digiapproval.workflow': {
'Meta': {'object_name': 'Workflow'},
'approver': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'workflow_approver'", 'to': u"orm['auth.User']"}),
'completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'customer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'workflow_customer'", 'to': u"orm['digiapproval.CustomerAccount']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'spec': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['digiapproval.WorkflowSpec']"}),
'state': ('django.db.models.fields.CharField', [], {'default': "'STARTED'", 'max_length': '10'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'70558195da6a4488b22d6e8749f86580'", 'max_length': '36'}),
'workflow': ('digiapproval_project.apps.digiapproval.fields.WorkflowField', [], {})
},
u'digiapproval.workflowspec': {
'Meta': {'object_name': 'WorkflowSpec'},
'approvers': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'workflowspecs_approvers'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.Group']"}),
'delegators': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'workflowspecs_delegators'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.Group']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'64'"}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'workflowspecs_owner'", 'to': u"orm['auth.Group']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'spec': ('digiapproval_project.apps.digiapproval.fields.WorkflowSpecField', [], {}),
'toplevel': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
}
}
complete_apps = ['digiapproval']
|
tsujamin/digi-approval
|
src/digiapproval_project/digiapproval_project/apps/digiapproval/migrations/0009_add_last_read_mm_auto.py
|
Python
|
gpl-3.0
| 8,804
| 0.007383
|
# -*-coding:Utf-8 -*
# Copyright (c) 2013 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'auberge liste'."""
from primaires.interpreteur.masque.parametre import Parametre
class PrmListe(Parametre):
"""Commande 'auberge liste'"""
def __init__(self):
"""Constructeur du paramètre."""
Parametre.__init__(self, "liste", "list")
self.aide_courte = "affiche les auberges existantes"
self.aide_longue = \
"Cette commande permet de lister les auberges existantes."
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande."""
auberges = sorted([a for a in importeur.auberge.auberges.values()],
key=lambda a: a.cle)
if auberges:
en_tete = "+-" + "-" * 15 + "-+-" + "-" * 25 + "-+-" + \
"-" * 8 + "-+-" + "-" * 6 + "-+"
msg = en_tete + "\n"
msg += "| Clé | Salle | " \
"Chambres | Occupé |\n"
msg += en_tete
for auberge in auberges:
cle = auberge.cle
ident = auberge.ident_comptoir
nb_chambres = len(auberge.chambres)
pct_occupation = auberge.pct_occupation
msg += "\n| {:<15} | {:<25} | {:>8} | {:>5}% |".format(
cle, ident, nb_chambres, pct_occupation)
msg += "\n" + en_tete
personnage << msg
else:
personnage << "Aucune auberge n'existe pour l'heure."
|
stormi/tsunami
|
src/secondaires/auberge/commandes/auberge/liste.py
|
Python
|
bsd-3-clause
| 3,069
| 0.000979
|
# Copyright 2015 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo_config import cfg
from oslo_log import log as logging
from neutron.agent.common import config as agent_conf
from neutron.agent.metadata import agent
from neutron.agent.metadata import config as metadata_conf
from neutron.common import config
from neutron.common import utils
from neutron.openstack.common.cache import cache
LOG = logging.getLogger(__name__)
def main():
cfg.CONF.register_opts(metadata_conf.UNIX_DOMAIN_METADATA_PROXY_OPTS)
cfg.CONF.register_opts(metadata_conf.METADATA_PROXY_HANDLER_OPTS)
cache.register_oslo_configs(cfg.CONF)
cfg.CONF.set_default(name='cache_url', default='memory://?default_ttl=5')
agent_conf.register_agent_state_opts_helper(cfg.CONF)
config.init(sys.argv[1:])
config.setup_logging()
utils.log_opt_values(LOG)
# metadata agent need not connect DB
cfg.CONF.set_override("connection", "", "database")
proxy = agent.UnixDomainMetadataProxy(cfg.CONF)
proxy.run()
|
waltBB/neutron_read
|
neutron/agent/metadata_agent.py
|
Python
|
apache-2.0
| 1,584
| 0
|
"""
sandman_pasta reimplements the behaviour of decaf-masta, but instead evaluates all calls to deployable heat templates
"""
import json
from decaf_storage.json_base import StorageJSONEncoder
from decaf_storage import Endpoint
from decaf_utils_components.base_daemon import daemonize
import yaml
import time
import urllib
from decaf_utils_components import BasePlugin, In, Out
import base64
import sys
import math
import traceback
__author__ = "Banana PG-SANDMAN"
__date__ = "$01-jun-2016$"
TMPDIR = "/tmp/decaf/"
class Pasta(BasePlugin):
__version__ = "0.1-dev01"
datacenters = dict()
config = None
logger = None
def __init__(self, logger=None, config=None):
super(Pasta, self).__init__(logger=logger, config=config)
with open('/etc/decaf/pastad.cfg') as file:
self.config = yaml.safe_load(file)
if self.config is None:
self.logger.error("No configuration file found or not in yaml format.")
sys.exit(1)
try:
self.datacenters = self.config["datacenters"]
except KeyError as e:
self.logger.error("Please check the configuration. There is no datacenter defined.")
sys.exit(1)
self.logger.debug('Configuration seems sane.')
def _before_connect(self, url=None, rpc=None, routing_key=None):
pass
# same behaviour as masta
def _after_connect(self):
self.rpc.set_json_encoder(StorageJSONEncoder)
self.storage = Endpoint(self.rpc, self.logger)
# Check if all the datacenters are also registered in Storage, if not, register them
storage_datacenters = self.storage.get('datacenter', options=[], filters={})
def connect(self, url=None, rpc=None, routing_key=None):
# fake being masta, so we don't have to change other code
super(Pasta, self).connect(self.config["rpc"]["url"], None, "decaf_masta")
@In("datacenter_id", int)
@Out("success_code", int)
def initialize_datacenter(self, datacenter_config):
"""
Reimplemented method of decaf_masta
:param datacenter_config: A DatacenterConfig object describing the datacenter to be added.
:return: The id of the new entry.
"""
self.logger.info("Call to initialize_datacenter")
return 0
@In("keystone_credentials", dict)
@Out("keystone_id", int)
def create_keystone_credentials(self, keystone_credentials):
self.logger.info("Call to create_keystone_credentials")
return 0
@In("keystone_id", int)
@Out("keystone_credentials", dict)
def get_keystone_credentials(self, keystone_id):
"""
Gets a keystone entry from the database.
:param keystone_id: The id of the database entry.
:return: The data of the keystone entry with the given id, or an error code if not found.
"""
return 400
@Out("keystone_list", list)
def get_keystones(self):
"""
Get keystone entries contained in the database.
:return: A list of keystone entries currently existing in the Masta database.
"""
return None
# ----------------------------------------------------------
# DATACENTERS
# Every datacenter has a respective set of keystone credentials and a region.
# Keystone does not have to be installed on the actual datacenter, but could.
# ----------------------------------------------------------
@In("datacenter", dict)
@Out("datacenter_id", int)
def create_datacenter(self, datacenter):
"""
Adds a datacenter entry to the database.
:param datacenter: A Datacenter dictionary containing information of the datacenter.
:return: The id of the new entry in the database.
"""
return int(datacenter.datacenter_id)
@Out("datacenter_list", list)
def get_datacenters(self):
"""
Get datacenter entries contained in the database.
:return: A list of datacenter entries currently existing in the Masta database.
"""
return [datacenter.to_dict() for datacenter in self.datacenters]
@In("datacenter_id", int)
@Out("datacenter_stats", dict)
def get_datacenter_stats(self, datacenter_id):
"""
Returns information about the datacenter.
:param datacenter_id: The id of the datacenter.
        :return: A dictionary of statistics for the given datacenter (stub data in this reimplementation).
        """
        datacenter_stats = {}  # stub: no real statistics are gathered here
        return datacenter_stats
@In("datacenter_id", int)
@Out("ip_namespace", str)
def get_datacenter_ip_namespace(self, datacenter_id):
"""
Returns the name of the IP namespace of the router on the given datacenter.
:param datacenter_id: The masta id of the datacenter.
:return: IP namespace name.
"""
ip_namespace = "qrouter-1"
return ip_namespace
# ----------------------------------------------------------
# DEPLOY SCENARIO
# A scenario is deployed in two steps: First, the edges are created.
# Secondly, the nodes are created.
# If the process fails at one step, MaSta will rollback the deployment.
# ----------------------------------------------------------
@In("instance_graph", dict)
@Out("instance_graph", dict)
def deploy_scenario(self, instance_graph):
"""
Deploy scenario on the infrastructure.
:param instance_graph: An object of type InstanceGraph to be deployed.
:return: The modified instance graph with ips and keynames, if successful.
"""
return instance_graph
# ----------------------------------------------------------
# DESTROY SCENARIO
# Deletes all the nodes and edges and removes
# the scenario from the database.
# ----------------------------------------------------------
@In("scenario_instance_id", str)
@Out("success_code", int)
def destroy_scenario(self, scenario_instance_id):
"""
Destroy scenario by deleting all its nodes and removing from database.
:param scenario_instance_id: The id of the scenario instance.
:return: 200, if successful. 404, if not found.
"""
return 200
@Out("success_code", int)
def destroy_all_scenarios(self):
"""
Destroys all scenarios in the MaSta database.
:return: 200, if successful.
"""
return 200
# ----------------------------------------------------------
# ALTER SCENARIO
# Methods to change a running scenario.
# ----------------------------------------------------------
@In("instance_graph", dict)
@Out("instance_graph", dict)
def extend_scenario(self, instance_graph):
"""
Method to extend an existing scenario.
:param instance_graph: An InstanceGraph with all the nodes and edges to add.
:return: 200, if successful.
"""
return 200
@In("shrink_graph", dict)
@Out("success_code", int)
def shrink_scenario(self, shrink_graph):
"""
Method to shrink an existing scenario.
:param shrink_graph: An object of type InstanceGraph that lists all the nodes and edges to delete.
:return: 200, if successful.
"""
return 200
# ----------------------------------------------------------
# INTERNAL SCENARIO METHODS
# Internal methods for creation and deletion
# of nodes and edges.
# ----------------------------------------------------------
def create_nodes(self, instance_graph, session):
"""
Internal method to create nodes in database and deploy the nodes on the infrastructure.
:param instance_graph: The graph of the scenario.
:param session: The session object.
:return:
"""
pass
def create_edges(self, instance_graph, session):
"""
Internal method to create edges in the database and set up the networks in OpenStack.
:param instance_graph: The graph of the scenario.
:param session: The session object.
:return:
"""
pass
def rollback(self, instance_graph, session, del_scenario=False):
"""
Internal method to rollback the creation or altering of a scenario.
:param instance_graph: The graph of the scenario.
:param session: The session object.
:return:
"""
pass
def delete_nodes(self, vm_instance_id_list, session):
"""
Internal method to delete nodes from a scenario.
:param scenario_instance_id: The id of the scenario.
:param session: The session object.
:return: 200, if successful.
"""
return 200
def delete_edges(self, edge_list, session):
"""
Internal method to delete edges from a scenario.
:param edge_list: A list containing objects of internal edges, management ports and public ports from the db.
:param session: The session object.
:return:
"""
pass
# ----------------------------------------------------------
# ACTIONS
# Perform actions on the VMS.
# ----------------------------------------------------------
@In("vm_action", dict)
@Out("success_code", int)
def action_vm_instance(self, vm_action):
"""
Perform an action on a single vm instance.
:param vm_action: A dictionary of type VMAction containing the vm instance id and the action to perform.
:return: 200, if successful.
"""
return 200
@In("scenario_action", dict)
@Out("success_code", int)
def action_scenario(self, scenario_action):
"""
Perform an action on a scenario.
:param scenario_action: A dictionary of type ScenarioAction containing the scenario instance id and the action to perform.
:return: 200, if successful.
"""
return 200
# ----------------------------------------------------------
# FLAVORS
# ----------------------------------------------------------
@In("flavor_data", dict)
@Out("success_code", int)
def create_flavor(self, flavor_data):
"""
Adds a flavor entry to the database and uploads the flavor to OpenStack.
:param flavor_data: A FlavorData object containing data about the flavor.
:return: 201: flavor created. 200: flavor already exists, not created
"""
return 201
@In("flavor_id", str)
@Out("success_code", int)
def delete_flavor(self, flavor_id):
"""
Deletes a flavor from the database and OpenStack.
:param flavor_id: The id of the flavor.
:return: 200, if successful. 404, if not found.
"""
return 200
# ----------------------------------------------------------
# IMAGES
# ----------------------------------------------------------
@In("image_data", dict)
@Out("success_code", int)
def create_image(self, image_data):
"""
Stores an image in OpenStack.
:param image_data: A ImageData object containing data about the image.
:return: 201: image created. 200: image already exists, not created
"""
return 201
@In("image_id", str)
@Out("success_code", int)
def delete_image(self, image_id):
"""
Deletes an image from the database and OpenStack.
:param image_id: The id of the image.
:return: 200, if successful. 404, if not found.
"""
return 200
# ----------------------------------------------------------
# NETWORKS
# ----------------------------------------------------------
@In("vm_instance_id", str)
@Out("instance_ip", str)
def get_vm_mgmt_ip(self, vm_instance_id, session=None):
"""
Retrieves the management IP address of an instance.
:param vm_instance_id: The id of the VM instance.
:return: The ip of the instance.
"""
return "10.0.0.1"
# ----------------------------------------------------------
# MONITORING DATA
# ----------------------------------------------------------
@In("monitoring_request", dict)
@Out("monitoring_response", dict)
def get_monitoring_data(self, monitoring_request):
"""
Retrieves monitoring data for a specific VM.
:param monitoring_request: A MonitoringRequest object.
:return: A MonitoringResponse object.
"""
monitoring_request = monitoring_request["monitoring_request"]
monitoring_response = {
"monitoring_response": {
"type": monitoring_request["type"],
"vm_instance_id": monitoring_request["vm_instance_id"],
"value": {
"current": 10,
"total": 100
}
}
}
return monitoring_response
@In("monitoring_alarm_request", dict)
@Out("subscription_name", str)
def create_monitoring_alarm(self, monitoring_alarm_request):
"""
Sets up an alarm and returns a subscription id to subscribe to the message broker.
:param monitoring_alarm_request: A MonitoringAlarmRequest object containing data about the alarm to be set up.
:return: The name of the subscription
"""
return "test"
@In("subscription_name", str)
@Out("success_code", int)
def delete_monitoring_alarm(self, subscription_name):
"""
Delete monitoring alarm by subscription_name.
:param subscription_name: The name of the Subscription.
:return: 200, if successful. 404, if not found.
"""
return 200
@In("monitoring_alarm_id", int)
@Out("success_code", int)
def delete_monitoring_alarm_by_id(self, monitoring_alarm_id):
"""
Delete monitoring alarm by alarm id.
:param monitoring_alarm_id: The id of the alarm, under which it is registered in the MaSta database.
:return: 200, if successful. 404, if not found.
"""
return 200
@Out("success_code", int)
def delete_all_monitoring_alarms(self):
"""
Deletes all monitoring alarms in the DB.
:return: 200, if successful.
"""
return 200
def invoke_monitoring_alarm(self, data):
"""
Internal method. Called by the MaSta-Server when an alarm message arrives.
:param data: data
:return:
"""
pass
def daemon():
daemonize(Pasta)
if __name__ == '__main__':
daemon()
|
CN-UPB/OpenBarista
|
components/sandman-pasta/sandman_pasta/sandman_pasta.py
|
Python
|
mpl-2.0
| 14,698
| 0.004763
|
from neo.Storage.Common.DataCache import DataCache
class CloneCache(DataCache):
def __init__(self, innerCache):
super(CloneCache, self).__init__()
self.innerCache = innerCache
def AddInternal(self, key, value):
self.innerCache.Add(key, value)
def DeleteInternal(self, key):
self.innerCache.Delete(key)
def FindInternal(self, key_prefix):
for k, v in self.innerCache.Find(key_prefix):
yield k, v.Clone()
def GetInternal(self, key):
return self.innerCache[key].Clone()
def TryGetInternal(self, key):
res = self.innerCache.TryGet(key)
if res is None:
return None
else:
return res.Clone()
def UpdateInternal(self, key, value):
self.innerCache.GetAndChange(key).FromReplica(value)
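# How this fits together: every read path (GetInternal, TryGetInternal,
# FindInternal) hands out a Clone() of the inner item, so callers mutate
# copies; AddInternal, DeleteInternal and UpdateInternal write the results
# back into the wrapped innerCache.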
|
hal0x2328/neo-python
|
neo/Storage/Common/CloneCache.py
|
Python
|
mit
| 828
| 0
|
#!/usr/bin/env python
#Copyright (c) 2010 Gerson Minichiello
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import urllib
from HTMLParser import HTMLParser
class PyOpenGraph(object):
types = {'activity':['activity', 'sport'],
'business':['bar', 'company', 'cafe', 'hotel', 'restaurant'],
             'group':['cause', 'sports_league', 'sports_team'],
'organization':['band', 'government', 'non_profit', 'school', 'university'],
'person':['actor', 'athlete', 'author', 'director', 'musician', 'politician', 'public_figure'],
'place':['city', 'country', 'landmark', 'state_province'],
'product':['album', 'book', 'drink', 'food', 'game', 'isbn', 'movie', 'product', 'song', 'tv_show', 'upc'],
'website':['article', 'blog', 'website']}
def __init__(self, url):
f = urllib.urlopen(url)
contents = f.read()
f.close()
p = PyOpenGraphParser()
p.feed(contents)
p.close()
self.metadata = p.properties
def is_valid(self):
required = set(['title', 'type', 'image', 'url'])
if (set(self.metadata.keys()).intersection(required)) == required:
return True
else:
return False
def __str__(self):
return self.metadata['title']
class PyOpenGraphParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.properties = {}
def handle_starttag(self, tag, attrs):
if tag == 'meta':
attrdict = dict(attrs)
            if 'property' in attrdict and attrdict['property'].startswith('og:') and 'content' in attrdict:
self.properties[attrdict['property'].replace('og:', '')] = attrdict['content']
def handle_endtag(self, tag):
pass
def error(self, msg):
pass
if __name__ == '__main__':
# Usage
og = PyOpenGraph('http://www.rottentomatoes.com/m/10011268-oceans/')
print og.metadata
print og.metadata['title']
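    # Added for illustration: is_valid() reports whether the required Open Graph
    # properties (title, type, image, url) were all found on the page.
    print og.is_valid()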
|
bretlowery/snakr
|
lib/PyOpenGraph/PyOpenGraph.py
|
Python
|
bsd-3-clause
| 3,011
| 0.016274
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 06 13:07:14 2015
@author: Ryan Jones
"""
class DemandTechnology:
def __init__(self, drivers, ID, **kwargs):
self.ID = ID
self.drivers = drivers
for col, att in util.object_att_from_table('DemandTechs', ID):
setattr(self, col, att)
self.stocks = {}
def add_stock(self, GAU=None, DAU=None):
if (GAU, DAU) in self.stocks:
# ToDo note that a technology was added twice
return
self.stocks[GAU, DAU] = DemandStock()
def tech_efficiency(self, ID, efficiency, start=None, end=None):
"""
returns function parameters based on identification of efficiency type - main or aux
"""
vintage_start = int(cfgfile.get('vintage', 'start_year'))
vintage_end = int(cfgfile.get('vintage', 'end_year'))
vintages = np.arange(vintage_start, vintage_end + 1)
model_start_year = int(cfgfile.get('case', 'model_start_year'))
model_end_year = int(cfgfile.get('case', 'model_end_year'))
years = np.arange(model_start_year, model_end_year + 1)
# years = np.arange (start, end)
# vintages = np.arange (start, end)
stock = self.stock
if efficiency == "main":
efficiency_key = 'main_energy_efficiency'
ref_ID = stock.techs[ID].reference_main_efficiency_id
decay = 'main_energy_efficiency_decay'
else:
efficiency_key = 'aux_energy_efficiency'
ref_ID = stock.techs[ID].reference_aux_efficiency_id
decay = 'aux_energy_efficiency_decay'
eff_def = stock.techs[ID].efficiency_definition
if eff_def == "absolute":
ref_ID = ID
else:
ref_eff_def = stock.techs[ref_ID].efficiency_definition
if ref_eff_def == "relative":
error_text = "reference technology for technology %s not defined in absolute terms" % ID
raise ValueError(error_text)
else:
pass
# units to convert efficiency values to
sd_unit_type = self.service_demand.unit_type
sd_unit = self.service_demand.unit_base
energy_unit = cfgfile.get('case', 'energy_unit')
# converts efficiency values of technologies that are defined in
# absolute terms ex. miles/gallon for subsectors with inputs defined
# in energy service terms ex. kilometers to consistent efficiency
# units of energy_unit/service_demand_unit ex. gigajoule/kilometer
if eff_def == 'absolute' and sd_unit_type == 'service':
eff = util.efficiency_convert(getattr(stock.techs[ID], efficiency_key),
stock.techs[ID].efficiency_numerator_unit,
stock.techs[ID].efficiency_denominator_unit,
energy_unit, sd_unit)
eff = TimeSeries.clean(eff, extrapolation_method="nearest", newindex=vintages)
clean_eff_numerator_unit = energy_unit
clean_eff_denominator_unit = sd_unit
# no conversion is used if the service_demand unit is energy, as
# the efficiency values will be normalized in later calculations
elif eff_def == 'absolute' and sd_unit_type == 'energy':
eff = getattr(stock.techs[ID], efficiency_key)
eff = TimeSeries.clean(eff, extrapolation_method="nearest", newindex=vintages)
clean_eff_numerator_unit = stock.techs[ID].efficiency_numerator_unit
clean_eff_denominator_unit = stock.techs[ID].efficiency_denominator_unit
# converts efficiency values for reference technologies
# that are defined in absolute terms ex. miles/gallon for
# subsectors with inputs defined in energy service terms ex.
# kilometers to consistent efficiency units of
# energy_unit/service_demand_unit ex. gigajoule/kilometer
elif eff_def == "relative" and sd_unit_type == 'service':
ref_eff = util.efficiency_convert(
getattr(stock.techs[ref_ID], efficiency_key),
stock.techs[ref_ID].efficiency_numerator_unit,
stock.techs[ref_ID].efficiency_denominator_unit, energy_unit,
sd_unit)
ref_eff = TimeSeries.clean(ref_eff, extrapolation_method="nearest", newindex=vintages)
eff = getattr(stock.techs[ID], efficiency_key)
eff = TimeSeries.clean(eff, extrapolation_method="nearest", newindex=vintages)
eff *= ref_eff
clean_eff_numerator_unit = energy_unit
clean_eff_denominator_unit = sd_unit
# no conversion is used if the service_demand unit is energy, as
# the efficiency values will be normalized in later calculations.
# efficiency values are multiplied by reference technology efficiencies
else:
ref_eff = getattr(stock.techs[ref_ID], efficiency_key)
ref_eff = TimeSeries.clean(ref_eff, extrapolation_method="nearest", newindex=vintages)
eff = getattr(stock.techs[ID], efficiency_key)
eff = TimeSeries.clean(eff, extrapolation_method="nearest",
newindex=vintages)
eff *= ref_eff
clean_eff_numerator_unit = stock.techs[ref_ID].efficiency_numerator_unit
clean_eff_denominator_unit = stock.techs[ref_ID].efficiency_denominator_unit
decay_df = stockrollover.vintage_age(years, vintages)
decay_df *= stockrollover.vintage_exist(years, vintages)
if eff_def == "absolute":
decay_df = 1 - (decay_df * getattr(stock.techs[ID], decay))
else:
decay_df = 1 - (decay_df * getattr(stock.techs[ref_ID], decay))
eff = eff.transpose()
eff = (decay_df.values * eff.values, years, vintages)
setattr(stock.techs[ID], 'clean_%s_efficiency' % efficiency, eff)
setattr(stock.techs[ID], 'clean_%s_efficiency_numerator_unit' % efficiency, clean_eff_numerator_unit)
setattr(stock.techs[ID], 'clean_%s_efficiency_denominator_unit' % efficiency, clean_eff_denominator_unit)
def stock_efficiency(self):
sd_unit_type = self.service_demand.unit_type
if sd_unit_type == 'energy':
# ==============================================================================
# in order to calculate a normalized efficiency for a stock, which is
# used when the service demand is defined in energy terms, all
# absolute efficiency values must be in the same units. This code converts
# all efficiency values to the same units.
# ==============================================================================
            primary_key = min(self.stock.techs.keys())
            setattr(self.stock, 'primary_efficiency_ID', primary_key)
            setattr(self.stock, 'primary_efficiency_numerator_unit',
                    self.stock.techs[primary_key].clean_main_efficiency_numerator_unit)
            setattr(self.stock, 'primary_efficiency_denominator_unit',
                    self.stock.techs[primary_key].clean_main_efficiency_denominator_unit)
for key in self.stock.techs:
for eff_type in ['main', 'aux']:
data = getattr(self.stock.techs[key],
'clean_%s_efficiency' % eff_type)
unit_from_denominator = getattr(self.stock.techs[key],
'clean_%s_efficiency_denominator_unit' % eff_type)
unit_from_numerator = getattr(self.stock.techs[key],
'clean_%s_efficiency_numerator_unit' % eff_type)
unit_to_denominator = getattr(self.stock, 'primary_efficiency_denominator_unit')
unit_to_numerator = getattr(self.stock, 'primary_efficiency_numerator_unit')
eff = util.efficiency_convert(data, unit_from_numerator, unit_from_denominator, unit_to_numerator,
unit_to_denominator)
class DemandStock(Stock):
"""
Demand-side equipment stock.
Attributes
----------
final_energy_list : list
ex. {"electricity", "pipeline gas"}
List of final_energy types demanded by techs in the stock.
stocksubsector: instance of class StockSubsector
"""
# def __init__(self):
# self.service_demand = ServiceDemand()
#
# def add_service_demand(self):
# self.service_demand = ServiceDemand()
#
# def cf_to_unit(self, unit, service_demand_unit):
# """converts capacity factor stock units to energy output units based on service demand unit"""
#
# def tech_lookup(self, unit, stocksubsector):
# """return dictionary techs from tech database based on a lookup of unit and stockSubsector."""
#
# def final_energy_list(self, techs):
# """return all final energy types from attributes of tech dictionary"""
#
# def add_demand_techs(self, demand_tech_id):
# if demand_tech_id in self.techs:
# return
# self.techs[demand_tech_id] = technology.DemandTech(demand_tech_id)
class Stock(DataMapFunctions):
"""
"""
def __init__(self, drivers, ID, technology_id=None, **kwargs):
self.ID = ID
self.sql_ID_table = cfgfile.get('db_table_name', 'DemandStock')
self.sql_data_table = cfgfile.get('db_table_name', 'DemandStockData')
self.technology_id = technology_id
self.drivers = drivers
self.mapped = False
for col, att in util.object_att_from_table(self.sql_ID_table, ID):
setattr(self, col, att)
DataMapFunctions.__init__(self)
self.read_timeseries_data()
# self.project('DemandStock', 'service_demand_dependent')
#
@staticmethod
def stock_new(var, **kwargs):
print kwargs['asdf']
# def stock_growth(self):
# """combines driver and intensity attributes into a stock growth projection"""
#
# def rollover(self):
# """used to perform stock rollover calculations and produce stock compositions matrices of stock units by tech"""
#
# def stock_normal(self):
# """normalizes the stock for weighted average calculations"""
#
# def stock_additions(self):
# """calculates annual stock additions as a function of new and replace on burnout by available tech"""
#
# def stock_additions_early(self):
# """calculates annual stock additions as a function of early replacement by available tech"""
#
# def stock_replacement_early(self):
# """calculates annual stock replacements by year and replaced vintage"""
#
# def stock_output(self, output_type):
# """calculates stock output by selected output type: physical units, efficiency, etc."""
#
# pass
|
energyPATHWAYS/energyPATHWAYS
|
energyPATHWAYS/_obsolete/temp.py
|
Python
|
mit
| 11,014
| 0.003087
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-bad-import-order,unused-import
"""Tests the graph freezing tool."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import os
from tensorflow.examples.image_retraining import retrain
from tensorflow.python.framework import test_util
class ImageRetrainingTest(test_util.TensorFlowTestCase):
def dummyImageLists(self):
return {'label_one': {'dir': 'somedir', 'training': ['image_one.jpg',
'image_two.jpg'],
'testing': ['image_three.jpg', 'image_four.jpg'],
'validation': ['image_five.jpg', 'image_six.jpg']},
'label_two': {'dir': 'otherdir', 'training': ['image_one.jpg',
'image_two.jpg'],
'testing': ['image_three.jpg', 'image_four.jpg'],
'validation': ['image_five.jpg', 'image_six.jpg']}}
def testGetImagePath(self):
image_lists = self.dummyImageLists()
self.assertEqual('image_dir/somedir/image_one.jpg', retrain.get_image_path(
image_lists, 'label_one', 0, 'image_dir', 'training'))
self.assertEqual('image_dir/otherdir/image_four.jpg',
retrain.get_image_path(image_lists, 'label_two', 1,
'image_dir', 'testing'))
def testGetBottleneckPath(self):
image_lists = self.dummyImageLists()
self.assertEqual('bottleneck_dir/somedir/image_five.jpg_imagenet_v3.txt',
retrain.get_bottleneck_path(
image_lists, 'label_one', 0, 'bottleneck_dir',
'validation', 'imagenet_v3'))
def testShouldDistortImage(self):
self.assertEqual(False, retrain.should_distort_images(False, 0, 0, 0))
self.assertEqual(True, retrain.should_distort_images(True, 0, 0, 0))
self.assertEqual(True, retrain.should_distort_images(False, 10, 0, 0))
self.assertEqual(True, retrain.should_distort_images(False, 0, 1, 0))
self.assertEqual(True, retrain.should_distort_images(False, 0, 0, 50))
def testAddInputDistortions(self):
with tf.Graph().as_default():
with tf.Session() as sess:
retrain.add_input_distortions(True, 10, 10, 10, 299, 299, 3, 128, 128)
self.assertIsNotNone(sess.graph.get_tensor_by_name('DistortJPGInput:0'))
self.assertIsNotNone(sess.graph.get_tensor_by_name('DistortResult:0'))
@tf.test.mock.patch.object(retrain, 'FLAGS', learning_rate=0.01)
def testAddFinalTrainingOps(self, flags_mock):
with tf.Graph().as_default():
with tf.Session() as sess:
bottleneck = tf.placeholder(
tf.float32, [1, 1024],
name='bottleneck')
retrain.add_final_training_ops(5, 'final', bottleneck, 1024)
self.assertIsNotNone(sess.graph.get_tensor_by_name('final:0'))
def testAddEvaluationStep(self):
with tf.Graph().as_default():
final = tf.placeholder(tf.float32, [1], name='final')
gt = tf.placeholder(tf.float32, [1], name='gt')
self.assertIsNotNone(retrain.add_evaluation_step(final, gt))
def testAddJpegDecoding(self):
with tf.Graph().as_default():
jpeg_data, mul_image = retrain.add_jpeg_decoding(10, 10, 3, 0, 255)
self.assertIsNotNone(jpeg_data)
self.assertIsNotNone(mul_image)
def testCreateModelInfo(self):
did_raise_value_error = False
try:
retrain.create_model_info('no_such_model_name')
except ValueError:
did_raise_value_error = True
self.assertTrue(did_raise_value_error)
model_info = retrain.create_model_info('inception_v3')
self.assertIsNotNone(model_info)
self.assertEqual(299, model_info['input_width'])
if __name__ == '__main__':
tf.test.main()
|
dyoung418/tensorflow
|
tensorflow/examples/image_retraining/retrain_test.py
|
Python
|
apache-2.0
| 4,548
| 0.005057
|
#!/usr/bin/env python
# Shellscript to verify r.gwflow calculation, this calculation is based on
# the example at page 167 of the following book:
# author = "Kinzelbach, W. and Rausch, R.",
# title = "Grundwassermodellierung",
# publisher = "Gebr{\"u}der Borntraeger (Berlin, Stuttgart)",
# year = "1995"
#
import sys
import os
import grass.script as grass
# Overwrite existing maps
grass.run_command("g.gisenv", set="OVERWRITE=1")
grass.message(_("Set the region"))
# The computational region is 2000m x 950m with a cell size of 50m x 50m
grass.run_command("g.region", res=50, n=950, s=0, w=0, e=2000)
grass.run_command("r.mapcalc", expression="phead= if(row() == 19, 5, 3)")
grass.run_command("r.mapcalc", expression="status=if((col() == 1 && row() == 13) ||\
(col() == 1 && row() == 14) ||\
(col() == 2 && row() == 13) ||\
(col() == 2 && row() == 14) ||\
(row() == 19), 2, 1)")
grass.run_command("r.mapcalc", expression="hydcond=0.001")
grass.run_command("r.mapcalc", expression="recharge=0.000000006")
grass.run_command("r.mapcalc", expression="top=20")
grass.run_command("r.mapcalc", expression="bottom=0")
grass.run_command("r.mapcalc", expression="syield=0.001")
grass.run_command("r.mapcalc", expression="null=0.0")
#compute a steady state groundwater flow
grass.run_command("r.gwflow", "f", solver="cholesky", top="top", bottom="bottom", phead="phead", \
status="status", hc_x="hydcond", hc_y="hydcond", s="syield", \
recharge="recharge", output="gwresult", dt=864000000000, type="unconfined", budget="water_budget")
|
AsherBond/MondocosmOS
|
grass_trunk/raster/r.gwflow/valid_calc_excavation.py
|
Python
|
agpl-3.0
| 1,561
| 0.012172
|
""" Module summary:
Variables:
    db_session - A SQLAlchemy session connected to the second-response database (sr.db).
"""
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from dbsetup import Base
############################################################################
# Connect to database and create database session:
engine = create_engine("sqlite:///secondresponse/database/sr.db")
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
db_session = DBSession()
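# A minimal usage sketch (hedged; the model name below is hypothetical and
# assumed to be declared in dbsetup on the same Base):
#
# from secondresponse.database.dbconnect import db_session
# from secondresponse.database.dbsetup import Incident  # hypothetical model
# open_incidents = db_session.query(Incident).all()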
|
courtneypattison/second-response
|
secondresponse/database/dbconnect.py
|
Python
|
mit
| 483
| 0.00207
|
import unittest
from cStringIO import StringIO
from ..backends import static
# There aren't many tests here because it turns out to be way more convenient to
# use test_serializer for the majority of cases
class TestStatic(unittest.TestCase):
def compile(self, input_text, input_data):
return static.compile(input_text, input_data)
def test_get_0(self):
data = """
key: value
[Heading 1]
other_key:
if a == 1: value_1
if a == 2: value_2
value_3
"""
manifest = self.compile(data, {"a": 2})
self.assertEquals(manifest.get("key"), "value")
children = list(item for item in manifest.iterchildren())
self.assertEquals(len(children), 1)
section = children[0]
self.assertEquals(section.name, "Heading 1")
self.assertEquals(section.get("other_key"), "value_2")
self.assertEquals(section.get("key"), "value")
def test_get_1(self):
data = """
key: value
[Heading 1]
other_key:
if a == 1: value_1
if a == 2: value_2
value_3
"""
manifest = self.compile(data, {"a": 3})
children = list(item for item in manifest.iterchildren())
section = children[0]
self.assertEquals(section.get("other_key"), "value_3")
def test_get_3(self):
data = """key:
if a == "1": value_1
if a[0] == "ab"[0]: value_2
"""
manifest = self.compile(data, {"a": "1"})
self.assertEquals(manifest.get("key"), "value_1")
manifest = self.compile(data, {"a": "ac"})
self.assertEquals(manifest.get("key"), "value_2")
def test_get_4(self):
data = """key:
if not a: value_1
value_2
"""
manifest = self.compile(data, {"a": True})
self.assertEquals(manifest.get("key"), "value_2")
manifest = self.compile(data, {"a": False})
self.assertEquals(manifest.get("key"), "value_1")
def test_api(self):
data = """key:
if a == 1.5: value_1
value_2
key_1: other_value
"""
manifest = self.compile(data, {"a": 1.5})
self.assertFalse(manifest.is_empty)
self.assertEquals(manifest.root, manifest)
self.assertTrue(manifest.has_key("key_1"))
self.assertFalse(manifest.has_key("key_2"))
self.assertEquals(set(manifest.iterkeys()), set(["key", "key_1"]))
self.assertEquals(set(manifest.itervalues()), set(["value_1", "other_value"]))
def test_is_empty_1(self):
data = """
[Section]
[Subsection]
"""
manifest = self.compile(data, {})
self.assertTrue(manifest.is_empty)
|
youtube/cobalt
|
third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_static.py
|
Python
|
bsd-3-clause
| 2,576
| 0.001553
|
#!/usr/bin/env python
# -*- coding:UTF-8
__author__ = 'shenshijun'
import copy
class Queue(object):
"""
    A quick FIFO queue implemented on top of a Python list.
"""
def __init__(self, *arg):
super(Queue, self).__init__()
self.__queue = list(copy.copy(arg))
self.__size = len(self.__queue)
def enter(self, value):
self.__size += 1
self.__queue.append(value)
def exit(self):
if self.__size <= 0:
return None
else:
value = self.__queue[0]
self.__size -= 1
del self.__queue[0]
return value
def __len__(self):
return self.__size
def empty(self):
return self.__size <= 0
def __str__(self):
return "".join(["Queue(list=", str(self.__queue), ",size=", str(self.__size)])
|
NoSmartNoMan/algorithm-1
|
lib/queue.py
|
Python
|
gpl-2.0
| 833
| 0.002466
|
from distutils.core import setup
setup(
name = 'voxgenerator',
packages = ['voxgenerator',
'voxgenerator.core',
'voxgenerator.plugin',
'voxgenerator.pipeline',
'voxgenerator.generator',
'voxgenerator.service',
'voxgenerator.control'],
version = '1.0.3',
description = 'Vox generator',
url = 'https://github.com/benoitfragit/VOXGenerator/tree/master/voxgenerator',
author = 'Benoit Franquet',
author_email = 'benoitfraubuntu@gmail.com',
scripts = ['run_voxgenerator.py', 'run_voxgenerator', 'run_voxgenerator_gui.py'],
keywords = ['voice', 'control', 'pocketsphinx'],
classifiers = ["Programming Language :: Python",
"Development Status :: 4 - Beta",
"Environment :: Other Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules"]
)
|
benoitfragit/VOXGenerator
|
setup.py
|
Python
|
gpl-2.0
| 1,170
| 0.026496
|
'''
New integration test for migrating a VM between clusters.
@author: Legion
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
test_obj_dict = test_state.TestStateDict()
test_stub = test_lib.lib_get_test_stub()
data_migration = test_stub.DataMigration()
def test():
data_migration.create_vm()
data_migration.migrate_vm()
test_stub.migrate_vm_to_random_host(data_migration.vm)
data_migration.vm.check()
data_migration.vm.destroy()
test_util.test_pass('Migrate migrated VM Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
if data_migration.vm:
try:
data_migration.vm.destroy()
except:
pass
|
zstackorg/zstack-woodpecker
|
integrationtest/vm/multiclusters/data_migration/test_migrate_migrated_vm.py
|
Python
|
apache-2.0
| 791
| 0.006321
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-25 00:10
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import filer.fields.file
class Migration(migrations.Migration):
initial = True
dependencies = [
('core', '0001_initial'),
('filer', '0007_auto_20161016_1055'),
('vouchers', '0001_initial'),
('financial', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.AddField(
model_name='revenueitem',
name='purchasedVoucher',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='vouchers.Voucher', verbose_name='Purchased voucher/gift certificate'),
),
migrations.AddField(
model_name='revenueitem',
name='registration',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Registration'),
),
migrations.AddField(
model_name='revenueitem',
name='submissionUser',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='revenuessubmittedby', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='expenseitem',
name='attachment',
field=filer.fields.file.FilerFileField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='expense_attachment', to='filer.File', verbose_name='Attach File (optional)'),
),
migrations.AddField(
model_name='expenseitem',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='financial.ExpenseCategory'),
),
migrations.AddField(
model_name='expenseitem',
name='event',
field=models.ForeignKey(blank=True, help_text='If this item is associated with an Event, enter it here.', null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Event'),
),
migrations.AddField(
model_name='expenseitem',
name='eventstaffmember',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.EventStaffMember'),
),
migrations.AddField(
model_name='expenseitem',
name='eventvenue',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='venueexpense', to='core.Event'),
),
migrations.AddField(
model_name='expenseitem',
name='payToLocation',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Location'),
),
migrations.AddField(
model_name='expenseitem',
name='payToUser',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='payToUser', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='expenseitem',
name='submissionUser',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='expensessubmittedby', to=settings.AUTH_USER_MODEL),
),
]
|
django-danceschool/django-danceschool
|
danceschool/financial/migrations/0002_auto_20170425_0010.py
|
Python
|
bsd-3-clause
| 3,541
| 0.003106
|
import os
import inspect
import types
from collections import OrderedDict
import json
from JumpScale import j
# api codes
# 4 function with params
# 7 ???
# 8 property
class Arg:
"""
Wrapper for argument
"""
def __init__(self, name, defaultvalue):
self.name = name
self.defaultvalue = defaultvalue
def __str__(self):
out = ""
if self.defaultvalue is not None:
out += "- %s = %s\n" % (self.name, self.defaultvalue)
else:
out += "- %s\n" % (self.name)
return out
def __repr__(self):
return self.__str__()
def attrib(name, type, doc=None, objectpath=None, filepath=None, extra=None):
"""
Helper function for codecompletion tree.
"""
return (name, type, doc, objectpath, filepath, extra)
class MethodDoc:
"""
Method documentation
"""
def __init__(self, method, name, classdoc):
self.classdoc = classdoc
self.params = []
inspected = inspect.getargspec(method)
if inspected.defaults is not None:
counter = len(inspected.defaults) - len(inspected.args)
else:
counter = -99999
for param in inspected.args:
if inspected.defaults is not None and counter > -1:
defval = inspected.defaults[counter]
if j.data.types.string.check(defval):
defval = "'%s'" % defval
else:
defval = None
counter += 1
if param != "self":
self.params.append(Arg(param, defval))
if inspected.varargs is not None:
self.params.append(Arg("*%s" % inspected.varargs, None))
if inspected.keywords is not None:
self.params.append(Arg("**%s" % inspected.keywords, None))
self.comments = inspect.getdoc(method)
if self.comments is None:
self.comments = ""
self.comments = j.data.text.strip(self.comments)
self.comments = j.data.text.wrap(self.comments, 90)
self.linenr = inspect.getsourcelines(method)[1]
self.name = name
# self.methodline=inspect.getsourcelines(method)[0][0].strip().replace("self, ","").replace("self,","").replace("self","").replace(":","")
def __str__(self):
"""
Markdown representation of the method and its arguments
"""
out = ""
param_s = ""
if len(self.params) > 0:
param_s = ", ".join([str(arg.name) + "=" + str(arg.defaultvalue)
if arg.defaultvalue else arg.name for arg in self.params])
param_s = "*%s*" % param_s
out += "#### %s(%s) \n\n" % (self.name, param_s)
if self.comments is not None and self.comments.strip() != "":
out += "```\n" + self.comments + "\n```\n\n"
return out
def __repr__(self):
return self.__str__()
class ClassDoc:
def __init__(self, classobj, location):
self.location = location
self.methods = {}
self.comments = inspect.getdoc(classobj)
module = inspect.getmodule(classobj)
self.path = inspect.getabsfile(module)
self.errors = ""
self.properties = []
for key, val in classobj.__dict__.items():
if key.startswith("_"):
continue
self.properties.append(key)
def getPath(self):
for method in self.methods:
return inspect.getabsfile(method)
def addMethod(self, name, method):
try:
source = inspect.getsource(method)
except:
self.errors += '#### Error trying to add %s source in %s.\n' % (name, self.location)
print("ADD METHOD:%s %s" % (self.path, name))
md = MethodDoc(method, name, self)
self.methods[name] = md
return source, md.params
    def underscore_location(self):
return self.location.replace(".", "_")
def write(self, dest):
        dest2 = j.sal.fs.joinPaths(dest, self.location.split(".")[1], "%s.md" % self.underscore_location())
destdir = j.sal.fs.getDirName(dest2)
j.sal.fs.createDir(destdir)
content = str(self)
content = content.replace("\n\n\n", "\n\n")
content = content.replace("\n\n\n", "\n\n")
content = content.replace("\n\n\n", "\n\n")
# ugly temp hack, better to do with regex
content = content.replace("\{", "$%[")
content = content.replace("\}", "$%]")
content = content.replace("{", "\{")
content = content.replace("}", "\}")
content = content.replace("$%]", "\}")
content = content.replace("$%[", "\{")
j.sal.fs.writeFile(filename=dest2, contents=content)
return dest2
def __str__(self):
C = "<!-- toc -->\n"
C += "## %s\n\n" % self.location
C += "- %s\n" % self.path
if self.properties != []:
C += "- Properties\n"
for prop in self.properties:
C += " - %s\n" % prop
C += "\n### Methods\n"
C += "\n"
if self.comments is not None:
C += "\n%s\n\n" % self.comments
keys = sorted(self.methods.keys())
for key in keys:
method = self.methods[key]
C2 = str(method)
C += C2
return C
def __repr__(self):
return self.__str__()
class ObjectInspector:
"""
    Inspects an object tree in memory and generates an API file plus a pickled
    OrderedDict used for code completion.
"""
def __init__(self):
self.__jslocation__ = "j.tools.objectinspector"
self.apiFileLocation = j.sal.fs.joinPaths(j.dirs.cfgDir, "codecompletionapi", "jumpscale.api")
# j.sal.fs.createDir(j.sal.fs.joinPaths(j.dirs.cfgDir, "codecompletionapi"))
self.classDocs = {}
self.visited = []
self.root = None
self.manager = None
self.logger = j.logger.get('j.tools.objectinspector')
self.jstree = OrderedDict() # jstree['j.sal']={'unix': unixobject, 'fs': fsobject}
def importAllLibs(self, ignore=[], base="%s/lib/JumpScale/" % j.dirs.base):
self.base = os.path.normpath(base)
towalk = j.sal.fs.listDirsInDir(base, recursive=False, dirNameOnly=True, findDirectorySymlinks=True)
errors = "### errors while trying to import libraries\n\n"
for item in towalk:
path = "%s/%s" % (base, item)
for modname in j.sal.fs.listDirsInDir(path, False, True, True):
if modname not in ignore:
toexec = "import JumpScale.%s.%s" % (item, modname)
try:
exec(toexec)
except Exception as e:
self.logger.error(("COULD NOT IMPORT %s" % toexec))
errors += "**%s**\n\n" % toexec
errors += "%s\n\n" % e
return errors
def raiseError(self, errormsg):
self.logger.error("ERROR:%s" % errormsg)
errormsg = errormsg.strip()
errormsg = errormsg.strip("-")
errormsg = errormsg.strip("*")
errormsg = errormsg.strip()
errormsg = "* %s\n" % errormsg
j.sal.fs.writeFile(filename="%s/errors.md" % self.dest, contents=errormsg, append=True)
def generateDocs(self, dest, ignore=[], objpath="j"):
"""
        Generates documentation for objpath in the destination directory dest.
@param dest: destination directory to write documentation.
@param objpath: object path
@param ignore: modules list to be ignored during the import.
"""
self.dest = dest
self.apiFileLocation = "%s/jumpscale.api" % self.dest
j.sal.fs.writeFile("%s/errors.md" % dest, "")
j.sal.fs.createDir(self.dest)
self.errors = self.importAllLibs(ignore=ignore)
#self.errors = ''
objectLocationPath = objpath
# extract the object name (j.sal.unix ) -> unix to make a stub out of it.
objname = ''
filepath = ''
if '.' in objpath:
objname = objpath.split(".")[-1]
else:
objname = objpath
try:
obj = eval(objpath)
if "__file__" in dir(obj):
filepath = inspect.getabsfile(obj.__file__)
if not filepath.startswith(self.base):
return
else:
filepath = inspect.getfile(obj.__class__)
if not filepath.startswith(self.base):
return
except:
pass
# add the root object to the tree (self.jstree) as its first element (order maintained by ordereddict/pickle)
self.jstree[objectLocationPath] = attrib(objname, "class", 'emptydocs', objectLocationPath)
self.inspect(objectLocationPath)
j.sal.fs.createDir(dest)
j.sal.fs.writeFile(filename="%s/errors.md" % dest, contents=self.errors, append=True)
self.writeDocs(dest)
def _processMethod(self, name, method, path, classobj):
if classobj is None:
raise j.exceptions.RuntimeError("cannot be None")
classpath = ".".join(path.split(".")[:-1])
if classpath not in self.classDocs:
self.classDocs[classpath] = ClassDoc(classobj, classpath)
obj = self.classDocs[classpath]
return obj.addMethod(name, method)
def _processClass(self, name, path, classobj):
if path not in self.classDocs:
self.classDocs[path] = ClassDoc(classobj, path)
obj = self.classDocs[path]
def inspect(self, objectLocationPath="j", recursive=True, parent=None, obj=None):
"""
walk over objects in memory and create code completion api in jumpscale cfgDir under codecompletionapi
@param object is start object
@param objectLocationPath is full location name in object tree e.g. j.sal.fs , no need to fill in
"""
self.logger.debug(objectLocationPath)
if obj is None:
try:
obj = eval(objectLocationPath)
except:
self.raiseError("could not eval:%s" % objectLocationPath)
return
# only process our files
try:
if "__file__" in dir(obj):
filepath = inspect.getabsfile(obj.__file__)
filepath = os.path.normpath(filepath) # normalize path
if not filepath.startswith(self.base):
return
else:
clsfile = inspect.getfile(obj.__class__)
clsfile = os.path.normpath(clsfile)
if not clsfile.startswith(self.base):
return
except Exception as e:
# print "COULD NOT DEFINE FILE OF:%s"%objectLocationPath
pass
if obj not in self.visited and obj:
self.visited.append(obj)
else:
self.logger.debug("RECURSIVE:%s" % objectLocationPath)
return
attrs = dir(obj)
ignore = ["constructor_args", "NOTHING", "template_class", "redirect_cache"]
def check(item):
if item == "_getFactoryEnabledClasses":
return True
if item.startswith("_"):
return False
if item.startswith("im_"):
return False
if item in ignore:
return False
return True
# if objectLocationPath == 'j.actions.logger.disabled':
attrs = [item for item in attrs if check(item)]
for objattributename in attrs:
filepath = None
objectLocationPath2 = "%s.%s" % (objectLocationPath, objattributename)
try:
objattribute = eval("obj.%s" % objattributename)
except Exception as e:
self.logger.error(str(e))
self.raiseError("cannot eval %s" % objectLocationPath2)
continue
if objattributename.upper() == objattributename:
# is special type or constant
self.logger.debug("special type: %s" % objectLocationPath2)
j.sal.fs.writeFile(self.apiFileLocation, "%s?7\n" % objectLocationPath2, True)
self.jstree[objectLocationPath2] = attrib(objattributename, "const", '', objectLocationPath2, filepath)
elif objattributename == "_getFactoryEnabledClasses":
try:
for fclparent, name, obj2 in obj._getFactoryEnabledClasses():
if fclparent != "":
objectLocationPath2 = objectLocationPath + "." + fclparent + "." + name
else:
objectLocationPath2 = objectLocationPath + "." + name
self._processClass(name, objectLocationPath2, obj)
if not isinstance(objattribute, (str, bool, int, float, dict, list, tuple)):
self.inspect(objectLocationPath=objectLocationPath2, recursive=True, parent=obj, obj=obj2)
except Exception as e:
self.logger.error("the _getFactoryEnabledClasses gives error")
import ipdb
elif inspect.isfunction(objattribute) or inspect.ismethod(objattribute) or inspect.isbuiltin(objattribute) or inspect.isgenerator(objattribute):
# isinstance(objattribute, (types.BuiltinMethodType,
# types.BuiltinFunctionType, types.MethodType, types.FunctionType)):
try:
methodpath = inspect.getabsfile(objattribute)
methodargs = ", ".join(objattribute.__code__.co_varnames)
filepath = methodpath
if not methodpath.startswith(self.base):
self.classDocs.pop(objectLocationPath2, "")
self.logger.info("SKIPPED:%s" % objectLocationPath2)
return
except Exception as e:
self.logger.error(str(e))
source, params = self._processMethod(objattributename, objattribute, objectLocationPath2, obj)
self.logger.debug("instancemethod: %s" % objectLocationPath2)
j.sal.fs.writeFile(self.apiFileLocation, "%s?4(%s)\n" % (objectLocationPath2, params), True)
self.jstree[objectLocationPath2] = attrib(
objattributename, "method", objattribute.__doc__, objectLocationPath2, filepath, methodargs)
elif isinstance(objattribute, (str, bool, int, float, list, tuple, dict, property)) or objattribute is None:
self.logger.debug("property: %s" % objectLocationPath2)
j.sal.fs.writeFile(self.apiFileLocation, "%s?8\n" % objectLocationPath2, True)
self.jstree[objectLocationPath2] = attrib(
objattributename, "property", objattribute.__doc__, objectLocationPath2)
elif isinstance(objattribute.__class__, type):
j.sal.fs.writeFile(self.apiFileLocation, "%s?8\n" % objectLocationPath2, True)
self.logger.debug("class or instance: %s" % objectLocationPath2)
try:
filepath = inspect.getfile(objattribute.__class__)
except:
pass
self.jstree[objectLocationPath2] = attrib(
objattributename, "class", objattribute.__doc__, objectLocationPath2, filepath)
try:
if not isinstance(objattribute, (str, bool, int, float, dict, list, tuple)
) or objattribute is not None:
self.inspect(objectLocationPath2, parent=objattribute)
except Exception as e:
self.logger.error(str(e))
else:
pass
def writeDocs(self, path):
"""
Writes the documentation on a specified path.
"""
self.dest=os.path.normpath(self.dest)
todelete = []
summary = {}
for key, doc in list(self.classDocs.items()):
key2 = ".".join(key.split(".")[0:2]) #root items (data,core,application, sal,..)
if key2 not in summary:
summary[key2] = {}
dest = doc.write(path)
# remember gitbook info
dest=os.path.normpath(dest)
summary[key2][key] = j.sal.fs.pathRemoveDirPart(dest, self.dest)
summarytxt = ""
keys1 = sorted(summary.keys())
for key1 in keys1:
summarytxt += "* %s\n" % (key1)
keys2 = sorted(summary[key1].keys())
for key2 in keys2:
keylink = summary[key1][key2]
keylink = keylink.rstrip(".md").replace(".", "_")
keylink = keylink + ".md"
summarytxt += " * [%s](%s)\n" % (key2, keylink)
j.sal.fs.writeFile(filename="%s/SUMMARY.md" % (self.dest), contents=summarytxt)
with open("%s/out.pickled" % self.dest, 'wb') as f:
import pickle
pickle.dump(self.jstree, f)
#json.dump(self.jstree, f, indent=4, sort_keys=True)
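# A minimal usage sketch (assumes a working JumpScale 8 environment providing
# the global `j` object; the destination path and ignore list are illustrative):
#
# from JumpScale import j
# inspector = j.tools.objectinspector
# inspector.generateDocs("/tmp/jumpscale_api_docs", ignore=["portal"], objpath="j.sal.fs")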
|
Jumpscale/jumpscale_core8
|
lib/JumpScale/tools/objectinspector/ObjectInspector.py
|
Python
|
apache-2.0
| 17,296
| 0.003296
|
"""Tests `numpy_utils.py`."""
# Copyright (c) 2021 Aubrey Barnard.
#
# This is free, open software released under the MIT license. See
# `LICENSE` for details.
import random
import unittest
import numpy.random
from .. import numpy_utils
class NumpyAsStdlibPrngTest(unittest.TestCase):
def test_random_floats(self):
seed = 0xdeadbeeffeedcafe
n_samples = 10
orig_prng = numpy.random.default_rng(seed)
expected = [orig_prng.random() for _ in range(n_samples)]
wrap_prng = numpy_utils.NumpyAsStdlibPrng(
numpy.random.default_rng(seed))
actual = [wrap_prng.random() for _ in range(n_samples)]
self.assertEqual(expected, actual)
class NumpyBitGeneratorTest(unittest.TestCase):
def test_random_floats(self):
seed = 0xdeadbeeffeedcafe
n_samples = 10
old_prng = random.Random(seed)
expected = [old_prng.random() for _ in range(n_samples)]
new_prng = numpy.random.Generator(
numpy_utils.numpy_bit_generator(
random.Random(seed)))
actual = [new_prng.random() for _ in range(n_samples)]
self.assertEqual(expected, actual)
|
afbarnard/barnapy
|
barnapy/test/numpy_utils_test.py
|
Python
|
mit
| 1,182
| 0
|
from twisted.internet.defer import inlineCallbacks, returnValue
from vumi.connectors import (
BaseConnector, ReceiveInboundConnector, ReceiveOutboundConnector,
IgnoreMessage)
from vumi.tests.utils import LogCatcher
from vumi.worker import BaseWorker
from vumi.message import TransportUserMessage
from vumi.middleware.tests.utils import RecordingMiddleware
from vumi.tests.helpers import VumiTestCase, MessageHelper, WorkerHelper
class DummyWorker(BaseWorker):
def setup_connectors(self):
pass
def setup_worker(self):
pass
def teardown_worker(self):
pass
class BaseConnectorTestCase(VumiTestCase):
connector_class = None
def setUp(self):
self.msg_helper = self.add_helper(MessageHelper())
self.worker_helper = self.add_helper(WorkerHelper())
@inlineCallbacks
def mk_connector(self, worker=None, connector_name=None,
prefetch_count=None, middlewares=None, setup=False):
if worker is None:
worker = yield self.worker_helper.get_worker(DummyWorker, {})
if connector_name is None:
connector_name = "dummy_connector"
connector = self.connector_class(worker, connector_name,
prefetch_count=prefetch_count,
middlewares=middlewares)
if setup:
yield connector.setup()
returnValue(connector)
@inlineCallbacks
def mk_consumer(self, *args, **kwargs):
conn = yield self.mk_connector(*args, **kwargs)
consumer = yield conn._setup_consumer('inbound', TransportUserMessage,
lambda msg: None)
returnValue((conn, consumer))
class TestBaseConnector(BaseConnectorTestCase):
connector_class = BaseConnector
@inlineCallbacks
def test_creation(self):
conn = yield self.mk_connector(connector_name="foo")
self.assertEqual(conn.name, "foo")
self.assertTrue(isinstance(conn.worker, BaseWorker))
@inlineCallbacks
def test_middlewares_consume(self):
worker = yield self.worker_helper.get_worker(DummyWorker, {})
middlewares = [RecordingMiddleware(
str(i), {'consume_priority': 0, 'publish_priority': 0}, worker)
for i in range(3)]
conn, consumer = yield self.mk_consumer(
worker=worker, connector_name='foo', middlewares=middlewares)
consumer.unpause()
msgs = []
conn._set_default_endpoint_handler('inbound', msgs.append)
msg = self.msg_helper.make_inbound("inbound")
yield self.worker_helper.dispatch_inbound(msg, 'foo')
record = msgs[0].payload.pop('record')
self.assertEqual(record,
[(str(i), 'inbound', 'foo')
for i in range(3)])
@inlineCallbacks
def test_middlewares_publish(self):
worker = yield self.worker_helper.get_worker(DummyWorker, {})
middlewares = [RecordingMiddleware(
str(i), {'consume_priority': 0, 'publish_priority': 0}, worker)
for i in range(3)]
conn = yield self.mk_connector(
worker=worker, connector_name='foo', middlewares=middlewares)
yield conn._setup_publisher('outbound')
msg = self.msg_helper.make_outbound("outbound")
yield conn._publish_message('outbound', msg, 'dummy_endpoint')
msgs = self.worker_helper.get_dispatched_outbound('foo')
record = msgs[0].payload.pop('record')
self.assertEqual(record,
[[str(i), 'outbound', 'foo']
for i in range(2, -1, -1)])
@inlineCallbacks
    def test_prefetch_count(self):
conn, consumer = yield self.mk_consumer(prefetch_count=10)
self.assertEqual(consumer.channel.qos_prefetch_count, 10)
@inlineCallbacks
def test_setup_raises(self):
conn = yield self.mk_connector()
self.assertRaises(NotImplementedError, conn.setup)
@inlineCallbacks
def test_teardown(self):
conn, consumer = yield self.mk_consumer()
self.assertTrue(consumer.keep_consuming)
yield conn.teardown()
self.assertFalse(consumer.keep_consuming)
@inlineCallbacks
def test_paused(self):
conn, consumer = yield self.mk_consumer()
consumer.pause()
self.assertTrue(conn.paused)
consumer.unpause()
self.assertFalse(conn.paused)
@inlineCallbacks
def test_pause(self):
conn, consumer = yield self.mk_consumer()
consumer.unpause()
self.assertFalse(consumer.paused)
conn.pause()
self.assertTrue(consumer.paused)
@inlineCallbacks
def test_unpause(self):
conn, consumer = yield self.mk_consumer()
consumer.pause()
self.assertTrue(consumer.paused)
conn.unpause()
self.assertFalse(consumer.paused)
@inlineCallbacks
def test_setup_publisher(self):
conn = yield self.mk_connector(connector_name='foo')
publisher = yield conn._setup_publisher('outbound')
self.assertEqual(publisher.routing_key, 'foo.outbound')
@inlineCallbacks
def test_setup_consumer(self):
conn, consumer = yield self.mk_consumer(connector_name='foo')
self.assertTrue(consumer.paused)
self.assertEqual(consumer.routing_key, 'foo.inbound')
self.assertEqual(consumer.message_class, TransportUserMessage)
@inlineCallbacks
def test_set_endpoint_handler(self):
conn, consumer = yield self.mk_consumer(connector_name='foo')
consumer.unpause()
msgs = []
conn._set_endpoint_handler('inbound', msgs.append, 'dummy_endpoint')
msg = self.msg_helper.make_inbound("inbound")
msg.set_routing_endpoint('dummy_endpoint')
yield self.worker_helper.dispatch_inbound(msg, 'foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_set_none_endpoint_handler(self):
conn, consumer = yield self.mk_consumer(connector_name='foo')
consumer.unpause()
msgs = []
conn._set_endpoint_handler('inbound', msgs.append, None)
msg = self.msg_helper.make_inbound("inbound")
yield self.worker_helper.dispatch_inbound(msg, 'foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_set_default_endpoint_handler(self):
conn, consumer = yield self.mk_consumer(connector_name='foo')
consumer.unpause()
msgs = []
conn._set_default_endpoint_handler('inbound', msgs.append)
msg = self.msg_helper.make_inbound("inbound")
yield self.worker_helper.dispatch_inbound(msg, 'foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_publish_message_with_endpoint(self):
conn = yield self.mk_connector(connector_name='foo')
yield conn._setup_publisher('outbound')
msg = self.msg_helper.make_outbound("outbound")
yield conn._publish_message('outbound', msg, 'dummy_endpoint')
msgs = self.worker_helper.get_dispatched_outbound('foo')
self.assertEqual(msgs, [msg])
class TestReceiveInboundConnector(BaseConnectorTestCase):
connector_class = ReceiveInboundConnector
@inlineCallbacks
def test_setup(self):
conn = yield self.mk_connector(connector_name='foo')
yield conn.setup()
conn.unpause()
with LogCatcher() as lc:
msg = self.msg_helper.make_inbound("inbound")
yield self.worker_helper.dispatch_inbound(msg, 'foo')
[msg_log] = lc.messages()
self.assertTrue(msg_log.startswith("No inbound handler for 'foo'"))
with LogCatcher() as lc:
event = self.msg_helper.make_ack()
yield self.worker_helper.dispatch_event(event, 'foo')
[event_log] = lc.messages()
self.assertTrue(event_log.startswith("No event handler for 'foo'"))
msg = self.msg_helper.make_outbound("outbound")
yield conn.publish_outbound(msg)
msgs = self.worker_helper.get_dispatched_outbound('foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_default_inbound_handler(self):
conn = yield self.mk_connector(connector_name='foo', setup=True)
with LogCatcher() as lc:
conn.default_inbound_handler(
self.msg_helper.make_inbound("inbound"))
[log] = lc.messages()
self.assertTrue(log.startswith("No inbound handler for 'foo'"))
@inlineCallbacks
def test_default_event_handler(self):
conn = yield self.mk_connector(connector_name='foo', setup=True)
with LogCatcher() as lc:
conn.default_event_handler(self.msg_helper.make_ack())
[log] = lc.messages()
self.assertTrue(log.startswith("No event handler for 'foo'"))
@inlineCallbacks
def test_set_inbound_handler(self):
msgs = []
conn = yield self.mk_connector(connector_name='foo', setup=True)
conn.unpause()
conn.set_inbound_handler(msgs.append)
msg = self.msg_helper.make_inbound("inbound")
yield self.worker_helper.dispatch_inbound(msg, 'foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_set_default_inbound_handler(self):
msgs = []
conn = yield self.mk_connector(connector_name='foo', setup=True)
conn.unpause()
conn.set_default_inbound_handler(msgs.append)
msg = self.msg_helper.make_inbound("inbound")
yield self.worker_helper.dispatch_inbound(msg, 'foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_set_event_handler(self):
msgs = []
conn = yield self.mk_connector(connector_name='foo', setup=True)
conn.unpause()
conn.set_event_handler(msgs.append)
msg = self.msg_helper.make_ack()
yield self.worker_helper.dispatch_event(msg, 'foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_set_default_event_handler(self):
msgs = []
conn = yield self.mk_connector(connector_name='foo', setup=True)
conn.unpause()
conn.set_default_event_handler(msgs.append)
msg = self.msg_helper.make_ack()
yield self.worker_helper.dispatch_event(msg, 'foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_publish_outbound(self):
conn = yield self.mk_connector(connector_name='foo', setup=True)
msg = self.msg_helper.make_outbound("outbound")
yield conn.publish_outbound(msg)
msgs = self.worker_helper.get_dispatched_outbound('foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_inbound_handler_ignore_message(self):
def im_handler(msg):
raise IgnoreMessage()
conn = yield self.mk_connector(connector_name='foo', setup=True)
conn.unpause()
conn.set_default_inbound_handler(im_handler)
msg = self.msg_helper.make_inbound("inbound")
with LogCatcher() as lc:
yield self.worker_helper.dispatch_inbound(msg, 'foo')
[log] = lc.messages()
self.assertTrue(log.startswith(
"Ignoring msg due to IgnoreMessage(): <Message"))
class TestReceiveOutboundConnector(BaseConnectorTestCase):
connector_class = ReceiveOutboundConnector
@inlineCallbacks
def test_setup(self):
conn = yield self.mk_connector(connector_name='foo')
yield conn.setup()
conn.unpause()
with LogCatcher() as lc:
msg = self.msg_helper.make_outbound("outbound")
yield self.worker_helper.dispatch_outbound(msg, 'foo')
[log] = lc.messages()
self.assertTrue(log.startswith("No outbound handler for 'foo'"))
msg = self.msg_helper.make_inbound("inbound")
yield conn.publish_inbound(msg)
msgs = self.worker_helper.get_dispatched_inbound('foo')
self.assertEqual(msgs, [msg])
msg = self.msg_helper.make_ack()
yield conn.publish_event(msg)
msgs = self.worker_helper.get_dispatched_events('foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_default_outbound_handler(self):
conn = yield self.mk_connector(connector_name='foo', setup=True)
with LogCatcher() as lc:
conn.default_outbound_handler(
self.msg_helper.make_outbound("outbound"))
[log] = lc.messages()
self.assertTrue(log.startswith("No outbound handler for 'foo'"))
@inlineCallbacks
def test_set_outbound_handler(self):
msgs = []
conn = yield self.mk_connector(connector_name='foo', setup=True)
conn.unpause()
conn.set_outbound_handler(msgs.append)
msg = self.msg_helper.make_outbound("outbound")
yield self.worker_helper.dispatch_outbound(msg, 'foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_set_default_outbound_handler(self):
msgs = []
conn = yield self.mk_connector(connector_name='foo', setup=True)
conn.unpause()
conn.set_default_outbound_handler(msgs.append)
msg = self.msg_helper.make_outbound("outbound")
yield self.worker_helper.dispatch_outbound(msg, 'foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_publish_inbound(self):
conn = yield self.mk_connector(connector_name='foo', setup=True)
msg = self.msg_helper.make_inbound("inbound")
yield conn.publish_inbound(msg)
msgs = self.worker_helper.get_dispatched_inbound('foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_publish_event(self):
conn = yield self.mk_connector(connector_name='foo', setup=True)
msg = self.msg_helper.make_ack()
yield conn.publish_event(msg)
msgs = self.worker_helper.get_dispatched_events('foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_outbound_handler_nack_message(self):
def im_handler(msg):
raise IgnoreMessage()
conn = yield self.mk_connector(connector_name='foo', setup=True)
conn.unpause()
conn.set_default_outbound_handler(im_handler)
msg = self.msg_helper.make_inbound("inbound")
with LogCatcher() as lc:
yield self.worker_helper.dispatch_outbound(msg, 'foo')
[log] = lc.messages()
self.assertTrue(log.startswith(
"Ignoring msg (with NACK) due to IgnoreMessage(): <Message"))
[event] = self.worker_helper.get_dispatched_events('foo')
self.assertEqual(event['event_type'], 'nack')
|
TouK/vumi
|
vumi/tests/test_connectors.py
|
Python
|
bsd-3-clause
| 14,757
| 0
|
from django.apps import AppConfig
class InvestmentsConfig(AppConfig):
name = 'charcoallog.investments'
def ready(self):
# using @receiver decorator
# do not optimize import !!!
import charcoallog.investments.signals # noqa: F401
|
hpfn/charcoallog
|
charcoallog/investments/apps.py
|
Python
|
gpl-3.0
| 265
| 0
|
# $Id: frontend.py 6154 2009-10-05 19:08:10Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Command-line and common processing for Docutils front-end tools.
Exports the following classes:
* `OptionParser`: Standard Docutils command-line processing.
* `Option`: Customized version of `optparse.Option`; validation support.
* `Values`: Runtime settings; objects are simple structs
(``object.attribute``). Supports cumulative list settings (attributes).
* `ConfigParser`: Standard Docutils config file processing.
Also exports the following functions:
* Option callbacks: `store_multiple`, `read_config_file`.
* Setting validators: `validate_encoding`,
`validate_encoding_error_handler`,
`validate_encoding_and_error_handler`, `validate_boolean`,
`validate_threshold`, `validate_colon_separated_string_list`,
`validate_dependency_file`.
* `make_paths_absolute`.
"""
__docformat__ = 'reStructuredText'
import os
import os.path
import sys
import warnings
import ConfigParser as CP
import codecs
import docutils
import docutils.utils
import docutils.nodes
import optparse
from optparse import SUPPRESS_HELP
def store_multiple(option, opt, value, parser, *args, **kwargs):
"""
Store multiple values in `parser.values`. (Option callback.)
Store `None` for each attribute named in `args`, and store the value for
each key (attribute name) in `kwargs`.
"""
for attribute in args:
setattr(parser.values, attribute, None)
for key, value in kwargs.items():
setattr(parser.values, key, value)
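# An illustrative sketch (the setting names here are hypothetical, not actual
# Docutils settings) of wiring this callback into an option:
#
# parser.add_option('--quiet', action='callback', callback=store_multiple,
#                   callback_args=('report_level',),
#                   callback_kwargs={'quiet': 1})
#
# Parsing '--quiet' would then set parser.values.report_level to None and
# parser.values.quiet to 1.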
def read_config_file(option, opt, value, parser):
"""
Read a configuration file during option processing. (Option callback.)
"""
try:
new_settings = parser.get_config_file_settings(value)
except ValueError, error:
parser.error(error)
parser.values.update(new_settings, parser)
def validate_encoding(setting, value, option_parser,
config_parser=None, config_section=None):
try:
codecs.lookup(value)
except LookupError:
raise (LookupError('setting "%s": unknown encoding: "%s"'
% (setting, value)),
None, sys.exc_info()[2])
return value
def validate_encoding_error_handler(setting, value, option_parser,
config_parser=None, config_section=None):
try:
codecs.lookup_error(value)
except AttributeError: # TODO: remove (only needed prior to Python 2.3)
if value not in ('strict', 'ignore', 'replace', 'xmlcharrefreplace'):
raise (LookupError(
'unknown encoding error handler: "%s" (choices: '
'"strict", "ignore", "replace", or "xmlcharrefreplace")' % value),
None, sys.exc_info()[2])
except LookupError:
raise (LookupError(
'unknown encoding error handler: "%s" (choices: '
'"strict", "ignore", "replace", "backslashreplace", '
'"xmlcharrefreplace", and possibly others; see documentation for '
'the Python ``codecs`` module)' % value),
None, sys.exc_info()[2])
return value
def validate_encoding_and_error_handler(
setting, value, option_parser, config_parser=None, config_section=None):
"""
Side-effect: if an error handler is included in the value, it is inserted
into the appropriate place as if it was a separate setting/option.
"""
if ':' in value:
encoding, handler = value.split(':')
validate_encoding_error_handler(
setting + '_error_handler', handler, option_parser,
config_parser, config_section)
if config_parser:
config_parser.set(config_section, setting + '_error_handler',
handler)
else:
setattr(option_parser.values, setting + '_error_handler', handler)
else:
encoding = value
validate_encoding(setting, encoding, option_parser,
config_parser, config_section)
return encoding
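# Illustrative behaviour (a hedged example, not an exhaustive spec): for a
# combined value such as 'utf-8:replace' the handler part is validated and
# stored as the companion '<setting>_error_handler' setting, and only the
# encoding is returned:
#
# validate_encoding_and_error_handler('output_encoding', 'utf-8:replace', parser)
# -> 'utf-8'  (and parser.values.output_encoding_error_handler == 'replace')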
def validate_boolean(setting, value, option_parser,
config_parser=None, config_section=None):
if isinstance(value, unicode):
try:
return option_parser.booleans[value.strip().lower()]
except KeyError:
raise (LookupError('unknown boolean value: "%s"' % value),
None, sys.exc_info()[2])
return value
def validate_nonnegative_int(setting, value, option_parser,
config_parser=None, config_section=None):
value = int(value)
if value < 0:
raise ValueError('negative value; must be positive or zero')
return value
def validate_threshold(setting, value, option_parser,
config_parser=None, config_section=None):
try:
return int(value)
except ValueError:
try:
return option_parser.thresholds[value.lower()]
except (KeyError, AttributeError):
            raise (LookupError('unknown threshold: %r.' % value),
                   None, sys.exc_info()[2])
def validate_colon_separated_string_list(
setting, value, option_parser, config_parser=None, config_section=None):
if isinstance(value, unicode):
value = value.split(':')
else:
last = value.pop()
value.extend(last.split(':'))
return value
def validate_url_trailing_slash(
setting, value, option_parser, config_parser=None, config_section=None):
if not value:
return './'
elif value.endswith('/'):
return value
else:
return value + '/'
def validate_dependency_file(setting, value, option_parser,
config_parser=None, config_section=None):
try:
return docutils.utils.DependencyList(value)
except IOError:
return docutils.utils.DependencyList(None)
def validate_strip_class(setting, value, option_parser,
config_parser=None, config_section=None):
if config_parser: # validate all values
class_values = value
else: # just validate the latest value
class_values = [value[-1]]
for class_value in class_values:
normalized = docutils.nodes.make_id(class_value)
if class_value != normalized:
raise ValueError('invalid class value %r (perhaps %r?)'
% (class_value, normalized))
return value
def make_paths_absolute(pathdict, keys, base_path=None):
"""
Interpret filesystem path settings relative to the `base_path` given.
Paths are values in `pathdict` whose keys are in `keys`. Get `keys` from
`OptionParser.relative_path_settings`.
"""
if base_path is None:
base_path = os.getcwd()
for key in keys:
if key in pathdict:
value = pathdict[key]
if isinstance(value, list):
value = [make_one_path_absolute(base_path, path)
for path in value]
elif value:
value = make_one_path_absolute(base_path, value)
pathdict[key] = value
def make_one_path_absolute(base_path, path):
return os.path.abspath(os.path.join(base_path, path))
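# A small worked example (hypothetical settings dict): only the paths listed in
# `keys` are resolved against the base path; other entries stay untouched.
#
# settings = {'warnings': 'log/warnings.txt', 'title': 'Demo'}
# make_paths_absolute(settings, ('warnings',), '/home/user/project')
# settings['warnings'] -> '/home/user/project/log/warnings.txt'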
class Values(optparse.Values):
"""
Updates list attributes by extension rather than by replacement.
Works in conjunction with the `OptionParser.lists` instance attribute.
"""
def __init__(self, *args, **kwargs):
optparse.Values.__init__(self, *args, **kwargs)
if (not hasattr(self, 'record_dependencies')
or self.record_dependencies is None):
# Set up dependency list, in case it is needed.
self.record_dependencies = docutils.utils.DependencyList()
def update(self, other_dict, option_parser):
if isinstance(other_dict, Values):
other_dict = other_dict.__dict__
other_dict = other_dict.copy()
for setting in option_parser.lists.keys():
if (hasattr(self, setting) and setting in other_dict):
value = getattr(self, setting)
if value:
value += other_dict[setting]
del other_dict[setting]
self._update_loose(other_dict)
def copy(self):
"""Return a shallow copy of `self`."""
return self.__class__(defaults=self.__dict__)
class Option(optparse.Option):
ATTRS = optparse.Option.ATTRS + ['validator', 'overrides']
def process(self, opt, value, values, parser):
"""
Call the validator function on applicable settings and
evaluate the 'overrides' option.
Extends `optparse.Option.process`.
"""
result = optparse.Option.process(self, opt, value, values, parser)
setting = self.dest
if setting:
if self.validator:
value = getattr(values, setting)
try:
new_value = self.validator(setting, value, parser)
except Exception, error:
raise (optparse.OptionValueError(
'Error in option "%s":\n %s: %s'
% (opt, error.__class__.__name__, error)),
None, sys.exc_info()[2])
setattr(values, setting, new_value)
if self.overrides:
setattr(values, self.overrides, None)
return result
class OptionParser(optparse.OptionParser, docutils.SettingsSpec):
"""
Parser for command-line and library use. The `settings_spec`
specification here and in other Docutils components are merged to build
the set of command-line options and runtime settings for this process.
Common settings (defined below) and component-specific settings must not
conflict. Short options are reserved for common settings, and components
    are restricted to using long options.
"""
standard_config_files = [
'/etc/docutils.conf', # system-wide
'./docutils.conf', # project-specific
'~/.docutils'] # user-specific
"""Docutils configuration files, using ConfigParser syntax. Filenames
will be tilde-expanded later. Later files override earlier ones."""
    threshold_choices = 'info 1 warning 2 error 3 severe 4 none 5'.split()
    """Possible inputs for --report and --halt threshold values."""
thresholds = {'info': 1, 'warning': 2, 'error': 3, 'severe': 4, 'none': 5}
"""Lookup table for --report and --halt threshold values."""
booleans={'1': 1, 'on': 1, 'yes': 1, 'true': 1,
'0': 0, 'off': 0, 'no': 0, 'false': 0, '': 0}
"""Lookup table for boolean configuration file settings."""
try:
default_error_encoding = sys.stderr.encoding or 'ascii'
except AttributeError:
default_error_encoding = 'ascii'
# TODO: variable no longer needed since 'backslashreplace' is
# part of Python >= 2.3 (required since Docutils 0.6)
if hasattr(codecs, 'backslashreplace_errors'):
default_error_encoding_error_handler = 'backslashreplace'
else:
default_error_encoding_error_handler = 'replace'
settings_spec = (
'General Docutils Options',
None,
(('Specify the document title as metadata.',
['--title'], {}),
('Include a "Generated by Docutils" credit and link.',
['--generator', '-g'], {'action': 'store_true',
'validator': validate_boolean}),
('Do not include a generator credit.',
['--no-generator'], {'action': 'store_false', 'dest': 'generator'}),
('Include the date at the end of the document (UTC).',
['--date', '-d'], {'action': 'store_const', 'const': '%Y-%m-%d',
'dest': 'datestamp'}),
('Include the time & date (UTC).',
['--time', '-t'], {'action': 'store_const',
'const': '%Y-%m-%d %H:%M UTC',
'dest': 'datestamp'}),
('Do not include a datestamp of any kind.',
['--no-datestamp'], {'action': 'store_const', 'const': None,
'dest': 'datestamp'}),
('Include a "View document source" link.',
['--source-link', '-s'], {'action': 'store_true',
'validator': validate_boolean}),
('Use <URL> for a source link; implies --source-link.',
['--source-url'], {'metavar': '<URL>'}),
('Do not include a "View document source" link.',
['--no-source-link'],
{'action': 'callback', 'callback': store_multiple,
'callback_args': ('source_link', 'source_url')}),
('Link from section headers to TOC entries. (default)',
['--toc-entry-backlinks'],
{'dest': 'toc_backlinks', 'action': 'store_const', 'const': 'entry',
'default': 'entry'}),
('Link from section headers to the top of the TOC.',
['--toc-top-backlinks'],
{'dest': 'toc_backlinks', 'action': 'store_const', 'const': 'top'}),
('Disable backlinks to the table of contents.',
['--no-toc-backlinks'],
{'dest': 'toc_backlinks', 'action': 'store_false'}),
('Link from footnotes/citations to references. (default)',
['--footnote-backlinks'],
{'action': 'store_true', 'default': 1,
'validator': validate_boolean}),
('Disable backlinks from footnotes and citations.',
['--no-footnote-backlinks'],
{'dest': 'footnote_backlinks', 'action': 'store_false'}),
('Enable section numbering by Docutils. (default)',
['--section-numbering'],
{'action': 'store_true', 'dest': 'sectnum_xform',
'default': 1, 'validator': validate_boolean}),
('Disable section numbering by Docutils.',
['--no-section-numbering'],
{'action': 'store_false', 'dest': 'sectnum_xform'}),
('Remove comment elements from the document tree.',
['--strip-comments'],
{'action': 'store_true', 'validator': validate_boolean}),
('Leave comment elements in the document tree. (default)',
['--leave-comments'],
{'action': 'store_false', 'dest': 'strip_comments'}),
('Remove all elements with classes="<class>" from the document tree. '
'Warning: potentially dangerous; use with caution. '
'(Multiple-use option.)',
['--strip-elements-with-class'],
{'action': 'append', 'dest': 'strip_elements_with_classes',
'metavar': '<class>', 'validator': validate_strip_class}),
('Remove all classes="<class>" attributes from elements in the '
'document tree. Warning: potentially dangerous; use with caution. '
'(Multiple-use option.)',
['--strip-class'],
{'action': 'append', 'dest': 'strip_classes',
'metavar': '<class>', 'validator': validate_strip_class}),
('Report system messages at or higher than <level>: "info" or "1", '
'"warning"/"2" (default), "error"/"3", "severe"/"4", "none"/"5"',
['--report', '-r'], {'choices': threshold_choices, 'default': 2,
'dest': 'report_level', 'metavar': '<level>',
'validator': validate_threshold}),
('Report all system messages. (Same as "--report=1".)',
['--verbose', '-v'], {'action': 'store_const', 'const': 1,
'dest': 'report_level'}),
('Report no system messages. (Same as "--report=5".)',
['--quiet', '-q'], {'action': 'store_const', 'const': 5,
'dest': 'report_level'}),
('Halt execution at system messages at or above <level>. '
'Levels as in --report. Default: 4 (severe).',
['--halt'], {'choices': threshold_choices, 'dest': 'halt_level',
'default': 4, 'metavar': '<level>',
'validator': validate_threshold}),
('Halt at the slightest problem. Same as "--halt=info".',
['--strict'], {'action': 'store_const', 'const': 1,
'dest': 'halt_level'}),
('Enable a non-zero exit status for non-halting system messages at '
'or above <level>. Default: 5 (disabled).',
['--exit-status'], {'choices': threshold_choices,
'dest': 'exit_status_level',
'default': 5, 'metavar': '<level>',
'validator': validate_threshold}),
('Enable debug-level system messages and diagnostics.',
['--debug'], {'action': 'store_true', 'validator': validate_boolean}),
('Disable debug output. (default)',
['--no-debug'], {'action': 'store_false', 'dest': 'debug'}),
('Send the output of system messages to <file>.',
['--warnings'], {'dest': 'warning_stream', 'metavar': '<file>'}),
('Enable Python tracebacks when Docutils is halted.',
['--traceback'], {'action': 'store_true', 'default': None,
'validator': validate_boolean}),
('Disable Python tracebacks. (default)',
['--no-traceback'], {'dest': 'traceback', 'action': 'store_false'}),
('Specify the encoding and optionally the '
'error handler of input text. Default: <locale-dependent>:strict.',
['--input-encoding', '-i'],
{'metavar': '<name[:handler]>',
'validator': validate_encoding_and_error_handler}),
('Specify the error handler for undecodable characters. '
'Choices: "strict" (default), "ignore", and "replace".',
['--input-encoding-error-handler'],
{'default': 'strict', 'validator': validate_encoding_error_handler}),
('Specify the text encoding and optionally the error handler for '
'output. Default: UTF-8:strict.',
['--output-encoding', '-o'],
{'metavar': '<name[:handler]>', 'default': 'utf-8',
'validator': validate_encoding_and_error_handler}),
('Specify error handler for unencodable output characters; '
'"strict" (default), "ignore", "replace", '
'"xmlcharrefreplace", "backslashreplace".',
['--output-encoding-error-handler'],
{'default': 'strict', 'validator': validate_encoding_error_handler}),
('Specify text encoding and error handler for error output. '
'Default: %s:%s.'
% (default_error_encoding, default_error_encoding_error_handler),
['--error-encoding', '-e'],
{'metavar': '<name[:handler]>', 'default': default_error_encoding,
'validator': validate_encoding_and_error_handler}),
('Specify the error handler for unencodable characters in '
'error output. Default: %s.'
% default_error_encoding_error_handler,
['--error-encoding-error-handler'],
{'default': default_error_encoding_error_handler,
'validator': validate_encoding_error_handler}),
('Specify the language (as 2-letter code). Default: en.',
['--language', '-l'], {'dest': 'language_code', 'default': 'en',
'metavar': '<name>'}),
('Write output file dependencies to <file>.',
['--record-dependencies'],
{'metavar': '<file>', 'validator': validate_dependency_file,
'default': None}), # default set in Values class
('Read configuration settings from <file>, if it exists.',
['--config'], {'metavar': '<file>', 'type': 'string',
'action': 'callback', 'callback': read_config_file}),
("Show this program's version number and exit.",
['--version', '-V'], {'action': 'version'}),
('Show this help message and exit.',
['--help', '-h'], {'action': 'help'}),
# Typically not useful for non-programmatical use:
(SUPPRESS_HELP, ['--id-prefix'], {'default': ''}),
(SUPPRESS_HELP, ['--auto-id-prefix'], {'default': 'id'}),
# Hidden options, for development use only:
(SUPPRESS_HELP, ['--dump-settings'], {'action': 'store_true'}),
(SUPPRESS_HELP, ['--dump-internals'], {'action': 'store_true'}),
(SUPPRESS_HELP, ['--dump-transforms'], {'action': 'store_true'}),
(SUPPRESS_HELP, ['--dump-pseudo-xml'], {'action': 'store_true'}),
(SUPPRESS_HELP, ['--expose-internal-attribute'],
{'action': 'append', 'dest': 'expose_internals',
'validator': validate_colon_separated_string_list}),
(SUPPRESS_HELP, ['--strict-visitor'], {'action': 'store_true'}),
))
"""Runtime settings and command-line options common to all Docutils front
ends. Setting specs specific to individual Docutils components are also
used (see `populate_from_components()`)."""
settings_defaults = {'_disable_config': None,
'_source': None,
'_destination': None,
'_config_files': None}
"""Defaults for settings that don't have command-line option equivalents."""
relative_path_settings = ('warning_stream',)
config_section = 'general'
version_template = ('%%prog (Docutils %s [%s], Python %s, on %s)'
% (docutils.__version__, docutils.__version_details__,
sys.version.split()[0], sys.platform))
"""Default version message."""
def __init__(self, components=(), defaults=None, read_config_files=None,
*args, **kwargs):
"""
`components` is a list of Docutils components each containing a
``.settings_spec`` attribute. `defaults` is a mapping of setting
default overrides.
"""
self.lists = {}
"""Set of list-type settings."""
self.config_files = []
"""List of paths of applied configuration files."""
optparse.OptionParser.__init__(
self, option_class=Option, add_help_option=None,
formatter=optparse.TitledHelpFormatter(width=78),
*args, **kwargs)
if not self.version:
self.version = self.version_template
# Make an instance copy (it will be modified):
self.relative_path_settings = list(self.relative_path_settings)
self.components = (self,) + tuple(components)
self.populate_from_components(self.components)
self.set_defaults_from_dict(defaults or {})
if read_config_files and not self.defaults['_disable_config']:
try:
config_settings = self.get_standard_config_settings()
except ValueError, error:
self.error(error)
self.set_defaults_from_dict(config_settings.__dict__)
def populate_from_components(self, components):
"""
For each component, first populate from the `SettingsSpec.settings_spec`
structure, then from the `SettingsSpec.settings_defaults` dictionary.
After all components have been processed, check for and populate from
each component's `SettingsSpec.settings_default_overrides` dictionary.
"""
for component in components:
if component is None:
continue
settings_spec = component.settings_spec
self.relative_path_settings.extend(
component.relative_path_settings)
for i in range(0, len(settings_spec), 3):
title, description, option_spec = settings_spec[i:i+3]
if title:
group = optparse.OptionGroup(self, title, description)
self.add_option_group(group)
else:
group = self # single options
for (help_text, option_strings, kwargs) in option_spec:
option = group.add_option(help=help_text, *option_strings,
**kwargs)
if kwargs.get('action') == 'append':
self.lists[option.dest] = 1
if component.settings_defaults:
self.defaults.update(component.settings_defaults)
for component in components:
if component and component.settings_default_overrides:
self.defaults.update(component.settings_default_overrides)
def get_standard_config_files(self):
"""Return list of config files, from environment or standard."""
try:
config_files = os.environ['DOCUTILSCONFIG'].split(os.pathsep)
except KeyError:
config_files = self.standard_config_files
        # If 'HOME' is not set, expanduser() requires the 'pwd' module which is
# not available under certain environments, for example, within
# mod_python. The publisher ends up in here, and we need to publish
# from within mod_python. Therefore we need to avoid expanding when we
# are in those environments.
expand = os.path.expanduser
if 'HOME' not in os.environ:
try:
import pwd
except ImportError:
expand = lambda x: x
return [expand(f) for f in config_files if f.strip()]
def get_standard_config_settings(self):
settings = Values()
for filename in self.get_standard_config_files():
settings.update(self.get_config_file_settings(filename), self)
return settings
def get_config_file_settings(self, config_file):
"""Returns a dictionary containing appropriate config file settings."""
parser = ConfigParser()
parser.read(config_file, self)
self.config_files.extend(parser._files)
base_path = os.path.dirname(config_file)
applied = {}
settings = Values()
for component in self.components:
if not component:
continue
for section in (tuple(component.config_section_dependencies or ())
+ (component.config_section,)):
if section in applied:
continue
applied[section] = 1
settings.update(parser.get_section(section), self)
make_paths_absolute(
settings.__dict__, self.relative_path_settings, base_path)
return settings.__dict__
def check_values(self, values, args):
"""Store positional arguments as runtime settings."""
values._source, values._destination = self.check_args(args)
make_paths_absolute(values.__dict__, self.relative_path_settings,
os.getcwd())
values._config_files = self.config_files
return values
def check_args(self, args):
source = destination = None
if args:
source = args.pop(0)
if source == '-': # means stdin
source = None
if args:
destination = args.pop(0)
if destination == '-': # means stdout
destination = None
if args:
self.error('Maximum 2 arguments allowed.')
if source and source == destination:
self.error('Do not specify the same file for both source and '
'destination. It will clobber the source file.')
return source, destination
def set_defaults_from_dict(self, defaults):
self.defaults.update(defaults)
def get_default_values(self):
"""Needed to get custom `Values` instances."""
defaults = Values(self.defaults)
defaults._config_files = self.config_files
return defaults
def get_option_by_dest(self, dest):
"""
Get an option by its dest.
If you're supplying a dest which is shared by several options,
it is undefined which option of those is returned.
A KeyError is raised if there is no option with the supplied
dest.
"""
for group in self.option_groups + [self]:
for option in group.option_list:
if option.dest == dest:
return option
raise KeyError('No option with dest == %r.' % dest)
class ConfigParser(CP.ConfigParser):
old_settings = {
'pep_stylesheet': ('pep_html writer', 'stylesheet'),
'pep_stylesheet_path': ('pep_html writer', 'stylesheet_path'),
'pep_template': ('pep_html writer', 'template')}
"""{old setting: (new section, new setting)} mapping, used by
`handle_old_config`, to convert settings from the old [options] section."""
old_warning = """
The "[option]" section is deprecated. Support for old-format configuration
files may be removed in a future Docutils release. Please revise your
configuration files. See <http://docutils.sf.net/docs/user/config.html>,
section "Old-Format Configuration Files".
"""
not_utf8_error = """\
Unable to read configuration file "%s": content not encoded as UTF-8.
Skipping "%s" configuration file.
"""
def __init__(self, *args, **kwargs):
CP.ConfigParser.__init__(self, *args, **kwargs)
self._files = []
"""List of paths of configuration files read."""
def read(self, filenames, option_parser):
if type(filenames) in (str, unicode):
filenames = [filenames]
for filename in filenames:
try:
# Config files must be UTF-8-encoded:
fp = codecs.open(filename, 'r', 'utf-8')
except IOError:
continue
try:
CP.ConfigParser.readfp(self, fp, filename)
except UnicodeDecodeError:
sys.stderr.write(self.not_utf8_error % (filename, filename))
fp.close()
continue
fp.close()
self._files.append(filename)
if self.has_section('options'):
self.handle_old_config(filename)
self.validate_settings(filename, option_parser)
def handle_old_config(self, filename):
warnings.warn_explicit(self.old_warning, ConfigDeprecationWarning,
filename, 0)
options = self.get_section('options')
if not self.has_section('general'):
self.add_section('general')
for key, value in options.items():
if key in self.old_settings:
section, setting = self.old_settings[key]
if not self.has_section(section):
self.add_section(section)
else:
section = 'general'
setting = key
if not self.has_option(section, setting):
self.set(section, setting, value)
self.remove_section('options')
def validate_settings(self, filename, option_parser):
"""
Call the validator function and implement overrides on all applicable
settings.
"""
for section in self.sections():
for setting in self.options(section):
try:
option = option_parser.get_option_by_dest(setting)
except KeyError:
continue
if option.validator:
value = self.get(section, setting, raw=1)
try:
new_value = option.validator(
setting, value, option_parser,
config_parser=self, config_section=section)
except Exception, error:
raise (ValueError(
'Error in config file "%s", section "[%s]":\n'
' %s: %s\n %s = %s'
% (filename, section, error.__class__.__name__,
error, setting, value)), None, sys.exc_info()[2])
self.set(section, setting, new_value)
if option.overrides:
self.set(section, option.overrides, None)
def optionxform(self, optionstr):
"""
Transform '-' to '_' so the cmdline form of option names can be used.
"""
return optionstr.lower().replace('-', '_')
def get_section(self, section):
"""
Return a given section as a dictionary (empty if the section
doesn't exist).
"""
section_dict = {}
if self.has_section(section):
for option in self.options(section):
section_dict[option] = self.get(section, option, raw=1)
return section_dict
class ConfigDeprecationWarning(DeprecationWarning):
"""Warning for deprecated configuration file features."""
|
edisonlz/fruit
|
web_project/base/site-packages/docutils/frontend.py
|
Python
|
apache-2.0
| 33,065
| 0.000726
|
import os
from flask import Flask, render_template, request
from PIL import Image
import sys
import pyocr
import pyocr.builders
import re
import json
__author__ = 'K_K_N'
app = Flask(__name__)
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
def ocr(image_file):
tools = pyocr.get_available_tools()
if len(tools) == 0:
print("No OCR tool found")
sys.exit(1)
# The tools are returned in the recommended order of usage
tool = tools[0]
#print("Will use tool '%s'" % (tool.get_name()))
# Ex: Will use tool 'libtesseract'
langs = tool.get_available_languages()
#print("Available languages: %s" % ", ".join(langs))
lang = langs[1]
#print("Will use lang '%s'" % (lang))
txt = tool.image_to_string(
Image.open(image_file),
lang=lang,
builder=pyocr.builders.TextBuilder()
)
    # Pull the NIK (eKTP identity number) out of the OCR text: one prefix
    # character (e.g. ':' or whitespace) followed by a run of up to 20 digits.
    ektp_no = re.search( r'[?:nik\s*:\s*](\d{1,20})\s*', txt, re.I)
#print ektp_no
#if ektp_no:
# print "ektp_no.group() : ", ektp_no.group()
data = {}
data['ektp'] = ektp_no.group().strip()
return json.dumps(data)
@app.route("/")
def index():
return render_template("upload.html")
@app.route("/upload", methods=['POST'])
def upload():
target = os.path.join(APP_ROOT, 'images/')
print(target)
if not os.path.isdir(target):
os.mkdir(target)
for file in request.files.getlist("file"):
print(file)
filename = file.filename
destination = "/".join([target, filename])
print(destination)
file.save(destination)
#Return JSON
#print txt
#file.delete(destination)
return ocr(destination)
#return json.dumps(txt)
if __name__ == "__main__":
app.run(port=4555, debug=True)
|
ankutty/OCR-Tesseract
|
ocr_app.py
|
Python
|
apache-2.0
| 1,764
| 0.00907
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A test of invocation-side code unconnected to an RPC server."""
import unittest
from grpc._adapter import _intermediary_low
from grpc._links import invocation
from grpc.framework.interfaces.links import links
from tests.unit.framework.common import test_constants
from tests.unit.framework.interfaces.links import test_cases
from tests.unit.framework.interfaces.links import test_utilities
_NULL_BEHAVIOR = lambda unused_argument: None
class LonelyInvocationLinkTest(unittest.TestCase):
def testUpAndDown(self):
channel = _intermediary_low.Channel('nonexistent:54321', None)
invocation_link = invocation.invocation_link(
channel, 'nonexistent', None, {}, {})
invocation_link.start()
invocation_link.stop()
def _test_lonely_invocation_with_termination(self, termination):
test_operation_id = object()
test_group = 'test package.Test Service'
test_method = 'test method'
invocation_link_mate = test_utilities.RecordingLink()
channel = _intermediary_low.Channel('nonexistent:54321', None)
invocation_link = invocation.invocation_link(
channel, 'nonexistent', None, {}, {})
invocation_link.join_link(invocation_link_mate)
invocation_link.start()
ticket = links.Ticket(
test_operation_id, 0, test_group, test_method,
links.Ticket.Subscription.FULL, test_constants.SHORT_TIMEOUT, 1, None,
None, None, None, None, termination, None)
invocation_link.accept_ticket(ticket)
invocation_link_mate.block_until_tickets_satisfy(test_cases.terminated)
invocation_link.stop()
self.assertIsNot(
invocation_link_mate.tickets()[-1].termination,
links.Ticket.Termination.COMPLETION)
def testLonelyInvocationLinkWithCommencementTicket(self):
self._test_lonely_invocation_with_termination(None)
def testLonelyInvocationLinkWithEntireTicket(self):
self._test_lonely_invocation_with_termination(
links.Ticket.Termination.COMPLETION)
if __name__ == '__main__':
unittest.main()
|
shishaochen/TensorFlow-0.8-Win
|
third_party/grpc/src/python/grpcio/tests/unit/_links/_lonely_invocation_link_test.py
|
Python
|
apache-2.0
| 3,549
| 0.001691
|
#!/usr/bin/env python
import numpy as np
import torch as th
from torchvision import datasets, transforms
from nnexp import learn
if __name__ == '__main__':
dataset = datasets.MNIST('./data', train=True, download=True, transform=transforms.ToTensor())
learn('mnist_simple', dataset)
|
seba-1511/nnexp
|
examples/mnist_simple.py
|
Python
|
apache-2.0
| 293
| 0.006826
|
#!/usr/bin/env python
#########################################
# Installation module for arachni
#########################################
# AUTHOR OF MODULE NAME
AUTHOR="Nathan Underwood (sai nate)"
# DESCRIPTION OF THE MODULE
DESCRIPTION="Website / webapp vulnerability scanner."
# INSTALLATION TYPE
# OPTIONS GIT, SVN, FILE, DOWNLOAD
INSTALL_TYPE="GIT"
#LOCATION OF THE FILE OR GIT / SVN REPOSITORY
REPOSITORY_LOCATION="https://github.com/Arachni/arachni.git"
# WHERE DO YOU WANT TO INSTALL IT
INSTALL_LOCATION="arachni"
# DEPENDS FOR DEBIAN INSTALLS
DEBIAN=""
#COMMANDS TO RUN AFTER
AFTER_COMMANDS=""
|
piratica/ptf
|
vulnerability-analysis/arachni.py
|
Python
|
gpl-3.0
| 614
| 0.016287
|
import os
__version__ = 'v0.0.7' # update also in setup.py
root_dir = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
info = {
"name": "NiLabels",
"version": __version__,
"description": "",
"repository": {
"type": "git",
"url": ""
},
"author": "Sebastiano Ferraris",
"dependencies": {
# requirements.txt automatically generated using pipreqs
"python requirements" : "{0}/requirements.txt".format(root_dir)
}
}
definition_template = """ A template is the average, computed with a chosen protocol, of a series of image acquisitions
of the same anatomy, or in general of different objects that share common features.
"""
definition_atlas = """ An atlas is the segmentation of the template, obtained by averaging, with a chosen protocol,
the series of segmentations corresponding to the series of image acquisitions that generates the template.
"""
definition_label = """ A segmentation assigns each region a label, and labels
are represented as subsets of voxels with the same positive integer value.
"""
nomenclature_conventions = """ pfi_xxx = path to file xxx, \npfo_xxx = path to folder xxx,
\nin_xxx = input data structure xxx, \nout_xxx = output data structure xxx, \nz_ : prefix to temporary files and folders,
\nfin_ : file name.
"""
|
SebastianoF/LabelsManager
|
nilabels/definitions.py
|
Python
|
mit
| 1,452
| 0.006198
|
from requests import post
import io
import base64
class ZivService(object):
def __init__(self, cnc_url, user=None, password=None, sync=True):
self.cnc_url = cnc_url
self.sync = sync
self.auth = None
if user and password:
self.auth = (user,password)
def send_cycle(self, filename, cycle_filedata):
"""Send a cycle file to the concentrator service
Keyword arguments:
filename -- the name of our file (doesn't matter)
cycle_filedata -- the file to send, encoded as a base64 string
"""
filecontent = base64.b64decode(cycle_filedata)
url = self.cnc_url + ('/' if (self.cnc_url[-1] != '/') else '') +'cct/cycles/'
result = None
if self.auth:
result = post(url, files={'file': (filename, filecontent)}, auth=self.auth)
else:
result = post(url, files={'file': (filename, filecontent)})
return result
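# Minimal usage sketch (hypothetical endpoint, credentials and file name): the
# cycle file is passed base64-encoded; send_cycle() decodes it and POSTs it as
# a multipart upload to <cnc_url>/cct/cycles/, returning the requests response:
#
#     import base64
#     service = ZivService('http://cnc.example.com/', user='user', password='secret')
#     with open('cycle.xml', 'rb') as f:
#         payload = base64.b64encode(f.read())
#     response = service.send_cycle('cycle.xml', payload)
#     print(response.status_code)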
|
gisce/primestg
|
primestg/ziv_service.py
|
Python
|
agpl-3.0
| 959
| 0.005214
|
from setuptools import setup
setup(
name='ipy',
packages=['ipy'],
include_package_data=True,
install_requires=[
'flask'
],
)
|
rmcintosh/ipy
|
setup.py
|
Python
|
mit
| 155
| 0
|
#!/usr/bin/env python
from setuptools import setup, Extension
setup(
name = "python-libmemcached",
version = "0.17.0",
description="python memcached client wrapped on libmemcached",
maintainer="subdragon",
maintainer_email="subdragon@gmail.com",
requires = ['pyrex'],
    # This assumes that libmemcached is installed with base /usr/local
ext_modules=[Extension('cmemcached', ['cmemcached.pyx'],
libraries=['memcached'],
)],
test_suite="cmemcached_test",
)
|
k0001/python-libmemcached
|
setup.py
|
Python
|
bsd-3-clause
| 517
| 0.040619
|
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2016, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# The 'sysconfig' module requires Makefile and pyconfig.h files from
# Python installation. 'sysconfig' parses these files to get some
# information from them.
# TODO Verify that bundling Makefile and pyconfig.h is still required for Python 3.
import sysconfig
import os
from PyInstaller.utils.hooks import relpath_to_config_or_make
_CONFIG_H = sysconfig.get_config_h_filename()
if hasattr(sysconfig, 'get_makefile_filename'):
# sysconfig.get_makefile_filename is missing in Python < 2.7.9
_MAKEFILE = sysconfig.get_makefile_filename()
else:
_MAKEFILE = sysconfig._get_makefile_filename()
datas = [(_CONFIG_H, relpath_to_config_or_make(_CONFIG_H))]
# The Makefile does not exist on all platforms, eg. on Windows
if os.path.exists(_MAKEFILE):
datas.append((_MAKEFILE, relpath_to_config_or_make(_MAKEFILE)))
|
ijat/Hotspot-PUTRA-Auto-login
|
PyInstaller-3.2/PyInstaller/hooks/hook-sysconfig.py
|
Python
|
gpl-3.0
| 1,238
| 0.002423
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..model import Level1Design
def test_Level1Design_inputs():
input_map = dict(bases=dict(mandatory=True,
),
contrasts=dict(),
ignore_exception=dict(nohash=True,
usedefault=True,
),
interscan_interval=dict(mandatory=True,
),
model_serial_correlations=dict(mandatory=True,
),
orthogonalization=dict(),
session_info=dict(mandatory=True,
),
)
inputs = Level1Design.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_Level1Design_outputs():
output_map = dict(ev_files=dict(),
fsf_files=dict(),
)
outputs = Level1Design.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
mick-d/nipype
|
nipype/interfaces/fsl/tests/test_auto_Level1Design.py
|
Python
|
bsd-3-clause
| 1,023
| 0.012708
|
#!/usr/bin/env python
import sys, json, psycopg2, argparse
parser = argparse.ArgumentParser(description='Imports word data into the taboo database.')
parser.add_argument('--verified', dest='verified', action='store_true', help='include if these words are verified as good quality')
parser.add_argument('--source', dest='source', help='include to set the source of these imported words')
args = parser.parse_args()
CONN_STR = 'dbname=prod user=prod'
data_str = '\n'.join(sys.stdin.readlines())
data = json.loads(data_str)
conn = psycopg2.connect(CONN_STR)
conn.autocommit = True
cur = conn.cursor()
count = 0
for word in data:
try:
cur.execute("INSERT INTO words (word, skipped, correct, status, source) VALUES(%s, %s, %s, %s, %s) RETURNING wid",
(word, 0, 0, 'approved' if args.verified == True else 'unverified', args.source))
wordid = cur.fetchone()[0]
prohibited_count = 0
for prohibited in data[word]:
prohibited_count = prohibited_count + 1
cur.execute("INSERT INTO prohibited_words (wid, word, rank) VALUES(%s, %s, %s)",
(wordid, prohibited, prohibited_count))
count = count + 1
except Exception as e:
print e
cur.close()
conn.close()
print 'Inserted ' + str(count) + ' words'
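# Illustrative usage sketch (hypothetical file name and --source value): stdin
# must carry a JSON object mapping each word to its ordered list of prohibited
# words, e.g.
#
#     {"python": ["snake", "language", "code"]}
#
# which could be imported with:
#
#     cat words.json | ./data-importer.py --verified --source my-wordlist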
|
jbowens/taboo
|
wordgen/data-importer.py
|
Python
|
mit
| 1,307
| 0.007651
|
import psidialogs
s = psidialogs.choice(["1", "2", "3"], "Choose a number!")
if s is not None:
print(s)
|
ponty/psidialogs
|
psidialogs/examples/choice.py
|
Python
|
bsd-2-clause
| 109
| 0
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010 Jérémie DECOCK (http://www.jdhp.org)
import numpy as np
from pyarm import fig
class MuscleModel:
"Muscle model."
# CONSTANTS ###############################################################
name = 'Fake'
###########################################################################
def __init__(self):
# Init datas to plot
fig.subfig('command',
title='Command',
xlabel='time (s)',
ylabel='Command',
ylim=[-0.1, 1.1])
#legend=('shoulder +', 'shoulder -',
# 'elbow +', 'elbow -'))
def compute_torque(self, angles, velocities, command):
"Compute the torque"
torque = np.zeros(2)
if len(command) > 2:
torque[0] = (command[0] - command[1])
torque[1] = (command[2] - command[3])
fig.append('command', command[0:4])
else:
torque = np.array(command)
fig.append('command', command[0:2])
return torque
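# Illustrative example (hypothetical command values): a command with more than
# two elements is read as agonist/antagonist pairs per joint, so a command of
# [0.6, 0.1, 0.3, 0.3] gives a torque of [0.6 - 0.1, 0.3 - 0.3] = [0.5, 0.0];
# a two-element command is used directly as the torque vector.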
|
jeremiedecock/pyarm
|
pyarm/model/muscle/fake_muscle_model.py
|
Python
|
mit
| 1,107
| 0.00543
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import sys
import bs4
import json
import re
def xml2json(s):
global timestamp
timestamp = 0
s = s.replace(u'\xa0', u' ')
soup = bs4.BeautifulSoup(s, features="lxml")
intervention_vierge = {"intervenant": "", "contexte": ""}
intervention_vierge["source"] = "https://www.assemblee-nationale.fr/dyn/15/comptes-rendus/seance/"+soup.uid.string
m = soup.metadonnees
dateseance = str(m.dateseance.string)
intervention_vierge["date"] = "%04d-%02d-%02d" % (int(dateseance[0:4]), int(dateseance[4:6]), int(dateseance[6:8]))
intervention_vierge["heure"] = "%02d:%02d" % (int(dateseance[8:10]), int(dateseance[10:12]))
intervention_vierge["session"] = str(m.session.string)[-9:].replace('-', '')
contextes = ['']
numeros_lois = None
intervenant2fonction = {}
last_titre = ''
for p in soup.find_all(['paragraphe', 'point']):
intervention = intervention_vierge.copy()
        # Handle titles/contexts and law numbers
if p.name == "point" and p.texte and p.texte.get_text() and int(p['nivpoint']) < 4:
contextes = contextes[:int(p['nivpoint']) -1 ]
if not contextes:
contextes = []
contextes.append(p.texte.get_text().replace('\n', ''))
if p['valeur'] and p['valeur'][0:9] == ' (n[[o]] ':
numeros_lois = p['valeur'][9:-1].replace(' ', '')
if len(contextes) > 1:
intervention["contexte"] = contextes[0] + " > " + contextes[-1]
elif len(contextes) == 1:
intervention["contexte"] = contextes[0]
if p.name == "point":
intervention['intervention'] = "<p>"+contextes[-1]+"</p>"
if (last_titre != contextes[-1]):
printintervention(intervention)
last_titre = contextes[-1]
continue
        # Handle the interventions (speeches)
if numeros_lois:
intervention['numeros_loi'] = numeros_lois
intervention["source"] += "#"+p['id_syceron']
if len(p.orateurs):
intervention["intervenant"] = p.orateurs.orateur.nom.get_text()
if p['id_mandat'] and p['id_mandat'] != "-1":
intervention["intervenant_url"] = "http://www2.assemblee-nationale.fr/deputes/fiche/OMC_"+p['id_acteur']
intervention["intervenant"] = p['id_acteur']
if p.orateurs.orateur.qualite and p.orateurs.orateur.qualite.string:
intervention['fonction'] = p.orateurs.orateur.qualite.get_text()
if not intervenant2fonction.get(intervention["intervenant"]) and intervention['fonction']:
intervenant2fonction[intervention["intervenant"]] = intervention['fonction']
elif intervention["intervenant"] == "Mme la présidente":
intervention['fonction'] = "présidente"
intervention["intervenant"] = '';
elif intervention["intervenant"] == "M le président":
intervention['fonction'] = "président"
intervention["intervenant"] = '';
else:
intervention['fonction'] = intervenant2fonction.get(intervention["intervenant"], "")
texte = "<p>"
isdidascalie = False
texte_didascalie = ""
t_string = str(p.texte)
t_string = t_string.replace('>\n', '> ')
t_string = re.sub(r' ?<\/?texte> ?', '', t_string)
t_string = t_string.replace('<italique>', '<i>')
t_string = t_string.replace('</italique>', '</i>')
t_string = t_string.replace('n<exposant>o</exposant>', 'n°')
t_string = t_string.replace('n<exposant>os</exposant>', 'n°')
t_string = t_string.replace('</i> <i>', ' ')
t_string = t_string.replace('<br/>', '</p><p>')
texte += t_string
texte += "</p>"
i = 0;
for i in re.split(' ?(<i>\([^<]*\)</i> ?)', texte):
if i[0] == ' ':
i = i[1:]
if i[-1] == ' ':
i = i[:-1]
if (i[0:3] != '<p>'):
i = '<p>' + i
if (i[-4:] != '</p>'):
i = i + '</p>'
if i.find('<p><i>') == 0:
didasc = intervention_vierge
didasc["intervention"] = i
didasc["contexte"] = intervention["contexte"]
printintervention(didasc)
else:
intervention["intervention"] = i
printintervention(intervention)
def printintervention(i):
global timestamp
if i['intervention'] == '<p></p>' or i['intervention'] == '<p> </p>':
return
intervenants = i['intervenant'].split(' et ')
timestamp += 10
for intervenant in intervenants:
i['timestamp'] = str(timestamp)
i['intervenant'] = intervenant
print(json.dumps(i))
content_file = sys.argv[1]
with open(content_file, encoding='utf-8') as f:
xml2json(f.read())
|
regardscitoyens/nosdeputes.fr
|
batch/hemicycle/parse_hemicycle.py
|
Python
|
agpl-3.0
| 4,977
| 0.005231
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from hashlib import sha256
from abc import abstractmethod
from pathlib import Path
from numbers import Real
import warnings
import logging
import numpy as np
from scipy.optimize import minimize, dual_annealing
from sisl._dispatcher import AbstractDispatch
from sisl._dispatcher import ClassDispatcher
from sisl.io import tableSile
from sisl.utils import PropertyDict
__all__ = ["BaseMinimize", "LocalMinimize", "DualAnnealingMinimize",
"MinimizeToDispatcher"]
_log = logging.getLogger("sisl_toolbox.siesta.minimize")
def _convert_optimize_result(minimizer, result):
""" Convert optimize result to conform to the scaling procedure performed """
# reverse optimized value
# and also store the normalized values (to match the gradients etc)
if minimizer.norm[0] in ("none", "identity"):
# We don't need to do anything
# We haven't scaled anything
return result
result.x_norm = result.x
result.x = minimizer.reverse_normalize(result.x)
if hasattr(result, "jac"):
# transform the jacobian
# The jacobian is dM / dx with dx possibly being scaled
# So here we change multiply by dx / dv
result.jac_norm = result.jac.copy()
result.jac /= minimizer.reverse_normalize(np.ones(len(minimizer)),
with_offset=False)
return result
class BaseMinimize:
# Basic minimizer basically used for figuring out whether
# to use a local or global minimization strategy
def __init__(self, variables=(), out="minimize.dat", norm='identity'):
# ensure we have an ordered dict, for one reason or the other
self.variables = []
if variables is not None:
for v in variables:
self.add_variable(v)
self.reset(out, norm)
def reset(self, out=None, norm=None):
""" Reset data table to be able to restart """
# While this *could* be a named-tuple, we would not be able
        # to override the attribute, hence we use a property dict to the
        # same effect.
self.data = PropertyDict(x=[], y=[], hash=[])
# log
log = ""
if not out is None:
log += f" out={str(out)}"
self.out = Path(out)
if not norm is None:
log += f" norm={str(norm)}"
if isinstance(norm, str):
self.norm = (norm, 1.)
elif isinstance(norm, Real):
self.norm = ("l2", norm)
else:
self.norm = norm
_log.info(f"{self.__class__.__name__} resetting{log}")
def normalize(self, variables, with_offset=True):
if isinstance(variables, str):
# this means we grab the variable name from the attributes
# of each variable
out = np.empty(len(self.variables))
for i, v in enumerate(self.variables):
out[i] = v.normalize(v.attrs[variables], self.norm, with_offset=with_offset)
else:
out = np.empty_like(variables)
for i, v in enumerate(variables):
out[i] = self.variables[i].normalize(v, self.norm, with_offset=with_offset)
return out
def normalize_bounds(self):
return [v.normalize(v.bounds, self.norm) for v in self.variables]
def reverse_normalize(self, variables, with_offset=True):
# ensures numpy array
out = np.empty_like(variables)
for i, v in enumerate(variables):
out[i] = self.variables[i].reverse_normalize(v, self.norm, with_offset=with_offset)
return out
def __getitem__(self, key):
return self.variables[key]
@staticmethod
def get_hash(data):
return sha256(data.view(np.uint8)).hexdigest()
def add_variable(self, variable):
if self.variables.count(variable.name) != 0:
raise ValueError(f"Multiple variables with same name {variable.name}")
self.variables.append(variable)
@property
def names(self):
return [v.name for v in self.variables]
@property
def values(self):
return np.array([v.value for v in self.variables], np.float64)
def update(self, variables):
""" Update internal variables for the values """
for var, v in zip(self.variables, variables):
var.update(v)
    def dict_values(self):
        """ Get all values in a dictionary """
return {v.name: v.value for v in self.variables}
# Define a dispatcher for converting Minimize data to some specific data
# BaseMinimize().to.skopt() will convert to an skopt.OptimizationResult structure
to = ClassDispatcher("to",
obj_getattr=lambda obj, key:
(_ for _ in ()).throw(
AttributeError((f"{obj}.to does not implement '{key}' "
f"dispatcher, are you using it incorrectly?"))
)
)
def __enter__(self):
""" Open the file and fill with stuff """
_log.debug(f"__enter__ {self.__class__.__name__}")
# check if the file exists
if self.out.is_file():
# read in previous data
# this will be "[variables, runs]"
data, header = tableSile(self.out).read_data(ret_header=True)
else:
data = np.array([])
# check if the file exists
if self.out.is_file() and data.size > 0:
nvars = data.shape[0] - 1
if nvars != len(self):
raise ValueError(f"Found old file {self.out} which contains previous data for another number of parameters, please delete or move file")
# now parse header
*header, _ = header[1:].split()
idx = []
for name in self.names:
# find index in header
for i, head in enumerate(header):
if head == name:
idx.append(i)
break
if nvars != len(idx):
print(header)
print(self.names)
print(idx)
raise ValueError(f"Found old file {self.out} which contains previous data with some variables being renamed, please correct header or move file")
# add functional value, no pivot
idx.append(len(self))
# re-arrange data (in case user swapped order of variables)
data = np.ascontiguousarray(data[idx].T)
x, y = data[:, :-1], data[:, -1]
# We populate with hashes without the functional
# That would mean we can't compare hashes between input arguments
# only make the first index a list (x.tolist() makes everything a list)
self.data.x = [xi for xi in x]
self.data.y = [yi for yi in y]
self.data.hash = list(map(self.get_hash, self.data.x))
# Re-open file (overwriting it)
# First output a few things in this file
comment = f"Created by sisl '{self.__class__.__name__}'."
header = self.names + ["metric"]
if len(self.data.x) == 0:
self._fh = tableSile(self.out, 'w').__enter__()
self._fh.write_data(comment=comment, header=header)
else:
            comment += f" The first {len(self.data.x)} lines contain prior content."
data = np.column_stack((self.data.x, self.data.y))
self._fh = tableSile(self.out, 'w').__enter__()
self._fh.write_data(data.T, comment=comment, header=header, fmt='20.17e')
self._fh.flush()
return self
def __exit__(self, *args, **kwargs):
""" Exit routine """
self._fh.__exit__(*args, **kwargs)
# clean-up
del self._fh
def __len__(self):
return len(self.variables)
@abstractmethod
def __call__(self, variables, *args):
""" Actual running code that takes `variables` conforming to the order of initial setup.
It will return the functional of the minimize method
Parameters
----------
variables : array-like
variables to be minimized according to the metric `self.metric`
"""
def _minimize_func(self, norm_variables, *args):
""" Minimization function passed to the minimization method
        This is a wrapper which does the following:
1. Convert input values from normalized to regular values
2. Update internal variables with the value currently being
           run.
3. Check if the values have already been calculated, if so
return the metric directly from the stored table.
4. Else, calculate the metric using the ``self.__call__``
5. Append values to the data and hash it.
Parameters
----------
norm_variables : array_like
normed variables to be minimized
*args :
arguments passed directly to the ``self.__call__`` method
"""
_log.debug(f"{self.__class__.__name__}._minimize_func")
# Update internal set of variables
variables = self.reverse_normalize(norm_variables)
self.update(variables)
# First get the hash of the current variables
current_hash = self.get_hash(variables)
try:
idx = self.data.hash.index(current_hash)
# immediately return functional value that is hashed
_log.info(f"{self.__class__.__name__}._minimize_func, using prior hashed calculation {idx}")
return self.data.y[idx]
except ValueError:
# in case the hash is not found
pass
# Else we have to call minimize
metric = np.array(self(variables, *args))
# add the data to the output file and hash it
self._fh.write_data(variables.reshape(-1, 1), metric.reshape(-1, 1), fmt='20.17e')
self._fh.flush()
self.data.x.append(variables)
self.data.y.append(metric)
self.data.hash.append(current_hash)
return metric
@abstractmethod
def run(self, *args, **kwargs):
""" Run the minimize model """
class LocalMinimize(BaseMinimize):
def run(self, *args, **kwargs):
# Run minimization (always with normalized values)
norm_v0 = self.normalize(self.values)
bounds = self.normalize_bounds()
with self:
opt = minimize(self._minimize_func,
x0=norm_v0, args=args, bounds=bounds,
**kwargs)
return _convert_optimize_result(self, opt)
class DualAnnealingMinimize(BaseMinimize):
def run(self, *args, **kwargs):
# Run minimization (always with normalized values)
norm_v0 = self.normalize(self.values)
bounds = self.normalize_bounds()
with self:
opt = dual_annealing(self._minimize_func,
x0=norm_v0, args=args, bounds=bounds,
**kwargs)
return _convert_optimize_result(self, opt)
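# Minimal usage sketch (hypothetical variable objects var_x/var_y and a toy
# metric): a concrete minimizer implements __call__ to return the metric for a
# set of variable values; registered variables are normalized, and run() drives
# the scipy optimizer through _minimize_func, which hashes and logs every
# sample:
#
#     class QuadraticMinimize(LocalMinimize):
#         def __call__(self, variables, *args):
#             x, y = variables
#             return (x - 1.0) ** 2 + (y + 2.0) ** 2
#
#     minim = QuadraticMinimize(variables=[var_x, var_y], out="minimize.dat")
#     result = minim.run(method="L-BFGS-B")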
class MinimizeToDispatcher(AbstractDispatch):
""" Base dispatcher from class passing from Minimize class """
@staticmethod
def _ensure_object(obj):
if isinstance(obj, type):
raise ValueError(f"Dispatcher on {obj} must not be called on the class.")
class MinimizeToskoptDispatcher(MinimizeToDispatcher):
def dispatch(self, *args, **kwargs):
import skopt
minim = self._obj
self._ensure_object(minim)
if len(args) > 0:
raise ValueError(f"{minim.__class__.__name__}.to.skopt only accepts keyword arguments")
# First create the Space variable
def skoptReal(v):
low, high = v.bounds
return skopt.space.Real(low, high, transform="identity", name=v.name)
space = skopt.Space(list(map(skoptReal, self.variables)))
# Extract sampled data-points
if "x" in kwargs:
Xi = kwargs.pop("x")
yi = kwargs.pop("y")
else:
Xi = np.array(self.data.x)
yi = np.array(self.data.y)
if "models" not in kwargs:
            import sklearn.svm
# We can't use categorial (SVC) since these are regression models
# fast, but should not be as accurate?
#model = sklearn.svm.LinearSVR()
# much slower, but more versatile
# I don't know which one is better ;)
model = sklearn.svm.SVR(cache_size=500)
#model = sklearn.svm.NuSVR(kernel="poly", cache_size=500)
# we need to fit to create auxiliary data
warnings.warn(f"Converting to skopt without a 'models' argument forces "
f"{minim.__class__.__name__} to train a model for the sampled data. "
f"This may be slow depending on the number of samples...")
model.fit(Xi, yi)
kwargs["models"] = [model]
result = skopt.utils.create_result(Xi, yi, space=space, **kwargs)
return result
BaseMinimize.to.register("skopt", MinimizeToskoptDispatcher)
|
zerothi/sisl
|
toolbox/siesta/minimizer/_minimize.py
|
Python
|
mpl-2.0
| 13,374
| 0.002094
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from testinfra.modules.base import InstanceModule
class Iptables(InstanceModule):
"""Test iptables rule exists"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# support for -w argument (since 1.6.0)
# https://git.netfilter.org/iptables/commit/?id=aaa4ace72b
# centos 6 has no support
# centos 7 has 1.4 patched
self._has_w_argument = None
def _iptables_command(self, version):
if version == 4:
iptables = "iptables"
elif version == 6:
iptables = "ip6tables"
else:
raise RuntimeError("Invalid version: %s" % version)
if self._has_w_argument is False:
return iptables
else:
return "{} -w 90".format(iptables)
def _run_iptables(self, version, cmd, *args):
ipt_cmd = "{} {}".format(self._iptables_command(version), cmd)
if self._has_w_argument is None:
result = self.run_expect([0, 2], ipt_cmd, *args)
if result.rc == 2:
self._has_w_argument = False
return self._run_iptables(version, cmd, *args)
else:
self._has_w_argument = True
return result.stdout.rstrip('\r\n')
else:
return self.check_output(ipt_cmd, *args)
def rules(self, table='filter', chain=None, version=4):
"""Returns list of iptables rules
        Based on the output of the `iptables -t TABLE -S CHAIN` command.
        Optionally takes the following arguments:
- table: defaults to `filter`
- chain: defaults to all chains
- version: default 4 (iptables), optionally 6 (ip6tables)
>>> host.iptables.rules()
[
'-P INPUT ACCEPT',
'-P FORWARD ACCEPT',
'-P OUTPUT ACCEPT',
'-A INPUT -i lo -j ACCEPT',
'-A INPUT -j REJECT'
'-A FORWARD -j REJECT'
]
>>> host.iptables.rules("nat", "INPUT")
['-P PREROUTING ACCEPT']
"""
cmd, args = "-t %s -S", [table]
if chain:
cmd += " %s"
args += [chain]
rules = []
for line in self._run_iptables(version, cmd, *args).splitlines():
line = line.replace("\t", " ")
rules.append(line)
return rules
|
philpep/testinfra
|
testinfra/modules/iptables.py
|
Python
|
apache-2.0
| 2,936
| 0
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create / interact with Google Cloud Storage blobs."""
import base64
import copy
import hashlib
from io import BytesIO
import json
import mimetypes
import os
import time
import httplib2
import six
from six.moves.urllib.parse import quote
from gcloud._helpers import _rfc3339_to_datetime
from gcloud._helpers import _to_bytes
from gcloud._helpers import _bytes_to_unicode
from gcloud.credentials import generate_signed_url
from gcloud.exceptions import NotFound
from gcloud.exceptions import make_exception
from gcloud.storage._helpers import _PropertyMixin
from gcloud.storage._helpers import _scalar_property
from gcloud.storage.acl import ObjectACL
from gcloud.streaming.http_wrapper import Request
from gcloud.streaming.http_wrapper import make_api_request
from gcloud.streaming.transfer import Download
from gcloud.streaming.transfer import RESUMABLE_UPLOAD
from gcloud.streaming.transfer import Upload
_API_ACCESS_ENDPOINT = 'https://storage.googleapis.com'
class Blob(_PropertyMixin):
"""A wrapper around Cloud Storage's concept of an ``Object``.
:type name: string
:param name: The name of the blob. This corresponds to the
unique path of the object in the bucket.
:type bucket: :class:`gcloud.storage.bucket.Bucket`
:param bucket: The bucket to which this blob belongs.
:type chunk_size: integer
:param chunk_size: The size of a chunk of data whenever iterating (1 MB).
This must be a multiple of 256 KB per the API
specification.
"""
_chunk_size = None # Default value for each instance.
_CHUNK_SIZE_MULTIPLE = 256 * 1024
"""Number (256 KB, in bytes) that must divide the chunk size."""
def __init__(self, name, bucket, chunk_size=None, generation=None):
super(Blob, self).__init__(name=name)
self.chunk_size = chunk_size # Check that setter accepts value.
self.bucket = bucket
self._acl = ObjectACL(self)
self.generation = generation
@property
def chunk_size(self):
"""Get the blob's default chunk size.
:rtype: integer or ``NoneType``
:returns: The current blob's chunk size, if it is set.
"""
return self._chunk_size
@chunk_size.setter
def chunk_size(self, value):
"""Set the blob's default chunk size.
:type value: integer or ``NoneType``
:param value: The current blob's chunk size, if it is set.
:raises: :class:`ValueError` if ``value`` is not ``None`` and is not a
multiple of 256 KB.
"""
if value is not None and value % self._CHUNK_SIZE_MULTIPLE != 0:
raise ValueError('Chunk size must be a multiple of %d.' % (
self._CHUNK_SIZE_MULTIPLE,))
self._chunk_size = value
@staticmethod
def path_helper(bucket_path, blob_name):
"""Relative URL path for a blob.
:type bucket_path: string
:param bucket_path: The URL path for a bucket.
:type blob_name: string
:param blob_name: The name of the blob.
:rtype: string
:returns: The relative URL path for ``blob_name``.
"""
return bucket_path + '/o/' + quote(blob_name, safe='')
@property
def acl(self):
"""Create our ACL on demand."""
return self._acl
def __repr__(self):
if self.bucket:
bucket_name = self.bucket.name
else:
bucket_name = None
return '<Blob: %s, %s>' % (bucket_name, self.name)
@property
def path(self):
"""Getter property for the URL path to this Blob.
:rtype: string
:returns: The URL path to this Blob.
"""
if not self.name:
raise ValueError('Cannot determine path without a blob name.')
return self.path_helper(self.bucket.path, self.name)
@property
def path_with_params(self):
"""Getter property for the URL path to this Blob, with version.
:rtype: tuple of ``path`` (a string) and ``params`` (a dictionary)
:returns: the URL path to this blob and a dictionary with the
generation that can be used in query_params for
connection.api_request
"""
params = {}
if self.generation is not None:
params = {'generation': self.generation}
return (self.path, params)
@property
def client(self):
"""The client bound to this blob."""
return self.bucket.client
@property
def public_url(self):
"""The public URL for this blob's object.
:rtype: `string`
:returns: The public URL for this blob.
"""
return '{storage_base_url}/{bucket_name}/{quoted_name}'.format(
storage_base_url='https://storage.googleapis.com',
bucket_name=self.bucket.name,
quoted_name=quote(self.name, safe=''))
def generate_signed_url(self, expiration, method='GET',
content_type=None,
generation=None, response_disposition=None,
response_type=None, client=None, credentials=None):
"""Generates a signed URL for this blob.
.. note::
If you are on Google Compute Engine, you can't generate a signed
URL. Follow `Issue 922`_ for updates on this. If you'd like to
be able to generate a signed URL from GCE, you can use a standard
service account from a JSON file rather than a GCE service account.
.. _Issue 922: https://github.com/GoogleCloudPlatform/\
gcloud-python/issues/922
If you have a blob that you want to allow access to for a set
amount of time, you can use this method to generate a URL that
is only valid within a certain time period.
This is particularly useful if you don't want publicly
accessible blobs, but don't want to require users to explicitly
log in.
:type expiration: int, long, datetime.datetime, datetime.timedelta
:param expiration: When the signed URL should expire.
:type method: str
:param method: The HTTP verb that will be used when requesting the URL.
:type content_type: str
:param content_type: (Optional) The content type of the object
referenced by ``resource``.
:type generation: str
:param generation: (Optional) A value that indicates which generation
of the resource to fetch.
:type response_disposition: str
:param response_disposition: (Optional) Content disposition of
responses to requests for the signed URL.
For example, to enable the signed URL
to initiate a download of ``blob.png``, use
the value
``'attachment; filename=blob.png'``.
:type response_type: str
:param response_type: (Optional) Content type of responses to requests
for the signed URL. Used to override the content
type of the underlying blob/object.
:type client: :class:`gcloud.storage.client.Client` or ``NoneType``
:param client: (Optional) The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
:type credentials: :class:`oauth2client.client.OAuth2Credentials` or
:class:`NoneType`
:param credentials: (Optional) The OAuth2 credentials to use to sign
the URL. Defaults to the credentials stored on the
client used.
:rtype: str
:returns: A signed URL you can use to access the resource
until expiration.
"""
resource = '/{bucket_name}/{quoted_name}'.format(
bucket_name=self.bucket.name,
quoted_name=quote(self.name, safe=''))
if credentials is None:
client = self._require_client(client)
credentials = client._connection.credentials
return generate_signed_url(
credentials, resource=resource,
api_access_endpoint=_API_ACCESS_ENDPOINT,
expiration=expiration, method=method,
content_type=content_type,
response_type=response_type,
response_disposition=response_disposition,
generation=generation)
def exists(self, client=None):
"""Determines whether or not this blob exists.
:type client: :class:`gcloud.storage.client.Client` or ``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
:rtype: boolean
:returns: True if the blob exists in Cloud Storage.
"""
client = self._require_client(client)
try:
# We only need the status code (200 or not) so we seek to
# minimize the returned payload.
query_params = {'fields': 'name'}
if self.generation is not None:
query_params['generation'] = self.generation
# We intentionally pass `_target_object=None` since fields=name
# would limit the local properties.
client.connection.api_request(method='GET', path=self.path,
query_params=query_params,
_target_object=None)
# NOTE: This will not fail immediately in a batch. However, when
# Batch.finish() is called, the resulting `NotFound` will be
# raised.
return True
except NotFound:
return False
def delete(self, client=None):
"""Deletes a blob from Cloud Storage.
:type client: :class:`gcloud.storage.client.Client` or ``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
:rtype: :class:`Blob`
:returns: The blob that was just deleted.
:raises: :class:`gcloud.exceptions.NotFound`
(propagated from
:meth:`gcloud.storage.bucket.Bucket.delete_blob`).
"""
return self.bucket.delete_blob(self.name, client=client,
generation=self.generation)
def download_to_file(self, file_obj, encryption_key=None, client=None):
"""Download the contents of this blob into a file-like object.
.. note::
If the server-set property, :attr:`media_link`, is not yet
initialized, makes an additional API request to load it.
Downloading a file that has been encrypted with a `customer-supplied`_
encryption key::
>>> from gcloud import storage
>>> from gcloud.storage import Blob
>>> client = storage.Client(project='my-project')
>>> bucket = client.get_bucket('my-bucket')
>>> encryption_key = 'aa426195405adee2c8081bb9e7e74b19'
>>> blob = Blob('secure-data', bucket)
>>> with open('/tmp/my-secure-file', 'wb') as file_obj:
...     blob.download_to_file(file_obj,
... encryption_key=encryption_key)
The ``encryption_key`` should be a str or bytes with a length of at
least 32.
.. _customer-supplied: https://cloud.google.com/storage/docs/\
encryption#customer-supplied
:type file_obj: file
:param file_obj: A file handle to which to write the blob's data.
:type encryption_key: str or bytes
:param encryption_key: Optional 32 byte encryption key for
customer-supplied encryption.
:type client: :class:`gcloud.storage.client.Client` or ``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
:raises: :class:`gcloud.exceptions.NotFound`
"""
client = self._require_client(client)
if self.media_link is None: # not yet loaded
self.reload()
download_url = self.media_link
# Use apitools 'Download' facility.
download = Download.from_stream(file_obj)
if self.chunk_size is not None:
download.chunksize = self.chunk_size
headers = {}
if encryption_key:
_set_encryption_headers(encryption_key, headers)
request = Request(download_url, 'GET', headers)
# Use the private ``_connection`` rather than the public
# ``.connection``, since the public connection may be a batch. A
# batch wraps a client's connection, but does not store the `http`
# object. The rest (API_BASE_URL and build_api_url) are also defined
# on the Batch class, but we just use the wrapped connection since
# it has all three (http, API_BASE_URL and build_api_url).
download.initialize_download(request, client._connection.http)
def download_to_filename(self, filename, encryption_key=None, client=None):
"""Download the contents of this blob into a named file.
:type filename: string
:param filename: A filename to be passed to ``open``.
:type encryption_key: str or bytes
:param encryption_key: Optional 32 byte encryption key for
customer-supplied encryption.
:type client: :class:`gcloud.storage.client.Client` or ``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
:raises: :class:`gcloud.exceptions.NotFound`
"""
with open(filename, 'wb') as file_obj:
self.download_to_file(file_obj, encryption_key=encryption_key,
client=client)
mtime = time.mktime(self.updated.timetuple())
os.utime(file_obj.name, (mtime, mtime))
def download_as_string(self, encryption_key=None, client=None):
"""Download the contents of this blob as a string.
:type encryption_key: str or bytes
:param encryption_key: Optional 32 byte encryption key for
customer-supplied encryption.
:type client: :class:`gcloud.storage.client.Client` or ``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
:rtype: bytes
:returns: The data stored in this blob.
:raises: :class:`gcloud.exceptions.NotFound`
"""
string_buffer = BytesIO()
self.download_to_file(string_buffer, encryption_key=encryption_key,
client=client)
return string_buffer.getvalue()
@staticmethod
def _check_response_error(request, http_response):
"""Helper for :meth:`upload_from_file`."""
info = http_response.info
status = int(info['status'])
if not 200 <= status < 300:
faux_response = httplib2.Response({'status': status})
raise make_exception(faux_response, http_response.content,
error_info=request.url)
# pylint: disable=too-many-locals
def upload_from_file(self, file_obj, rewind=False, size=None,
encryption_key=None, content_type=None, num_retries=6,
client=None):
"""Upload the contents of this blob from a file-like object.
The content type of the upload will either be
- The value passed in to the function (if any)
- The value stored on the current blob
- The default value of 'application/octet-stream'
.. note::
The effect of uploading to an existing blob depends on the
"versioning" and "lifecycle" policies defined on the blob's
bucket. In the absence of those policies, upload will
overwrite any existing contents.
See the `object versioning
<https://cloud.google.com/storage/docs/object-versioning>`_ and
`lifecycle <https://cloud.google.com/storage/docs/lifecycle>`_
API documents for details.
Uploading a file with a `customer-supplied`_ encryption key::
>>> from gcloud import storage
>>> from gcloud.storage import Blob
>>> client = storage.Client(project='my-project')
>>> bucket = client.get_bucket('my-bucket')
>>> encryption_key = 'aa426195405adee2c8081bb9e7e74b19'
>>> blob = Blob('secure-data', bucket)
>>> with open('my-file', 'rb') as my_file:
...     blob.upload_from_file(my_file,
... encryption_key=encryption_key)
The ``encryption_key`` should be a str or bytes with a length of at
least 32.
.. _customer-supplied: https://cloud.google.com/storage/docs/\
encryption#customer-supplied
:type file_obj: file
:param file_obj: A file handle open for reading.
:type rewind: boolean
:param rewind: If True, seek to the beginning of the file handle before
writing the file to Cloud Storage.
:type size: int
:param size: The number of bytes to read from the file handle.
If not provided, we'll try to guess the size using
:func:`os.fstat`. (If the file handle is not from the
filesystem this won't be possible.)
:type encryption_key: str or bytes
:param encryption_key: Optional 32 byte encryption key for
customer-supplied encryption.
:type content_type: string or ``NoneType``
:param content_type: Optional type of content being uploaded.
:type num_retries: integer
:param num_retries: Number of upload retries. Defaults to 6.
:type client: :class:`gcloud.storage.client.Client` or ``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
:raises: :class:`ValueError` if size is not passed in and can not be
determined; :class:`gcloud.exceptions.GCloudError` if the
upload response returns an error status.
"""
client = self._require_client(client)
# Use the private ``_connection`` rather than the public
# ``.connection``, since the public connection may be a batch. A
# batch wraps a client's connection, but does not store the `http`
# object. The rest (API_BASE_URL and build_api_url) are also defined
# on the Batch class, but we just use the wrapped connection since
# it has all three (http, API_BASE_URL and build_api_url).
connection = client._connection
content_type = (content_type or self._properties.get('contentType') or
'application/octet-stream')
# Rewind the file if desired.
if rewind:
file_obj.seek(0, os.SEEK_SET)
# Get the basic stats about the file.
total_bytes = size
if total_bytes is None:
if hasattr(file_obj, 'fileno'):
total_bytes = os.fstat(file_obj.fileno()).st_size
else:
raise ValueError('total bytes could not be determined. Please '
'pass an explicit size.')
headers = {
'Accept': 'application/json',
'Accept-Encoding': 'gzip, deflate',
'User-Agent': connection.USER_AGENT,
}
if encryption_key:
_set_encryption_headers(encryption_key, headers)
upload = Upload(file_obj, content_type, total_bytes,
auto_transfer=False)
if self.chunk_size is not None:
upload.chunksize = self.chunk_size
url_builder = _UrlBuilder(bucket_name=self.bucket.name,
object_name=self.name)
upload_config = _UploadConfig()
# Temporary URL, until we know simple vs. resumable.
base_url = connection.API_BASE_URL + '/upload'
upload_url = connection.build_api_url(api_base_url=base_url,
path=self.bucket.path + '/o')
# Use apitools 'Upload' facility.
request = Request(upload_url, 'POST', headers)
upload.configure_request(upload_config, request, url_builder)
query_params = url_builder.query_params
base_url = connection.API_BASE_URL + '/upload'
request.url = connection.build_api_url(api_base_url=base_url,
path=self.bucket.path + '/o',
query_params=query_params)
upload.initialize_upload(request, connection.http)
if upload.strategy == RESUMABLE_UPLOAD:
http_response = upload.stream_file(use_chunks=True)
else:
http_response = make_api_request(connection.http, request,
retries=num_retries)
self._check_response_error(request, http_response)
response_content = http_response.content
if not isinstance(response_content,
six.string_types): # pragma: NO COVER Python3
response_content = response_content.decode('utf-8')
self._set_properties(json.loads(response_content))
# pylint: enable=too-many-locals
def upload_from_filename(self, filename, content_type=None,
encryption_key=None, client=None):
"""Upload this blob's contents from the content of a named file.
The content type of the upload will either be
- The value passed in to the function (if any)
- The value stored on the current blob
- The value given by mimetypes.guess_type
.. note::
The effect of uploading to an existing blob depends on the
"versioning" and "lifecycle" policies defined on the blob's
bucket. In the absence of those policies, upload will
overwrite any existing contents.
See the `object versioning
<https://cloud.google.com/storage/docs/object-versioning>`_ and
`lifecycle <https://cloud.google.com/storage/docs/lifecycle>`_
API documents for details.
:type filename: string
:param filename: The path to the file.
:type content_type: string or ``NoneType``
:param content_type: Optional type of content being uploaded.
:type encryption_key: str or bytes
:param encryption_key: Optional 32 byte encryption key for
customer-supplied encryption.
:type client: :class:`gcloud.storage.client.Client` or ``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
"""
content_type = content_type or self._properties.get('contentType')
if content_type is None:
content_type, _ = mimetypes.guess_type(filename)
with open(filename, 'rb') as file_obj:
self.upload_from_file(file_obj, content_type=content_type,
encryption_key=encryption_key, client=client)
def upload_from_string(self, data, content_type='text/plain',
encryption_key=None, client=None):
"""Upload contents of this blob from the provided string.
.. note::
The effect of uploading to an existing blob depends on the
"versioning" and "lifecycle" policies defined on the blob's
bucket. In the absence of those policies, upload will
overwrite any existing contents.
See the `object versioning
<https://cloud.google.com/storage/docs/object-versioning>`_ and
`lifecycle <https://cloud.google.com/storage/docs/lifecycle>`_
API documents for details.
:type data: bytes or text
:param data: The data to store in this blob. If the value is
text, it will be encoded as UTF-8.
:type content_type: string
:param content_type: Optional type of content being uploaded. Defaults
to ``'text/plain'``.
:type encryption_key: str or bytes
:param encryption_key: Optional 32 byte encryption key for
customer-supplied encryption.
:type client: :class:`gcloud.storage.client.Client` or ``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
"""
if isinstance(data, six.text_type):
data = data.encode('utf-8')
string_buffer = BytesIO()
string_buffer.write(data)
self.upload_from_file(file_obj=string_buffer, rewind=True,
size=len(data), content_type=content_type,
encryption_key=encryption_key, client=client)
def make_public(self, client=None):
"""Make this blob public giving all users read access.
:type client: :class:`gcloud.storage.client.Client` or ``NoneType``
:param client: Optional. The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
"""
self.acl.all().grant_read()
self.acl.save(client=client)
cache_control = _scalar_property('cacheControl')
"""HTTP 'Cache-Control' header for this object.
See: https://tools.ietf.org/html/rfc7234#section-5.2 and
https://cloud.google.com/storage/docs/json_api/v1/objects
If the property is not set locally, returns ``None``.
:rtype: string or ``NoneType``
"""
content_disposition = _scalar_property('contentDisposition')
"""HTTP 'Content-Disposition' header for this object.
See: https://tools.ietf.org/html/rfc6266 and
https://cloud.google.com/storage/docs/json_api/v1/objects
If the property is not set locally, returns ``None``.
:rtype: string or ``NoneType``
"""
content_encoding = _scalar_property('contentEncoding')
"""HTTP 'Content-Encoding' header for this object.
See: https://tools.ietf.org/html/rfc7231#section-3.1.2.2 and
https://cloud.google.com/storage/docs/json_api/v1/objects
If the property is not set locally, returns ``None``.
:rtype: string or ``NoneType``
"""
content_language = _scalar_property('contentLanguage')
"""HTTP 'Content-Language' header for this object.
See: http://tools.ietf.org/html/bcp47 and
https://cloud.google.com/storage/docs/json_api/v1/objects
If the property is not set locally, returns ``None``.
:rtype: string or ``NoneType``
"""
content_type = _scalar_property('contentType')
"""HTTP 'Content-Type' header for this object.
See: https://tools.ietf.org/html/rfc2616#section-14.17 and
https://cloud.google.com/storage/docs/json_api/v1/objects
If the property is not set locally, returns ``None``.
:rtype: string or ``NoneType``
"""
crc32c = _scalar_property('crc32c')
"""CRC32C checksum for this object.
See: http://tools.ietf.org/html/rfc4960#appendix-B and
https://cloud.google.com/storage/docs/json_api/v1/objects
If the property is not set locally, returns ``None``.
:rtype: string or ``NoneType``
"""
@property
def component_count(self):
"""Number of underlying components that make up this object.
See: https://cloud.google.com/storage/docs/json_api/v1/objects
:rtype: integer or ``NoneType``
:returns: The component count (in case of a composed object) or
``None`` if the property is not set locally. This property
will not be set on objects not created via ``compose``.
"""
component_count = self._properties.get('componentCount')
if component_count is not None:
return int(component_count)
@property
def etag(self):
"""Retrieve the ETag for the object.
See: http://tools.ietf.org/html/rfc2616#section-3.11 and
https://cloud.google.com/storage/docs/json_api/v1/objects
:rtype: string or ``NoneType``
:returns: The blob etag or ``None`` if the property is not set locally.
"""
return self._properties.get('etag')
@property
def generation(self):
"""Retrieve the generation for the object.
See: https://cloud.google.com/storage/docs/json_api/v1/objects
:rtype: integer or ``NoneType``
:returns: The generation of the blob or ``None`` if the property
is not set locally.
"""
generation = self._properties.get('generation')
if generation is not None:
return int(generation)
@generation.setter
def generation(self, value):
"""Set the generation for this blob.
See: https://cloud.google.com/storage/docs/json_api/v1/objects
:type value: integer or ``NoneType``
:param value: the generation value for this blob. Setting this
value is useful when trying to retrieve specific
versions of a blob.
"""
self._patch_property('generation', value)
@property
def id(self):
"""Retrieve the ID for the object.
See: https://cloud.google.com/storage/docs/json_api/v1/objects
:rtype: string or ``NoneType``
:returns: The ID of the blob or ``None`` if the property is not
set locally.
"""
return self._properties.get('id')
md5_hash = _scalar_property('md5Hash')
"""MD5 hash for this object.
See: http://tools.ietf.org/html/rfc4960#appendix-B and
https://cloud.google.com/storage/docs/json_api/v1/objects
If the property is not set locally, returns ``None``.
:rtype: string or ``NoneType``
"""
@property
def media_link(self):
"""Retrieve the media download URI for the object.
See: https://cloud.google.com/storage/docs/json_api/v1/objects
:rtype: string or ``NoneType``
:returns: The media link for the blob or ``None`` if the property is
not set locally.
"""
return self._properties.get('mediaLink')
@property
def metadata(self):
"""Retrieve arbitrary/application specific metadata for the object.
See: https://cloud.google.com/storage/docs/json_api/v1/objects
:rtype: dict or ``NoneType``
:returns: The metadata associated with the blob or ``None`` if the
property is not set locally.
"""
return copy.deepcopy(self._properties.get('metadata'))
@metadata.setter
def metadata(self, value):
"""Update arbitrary/application specific metadata for the object.
See: https://cloud.google.com/storage/docs/json_api/v1/objects
:type value: dict or ``NoneType``
:param value: The blob metadata to set.
"""
self._patch_property('metadata', value)
@property
def metageneration(self):
"""Retrieve the metageneration for the object.
See: https://cloud.google.com/storage/docs/json_api/v1/objects
:rtype: integer or ``NoneType``
:returns: The metageneration of the blob or ``None`` if the property
is not set locally.
"""
metageneration = self._properties.get('metageneration')
if metageneration is not None:
return int(metageneration)
@property
def owner(self):
"""Retrieve info about the owner of the object.
See: https://cloud.google.com/storage/docs/json_api/v1/objects
:rtype: dict or ``NoneType``
:returns: Mapping of owner's role/ID. If the property is not set
locally, returns ``None``.
"""
return copy.deepcopy(self._properties.get('owner'))
@property
def self_link(self):
"""Retrieve the URI for the object.
See: https://cloud.google.com/storage/docs/json_api/v1/objects
:rtype: string or ``NoneType``
:returns: The self link for the blob or ``None`` if the property is
not set locally.
"""
return self._properties.get('selfLink')
@property
def size(self):
"""Size of the object, in bytes.
See: https://cloud.google.com/storage/docs/json_api/v1/objects
:rtype: integer or ``NoneType``
:returns: The size of the blob or ``None`` if the property
is not set locally.
"""
size = self._properties.get('size')
if size is not None:
return int(size)
@property
def storage_class(self):
"""Retrieve the storage class for the object.
See: https://cloud.google.com/storage/docs/storage-classes
https://cloud.google.com/storage/docs/nearline-storage
https://cloud.google.com/storage/docs/durable-reduced-availability
:rtype: string or ``NoneType``
:returns: If set, one of "STANDARD", "NEARLINE", or
"DURABLE_REDUCED_AVAILABILITY", else ``None``.
"""
return self._properties.get('storageClass')
@property
def time_deleted(self):
"""Retrieve the timestamp at which the object was deleted.
See: https://cloud.google.com/storage/docs/json_api/v1/objects
:rtype: :class:`datetime.datetime` or ``NoneType``
:returns: Datetime object parsed from RFC3339 valid timestamp, or
``None`` if the property is not set locally. If the blob has
not been deleted, this will never be set.
"""
value = self._properties.get('timeDeleted')
if value is not None:
return _rfc3339_to_datetime(value)
@property
def updated(self):
"""Retrieve the timestamp at which the object was updated.
See: https://cloud.google.com/storage/docs/json_api/v1/objects
:rtype: :class:`datetime.datetime` or ``NoneType``
:returns: Datetime object parsed from RFC3339 valid timestamp, or
``None`` if the property is not set locally.
"""
value = self._properties.get('updated')
if value is not None:
return _rfc3339_to_datetime(value)
class _UploadConfig(object):
"""Faux message FBO apitools' 'configure_request'.
Values extracted from apitools
'samples/storage_sample/storage/storage_v1_client.py'
"""
accept = ['*/*']
max_size = None
resumable_multipart = True
resumable_path = u'/resumable/upload/storage/v1/b/{bucket}/o'
simple_multipart = True
simple_path = u'/upload/storage/v1/b/{bucket}/o'
class _UrlBuilder(object):
"""Faux builder FBO apitools' 'configure_request'"""
def __init__(self, bucket_name, object_name):
self.query_params = {'name': object_name}
self._bucket_name = bucket_name
self._relative_path = ''
def _set_encryption_headers(key, headers):
"""Builds customer encyrption key headers
:type key: str or bytes
:param key: 32 byte key to build request key and hash.
:type headers: dict
:param headers: dict of HTTP headers being sent in request.
"""
key = _to_bytes(key)
sha256_key = hashlib.sha256(key).digest()
key_hash = base64.b64encode(sha256_key).rstrip()
encoded_key = base64.b64encode(key).rstrip()
headers['X-Goog-Encryption-Algorithm'] = 'AES256'
headers['X-Goog-Encryption-Key'] = _bytes_to_unicode(encoded_key)
headers['X-Goog-Encryption-Key-Sha256'] = _bytes_to_unicode(key_hash)
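# Illustrative usage sketch (an assumption, not part of the library): shows the
# headers produced for a hypothetical 32-character customer-supplied key.
# Runs only when the module is executed directly.
if __name__ == '__main__':
    _demo_headers = {}
    _set_encryption_headers('0123456789abcdef0123456789abcdef', _demo_headers)
    for _header_name in sorted(_demo_headers):
        print('%s: %s' % (_header_name, _demo_headers[_header_name]))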
|
VitalLabs/gcloud-python
|
gcloud/storage/blob.py
|
Python
|
apache-2.0
| 37,138
| 0
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Mario Lassnig, mario.lassnig@cern.ch, 2016-2017
# - Daniel Drizhuk, d.drizhuk@gmail.com, 2017
import argparse
import logging
import sys
import threading
from pilot.util.constants import SUCCESS, FAILURE, ERRNO_NOJOBS
from pilot.util.https import https_setup
from pilot.util.information import set_location
VERSION = '2017-04-04.001'
def main():
logger = logging.getLogger(__name__)
logger.info('pilot startup - version %s' % VERSION)
args.graceful_stop = threading.Event()
https_setup(args, VERSION)
if not set_location(args):
return False
logger.info('workflow: %s' % args.workflow)
workflow = __import__('pilot.workflow.%s' % args.workflow, globals(), locals(), [args.workflow], -1)
return workflow.run(args)
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-d',
dest='debug',
action='store_true',
default=False,
help='enable debug logging messages')
# the choices must match in name the python module in pilot/workflow/
arg_parser.add_argument('-w',
dest='workflow',
default='generic',
choices=['generic', 'generic_hpc',
'production', 'production_hpc',
'analysis', 'analysis_hpc',
'eventservice', 'eventservice_hpc'],
help='pilot workflow (default: generic)')
# gracefully stop pilot process after hard limit
arg_parser.add_argument('-l',
dest='lifetime',
default=10,
type=int,
help='pilot lifetime seconds (default: 10)')
# set the appropriate site and queue
arg_parser.add_argument('-q',
dest='queue',
required=True,
help='MANDATORY: queue name (e.g., AGLT2_TEST-condor)')
# set the job prod/source label
arg_parser.add_argument('-j',
dest='job_label',
default='mtest',
help='job prod/source label (default: mtest)')
# SSL certificates
arg_parser.add_argument('--cacert',
dest='cacert',
default=None,
help='CA certificate to use with HTTPS calls to server, commonly X509 proxy',
metavar='path/to/your/certificate')
arg_parser.add_argument('--capath',
dest='capath',
default=None,
help='CA certificates path',
metavar='path/to/certificates/')
args = arg_parser.parse_args()
console = logging.StreamHandler(sys.stdout)
if args.debug:
logging.basicConfig(filename='pilotlog.txt', level=logging.DEBUG,
format='%(asctime)s | %(levelname)-8s | %(threadName)-10s | %(name)-32s | %(funcName)-32s | %(message)s')
console.setLevel(logging.DEBUG)
console.setFormatter(logging.Formatter('%(asctime)s | %(levelname)-8s | %(threadName)-10s | %(name)-32s | %(funcName)-32s | %(message)s'))
else:
logging.basicConfig(filename='pilotlog.txt', level=logging.INFO,
format='%(asctime)s | %(levelname)-8s | %(message)s')
console.setLevel(logging.INFO)
console.setFormatter(logging.Formatter('%(asctime)s | %(levelname)-8s | %(message)s'))
logging.getLogger('').addHandler(console)
trace = main()
logging.shutdown()
if not trace:
logging.getLogger(__name__).critical('pilot startup did not succeed -- aborting')
sys.exit(FAILURE)
elif trace.pilot['nr_jobs'] > 0:
sys.exit(SUCCESS)
else:
sys.exit(ERRNO_NOJOBS)
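# Example invocation (illustrative only; the queue name is just the example
# from the -q help text and the certificate path is hypothetical):
#     python pilot.py -q AGLT2_TEST-condor -w generic -d --cacert /path/to/x509_proxy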
|
TWAtGH/pilot2
|
pilot.py
|
Python
|
apache-2.0
| 4,338
| 0.001844
|
"""Collection of helpers for online deployment."""
|
arugifa/website
|
website/deployment/__init__.py
|
Python
|
gpl-3.0
| 51
| 0
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..brainsresample import BRAINSResample
def test_BRAINSResample_inputs():
input_map = dict(args=dict(argstr='%s',
),
defaultValue=dict(argstr='--defaultValue %f',
),
deformationVolume=dict(argstr='--deformationVolume %s',
),
environ=dict(nohash=True,
usedefault=True,
),
gridSpacing=dict(argstr='--gridSpacing %s',
sep=',',
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
inputVolume=dict(argstr='--inputVolume %s',
),
interpolationMode=dict(argstr='--interpolationMode %s',
),
inverseTransform=dict(argstr='--inverseTransform ',
),
numberOfThreads=dict(argstr='--numberOfThreads %d',
),
outputVolume=dict(argstr='--outputVolume %s',
hash_files=False,
),
pixelType=dict(argstr='--pixelType %s',
),
referenceVolume=dict(argstr='--referenceVolume %s',
),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
warpTransform=dict(argstr='--warpTransform %s',
),
)
inputs = BRAINSResample.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_BRAINSResample_outputs():
output_map = dict(outputVolume=dict(),
)
outputs = BRAINSResample.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
mick-d/nipype
|
nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py
|
Python
|
bsd-3-clause
| 1,633
| 0.021433
|
import json
from collections import namedtuple
import fauxfactory
import pytest
from riggerlib import recursive_update
from widgetastic.utils import partial_match
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.fixtures.provider import setup_or_skip
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.version import Version
from cfme.utils.version import VersionPicker
from cfme.v2v.infrastructure_mapping import InfrastructureMapping as InfraMapping
FormDataVmObj = namedtuple("FormDataVmObj", ["infra_mapping_data", "vm_list"])
V2vProviders = namedtuple("V2vProviders", ["vmware_provider", "rhv_provider", "osp_provider"])
@pytest.fixture(scope="module")
def v2v_provider_setup(request, appliance, source_provider, provider):
""" Fixture to setup providers """
vmware_provider, rhv_provider, osp_provider = None, None, None
for v2v_provider in [source_provider, provider]:
if v2v_provider.one_of(VMwareProvider):
vmware_provider = v2v_provider
setup_or_skip(request, vmware_provider)
elif v2v_provider.one_of(RHEVMProvider):
rhv_provider = v2v_provider
setup_or_skip(request, rhv_provider)
elif v2v_provider.one_of(OpenStackProvider):
osp_provider = v2v_provider
setup_or_skip(request, osp_provider)
else:
pytest.skip("Provider {} is not a valid provider for v2v tests".format(provider.name))
v2v_providers = V2vProviders(vmware_provider=vmware_provider,
rhv_provider=rhv_provider,
osp_provider=osp_provider)
# Transformation method can be vddk or ssh
if hasattr(request, "param") and request.param == "SSH":
transformation_method = "SSH"
else:
transformation_method = "VDDK"
# set host credentials for Vmware and RHEV hosts
host_credentials(appliance, transformation_method, v2v_providers)
yield v2v_providers
for v2v_provider in v2v_providers:
if v2v_provider is not None:
v2v_provider.delete_if_exists(cancel=False)
def host_credentials(appliance, transformation_method, v2v_providers):
""" Sets up host credentials for vmware and rhv providers
for RHEV migration.
For migration with OSP, only the vmware (source) provider's
host credentials need to be added.
These credentials are automatically removed once the
provider is deleted in clean up.
Args:
appliance
transformation_method : vddk or ssh to be used in configuring conversion host
v2v_providers: vmware (and rhev, in case of RHV migration); osp is not needed.
"""
provider_list = [v2v_providers.vmware_provider]
rhv_hosts = None
if v2v_providers.rhv_provider is not None:
rhv_hosts = v2v_providers.rhv_provider.hosts.all()
provider_list.append(v2v_providers.rhv_provider)
try:
for v2v_provider in provider_list:
hosts = v2v_provider.hosts.all()
for host in hosts:
host_data = [data for data in v2v_provider.data['hosts']
if data['name'] == host.name]
if not host_data:
pytest.skip("No host data")
host.update_credentials_rest(credentials=host_data[0]['credentials'])
except Exception:
logger.exception("Exception when trying to add the host credentials.")
pytest.skip("No data for hosts in providers, failed to retrieve hosts and add creds.")
# Configure conversion host for RHEV migration
if rhv_hosts is not None:
set_conversion_instance_for_rhev(appliance, transformation_method, rhv_hosts)
if v2v_providers.osp_provider is not None:
set_conversion_instance_for_osp(appliance, v2v_providers.osp_provider,
transformation_method)
def _tag_cleanup(host_obj, tag1, tag2):
"""
Clean Up Tags
Returns: True if all tags were removed/cleaned, or False if all
required tags are already present on the host.
"""
def extract_tag(tag):
# Following strip will remove extra asterisk from tag assignment
return tag.category.display_name.strip(" *"), tag.display_name
valid_tags = {extract_tag(tag1), extract_tag(tag2)}
tags = host_obj.get_tags()
tags_set = set(map(extract_tag, tags))
# We always need 2 tags for migration. If the total is less than 2,
# don't bother checking which tag it was, just remove it and then add
# all required tags via the add_tags() call. If the tags on the host are
# not a subset of the valid tags, we remove them as well.
if len(tags_set) < 2 or not tags_set.issubset(valid_tags):
host_obj.remove_tags(tags=tags)
return True
return False
def create_tags(appliance, transformation_method):
"""
Create tags V2V - Transformation Host * and V2V - Transformation Method
Args:
appliance:
transformation_method: VDDK/SSH
"""
# t is for True in V2V - Transformation Host * tag
tag1 = appliance.collections.categories.instantiate(
display_name="V2V - Transformation Host *"
).collections.tags.instantiate(display_name="t")
tag2 = appliance.collections.categories.instantiate(
display_name="V2V - Transformation Method"
).collections.tags.instantiate(display_name=transformation_method)
return tag1, tag2
def set_conversion_instance_for_rhev(appliance, transformation_method, rhev_hosts):
"""Assigning tags to conversion host.
In 5.10 rails console commands are run to configure all the rhev hosts.
Args:
appliance:
transformation_method : vddk or ssh as per test requirement
rhev_hosts: hosts in rhev to configure for conversion
"""
for host in rhev_hosts:
# set conversion host via rails console
# Delete all prior conversion hosts otherwise it creates duplicate entries
delete_hosts = appliance.ssh_client.run_rails_command("'ConversionHost.delete_all'")
if not delete_hosts.success:
pytest.skip("Failed to delete all conversion hosts:".format(delete_hosts.output))
set_conv_host = appliance.ssh_client.run_rails_command(
"'r = Host.find_by(name:{host});\
c_host = ConversionHost.create(name:{host},resource:r);\
c_host.{method}_transport_supported = true;\
c_host.save'".format(host=json.dumps(host.name),
method=transformation_method.lower())
)
if not set_conv_host.success:
pytest.skip("Failed to set conversion hosts:".format(set_conv_host.output))
def set_conversion_instance_for_osp(appliance, osp_provider, transformation_method='vddk'):
"""
Rails console command
====================
res = Vm.find_by(name: 'my_osp_instance')
conversion_host = ConversionHost.create(name: res.name, resource: res)
conversion_host.vddk_transport_supported = true
conversion_host.save
Args:
appliance
transformation_method: vddk or ssh
osp_provider: OSP
"""
# Delete all prior conversion hosts otherwise it creates duplicate entries
delete_hosts = appliance.ssh_client.run_rails_command("'ConversionHost.delete_all'")
if not delete_hosts.success:
pytest.skip("Failed to delete all conversion hosts:".format(delete_hosts.output))
# transformation method needs to be lower case always
trans_method = transformation_method.lower()
try:
conversion_instances = osp_provider.data['conversion_instances'][trans_method]
except KeyError:
pytest.skip("No conversion instance on provider.")
for instance in conversion_instances:
set_conv_host = appliance.ssh_client.run_rails_command(
"'r = Vm.find_by(name:{vm});\
c_host = ConversionHost.create(name:r.name, resource: r);\
c_host.{method}_transport_supported = true;\
c_host.save'".format(
vm=json.dumps(instance),
method=transformation_method.lower(),
)
)
if not set_conv_host.success:
pytest.skip("Failed to set conversion hosts:".format(set_conv_host.output))
def get_vm(request, appliance, source_provider, template, datastore='nfs'):
""" Helper method that takes template , source provider and datastore
and creates VM on source provider to migrate .
Args:
request
appliance:
source_provider: Provider on which vm is created
template: Template used for creating VM
datastore: datastore in which VM is created. If no datastore
is provided then by default VM is created on nfs datastore
returns: Vm object
"""
source_datastores_list = source_provider.data.get("datastores", [])
source_datastore = [d.name for d in source_datastores_list if d.type == datastore][0]
collection = source_provider.appliance.provider_based_collection(source_provider)
vm_name = random_vm_name("v2v-auto")
vm_obj = collection.instantiate(
vm_name, source_provider, template_name=template(source_provider)["name"]
)
power_on_vm = True
if template.__name__ == "win10_template":
# Need to leave this off, otherwise migration fails: when the migration
# process tries to power off a running win10 VM, the guest hibernates
# instead, and that filesystem state is unsupported.
power_on_vm = False
vm_obj.create_on_provider(
timeout=2400,
find_in_cfme=True,
allow_skip="default",
datastore=source_datastore,
power_on=power_on_vm,
)
request.addfinalizer(lambda: vm_obj.cleanup_on_provider())
return vm_obj
def get_data(provider, component, default_value):
try:
data = (provider.data.get(component, [])[0])
except IndexError:
data = default_value
return data
def infra_mapping_default_data(source_provider, provider):
"""
Default data for infrastructure mapping form.
It is used in other methods to recursive update the data according
to parameters in tests.
Args:
source_provider: Vmware provider
provider: Target rhev/OSP provider
"""
plan_type = VersionPicker({Version.lowest(): None,
"5.10": "rhv" if provider.one_of(RHEVMProvider) else "osp"}).pick()
infra_mapping_data = {
"name": "infra_map_{}".format(fauxfactory.gen_alphanumeric()),
"description": "Single Datastore migration of VM from {ds_type1} to {ds_type2}".format(
ds_type1="nfs", ds_type2="nfs"
),
"plan_type": plan_type,
"clusters": [component_generator("clusters", source_provider, provider)],
"datastores": [component_generator(
"datastores", source_provider, provider,
get_data(source_provider, "datastores", "nfs").type,
get_data(provider, "datastores", "nfs").type)],
"networks": [
component_generator("vlans", source_provider, provider,
get_data(source_provider, "vlans", "VM Network"),
get_data(provider, "vlans", "ovirtmgmt"))
],
}
return infra_mapping_data
@pytest.fixture(scope="function")
def mapping_data_multiple_vm_obj_single_datastore(request, appliance, source_provider, provider):
# This fixture takes a list of N VM templates via request and calls get_vm for each.
cluster = provider.data.get("clusters", [False])[0]
if not cluster:
pytest.skip("No data for cluster available on provider.")
infra_mapping_data = infra_mapping_default_data(source_provider, provider)
recursive_update(
infra_mapping_data,
{
"description": "Single Datastore migration of VM from {ds_type1} to {ds_type2},".format(
ds_type1=request.param[0], ds_type2=request.param[1]
),
"networks": [
component_generator("vlans", source_provider, provider, "VM Network", "ovirtmgmt")
],
},
)
vm_list = []
for template_name in request.param[2]:
vm_list.append(get_vm(request, appliance, source_provider, template_name))
return FormDataVmObj(infra_mapping_data=infra_mapping_data, vm_list=vm_list)
@pytest.fixture(scope="function")
def mapping_data_single_datastore(request, source_provider, provider):
infra_mapping_data = infra_mapping_default_data(source_provider, provider)
recursive_update(
infra_mapping_data,
{
"description": "Single Datastore migration of VM from {ds_type1} to {ds_type2},".format(
ds_type1=request.param[0], ds_type2=request.param[1]
),
"datastores": [
component_generator(
"datastores", source_provider, provider, request.param[0], request.param[1]
)
],
},
)
return infra_mapping_data
@pytest.fixture(scope="function")
def mapping_data_single_network(request, source_provider, provider):
infra_mapping_data = infra_mapping_default_data(source_provider, provider)
recursive_update(
infra_mapping_data,
{
"description": "Single Network migration of VM from {vlan1} to {vlan2},".format(
vlan1=request.param[0], vlan2=request.param[1]
),
"networks": [
component_generator(
"vlans", source_provider, provider, request.param[0], request.param[1]
)
],
},
)
return infra_mapping_data
@pytest.fixture(scope="function")
def edited_mapping_data(request, source_provider, provider):
infra_mapping_data = infra_mapping_default_data(source_provider, provider)
edited_form_data = {
"description": "my edited description",
"clusters": {},
"datastores": {},
"networks": [
component_generator(
"vlans", source_provider, provider, request.param[1][0], request.param[1][1]
)
],
}
return infra_mapping_data, edited_form_data
@pytest.fixture(scope="function")
def mapping_data_dual_vm_obj_dual_datastore(request, appliance, source_provider, provider):
vmware_nw = source_provider.data.get("vlans", [None])[0]
rhvm_nw = provider.data.get("vlans", [None])[0]
cluster = provider.data.get("clusters", [False])[0]
if not vmware_nw or not rhvm_nw or not cluster:
pytest.skip("No data for source or target network in providers.")
infra_mapping_data = infra_mapping_default_data(source_provider, provider)
recursive_update(
infra_mapping_data,
{
"description": "Dual DS migration of VM from {dss1} to {dst1},& from {dss2} to {dst2}".
format(dss1=request.param[0][0],
dst1=request.param[0][1],
dss2=request.param[1][0],
dst2=request.param[1][1]),
"datastores": [
component_generator(
"datastores",
source_provider,
provider,
request.param[0][0],
request.param[0][1],
),
component_generator(
"datastores",
source_provider,
provider,
request.param[1][0],
request.param[1][1],
),
],
"networks": [
component_generator(
"vlans",
source_provider,
provider,
source_provider.data.get("vlans")[0],
provider.data.get("vlans")[0],
)
],
},
)
# creating 2 VMs on two different datastores and returning its object list
vm_obj1 = get_vm(request, appliance, source_provider, request.param[0][2], request.param[0][0])
vm_obj2 = get_vm(request, appliance, source_provider, request.param[1][2], request.param[1][0])
return FormDataVmObj(infra_mapping_data=infra_mapping_data, vm_list=[vm_obj1, vm_obj2])
@pytest.fixture(scope="function")
def mapping_data_vm_obj_dual_nics(request, appliance, source_provider, provider):
vmware_nw = source_provider.data.get("vlans", [None])[0]
rhvm_nw = provider.data.get("vlans", [None])[0]
cluster = provider.data.get("clusters", [False])[0]
if not vmware_nw or not rhvm_nw or not cluster:
pytest.skip("No data for source or target network in providers.")
infra_mapping_data = infra_mapping_default_data(source_provider, provider)
recursive_update(
infra_mapping_data,
{
"description": "Dual DS migration of VM from {dss1} to {dst1},& from {dss2} to {dst2}".
format(dss1=request.param[0][0],
dst1=request.param[0][1],
dss2=request.param[1][0],
dst2=request.param[1][1]),
"networks": [
component_generator(
"vlans", source_provider, provider, request.param[0][0], request.param[0][1]
),
component_generator(
"vlans", source_provider, provider, request.param[1][0], request.param[1][1]
),
],
},
)
vm_obj = get_vm(request, appliance, source_provider, request.param[2])
return FormDataVmObj(infra_mapping_data=infra_mapping_data, vm_list=[vm_obj])
@pytest.fixture(scope="function")
def mapping_data_vm_obj_single_datastore(request, appliance, source_provider, provider):
"""Return Infra Mapping form data and vm object"""
infra_mapping_data = infra_mapping_default_data(source_provider, provider)
recursive_update(
infra_mapping_data,
{
"description": "Single DS migration of VM from {ds_type1} to {ds_type2},".format(
ds_type1=request.param[0], ds_type2=request.param[1]
),
"datastores": [
component_generator(
"datastores", source_provider, provider, request.param[0], request.param[1]
)
],
},
)
vm_obj = get_vm(request, appliance, source_provider, request.param[2], request.param[0])
return FormDataVmObj(infra_mapping_data=infra_mapping_data, vm_list=[vm_obj])
@pytest.fixture(scope="function")
def mapping_data_vm_obj_single_network(request, appliance, source_provider, provider):
infra_mapping_data = infra_mapping_default_data(source_provider, provider)
recursive_update(
infra_mapping_data,
{
"description": "Single Network migration of VM from {vlan1} to {vlan2},".format(
vlan1=request.param[0], vlan2=request.param[1]
),
"networks": [
component_generator(
"vlans", source_provider, provider, request.param[0], request.param[1]
)
],
},
)
vm_obj = get_vm(request, appliance, source_provider, request.param[2])
return FormDataVmObj(infra_mapping_data=infra_mapping_data, vm_list=[vm_obj])
def component_generator(selector, source_provider, provider, source_type=None, target_type=None):
"""
Component generator method to generate a dict of source and target
components(clusters/datastores/networks).
Gets the provider data based on selector from cfme_data.yaml and creates
InfraMapping.component(source_list, target_list) object
Test is skipped if no source or target data is found
Args:
selector: can be clusters/datastores/vlans
source_provider: vmware provider to migrate from
provider: rhev or osp provider or target provider to migrate to
source_type: string source datastores/networks on vmware provider to migrate from.
Ex: if source_type is "iscsi". Provider data is checked for datastore with type
iscsi and that datastore's name is used.
target_type: string target datastores/networks to migrate to
returns : InfraMapping.component(source_list, target_list) object
"""
if selector not in ['clusters', 'datastores', 'vlans']:
raise ValueError("Please specify cluster, datastore or network(vlans) selector!")
source_data = source_provider.data.get(selector, [])
target_data = provider.data.get(selector, [])
if not (source_data and target_data):
pytest.skip("No source and target data")
if selector == "clusters":
sources = source_data or None
targets = target_data or None
component = InfraMapping.ClusterComponent(
[partial_match(sources[0])], [partial_match(targets[0])]
)
elif selector == "datastores":
# Ignoring target_type for osp and setting new value
if provider.one_of(OpenStackProvider):
target_type = "volume"
sources = [d.name for d in source_data if d.type == source_type]
targets = [d.name for d in target_data if d.type == target_type]
component = InfraMapping.DatastoreComponent(
[partial_match(sources[0])], [partial_match(targets[0])]
)
else:
sources = [v for v in source_data if v == source_type]
targets = [v for v in target_data if v == target_type]
component = InfraMapping.NetworkComponent(
[partial_match(sources[0])], [partial_match(targets[0])]
)
skip_test = not (sources and targets and component)
if skip_test:
pytest.skip("No data for source or target {} in providers.".format(selector))
return component
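# Illustrative call (hedged; the provider objects and datastore types below are
# hypothetical): component_generator('datastores', vmware_provider, rhv_provider,
# 'nfs', 'nfs') would map the first nfs datastore listed for the source provider
# in cfme_data to the first nfs datastore of the target and return an
# InfraMapping.DatastoreComponent, or skip the test if either side has no match.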
|
RedHatQE/cfme_tests
|
cfme/fixtures/v2v_fixtures.py
|
Python
|
gpl-2.0
| 22,048
| 0.002812
|
import process_common as pc
import process_operations as po
import module_dialogs
import module_info
from header_dialogs import *
start_states = []
end_states = []
def compile_dialog_states(processor, dialog_file):
global start_states
global end_states
unique_state_list = ["start", "party_encounter", "prisoner_liberated", "enemy_defeated", "party_relieved",
"event_triggered", "close_window", "trade", "exchange_members", "trade_prisoners", "buy_mercenaries",
"view_char", "training", "member_chat", "prisoner_chat"]
unique_state_usages = [1 for i in unique_state_list]
unique_states = dict((k, i) for i, k in enumerate(unique_state_list))
last_index = len(unique_state_list)
for entry in module_dialogs.dialogs:
end_state = entry[5]
index = unique_states.setdefault(end_state, last_index)
if index == last_index:
last_index += 1
unique_state_list.append(end_state)
unique_state_usages.append(0)
end_states.append(index)
for entry in module_dialogs.dialogs:
start_state = entry[2]
try:
index = unique_states[start_state]
unique_state_usages[index] += 1
start_states.append(index)
except KeyError:
pc.ERROR("starting dialog state '%s' has no matching ending state" % start_state)
for state, usages in zip(unique_state_list, unique_state_usages):
if not usages:
pc.ERROR("ending dialog state '%s' is not used" % state)
with open(module_info.export_path("dialog_states.txt"), "wb") as state_file:
state_file.write("".join("%s\r\n" % e for e in unique_state_list))
dialog_names = {}
def get_dialog_name(start_state, end_state, text):
global dialog_names
name = "dlga_%s:%s" % (pc.convert_to_identifier(start_state), pc.convert_to_identifier(end_state))
text_list = dialog_names.setdefault(name, [])
for i, existing_text in enumerate(text_list):
if text == existing_text:
name = "%s.%d" % (name, i + 1)
break
else:
text_list.append(text)
return name
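# Illustrative note (hedged; the exact identifier depends on
# pc.convert_to_identifier): the first dialog entry from state "start" to state
# "close_window" would typically be named "dlga_start:close_window", and a later
# entry with the same states but different text would get a ".1", ".2", ... suffix.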
def process_entry(processor, txt_file, entry, index):
name = get_dialog_name(entry[start_state_pos], entry[end_state_pos], entry[text_pos])
trp_pt = entry[speaker_pos]
flags = entry[flags_pos]
speaker = 0
if flags & other:
speaker = processor.process_id(trp_pt[1], "trp") << other_bits
flags ^= other
trp_pt = trp_pt[0]
if flags & party_tpl:
speaker |= processor.process_id(trp_pt, "pt")
else:
speaker |= processor.process_id(trp_pt, "trp")
speaker |= flags
output_list = ["%s %d %d " % (name, speaker, start_states[index])]
output_list.extend(processor.process_block(entry[conditions_pos], "%s conditions" % name))
output_list.append("%s " % pc.replace_spaces(entry[text_pos]) if entry[text_pos] else "NO_TEXT ")
output_list.append(" %d " % end_states[index])
output_list.extend(processor.process_block(entry[consequences_pos], "%s consequences" % name))
output_list.append("%s " % entry[voice_pos] if len(entry) > voice_pos else "NO_VOICEOVER ")
output_list.append("\r\n")
txt_file.write("".join(output_list))
export = po.make_export(data=module_dialogs.dialogs, data_name="dialogs", file_name="conversation",
header_format="dialogsfile version 2\r\n%d\r\n", process_entry=process_entry, process_list=compile_dialog_states)
|
RaJiska/Warband-PW-Punishments-Manager
|
scripts/process_dialogs.py
|
Python
|
gpl-3.0
| 3,283
| 0.018581
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import apache_beam as beam
import logging
from typing import Dict, Any, List
from uploaders.google_ads.customer_match.abstract_uploader import GoogleAdsCustomerMatchAbstractUploaderDoFn
from uploaders import utils
from models.execution import DestinationType, AccountConfig
class GoogleAdsCustomerMatchContactInfoUploaderDoFn(GoogleAdsCustomerMatchAbstractUploaderDoFn):
def get_list_definition(self, account_config: AccountConfig, destination_metadata: List[str]) -> Dict[str, Any]:
list_name = destination_metadata[0]
return {
'membership_status': 'OPEN',
'name': list_name,
'description': 'List created automatically by Megalista',
'membership_life_span': 10000,
'crm_based_user_list': {
'upload_key_type': 'CONTACT_INFO', #CONTACT_INFO, CRM_ID, MOBILE_ADVERTISING_ID
'data_source_type': 'FIRST_PARTY',
}
}
def get_row_keys(self) -> List[str]:
return ['hashed_email', 'address_info', 'hashed_phone_number']
def get_action_type(self) -> DestinationType:
return DestinationType.ADS_CUSTOMER_MATCH_CONTACT_INFO_UPLOAD
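# Illustrative note (not from the source): with destination_metadata of
# ['my_audience'] (a hypothetical list name), get_list_definition() above yields
# an OPEN, first-party, CRM-based user list keyed by CONTACT_INFO, matching the
# hashed_email / address_info / hashed_phone_number row keys from get_row_keys().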
|
google/megalista
|
megalista_dataflow/uploaders/google_ads/customer_match/contact_info_uploader.py
|
Python
|
apache-2.0
| 1,678
| 0.005364
|
import unittest
import urllib
import logging
from google.appengine.ext import testbed
from google.appengine.api import urlfetch
from conference import ConferenceApi
from models import ConferenceForm
from models import ConferenceForms
from models import ConferenceQueryForm
from models import ConferenceQueryForms
from protorpc.remote import protojson
def init_stubs(tb):
tb.init_urlfetch_stub()
tb.init_app_identity_stub()
tb.init_blobstore_stub()
tb.init_capability_stub()
tb.init_channel_stub()
tb.init_datastore_v3_stub()
tb.init_files_stub()
# tb.init_mail_stub()
tb.init_memcache_stub()
tb.init_taskqueue_stub(root_path='tests/resources')
tb.init_user_stub()
tb.init_xmpp_stub()
return tb
class AppEngineAPITest(unittest.TestCase):
def setUp(self):
logging.getLogger().setLevel(logging.DEBUG)
tb = testbed.Testbed()
tb.setup_env(current_version_id='testbed.version')
tb.activate()
self.testbed = init_stubs(tb)
def testUrlfetch(self):
# response = urlfetch.fetch('http://www.google.com')
url = 'http://localhost:9000/_ah/api/conference/v1/conference'
# form_fields = {
# "name": "Albert"
# }
form_fields = ConferenceForm(name='steven')
form_data = protojson.encode_message(form_fields)
# form_data = urllib.urlencode(form_fields)
response = urlfetch.fetch(url=url, payload=form_data, method=urlfetch.POST,
headers={'Content-Type': 'application/json'})
print(dir(response))
print(response.content)
self.assertEquals(200, response.status_code)
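# Conventional addition (assumed, not in the original file): allow running this
# module directly with the standard unittest runner.
if __name__ == '__main__':
    unittest.main()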
|
shteeven/conference
|
holder/test/test_appengine_api.py
|
Python
|
apache-2.0
| 1,694
| 0.002361
|
import os
import unittest
from vsg.rules import architecture
from vsg import vhdlFile
from vsg.tests import utils
sTestDir = os.path.dirname(__file__)
lFile, eError = vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir, 'rule_016_test_input.vhd'))
lExpected_require_blank = []
lExpected_require_blank.append('')
utils.read_file(os.path.join(sTestDir, 'rule_016_test_input.fixed_require_blank.vhd'), lExpected_require_blank)
lExpected_no_blank = []
lExpected_no_blank.append('')
utils.read_file(os.path.join(sTestDir, 'rule_016_test_input.fixed_no_blank.vhd'), lExpected_no_blank)
class test_architecture_rule(unittest.TestCase):
def setUp(self):
self.oFile = vhdlFile.vhdlFile(lFile)
self.assertIsNone(eError)
def test_rule_016_require_blank(self):
oRule = architecture.rule_016()
self.assertTrue(oRule)
self.assertEqual(oRule.name, 'architecture')
self.assertEqual(oRule.identifier, '016')
lExpected = [7, 12, 17]
oRule.analyze(self.oFile)
self.assertEqual(lExpected, utils.extract_violation_lines_from_violation_object(oRule.violations))
def test_fix_rule_016_require_blank(self):
oRule = architecture.rule_016()
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpected_require_blank, lActual)
oRule.analyze(self.oFile)
self.assertEqual(oRule.violations, [])
def test_rule_016_no_blank(self):
oRule = architecture.rule_016()
oRule.style = 'no_blank_line'
lExpected = [23]
oRule.analyze(self.oFile)
self.assertEqual(lExpected, utils.extract_violation_lines_from_violation_object(oRule.violations))
def test_fix_rule_016_no_blank(self):
oRule = architecture.rule_016()
oRule.style = 'no_blank_line'
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpected_no_blank, lActual)
oRule.analyze(self.oFile)
self.assertEqual(oRule.violations, [])
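# Conventional addition (assumed, not in the original file): allow running this
# module directly with the standard unittest runner.
if __name__ == '__main__':
    unittest.main()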
|
jeremiah-c-leary/vhdl-style-guide
|
vsg/tests/architecture/test_rule_016.py
|
Python
|
gpl-3.0
| 2,048
| 0.003418
|
""" Serial communication with Korad KA3xxxP power supplies.
The intent is to give easy access to the power supply as Python objects, eliminating the need to know
special codes.
The object supports the python `with` statement to release the serial port automatically:
from koradserial import KoradSerial
with KoradSerial('/dev/tty.usbmodemfd121') as device:
print "Model: ", device.model
print "Status: ", device.status
LICENSE: MIT
RESOURCES:
http://www.eevblog.com/forum/testgear/power-supply-ps3005d-ka3005d-rs232-protocol/
http://www.eevblog.com/forum/testgear/korad-ka3005p-io-commands/
http://sigrok.org/wiki/Velleman_PS3005D
https://gist.github.com/k-nowicki/5379272
"""
from __future__ import print_function, unicode_literals
from enum import Enum
from time import sleep
import serial
__all__ = ['KoradSerial', 'ChannelMode', 'OnOffState', 'Tracking']
class ChannelMode(Enum):
""" Represents channel modes.
These values should correspond to the values returned by the ``STATUS?`` command.
"""
constant_current = 0
constant_voltage = 1
class OnOffState(Enum):
""" Represents on/off states.
This could just as easily be done as a Boolean, but is explicit.
"""
off = 0
on = 1
class Tracking(Enum):
""" Tracking state for a multi-channel power supply.
These values should correspond to the values returned by the ``STATUS?`` command.
There seems to be conflicting information about these values.
The other values I've seen are:
* 0 - independent
* 1 - series
* 2 - parallel
* 3 - symmetric
However, I don't have a multi-channel power supply to test these.
"""
independent = 0
series = 1
parallel = 3
class Status(object):
""" Decode the KoradSerial status byte.
It appears that the firmware is a little wonky here.
SOURCE:
Taken from http://www.eevblog.com/forum/testgear/korad-ka3005p-io-commands/
        Contains 8 bits in the following format
Bit Item Description
0 CH1 0=CC mode, 1=CV mode
1 CH2 0=CC mode, 1=CV mode
2, 3 Tracking 00=Independent, 01=Tracking series,11=Tracking parallel
4 Beep 0=Off, 1=On
5 Lock 0=Lock, 1=Unlock
6 Output 0=Off, 1=On
7 N/A N/A
"""
def __init__(self, status):
""" Initialize object with a KoradSerial status character.
:param status: Status value
:type status: int
"""
super(Status, self).__init__()
self.raw = status
self.channel1 = ChannelMode(status & 1)
self.channel2 = ChannelMode((status >> 1) & 1)
self.tracking = Tracking((status >> 2) & 3)
self.beep = OnOffState((status >> 4) & 1)
self.lock = OnOffState((status >> 5) & 1)
self.output = OnOffState((status >> 6) & 1)
def __repr__(self):
return "{0}".format(self.raw)
def __str__(self):
return "Channel 1: {0}, Channel 2: {1}, Tracking: {2}, Beep: {3}, Lock: {4}, Output: {5}".format(
self.channel1.name,
self.channel2.name,
self.tracking.name,
self.beep.name,
self.lock.name,
self.output.name,
)
def __unicode__(self):
return self.__str__()
def float_or_none(value):
try:
return float(value)
except (TypeError, ValueError):
return None
class KoradSerial(object):
""" Wrapper for communicating with a programmable KoradSerial KA3xxxxP power supply as a serial interface.
"""
class Channel(object):
""" Wrap a channel. """
def __init__(self, serial_, channel_number):
"""
:type serial_: KoradSerial.Serial
:type channel_number: int
"""
super(KoradSerial.Channel, self).__init__()
self.__serial = serial_
self.number = channel_number
@property
def current(self):
result = self.__serial.send_receive("ISET{0}?".format(self.number), fixed_length=6)
            # Firmware quirk: a sixth character left over from the previous reply is returned.
            # It has to be read and discarded, otherwise it will be prepended to the next output.
return float_or_none(result[:5])
@current.setter
def current(self, value):
self.__serial.send("ISET{0}:{1:05.3f}".format(self.number, value))
@property
def voltage(self):
return float_or_none(self.__serial.send_receive("VSET{0}?".format(self.number), fixed_length=5))
@voltage.setter
def voltage(self, value):
self.__serial.send("VSET{0}:{1:05.2f}".format(self.number, value))
@property
def output_current(self):
""" Retrieve this channel's current current output.
:return: Amperes
:rtype: float or None
"""
result = self.__serial.send_receive("IOUT{0}?".format(self.number), fixed_length=5)
return float_or_none(result)
@property
def output_voltage(self):
""" Retrieve this channel's current current voltage.
:return: Volts
:rtype: float or None
"""
result = self.__serial.send_receive("VOUT{0}?".format(self.number), fixed_length=5)
return float_or_none(result)
class Memory(object):
""" Wrap a memory setting. """
def __init__(self, serial_, memory_number):
super(KoradSerial.Memory, self).__init__()
self.__serial = serial_
self.number = memory_number
def recall(self):
""" Recall this memory's settings. """
self.__serial.send("RCL{0}".format(self.number))
def save(self):
""" Save the current voltage and current to this memory. """
self.__serial.send("SAV{0}".format(self.number))
class OnOffButton(object):
""" Wrap an off/off button. """
def __init__(self, serial_, on_command, off_command):
super(KoradSerial.OnOffButton, self).__init__()
self.__serial = serial_
self._on = on_command
self._off = off_command
def on(self):
self.__serial.send(self._on)
def off(self):
self.__serial.send(self._off)
class Serial(object):
""" Serial operations.
There are some quirky things in communication. They go here.
"""
def __init__(self, port, debug=False):
super(KoradSerial.Serial, self).__init__()
self.debug = debug
self.port = serial.Serial(port, 9600, timeout=1)
def read_character(self):
c = self.port.read(1).decode('ascii')
if self.debug:
if len(c) > 0:
print("read: {0} = '{1}'".format(ord(c), c))
else:
print("read: timeout")
return c
def read_string(self, fixed_length=None):
""" Read a string.
It appears that the KoradSerial PSU returns zero-terminated strings.
:return: str
"""
result = []
c = self.read_character()
while len(c) > 0 and ord(c) != 0:
result.append(c)
if fixed_length is not None and len(result) == fixed_length:
break
c = self.read_character()
return ''.join(result)
def send(self, text):
if self.debug:
print("_send: ", text)
sleep(0.1)
self.port.write(text.encode('ascii'))
def send_receive(self, text, fixed_length=None):
self.send(text)
return self.read_string(fixed_length)
def __init__(self, port, debug=False):
super(KoradSerial, self).__init__()
self.__serial = KoradSerial.Serial(port, debug)
# Channels: adjust voltage and current, discover current output voltage.
self.channels = [KoradSerial.Channel(self.__serial, i) for i in range(1, 3)]
# Memory recall/save buttons 1 through 5
self.memories = [KoradSerial.Memory(self.__serial, i) for i in range(1, 6)]
# Second column buttons
self.beep = KoradSerial.OnOffButton(self.__serial, "BEEP1", "BEEP0")
self.output = KoradSerial.OnOffButton(self.__serial, "OUT1", "OUT0")
self.over_current_protection = KoradSerial.OnOffButton(self.__serial, "OCP1", "OCP0")
self.over_voltage_protection = KoradSerial.OnOffButton(self.__serial, "OVP1", "OVP0")
def __enter__(self):
""" See documentation for Python's ``with`` command.
"""
return self
def __exit__(self, type, value, traceback):
""" See documentation for Python's ``with`` command.
"""
self.close()
return False
# ################################################################################
# Serial operations
# ################################################################################
@property
def is_open(self):
""" Report whether the serial port is open.
:rtype: bool
"""
return self.__serial.port.isOpen()
def close(self):
""" Close the serial port """
self.__serial.port.close()
def open(self):
""" Open the serial port """
self.__serial.port.open()
# ################################################################################
# Power supply operations
# ################################################################################
@property
def model(self):
""" Report the power supply model information.
:rtype: str
"""
return self.__serial.send_receive("*IDN?")
@property
def status(self):
""" Report the power supply status.
:rtype: KoradSerial.Status or None
"""
self.__serial.send("STATUS?")
status = self.__serial.read_character()
if len(status) == 0:
return None
else:
return Status(ord(status))
def track(self, value):
""" Set tracking mode.
        This does nothing on a single-channel power supply.
:param value: Tracking mode to set.
:type value: Tracking
"""
translate = {
Tracking.independent: "TRACK0",
Tracking.series: "TRACK1",
Tracking.parallel: "TRACK2",
}
if value in translate:
self.__serial.send(translate[value])
|
starforgelabs/py-korad-serial
|
koradserial.py
|
Python
|
mit
| 10,720
| 0.001679
|
#!/usr/bin/env python
import os, sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "conext.settings")
from django.core.management import execute_from_command_line
import conext.startup as startup
startup.run()
execute_from_command_line(sys.argv)
pass
|
zdlm/conext
|
manage.py
|
Python
|
mit
| 305
| 0.003279
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# health.py file is part of slpkg.
# Copyright 2014-2021 Dimitris Zlatanidis <d.zlatanidis@gmail.com>
# All rights reserved.
# Slpkg is a user-friendly package manager for Slackware installations
# https://gitlab.com/dslackw/slpkg
# Slpkg is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from slpkg.messages import Msg
from slpkg.__metadata__ import MetaData as _meta_
from slpkg.pkg.find import find_package
class PackageHealth:
"""Health check installed packages
"""
def __init__(self, mode):
self.mode = mode
self.meta = _meta_
self.green = _meta_.color["GREEN"]
self.red = _meta_.color["RED"]
self.yellow = _meta_.color["YELLOW"]
self.endc = _meta_.color["ENDC"]
self.msg = Msg()
self.pkg_path = _meta_.pkg_path
self.installed = []
self.cn = 0
def packages(self):
"""Get all installed packages from /var/log/packages/ path
"""
self.installed = find_package("", self.pkg_path)
def check(self, line, pkg):
line = line.replace("\n", "")
try:
if (not line.endswith("/") and
not line.endswith(".new") and
not line.startswith("dev/") and
not line.startswith("install/") and
"/incoming/" not in line):
if not os.path.isfile(r"/" + line):
self.cn += 1
print(f"Not installed: {self.red}/{line}{self.endc} --> {pkg}")
elif not self.mode:
print(line)
except IOError:
print()
raise SystemExit()
def test(self):
"""Get started test each package and read file list
"""
self.packages()
self.cf = 0
for pkg in self.installed:
if os.path.isfile(f"{self.meta.pkg_path}{pkg}"):
self.lf = 0
with open(self.pkg_path + pkg, "r") as fopen:
for line in fopen:
if "\0" in line:
print(f"Null: {line}")
break
self.cf += 1 # count all files
self.lf += 1 # count each package files
if self.lf > 19:
self.check(line, pkg)
self.results()
def results(self):
"""Print results
"""
print()
per = int(round((float(self.cf) / (self.cf + self.cn)) * 100))
        if per > 90:
            color = self.green
        elif 60 < per <= 90:
            color = self.yellow
        else:
            color = self.red
health = f"{color}{str(per)}%{self.endc}"
self.msg.template(78)
print(f"| Total files{' ' * 7}Not installed{' ' * 40}Health")
self.msg.template(78)
print(f"| {self.cf}{' ' * (18-len(str(self.cf)))}{self.cn}{' ' * (55-len(str(self.cn)))}{health:>4}")
self.msg.template(78)
|
dslackw/slpkg
|
slpkg/health.py
|
Python
|
gpl-3.0
| 3,641
| 0.000824
|
# -*- coding: utf-8 -*-
"""Module to daemonize the current process on Unix."""
#
# (C) Pywikibot team, 2007-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
import codecs
import os
import sys
is_daemon = False
def daemonize(close_fd=True, chdir=True, write_pid=False, redirect_std=None):
"""
Daemonize the current process.
Only works on POSIX compatible operating systems.
    The process will fork to the background and return control to the terminal.
@param close_fd: Close the standard streams and replace them by /dev/null
@type close_fd: bool
@param chdir: Change the current working directory to /
@type chdir: bool
@param write_pid: Write the pid to sys.argv[0] + '.pid'
@type write_pid: bool
@param redirect_std: Filename to redirect stdout and stdin to
@type redirect_std: str
"""
# Fork away
if not os.fork():
# Become session leader
os.setsid()
# Fork again to prevent the process from acquiring a
# controlling terminal
pid = os.fork()
if not pid:
global is_daemon
is_daemon = True
if close_fd:
os.close(0)
os.close(1)
os.close(2)
os.open('/dev/null', os.O_RDWR)
if redirect_std:
os.open(redirect_std,
os.O_WRONLY | os.O_APPEND | os.O_CREAT)
else:
os.dup2(0, 1)
os.dup2(1, 2)
if chdir:
os.chdir('/')
return
else:
# Write out the pid
path = os.path.basename(sys.argv[0]) + '.pid'
with codecs.open(path, 'w', 'utf-8') as f:
f.write(str(pid))
os._exit(0)
else:
# Exit to return control to the terminal
# os._exit to prevent the cleanup to run
os._exit(0)
|
darthbhyrava/pywikibot-local
|
pywikibot/daemonize.py
|
Python
|
mit
| 2,017
| 0
|
from .tobii_pro_wrapper import *
|
oguayasa/tobii_pro_wrapper
|
tobii_pro_wrapper/__init__.py
|
Python
|
apache-2.0
| 34
| 0
|
# Copyright 2018 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import random
import time
from api.client import ApiClient
from common import mongo, clean_mongo
from infra.cli import CliUseradm, CliDeviceauth, CliTenantadm
import api.deviceauth as deviceauth_v1
import api.deviceauth_v2 as deviceauth_v2
import api.useradm as useradm
import api.tenantadm as tenantadm
import api.deployments as deployments
import api.inventory as inventory
import util.crypto
from common import User, Device, Authset, Tenant, \
create_user, create_tenant, create_tenant_user, \
create_random_authset, create_authset, \
get_device_by_id_data, change_authset_status
@pytest.yield_fixture(scope='function')
def clean_migrated_mongo(clean_mongo):
deviceauth_cli = CliDeviceauth()
useradm_cli = CliUseradm()
deviceauth_cli.migrate()
useradm_cli.migrate()
yield clean_mongo
@pytest.yield_fixture(scope='function')
def clean_migrated_mongo_mt(clean_mongo):
deviceauth_cli = CliDeviceauth()
useradm_cli = CliUseradm()
for t in ['tenant1', 'tenant2']:
deviceauth_cli.migrate(t)
useradm_cli.migrate(t)
yield clean_mongo
@pytest.yield_fixture(scope="function")
def user(clean_migrated_mongo):
yield create_user('user-foo@acme.com', 'correcthorse')
@pytest.yield_fixture(scope="function")
def devices(clean_migrated_mongo, user):
uc = ApiClient(useradm.URL_MGMT)
r = uc.call('POST',
useradm.URL_LOGIN,
auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
devices = []
for _ in range(5):
aset = create_random_authset(utoken)
dev = Device(aset.did, aset.id_data, aset.pubkey)
devices.append(dev)
yield devices
@pytest.yield_fixture(scope="function")
def tenants_users(clean_migrated_mongo_mt):
cli = CliTenantadm()
api = ApiClient(tenantadm.URL_INTERNAL)
names = ['tenant1', 'tenant2']
tenants=[]
for n in names:
tenants.append(create_tenant(n))
for t in tenants:
for i in range(2):
user = create_tenant_user(i, t)
t.users.append(user)
yield tenants
@pytest.yield_fixture(scope="function")
def tenants_users_devices(clean_migrated_mongo_mt, tenants_users):
uc = ApiClient(useradm.URL_MGMT)
for t in tenants_users:
user = t.users[0]
r = uc.call('POST',
useradm.URL_LOGIN,
auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
for _ in range(5):
aset = create_random_authset(utoken, t.tenant_token)
dev = Device(aset.did, aset.id_data, aset.pubkey, t.tenant_token)
t.devices.append(dev)
yield tenants_users
class TestPreauthBase:
def do_test_ok(self, user, tenant_token=''):
useradmm = ApiClient(useradm.URL_MGMT)
devauthm = ApiClient(deviceauth_v2.URL_MGMT)
devauthd = ApiClient(deviceauth_v1.URL_DEVICES)
# log in user
r = useradmm.call('POST',
useradm.URL_LOGIN,
auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
# preauth device
priv, pub = util.crypto.rsa_get_keypair()
id_data = {'mac': 'pretenditsamac'}
body = deviceauth_v2.preauth_req(
id_data,
pub)
r = devauthm.with_auth(utoken).call('POST',
deviceauth_v2.URL_DEVICES,
body)
assert r.status_code == 201
# device appears in device list
r = devauthm.with_auth(utoken).call('GET',
deviceauth_v2.URL_DEVICES)
assert r.status_code == 200
api_devs = r.json()
assert len(api_devs) == 1
api_dev = api_devs[0]
assert api_dev['status'] == 'preauthorized'
assert api_dev['identity_data'] == id_data
assert len(api_dev['auth_sets']) == 1
aset = api_dev['auth_sets'][0]
assert aset['identity_data'] == id_data
assert util.crypto.rsa_compare_keys(aset['pubkey'], pub)
assert aset['status'] == 'preauthorized'
# actual device can obtain auth token
body, sighdr = deviceauth_v1.auth_req(id_data,
pub,
priv,
tenant_token)
r = devauthd.call('POST',
deviceauth_v1.URL_AUTH_REQS,
body,
headers=sighdr)
assert r.status_code == 200
# device and authset changed status to 'accepted'
r = devauthm.with_auth(utoken).call('GET',
deviceauth_v2.URL_DEVICES,
path_params={'id': api_dev['id']})
api_devs = r.json()
assert len(api_devs) == 1
api_dev = api_devs[0]
assert api_dev['status'] == 'accepted'
assert len(api_dev['auth_sets']) == 1
aset = api_dev['auth_sets'][0]
assert aset['status'] == 'accepted'
def do_test_fail_duplicate(self, user, devices):
useradmm = ApiClient(useradm.URL_MGMT)
devauthm = ApiClient(deviceauth_v2.URL_MGMT)
# log in user
r = useradmm.call('POST',
useradm.URL_LOGIN,
auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
# preauth duplicate device
priv, pub = util.crypto.rsa_get_keypair()
id_data = devices[0].id_data
body = deviceauth_v2.preauth_req(
id_data,
pub)
r = devauthm.with_auth(utoken).call('POST',
deviceauth_v2.URL_DEVICES,
body)
assert r.status_code == 409
# device list is unmodified
r = devauthm.with_auth(utoken).call('GET',
deviceauth_v2.URL_DEVICES)
assert r.status_code == 200
api_devs = r.json()
assert len(api_devs) == len(devices)
# existing device has no new auth sets
existing = [d for d in api_devs if d['identity_data'] == id_data]
assert len(existing) == 1
existing = existing[0]
assert len(existing['auth_sets']) == 1
aset = existing['auth_sets'][0]
assert util.crypto.rsa_compare_keys(aset['pubkey'], devices[0].pubkey)
assert aset['status'] == 'pending'
class TestPreauth(TestPreauthBase):
def test_ok(self, user):
self.do_test_ok(user)
def test_fail_duplicate(self, user, devices):
self.do_test_fail_duplicate(user, devices)
def test_fail_bad_request(self, user):
useradmm = ApiClient(useradm.URL_MGMT)
devauthm = ApiClient(deviceauth_v2.URL_MGMT)
# log in user
r = useradmm.call('POST',
useradm.URL_LOGIN,
auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
# id data not json
priv, pub = util.crypto.rsa_get_keypair()
id_data = '{\"mac\": \"foo\"}'
body = deviceauth_v2.preauth_req(
id_data,
pub)
r = devauthm.with_auth(utoken).call('POST',
deviceauth_v2.URL_DEVICES,
body)
assert r.status_code == 400
# not a valid key
id_data = {'mac': 'foo'}
body = deviceauth_v2.preauth_req(
id_data,
'not a public key')
r = devauthm.with_auth(utoken).call('POST',
deviceauth_v2.URL_DEVICES,
body)
assert r.status_code == 400
class TestPreauthEnterprise(TestPreauthBase):
def test_ok(self, tenants_users):
user = tenants_users[0].users[0]
self.do_test_ok(user, tenants_users[0].tenant_token)
# check other tenant's devices unmodified
user1 = tenants_users[1].users[0]
devs1 = tenants_users[1].devices
self.verify_devices_unmodified(user1, devs1)
def test_fail_duplicate(self, tenants_users_devices):
user = tenants_users_devices[0].users[0]
devices = tenants_users_devices[0].devices
self.do_test_fail_duplicate(user, devices)
# check other tenant's devices unmodified
user1 = tenants_users_devices[1].users[0]
devs1 = tenants_users_devices[1].devices
self.verify_devices_unmodified(user1, devs1)
def verify_devices_unmodified(self, user, in_devices):
devauthm = ApiClient(deviceauth_v2.URL_MGMT)
useradmm = ApiClient(useradm.URL_MGMT)
r = useradmm.call('POST',
useradm.URL_LOGIN,
auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
r = devauthm.with_auth(utoken).call('GET',
deviceauth_v2.URL_DEVICES)
assert r.status_code == 200
api_devs = r.json()
assert len(api_devs) == len(in_devices)
for ad in api_devs:
assert ad['status'] == 'pending'
orig_device = [d for d in in_devices if d.id_data == ad['identity_data']]
assert len(orig_device) == 1
orig_device = orig_device[0]
assert len(ad['auth_sets']) == 1
aset = ad['auth_sets'][0]
assert util.crypto.rsa_compare_keys(aset['pubkey'], orig_device.pubkey)
def make_devs_with_authsets(user, tenant_token=''):
""" create a good number of devices, some with >1 authsets, with different statuses.
returns DevWithAuthsets objects."""
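    # Population built below: 5 pending devices with a single authset, 2 pending with
    # 3 authsets, 3 accepted with 1 authset, 2 accepted with 3 authsets, 2 rejected
    # with 3 authsets, 2 preauthorized, and 2 preauthorized with 2 extra pending authsets.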
useradmm = ApiClient(useradm.URL_MGMT)
# log in user
r = useradmm.call('POST',
useradm.URL_LOGIN,
auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
devices = []
# some vanilla 'pending' devices, single authset
for _ in range(5):
dev = make_pending_device(utoken, 1, tenant_token=tenant_token)
devices.append(dev)
# some pending devices with > 1 authsets
for i in range(2):
dev = make_pending_device(utoken, 3, tenant_token=tenant_token)
devices.append(dev)
# some 'accepted' devices, single authset
for _ in range(3):
dev = make_accepted_device(utoken, 1, tenant_token=tenant_token)
devices.append(dev)
# some 'accepted' devices with >1 authsets
for _ in range(2):
dev = make_accepted_device(utoken, 3, tenant_token=tenant_token)
devices.append(dev)
# some rejected devices
for _ in range(2):
dev = make_rejected_device(utoken, 3, tenant_token=tenant_token)
devices.append(dev)
# preauth'd devices
for i in range(2):
dev = make_preauthd_device(utoken)
devices.append(dev)
# preauth'd devices with extra 'pending' sets
for i in range(2):
dev = make_preauthd_device_with_pending(utoken, num_pending=2, tenant_token=tenant_token)
devices.append(dev)
return devices
@pytest.yield_fixture(scope="function")
def devs_authsets(user):
yield make_devs_with_authsets(user)
@pytest.yield_fixture(scope="function")
def tenants_devs_authsets(tenants_users):
for t in tenants_users:
devs = make_devs_with_authsets(t.users[0], t.tenant_token)
t.devices = devs
yield tenants_users
def rand_id_data():
mac = ":".join(["{:02x}".format(random.randint(0x00, 0xFF), 'x') for i in range(6)])
sn = "".join(["{}".format(random.randint(0x00, 0xFF)) for i in range(6)])
return {'mac': mac, 'sn': sn}
def make_pending_device(utoken, num_auth_sets=1, tenant_token=''):
id_data = rand_id_data()
dev = None
for i in range(num_auth_sets):
priv, pub = util.crypto.rsa_get_keypair()
new_set = create_authset(id_data, pub, priv, utoken, tenant_token=tenant_token)
if dev is None:
dev = Device(new_set.did, new_set.id_data, utoken, tenant_token)
dev.authsets.append(new_set)
dev.status = 'pending'
return dev
def make_accepted_device(utoken, num_auth_sets=1, num_accepted=1, tenant_token=''):
dev = make_pending_device(utoken, num_auth_sets, tenant_token=tenant_token)
for i in range(num_accepted):
aset_id = dev.authsets[i].id
change_authset_status(dev.id, aset_id, 'accepted', utoken)
dev.authsets[i].status = 'accepted'
dev.status = 'accepted'
return dev
def make_rejected_device(utoken, num_auth_sets=1, tenant_token=''):
dev = make_pending_device(utoken, num_auth_sets, tenant_token=tenant_token)
for i in range(num_auth_sets):
aset_id = dev.authsets[i].id
change_authset_status(dev.id, aset_id, 'rejected', utoken)
dev.authsets[i].status = 'rejected'
dev.status = 'rejected'
return dev
def make_preauthd_device(utoken):
devauthm = ApiClient(deviceauth_v2.URL_MGMT)
priv, pub = util.crypto.rsa_get_keypair()
id_data = rand_id_data()
body = deviceauth_v2.preauth_req(
id_data,
pub)
r = devauthm.with_auth(utoken).call('POST',
deviceauth_v2.URL_DEVICES,
body)
assert r.status_code == 201
api_dev = get_device_by_id_data(id_data, utoken)
assert len(api_dev['auth_sets']) == 1
aset = api_dev['auth_sets'][0]
dev = Device(api_dev['id'], id_data, pub)
dev.authsets.append(Authset(aset['id'], dev.id, id_data, pub, priv, 'preauthorized'))
dev.status = 'preauthorized'
return dev
def make_preauthd_device_with_pending(utoken, num_pending=1, tenant_token=''):
dev = make_preauthd_device(utoken)
for i in range(num_pending):
priv, pub = util.crypto.rsa_get_keypair()
aset = create_authset(dev.id_data, pub, priv, utoken, tenant_token=tenant_token)
dev.authsets.append(Authset(aset.id, aset.did, dev.id_data, pub, priv, 'pending'))
return dev
class TestDeviceMgmtBase:
def do_test_ok_get_devices(self, devs_authsets, user):
da = ApiClient(deviceauth_v2.URL_MGMT)
ua = ApiClient(useradm.URL_MGMT)
# log in user
r = ua.call('POST',
useradm.URL_LOGIN,
auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
# test cases
for status, page, per_page in [
(None, None, None),
('pending', None, None),
('accepted', None, None),
('rejected', None, None),
('preauthorized', None, None),
(None, 1, 10),
(None, 3, 10),
(None, 2, 5),
('accepted', 1, 4),
('accepted', 2, 4),
('accepted', 5, 2),
('pending', 2, 2)]:
qs_params = {}
if status is not None:
qs_params['status'] = status
if page is not None:
qs_params['page'] = page
if per_page is not None:
qs_params['per_page'] = per_page
r = da.with_auth(utoken).call('GET',
deviceauth_v2.URL_DEVICES,
qs_params=qs_params)
assert r.status_code == 200
api_devs = r.json()
ref_devs = filter_and_page_devs(devs_authsets, page=page, per_page=per_page, status=status)
self._compare_devs(ref_devs, api_devs)
def do_test_get_device(self, devs_authsets, user):
da = ApiClient(deviceauth_v2.URL_MGMT)
ua = ApiClient(useradm.URL_MGMT)
# log in user
r = ua.call('POST',
useradm.URL_LOGIN,
auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
# existing devices
for dev in devs_authsets:
r = da.with_auth(utoken).call('GET',
deviceauth_v2.URL_DEVICE,
path_params={'id': dev.id})
assert r.status_code == 200
api_dev = r.json()
self._compare_dev(dev, api_dev)
# non-existent devices
for id in ['foo', 'bar']:
r = da.with_auth(utoken).call('GET',
deviceauth_v2.URL_DEVICE,
path_params={'id': id})
assert r.status_code == 404
def do_test_delete_device_ok(self, devs_authsets, user, tenant_token=''):
devapim = ApiClient(deviceauth_v2.URL_MGMT)
devapid = ApiClient(deviceauth_v1.URL_DEVICES)
userapi = ApiClient(useradm.URL_MGMT)
depapi = ApiClient(deployments.URL_DEVICES)
# log in user
r = userapi.call('POST',
useradm.URL_LOGIN,
auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
# decommission a pending device
dev_pending = filter_and_page_devs(devs_authsets, status='pending')[0]
r = devapim.with_auth(utoken).call('DELETE',
deviceauth_v2.URL_DEVICE,
path_params={'id': dev_pending.id})
assert r.status_code == 204
# only verify the device is gone
r = devapim.with_auth(utoken).call('GET',
deviceauth_v2.URL_DEVICE,
path_params={'id': dev_pending.id})
assert r.status_code == 404
# log in an accepted device
dev_acc = filter_and_page_devs(devs_authsets, status='accepted')[0]
body, sighdr = deviceauth_v1.auth_req(dev_acc.id_data,
dev_acc.authsets[0].pubkey,
dev_acc.authsets[0].privkey,
tenant_token)
r = devapid.call('POST',
deviceauth_v1.URL_AUTH_REQS,
body,
headers=sighdr)
assert r.status_code == 200
dtoken = r.text
# decommission the accepted device
r = devapim.with_auth(utoken).call('DELETE',
deviceauth_v2.URL_DEVICE,
path_params={'id': dev_acc.id})
assert r.status_code == 204
# verify the device lost access
r = depapi.with_auth(dtoken).call('GET',
deployments.URL_NEXT,
qs_params={'device_type': 'foo',
'artifact_name': 'bar'})
assert r.status_code == 401
# verify the device is gone
r = devapim.with_auth(utoken).call('GET',
deviceauth_v2.URL_DEVICE,
path_params={'id': dev_acc.id})
assert r.status_code == 404
def do_test_delete_device_not_found(self, devs_authsets, user):
ua = ApiClient(useradm.URL_MGMT)
da = ApiClient(deviceauth_v2.URL_MGMT)
# log in user
r = ua.call('POST',
useradm.URL_LOGIN,
auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
# try delete
r = da.with_auth(utoken).call('DELETE',
deviceauth_v2.URL_DEVICE,
path_params={'id': 'foo'})
assert r.status_code == 404
# check device list unmodified
r = da.with_auth(utoken).call('GET',
deviceauth_v2.URL_DEVICES)
assert r.status_code == 200
api_devs = r.json()
self._compare_devs(devs_authsets, api_devs)
def do_test_device_count(self, devs_authsets, user):
ua = ApiClient(useradm.URL_MGMT)
da = ApiClient(deviceauth_v2.URL_MGMT)
# log in user
r = ua.call('POST',
useradm.URL_LOGIN,
auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
# test cases: successful counts
for status in [None, \
'pending', \
'accepted', \
'rejected', \
'preauthorized']:
qs_params={}
if status is not None:
qs_params={'status': status}
r = da.with_auth(utoken).call('GET',
deviceauth_v2.URL_DEVICES_COUNT,
qs_params=qs_params)
assert r.status_code == 200
count = r.json()
ref_devs = filter_and_page_devs(devs_authsets, status=status)
ref_count = len(ref_devs)
assert ref_count == count['count']
# fail: bad request
r = da.with_auth(utoken).call('GET',
deviceauth_v2.URL_DEVICES_COUNT,
qs_params={'status': 'foo'})
assert r.status_code == 400
def _compare_devs(self, devs, api_devs):
assert len(api_devs) == len(devs)
for i in range(len(api_devs)):
self._compare_dev(devs[i], api_devs[i])
def _compare_dev(self, dev, api_dev):
assert api_dev['id'] == dev.id
assert api_dev['identity_data'] == dev.id_data
assert api_dev['status'] == dev.status
assert len(api_dev['auth_sets']) == len(dev.authsets)
# GOTCHA: don't rely on indexing, authsets can get reshuffled
# depending on actual contents (we don't order them, so it's up to mongo)
for api_aset in api_dev['auth_sets']:
aset = [a for a in dev.authsets if util.crypto.rsa_compare_keys(a.pubkey, api_aset['pubkey'])]
assert len(aset) == 1
aset = aset[0]
compare_aset(aset, api_aset)
def _filter_and_page_devs(self, devs, page=None, per_page=None, status=None):
if status is not None:
devs = [d for d in devs if d.status==status]
if page is None:
page = 1
if per_page is None:
per_page = 20
lo = (page-1)*per_page
hi = lo + per_page
return devs[lo:hi]
class TestDeviceMgmt(TestDeviceMgmtBase):
def test_ok_get_devices(self, devs_authsets, user):
self.do_test_ok_get_devices(devs_authsets, user)
def test_get_device(self, devs_authsets, user):
self.do_test_get_device(devs_authsets, user)
def test_delete_device_ok(self, devs_authsets, user):
self.do_test_delete_device_ok(devs_authsets, user)
def test_delete_device_not_found(self, devs_authsets, user):
self.do_test_delete_device_not_found(devs_authsets, user)
def test_device_count(self, devs_authsets, user):
self.do_test_device_count(devs_authsets, user)
class TestDeviceMgmtEnterprise(TestDeviceMgmtBase):
def test_ok_get_devices(self, tenants_devs_authsets):
for t in tenants_devs_authsets:
self.do_test_ok_get_devices(t.devices, t.users[0])
def test_get_device(self, tenants_devs_authsets):
for t in tenants_devs_authsets:
self.do_test_get_device(t.devices, t.users[0])
def test_delete_device_ok(self, tenants_devs_authsets):
for t in tenants_devs_authsets:
self.do_test_delete_device_ok(t.devices, t.users[0], tenant_token=t.tenant_token)
def test_delete_device_not_found(self, tenants_devs_authsets):
for t in tenants_devs_authsets:
self.do_test_delete_device_not_found(t.devices, t.users[0])
def test_device_count(self, tenants_devs_authsets):
for t in tenants_devs_authsets:
self.do_test_device_count(t.devices, t.users[0])
def test_limits_max_devices(self, tenants_devs_authsets):
devauthi = ApiClient(deviceauth_v1.URL_INTERNAL)
devauthm = ApiClient(deviceauth_v2.URL_MGMT)
devauthd = ApiClient(deviceauth_v1.URL_DEVICES)
useradmm = ApiClient(useradm.URL_MGMT)
for t in tenants_devs_authsets:
# get num currently accepted devices
num_acc = len(filter_and_page_devs(t.devices, status='accepted'))
# set limit to that
r = devauthi.call('PUT',
deviceauth_v1.URL_LIMITS_MAX_DEVICES,
{'limit': num_acc},
path_params={'tid': t.id})
assert r.status_code == 204
# get limit via internal api
r = devauthi.call('GET',
deviceauth_v1.URL_LIMITS_MAX_DEVICES,
path_params={'tid': t.id})
assert r.status_code == 200
assert r.json()['limit'] == num_acc
# get limit via mgmt api
r = useradmm.call('POST',
useradm.URL_LOGIN,
auth=(t.users[0].name, t.users[0].pwd))
assert r.status_code == 200
utoken = r.text
r = devauthm.with_auth(utoken).call('GET',
deviceauth_v2.URL_LIMITS_MAX_DEVICES)
assert r.status_code == 200
assert r.json()['limit'] == num_acc
# try accept a device manually
pending = filter_and_page_devs(t.devices, status='pending')[0]
r = devauthm.with_auth(utoken).call('PUT',
deviceauth_v2.URL_AUTHSET_STATUS,
deviceauth_v2.req_status('accepted'),
path_params={'did': pending.id, 'aid': pending.authsets[0].id })
assert r.status_code == 422
# try exceed the limit via preauth'd device
preauthd = filter_and_page_devs(t.devices, status='preauthorized')[0]
body, sighdr = deviceauth_v1.auth_req(preauthd.id_data,
preauthd.authsets[0].pubkey,
preauthd.authsets[0].privkey,
t.tenant_token)
r = devauthd.call('POST',
deviceauth_v1.URL_AUTH_REQS,
body,
headers=sighdr)
assert r.status_code == 401
class TestAuthsetMgmtBase:
def do_test_get_authset_status(self, devs_authsets, user):
devauthm = ApiClient(deviceauth_v2.URL_MGMT)
useradmm = ApiClient(useradm.URL_MGMT)
# log in user
r = useradmm.call('POST',
useradm.URL_LOGIN,
auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
# try valid authsets
for d in devs_authsets:
for a in d.authsets:
r = devauthm.with_auth(utoken).call('GET',
deviceauth_v2.URL_AUTHSET_STATUS,
path_params={'did': d.id, 'aid': a.id })
assert r.status_code == 200
assert r.json()['status'] == a.status
# invalid authset or device
for did, aid in [(devs_authsets[0].id, "foo"),
("foo", "bar")]:
r = devauthm.with_auth(utoken).call('GET',
deviceauth_v2.URL_AUTHSET_STATUS,
path_params={'did': did, 'aid': aid })
assert r.status_code == 404
def do_test_put_status_accept(self, devs_authsets, user, tenant_token=''):
devauthm = ApiClient(deviceauth_v2.URL_MGMT)
devauthd = ApiClient(deviceauth_v1.URL_DEVICES)
useradmm = ApiClient(useradm.URL_MGMT)
deploymentsd = ApiClient(deployments.URL_DEVICES)
# log in user
r = useradmm.call('POST',
useradm.URL_LOGIN,
auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
# select interesting devices - pending, rejected, or accepted/preauthd with extra authsets
devs = []
for status in ['pending',
'rejected',
'accepted',
'preauthorized']:
found = filter_and_page_devs(devs_authsets, status=status)
if status == 'accepted' or status == 'preauthorized':
found = [d for d in found if len(d.authsets) > 1]
devs.extend(found)
# test acceptance for various kinds of devs
for dev in devs:
# for accepted devs - first actually get a device token
dtoken = None
if dev.status == 'accepted':
accepted = [a for a in dev.authsets if a.status == 'accepted'][0]
body, sighdr = deviceauth_v1.auth_req(accepted.id_data,
accepted.pubkey,
accepted.privkey,
tenant_token)
r = devauthd.call('POST',
deviceauth_v1.URL_AUTH_REQS,
body,
headers=sighdr)
assert r.status_code == 200
dtoken = r.text
# find some pending or rejected authset
aset = [a for a in dev.authsets if a.status == 'pending' or a.status == 'rejected'][0]
# accept the authset
change_authset_status(dev.id, aset.id, 'accepted', utoken)
# in case of originally preauthd/accepted devs: the original authset must be rejected now
if dev.status in ['accepted', 'preauthorized']:
aset_to_reject = [a for a in dev.authsets if a.status == dev.status]
assert len(aset_to_reject) == 1
aset_to_reject[0].status = 'rejected'
# in all cases, device is now 'accepted', along with the just accepted authset
dev.status = 'accepted'
aset.status = 'accepted'
# verify device is correct in the api
self.verify_dev_after_status_update(dev, utoken)
# if the device used to be accepted - check it lost access
if dtoken is not None:
r = deploymentsd.with_auth(dtoken).call('GET',
deployments.URL_NEXT,
qs_params={'device_type': 'foo',
'artifact_name': 'bar'})
assert r.status_code == 401
# device should also be provisioned in inventory
time.sleep(1)
self.verify_dev_provisioned(dev, utoken)
def do_test_put_status_reject(self, devs_authsets, user, tenant_token=''):
devauthm = ApiClient(deviceauth_v2.URL_MGMT)
devauthd = ApiClient(deviceauth_v1.URL_DEVICES)
useradmm = ApiClient(useradm.URL_MGMT)
deploymentsd = ApiClient(deployments.URL_DEVICES)
# log in user
r = useradmm.call('POST',
useradm.URL_LOGIN,
auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
devs = []
for status in ['pending',
'accepted',
'preauthorized']:
found = filter_and_page_devs(devs_authsets, status=status)
devs.extend(found)
for dev in devs:
aset = None
dtoken = None
# for accepted or preauthd devs, reject the accepted/preauthd set
# otherwise just select something
if dev.status in ['accepted', 'preauthorized']:
aset = [a for a in dev.authsets if a.status == dev.status]
assert len(aset) == 1
aset = aset[0]
else:
aset = dev.authsets[0]
# for accepted devs, also have an active device and check it loses api access
if dev.status == 'accepted':
body, sighdr = deviceauth_v1.auth_req(aset.id_data,
aset.pubkey,
aset.privkey,
tenant_token)
r = devauthd.call('POST',
deviceauth_v1.URL_AUTH_REQS,
body,
headers=sighdr)
assert r.status_code == 200
dtoken = r.text
# reject the authset
change_authset_status(dev.id, aset.id, 'rejected', utoken)
# the given authset always changes to 'rejected'
aset.status='rejected'
# if all other asets are also rejected, the device becomes too
# otherwise it's 'pending'
rej_asets = [a for a in dev.authsets if a.id != aset.id and a.status == 'rejected']
if len(rej_asets) == len(dev.authsets) - 1:
dev.status = 'rejected'
else:
dev.status = 'pending'
# check if the api device is consistent
self.verify_dev_after_status_update(dev, utoken)
# if we rejected an accepted, active device, check that it lost access
if dtoken is not None:
r = deploymentsd.with_auth(dtoken).call('GET',
deployments.URL_NEXT,
qs_params={'device_type': 'foo',
'artifact_name': 'bar'})
assert r.status_code == 401
def do_test_put_status_failed(self, devs_authsets, user):
useradmm = ApiClient(useradm.URL_MGMT)
devauthm = ApiClient(deviceauth_v2.URL_MGMT)
r = useradmm.call('POST',
useradm.URL_LOGIN,
auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
# not found: valid device, bogus authset
r = devauthm.with_auth(utoken).call('PUT',
deviceauth_v2.URL_AUTHSET_STATUS,
deviceauth_v2.req_status('accepted'),
path_params={'did': devs_authsets[0].id, 'aid': "foo" })
assert r.status_code == 404
# not found: bogus device
r = devauthm.with_auth(utoken).call('PUT',
deviceauth_v2.URL_AUTHSET_STATUS,
deviceauth_v2.req_status('accepted'),
path_params={'did': "foo", 'aid': "bar" })
assert r.status_code == 404
# bad request - invalid status
r = devauthm.with_auth(utoken).call('PUT',
deviceauth_v2.URL_AUTHSET_STATUS,
deviceauth_v2.req_status('invalid'),
path_params={'did': devs_authsets[0].id, 'aid': devs_authsets[0].authsets[0].id})
assert r.status_code == 400
# bad request - invalid payload
r = devauthm.with_auth(utoken).call('PUT',
deviceauth_v2.URL_AUTHSET_STATUS,
'{"foo": "bar"}',
path_params={'did': devs_authsets[0].id, 'aid': devs_authsets[0].authsets[0].id})
assert r.status_code == 400
def do_test_delete_status(self, devs_authsets, user, tenant_token=''):
devauthm = ApiClient(deviceauth_v2.URL_MGMT)
devauthd = ApiClient(deviceauth_v1.URL_DEVICES)
useradmm = ApiClient(useradm.URL_MGMT)
deploymentsd = ApiClient(deployments.URL_DEVICES)
# log in user
r = useradmm.call('POST',
useradm.URL_LOGIN,
auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
for dev in devs_authsets:
aset = None
dtoken = None
# for accepted or preauthd devs, reject the accepted/preauthd set
# otherwise just select something
if dev.status in ['accepted', 'preauthorized']:
aset = [a for a in dev.authsets if a.status == dev.status]
assert len(aset) == 1
aset = aset[0]
else:
aset = dev.authsets[0]
# for accepted devs, also have an active device and check it loses api access
if dev.status == 'accepted':
body, sighdr = deviceauth_v1.auth_req(aset.id_data,
aset.pubkey,
aset.privkey,
tenant_token)
r = devauthd.call('POST',
deviceauth_v1.URL_AUTH_REQS,
body,
headers=sighdr)
assert r.status_code == 200
dtoken = r.text
# delete authset
r = devauthm.with_auth(utoken).call('DELETE',
deviceauth_v2.URL_AUTHSET,
path_params={'did': dev.id, 'aid': aset.id })
assert r.status_code == 204
# authset should be gone
dev.authsets.remove(aset)
# if it's the last authset of a preauth'd device - the device should be completely gone
if dev.status == 'preauthorized' and len(dev.authsets) == 0:
r = devauthm.with_auth(utoken).call('GET',
deviceauth_v2.URL_DEVICE,
path_params={'id': dev.id})
assert r.status_code == 404
                continue
else:
# in other cases the device remains
dev.status = self.compute_dev_status(dev.authsets)
# check api dev is consistent
self.verify_dev_after_status_update(dev, utoken)
# verify the device lost access, if we had one
if dtoken is not None:
r = deploymentsd.with_auth(dtoken).call('GET',
deployments.URL_NEXT,
qs_params={'device_type': 'foo',
'artifact_name': 'bar'})
assert r.status_code == 401
def do_test_delete_status_failed(self, devs_authsets, user):
useradmm = ApiClient(useradm.URL_MGMT)
devauthm = ApiClient(deviceauth_v2.URL_MGMT)
r = useradmm.call('POST',
useradm.URL_LOGIN,
auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
# not found: valid device, bogus authset
r = devauthm.with_auth(utoken).call('DELETE',
deviceauth_v2.URL_AUTHSET,
path_params={'did': devs_authsets[0].id, 'aid': "foo" })
assert r.status_code == 404
# not found: bogus device
r = devauthm.with_auth(utoken).call('DELETE',
deviceauth_v2.URL_AUTHSET,
path_params={'did': "foo", 'aid': "bar" })
assert r.status_code == 404
def verify_dev_after_status_update(self, dev, utoken):
devauthm = ApiClient(deviceauth_v2.URL_MGMT)
r = devauthm.with_auth(utoken).call('GET',
deviceauth_v2.URL_DEVICE,
path_params={'id': dev.id})
assert r.status_code == 200
api_dev = r.json()
assert api_dev['status'] == dev.status
assert len(api_dev['auth_sets']) == len(dev.authsets)
for api_aset in api_dev['auth_sets']:
aset = [a for a in dev.authsets if a.id == api_aset['id']]
assert len(aset) == 1
aset = aset[0]
compare_aset(aset, api_aset)
def compute_dev_status(self, authsets):
accepted = [a for a in authsets if a.status == 'accepted']
if len(accepted) > 0:
return 'accepted'
preauthd = [a for a in authsets if a.status == 'preauthorized']
if len(preauthd) > 0:
return 'preauthorized'
pending = [a for a in authsets if a.status == 'pending']
if len(pending) > 0:
return 'pending'
# either the dev is actually 'rejected', or has no auth sets
return 'rejected'
def verify_dev_provisioned(self, dev, utoken):
invm = ApiClient(inventory.URL_MGMT)
r = invm.with_auth(utoken).call('GET',
inventory.URL_DEVICE,
path_params={'id': dev.id})
assert r.status_code == 200
api_dev = r.json()
class TestAuthsetMgmt(TestAuthsetMgmtBase):
def test_get_authset_status(self, devs_authsets, user):
self.do_test_get_authset_status(devs_authsets, user)
def test_put_status_accept(self, devs_authsets, user):
self.do_test_put_status_accept(devs_authsets, user)
def test_put_status_reject(self, devs_authsets, user):
self.do_test_put_status_reject(devs_authsets, user)
def test_put_status_failed(self, devs_authsets, user):
self.do_test_put_status_failed(devs_authsets, user)
def test_delete_status(self, devs_authsets, user):
self.do_test_delete_status(devs_authsets, user)
def test_delete_status_failed(self, devs_authsets, user):
self.do_test_delete_status_failed(devs_authsets, user)
class TestAuthsetMgmtEnterprise(TestAuthsetMgmtBase):
def test_get_authset_status(self, tenants_devs_authsets):
for t in tenants_devs_authsets:
self.do_test_get_authset_status(t.devices, t.users[0])
def test_put_status_accept(self, tenants_devs_authsets):
for t in tenants_devs_authsets:
self.do_test_put_status_accept(t.devices, t.users[0], t.tenant_token)
def test_put_status_reject(self, tenants_devs_authsets):
for t in tenants_devs_authsets:
self.do_test_put_status_reject(t.devices, t.users[0], t.tenant_token)
def test_put_status_failed(self, tenants_devs_authsets):
for t in tenants_devs_authsets:
self.do_test_put_status_failed(t.devices, t.users[0])
def test_delete_status(self, tenants_devs_authsets):
for t in tenants_devs_authsets:
self.do_test_delete_status(t.devices, t.users[0], t.tenant_token)
def test_delete_status_failed(self, tenants_devs_authsets):
for t in tenants_devs_authsets:
self.do_test_delete_status_failed(t.devices, t.users[0])
def filter_and_page_devs(devs, page=None, per_page=None, status=None):
if status is not None:
devs = [d for d in devs if d.status==status]
if page is None:
page = 1
if per_page is None:
per_page = 20
lo = (page-1)*per_page
hi = lo + per_page
return devs[lo:hi]
def compare_aset(authset, api_authset):
assert authset.id == api_authset['id']
assert authset.id_data == api_authset['identity_data']
assert util.crypto.rsa_compare_keys(authset.pubkey, api_authset['pubkey'])
assert authset.status == api_authset['status']
|
pasinskim/integration
|
backend-tests/tests/test_devauth_v2.py
|
Python
|
apache-2.0
| 45,297
| 0.003378
|
#!/home/mjwtom/install/python/bin/python
# -*- coding: utf-8 -*-
import os
import subprocess
from nodes import storage_nodes as ips
def generate_rings():
print (os.environ["PATH"])
os.environ["PATH"] = '/home/mjwtom/install/python/bin' + ":" + os.environ["PATH"]
print (os.environ["PATH"])
dev = 'sdb1'
ETC_SWIFT='/etc/swift'
if not os.path.exists(ETC_SWIFT):
os.makedirs(ETC_SWIFT)
if os.path.exists(ETC_SWIFT+'/backups'):
cmd = ['rm',
'-rf',
'%s/backups' % ETC_SWIFT]
subprocess.call(cmd)
    print('current work path:%s' % os.getcwd())
    os.chdir(ETC_SWIFT)
    print('change work path to:%s' % os.getcwd())
files = os.listdir(ETC_SWIFT)
for file in files:
path = ETC_SWIFT + '/' + file
if os.path.isdir(path):
continue
        shortname, extension = os.path.splitext(file)
        if (extension == '.builder') or (extension == '.gz'):
try:
os.remove(path)
except Exception as e:
                print(e)
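    # Build one ring per service (object plus two object storage policies, container,
    # account). The 'create' arguments below are, in order: partition power (2**10
    # partitions), replica count (3) and min_part_hours (1).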
for builder, port in [('object.builder', 6000),
('object-1.builder', 6000),
('object-2.builder', 6000),
('container.builder', 6001),
('account.builder', 6002)]:
cmd = ['swift-ring-builder',
'%s' % builder,
'create',
'10',
'3',
'1']
subprocess.call(cmd)
i = 1
for ip in ips:
cmd = ['swift-ring-builder',
'%s' % builder,
'add',
'r%dz%d-%s:%d/%s' % (i, i, ip, port, dev),
'1']
subprocess.call(cmd)
i += 1
cmd = ['swift-ring-builder',
'%s' % builder,
'rebalance']
subprocess.call(cmd)
if __name__ == '__main__':
generate_rings()
|
mjwtom/swift
|
test/dedupe/bin/remakerings.py
|
Python
|
apache-2.0
| 1,973
| 0.003548
|
from mpi4py import MPI
import numpy as np
from oceansar import utils
class OceanSurfaceBalancer(object):
""" Ocean Surface Balancer class
    This class gives different MPI processes access to a shared ocean surface:
    each process is assigned an azimuth (y) portion of the surface, and the
    common surface properties are broadcast to every process.
:param surface: Full ocean surface (only owned by root process)
:param dt: Interpolation differential
:param t0: Initialization time
:param root: Rank number of surface owner
"""
def __init__(self, surface, dt, t0=0., root=0):
# MPI
self.comm = MPI.COMM_WORLD
self.size, self.rank = self.comm.Get_size(), self.comm.Get_rank()
self.root = root
# Surface
if self.rank == self.root:
if not surface:
raise ValueError('Surface is needed by root process')
self.surface = surface
# Prepare surface properties for broadcasting
surface_prop = {'Lx': self.surface.Lx,
'Ly': self.surface.Ly,
'dx': self.surface.dx,
'dy': self.surface.dy,
'Nx': self.surface.Nx,
'Ny': self.surface.Ny,
'x': self.surface.x,
'y': self.surface.y,
'wind_dir': self.surface.wind_dir,
'wind_dir_eff': self.surface.wind_dir_eff,
'wind_fetch': self.surface.wind_fetch,
'wind_U': self.surface.wind_U,
'wind_U_eff': self.surface.wind_U_eff,
'current_mag': self.surface.current_mag,
'current_dir': self.surface.current_dir,
'compute': self.surface.compute}
else:
surface_prop = None
# Broadcast & save properties
surface_prop = self.comm.bcast(surface_prop, root=self.root)
self.Lx = surface_prop['Lx']
self.Ly = surface_prop['Ly']
self.dx = surface_prop['dx']
self.dy = surface_prop['dy']
self.Nx = surface_prop['Nx']
self.Ny_full = surface_prop['Ny']
self.x = surface_prop['x']
self.y_full = surface_prop['y']
self.wind_dir = surface_prop['wind_dir']
self.wind_dir_eff = surface_prop['wind_dir_eff']
self.wind_fetch = surface_prop['wind_fetch']
self.wind_U = surface_prop['wind_U']
self.wind_U_eff = surface_prop['wind_U_eff']
self.current_mag = surface_prop['current_mag']
self.current_dir = surface_prop['current_dir']
self.compute = surface_prop['compute']
# Setup balancing (counts, displacements) for 2-D matrixes [Ny,Nx]
self.counts, self.displ = utils.balance_elements(
self.Ny_full, self.size)
self.counts *= self.Nx
self.displ *= self.Nx
# Process-dependent properties
        self.Ny = int(self.counts[self.rank] / self.Nx)
self.y = np.empty(self.Ny, dtype=np.float32)
if self.rank == self.root:
y = (np.ascontiguousarray(surface.y),
(self.counts / self.Nx, self.displ / self.Nx), MPI.FLOAT)
else:
y = None
self.comm.Scatterv(y, (self.y, MPI.FLOAT), root=self.root)
# INITIALIZE SURFACE
# Memory allocation (LOW (0) / HIGH (1) dt values)
if 'D' in self.compute:
self._Dx = (np.empty(2 * int(self.counts[self.rank]), dtype=np.float32).
reshape(2, int(self.counts[self.rank] / self.Nx), int(self.Nx)))
self._Dy = np.empty_like(self._Dx, dtype=np.float32)
self._Dz = np.empty_like(self._Dx, dtype=np.float32)
if 'Diff' in self.compute:
self._Diffx = (np.empty(2 * int(self.counts[self.rank]), dtype=np.float32).
reshape(2, int(self.counts[self.rank] / self.Nx), int(self.Nx)))
self._Diffy = np.empty_like(self._Diffx)
if 'Diff2' in self.compute:
self._Diffxx = (np.empty(2 * int(self.counts[self.rank]), dtype=np.float32).
reshape(2, int(self.counts[self.rank] / self.Nx), int(self.Nx)))
self._Diffyy = np.empty_like(self._Diffxx)
self._Diffxy = np.empty_like(self._Diffxx)
if 'V' in self.compute:
self._Vx = (np.empty(2 * int(self.counts[self.rank]), dtype=np.float32).
reshape(2, int(self.counts[self.rank] / self.Nx), int(self.Nx)))
self._Vy = np.empty_like(self._Vx)
self._Vz = np.empty_like(self._Vx)
if 'A' in self.compute:
self._Ax = (np.empty(2 * int(self.counts[self.rank]), dtype=np.float32).
reshape(2, int(self.counts[self.rank] / self.Nx), int(self.Nx)))
self._Ay = np.empty_like(self._Ax)
self._Az = np.empty_like(self._Ax)
if 'hMTF' in self.compute:
self._hMTF = (np.empty(2 * int(self.counts[self.rank]), dtype=np.float32).
reshape(2, int(self.counts[self.rank] / self.Nx), int(self.Nx)))
self.dt = dt
self.t_l_last = -1.
self.t_h_last = -1.
self.t = t0
@property
def t(self):
return self._t
@t.setter
def t(self, value):
self._t = np.float32(value)
# Update low/high times
t_l = np.float32(np.floor(self._t / self.dt) * self.dt)
t_h = t_l + self.dt
if (t_l != self.t_l_last) or (t_h != self.t_h_last):
# Only update t_h if 'going up'
if t_l == self.t_h_last:
if 'D' in self.compute:
self._Dx[0] = self._Dx[1]
self._Dy[0] = self._Dy[1]
self._Dz[0] = self._Dz[1]
if 'Diff' in self.compute:
self._Diffx[0] = self._Diffx[1]
self._Diffy[0] = self._Diffy[1]
if 'Diff2' in self.compute:
self._Diffxx[0] = self._Diffxx[1]
self._Diffyy[0] = self._Diffyy[1]
self._Diffxy[0] = self._Diffxy[1]
if 'V' in self.compute:
self._Vx[0] = self._Vx[1]
self._Vy[0] = self._Vy[1]
self._Vz[0] = self._Vz[1]
if 'A' in self.compute:
self._Ax[0] = self._Ax[1]
self._Ay[0] = self._Ay[1]
self._Az[0] = self._Az[1]
if 'hMTF' in self.compute:
self._hMTF[0] = self._hMTF[1]
t_update = np.array([[1, t_h]])
else:
t_update = np.array([[0, t_l], [1, t_h]])
# Initialize surface properties
for t_i in t_update:
if self.rank == self.root:
self.surface.t = t_i[1]
if 'D' in self.compute:
Dx_f = (self.surface.Dx,
(self.counts, self.displ), MPI.FLOAT)
Dy_f = (self.surface.Dy,
(self.counts, self.displ), MPI.FLOAT)
Dz_f = (self.surface.Dz,
(self.counts, self.displ), MPI.FLOAT)
if 'Diff' in self.compute:
Diffx_f = (self.surface.Diffx,
(self.counts, self.displ), MPI.FLOAT)
Diffy_f = (self.surface.Diffy,
(self.counts, self.displ), MPI.FLOAT)
if 'Diff2' in self.compute:
Diffxx_f = (self.surface.Diffxx,
(self.counts, self.displ), MPI.FLOAT)
Diffyy_f = (self.surface.Diffyy,
(self.counts, self.displ), MPI.FLOAT)
Diffxy_f = (self.surface.Diffxy,
(self.counts, self.displ), MPI.FLOAT)
if 'V' in self.compute:
Vx_f = (self.surface.Vx,
(self.counts, self.displ), MPI.FLOAT)
Vy_f = (self.surface.Vy,
(self.counts, self.displ), MPI.FLOAT)
Vz_f = (self.surface.Vz,
(self.counts, self.displ), MPI.FLOAT)
if 'A' in self.compute:
Ax_f = (self.surface.Ax,
(self.counts, self.displ), MPI.FLOAT)
Ay_f = (self.surface.Ay,
(self.counts, self.displ), MPI.FLOAT)
Az_f = (self.surface.Az,
(self.counts, self.displ), MPI.FLOAT)
if 'hMTF' in self.compute:
hMTF_f = (self.surface.hMTF,
(self.counts, self.displ), MPI.FLOAT)
else:
if 'D' in self.compute:
Dx_f = None
Dy_f = None
Dz_f = None
if 'Diff' in self.compute:
Diffx_f = None
Diffy_f = None
if 'Diff2' in self.compute:
Diffxx_f = None
Diffyy_f = None
Diffxy_f = None
if 'V' in self.compute:
Vx_f = None
Vy_f = None
Vz_f = None
if 'A' in self.compute:
Ax_f = None
Ay_f = None
Az_f = None
if 'hMTF' in self.compute:
hMTF_f = None
if 'D' in self.compute:
self.comm.Scatterv(
Dx_f, (self._Dx[int(t_i[0])], MPI.FLOAT), root=self.root)
self.comm.Scatterv(
Dy_f, (self._Dy[int(t_i[0])], MPI.FLOAT), root=self.root)
self.comm.Scatterv(
Dz_f, (self._Dz[int(t_i[0])], MPI.FLOAT), root=self.root)
if 'Diff' in self.compute:
self.comm.Scatterv(
Diffx_f, (self._Diffx[int(t_i[0])], MPI.FLOAT), root=self.root)
self.comm.Scatterv(
Diffy_f, (self._Diffy[int(t_i[0])], MPI.FLOAT), root=self.root)
if 'Diff2' in self.compute:
self.comm.Scatterv(
Diffxx_f, (self._Diffxx[int(t_i[0])], MPI.FLOAT), root=self.root)
self.comm.Scatterv(
Diffyy_f, (self._Diffyy[int(t_i[0])], MPI.FLOAT), root=self.root)
self.comm.Scatterv(
Diffxy_f, (self._Diffxy[int(t_i[0])], MPI.FLOAT), root=self.root)
if 'V' in self.compute:
self.comm.Scatterv(
Vx_f, (self._Vx[int(t_i[0])], MPI.FLOAT), root=self.root)
self.comm.Scatterv(
Vy_f, (self._Vy[int(t_i[0])], MPI.FLOAT), root=self.root)
self.comm.Scatterv(
Vz_f, (self._Vz[int(t_i[0])], MPI.FLOAT), root=self.root)
if 'A' in self.compute:
self.comm.Scatterv(
Ax_f, (self._Ax[int(t_i[0])], MPI.FLOAT), root=self.root)
self.comm.Scatterv(
Ay_f, (self._Ay[int(t_i[0])], MPI.FLOAT), root=self.root)
self.comm.Scatterv(
Az_f, (self._Az[int(t_i[0])], MPI.FLOAT), root=self.root)
if 'hMTF' in self.compute:
self.comm.Scatterv(
hMTF_f, (self._hMTF[int(t_i[0])], MPI.FLOAT), root=self.root)
self.t_l_last = t_l
self.t_h_last = t_h
# Apply linear interpolation
w_h = np.float32((self._t - t_l) / self.dt)
w_l = np.float32(1. - w_h)
if 'D' in self.compute:
self.Dx = w_l * self._Dx[0] + w_h * self._Dx[1]
self.Dy = w_l * self._Dy[0] + w_h * self._Dy[1]
self.Dz = w_l * self._Dz[0] + w_h * self._Dz[1]
if 'Diff' in self.compute:
self.Diffx = w_l * self._Diffx[0] + w_h * self._Diffx[1]
self.Diffy = w_l * self._Diffy[0] + w_h * self._Diffy[1]
if 'Diff2' in self.compute:
self.Diffxx = w_l * self._Diffxx[0] + w_h * self._Diffxx[1]
self.Diffyy = w_l * self._Diffyy[0] + w_h * self._Diffyy[1]
self.Diffxy = w_l * self._Diffxy[0] + w_h * self._Diffxy[1]
if 'V' in self.compute:
self.Vx = w_l * self._Vx[0] + w_h * self._Vx[1]
self.Vy = w_l * self._Vy[0] + w_h * self._Vy[1]
self.Vz = w_l * self._Vz[0] + w_h * self._Vz[1]
if 'A' in self.compute:
self.Ax = w_l * self._Ax[0] + w_h * self._Ax[1]
self.Ay = w_l * self._Ay[0] + w_h * self._Ay[1]
self.Az = w_l * self._Az[0] + w_h * self._Az[1]
if 'hMTF' in self.compute:
self.hMTF = w_l * self._hMTF[0] + w_h * self._hMTF[1]
|
pakodekker/oceansar
|
oceansar/surfaces/balancer.py
|
Python
|
gpl-3.0
| 13,512
| 0.001998
|
from unittest import TestCase
from settings import settings
from office365.outlookservices.outlook_client import OutlookClient
from office365.runtime.auth.authentication_context import AuthenticationContext
class OutlookClientTestCase(TestCase):
"""SharePoint specific test case base class"""
@classmethod
def setUpClass(cls):
# Due to Outlook REST API v1.0 BasicAuth Deprecation
# (refer https://developer.microsoft.com/en-us/office/blogs/outlook-rest-api-v1-0-basicauth-deprecation/)
# NetworkCredentialContext class should be no longer utilized
# ctx_auth = NetworkCredentialContext(username=settings['user_credentials']['username'],
# password=settings['user_credentials']['password'])
ctx_auth = AuthenticationContext(url=settings['tenant'])
ctx_auth.acquire_token_password_grant(client_credentials=settings['client_credentials'],
user_credentials=settings['user_credentials'])
cls.client = OutlookClient(ctx_auth)
|
vgrem/SharePointOnline-REST-Python-Client
|
tests/outlook_case.py
|
Python
|
mit
| 1,082
| 0.003697
|
from django.db import models
from django.utils import timezone
class ReceiveAddress(models.Model):
address = models.CharField(max_length=128, blank=True)
available = models.BooleanField(default=True)
@classmethod
def newAddress(cls, address):
receive_address = cls()
receive_address.address = address
receive_address.available = True
return receive_address
def use(self):
self.available = False
self.save()
class MoneySent(models.Model):
from_address = models.CharField(max_length=128)
to_address = models.CharField(max_length=128)
value = models.DecimalField(max_digits=16, decimal_places=8)
transaction_hash = models.CharField(max_length=128, null=True)
status = models.CharField(max_length=30)
creationDate = models.DateTimeField()
lastChangeDate = models.DateTimeField()
CREATED = 'CREATED'
SENT = 'SENT'
CONFIRMED_IPN = 'CONFIRMED_IPN'
CONFIRMED_TRN = 'CONFIRMED_TRN'
@classmethod
def newMoneySent(cls, from_address, to_address, value):
money_sent = cls()
money_sent.from_address = from_address
money_sent.to_address = to_address
money_sent.value = value
money_sent.status = MoneySent.CREATED
money_sent.creationDate = timezone.now()
money_sent.lastChangeDate = money_sent.creationDate
return money_sent
def touch(self):
self.lastChangeDate = timezone.now()
def sent(self, transaction_hash):
self.status = MoneySent.SENT
self.transaction_hash = transaction_hash
self.touch()
self.save()
def confirm_ipn(self):
if self.status == MoneySent.CREATED or self.status == MoneySent.SENT:
self.status = MoneySent.CONFIRMED_IPN
self.touch()
self.save()
def confirm_trn(self):
self.status = MoneySent.CONFIRMED_TRN
self.touch()
self.save()
|
freedomsponsors/www.freedomsponsors.org
|
djangoproject/bitcoin_frespo/models.py
|
Python
|
agpl-3.0
| 1,943
| 0.001544
|
from math import cos, sin, pi, ceil
from random import randint, random, expovariate, shuffle
import pygame as pg
from pygame import Rect
from ext import evthandler as eh
from conf import conf
from obj import Player, Star
from util import ir
import ui
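# uniform random float in [-1, 1)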
random0 = lambda: 2 * random() - 1
def tile (screen, img, rect, ox = 0, oy = 0, full = None):
# get offset
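    # when 'full' is given, align the tile pattern to that (unclipped) rect so
    # that drawing only a clipped sub-rect still lines up with it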
if full is not None:
ox += rect[0] - full[0]
oy += rect[1] - full[1]
# draw
i_w, i_h = img.get_size()
ox %= i_w
oy %= i_h
x0, y0, w0, h0 = rect
x1, y1 = x0 + w0, y0 + h0
x = x0
while x < x1:
this_ox = ox if x == x0 else 0
w = min(i_w - this_ox, x1 - x)
y = y0
while y < y1:
this_oy = oy if y == y0 else 0
h = min(i_h - this_oy, y1 - y)
screen.blit(img, (x, y), (this_ox, this_oy, w, h))
y += h
x += w
class Level (object):
def __init__ (self, game, event_handler = None, ID = 0, cp = -1):
self.game = game
# input
if event_handler is not None:
event_handler.add_event_handlers({
pg.KEYDOWN: self.skip,
pg.MOUSEBUTTONDOWN: self.skip
})
event_handler.add_key_handlers([
(conf.KEYS_BACK, self.pause, eh.MODE_ONDOWN),
(conf.KEYS_RESET, self.reset, eh.MODE_ONDOWN),
(conf.KEYS_JUMP, self.jump, eh.MODE_ONDOWN_REPEAT, 1, 1)
] + [
(ks, [(self.move, (i,))], eh.MODE_HELD)
for i, ks in enumerate((conf.KEYS_LEFT, conf.KEYS_RIGHT))
])
w, h = conf.RES
self.centre = (w / 2, h / 2)
ww, wh = conf.WINDOW_SIZE
border = (2 * (ww + 5), 2 * (wh + 5))
self.window_bds = pg.Rect(0, 0, w, h).inflate(border)
self.clouds = []
self.load_graphics()
if event_handler is not None:
self.move_channel = game.move_channel
self.star_channel = game.star_channel
else:
self.move_channel = None
self.star_channel = None
# load first level
self.ID = None
self.init(ID, cp)
def init (self, ID = None, cp = None):
self.paused = False
self.dying = False
self.first_dying = False
self.winning = False
self.fading = False
self.particles = []
self.particle_rects = []
self.void_jitter = [conf.VOID_JITTER_X, conf.VOID_JITTER_Y, conf.VOID_JITTER_T]
self.first = True
# get level/current checkpoint
if ID is None:
# same level
ID = self.ID
if ID != self.ID:
# new level
self.ID = ID
self.current_cp = cp if cp is not None else -1
# clouds: randomise initial positions and velocities
self.clouds = cs = []
w, h = conf.RES
imgs = self.imgs
vx0 = conf.CLOUD_SPEED
vy0 = vx0 * conf.CLOUD_VERT_SPEED_RATIO
self.cloud_vel = [vx0 * random0(), vy0 * random0()]
vx = conf.CLOUD_MOD_SPEED_RATIO
vy = vx * conf.CLOUD_VERT_SPEED_RATIO
for c in conf.CLOUDS:
c_w, c_h = imgs[c].get_size()
s = (c_w, c_h)
c_w /= 2
c_h /= 2
pos = [randint(-c_w, w - c_w), randint(-c_h, h - c_h)]
vel = [vx * random0(), vy * random0()]
cs.append((pos, vel, s))
elif cp is not None:
self.current_cp = cp
data = conf.LEVELS[ID]
# background
self.bgs = data.get('bgs', conf.DEFAULT_BGS)
# player
if self.current_cp >= 0:
p = list(data['checkpoints'][self.current_cp][:2])
s_p, s_c = conf.PLAYER_SIZE, conf.CHECKPOINT_SIZE
for i in (0, 1):
p[i] += float(s_c[i] - s_p[i]) / 2
else:
p = data['player_pos']
self.player = Player(self, p)
# window
x, y = Rect(self.to_screen(self.player.rect)).center
w, h = conf.HALF_WINDOW_SIZE
self.window = Rect(x - w, y - h, 2 * w, 2 * h)
self.old_window = self.window.copy()
# checkpoints
s = conf.CHECKPOINT_SIZE
self.checkpoints = [Rect(p + s) for p in data.get('checkpoints', [])]
# goal
self.goal = Rect(data['goal'] + conf.GOAL_SIZE)
self.goal_img = self.goal.move(conf.GOAL_OFFSET)
self.goal_img.size = self.imgs['goal'].get_size()
# stars
self.stars = [Star(self, p, [ID, i] in conf.STARS)
for i, p in enumerate(data.get('stars', []))]
if self.star_channel is not None and not all(s.got for s in self.stars):
self.star_channel.unpause()
# rects
self.all_rects = [Rect(r) for r in data.get('rects', [])]
self.all_vrects = [Rect(r) for r in data.get('vrects', [])]
self.arects = [Rect(r) for r in data.get('arects', [])]
self.update_rects()
def skip (self, evt):
if self.dying and self.dying_counter < conf.DIE_SKIP_THRESHOLD and \
not (evt.type == pg.KEYDOWN and evt.key in conf.KEYS_BACK) and \
not self.winning:
self.init()
elif conf.DEBUG and evt.type == pg.MOUSEBUTTONDOWN:
r = self.player.rect
c = self.window.center
print 'moving to', c
for i in (0, 1):
r[i] = c[i] - (r[i + 2] / 2)
self.player.old_rect = r
def pause (self, *args):
if self.move_channel is not None:
self.move_channel.pause()
if self.star_channel is not None:
self.star_channel.pause()
self.game.start_backend(ui.Paused, self)
self.paused = True
def reset (self, *args):
if not self.winning:
self.init()
def jump (self, key, mode, mods):
self.player.jump(mode == 0)
def move (self, key, mode, mods, i):
self.player.move(i)
def update_window (self):
w = self.window
wp0 = w.topleft
wp1 = w.bottomright
s = conf.RES
self.inverse_win = rs = []
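        # cover the screen area outside the window with up to 8 rects
        # (a 3x3 grid of regions around the window, minus the centre cell)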
for px in (0, 1, 2):
for py in (0, 1, 2):
if px == py == 1:
continue
r = [0, 0, 0, 0]
for i, p in enumerate((px, py)):
if p == 0:
r[i + 2] = wp0[i]
if p == 1:
r[i] = wp0[i]
r[i + 2] = wp1[i] - wp0[i]
elif p == 2:
r[i] = wp1[i]
r[i + 2] = s[i] - wp1[i]
if r[2] > 0 and r[3] > 0:
rs.append(Rect(r))
def get_clip (self, r1, r2, err = 0):
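        # return the overlapping region of r1 and r2 as (x, y, w, h) if both
        # its width and height exceed err; otherwise return None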
x01, y01, w, h = r1
x11, y11 = x01 + w, y01 + h
x02, y02, w, h = r2
x12, y12 = x02 + w, y02 + h
x0, y0 = max(x01, x02), max(y01, y02)
x1, y1 = min(x11, x12), min(y11, y12)
w, h = x1 - x0, y1 - y0
if w > err and h > err:
return (x0, y0, w, h)
def update_rects (self):
self.update_window()
# rects
self.rects = rects = []
self.draw_rects = draw = []
w = self.window
for r in self.all_rects:
c = w.clip(r)
if c:
rects.append(c)
draw.append(r)
# vrects
self.vrects = rects = []
ws = self.inverse_win
for r in self.all_vrects:
for w in ws:
c = w.clip(r)
if c:
rects.append(c)
def handle_collisions (self):
get_clip = self.get_clip
p = self.player.rect
p0 = list(p)
for r in self.rects + self.vrects + self.arects:
if get_clip(r, p):
r_x0, r_y0, w, h = r
r_x1, r_y1 = r_x0 + w, r_y0 + h
p_x0, p_y0, w, h = p
p_x1, p_y1 = p_x0 + w, p_y0 + h
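                # pick the side with the smallest penetration depth
                # (0: push left, 1: push up, 2: push right, 3: push down)
                # and move the player out of the rect along that axis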
x, dirn = min((p_x1 - r_x0, 0), (p_y1 - r_y0, 1),
(r_x1 - p_x0, 2), (r_y1 - p_y0, 3))
axis = dirn % 2
p[axis] += (1 if dirn >= 2 else -1) * x
self.player.impact(axis, 0)
if axis == 1:
self.vert_dirn = dirn
# screen left/right
if p[0] < 0:
p[0] = 0
self.player.impact(0, 0)
elif p[0] + p[2] > conf.RES[0]:
p[0] = conf.RES[0] - p[2]
self.player.impact(0, 0)
# die if still colliding
axes = set()
e = conf.ERR
colliding = [r for r in self.rects + self.vrects + self.arects \
if get_clip(r, p, e)]
if colliding:
for r in colliding:
r_x0, r_y0, w, h = r
r_x1, r_y1 = r_x0 + w, r_y0 + h
p_x0, p_y0, w, h = p
p_x1, p_y1 = p_x0 + w, p_y0 + h
x, dirn = min((p_x1 - r_x0, 0), (p_y1 - r_y0, 1),
(r_x1 - p_x0, 2), (r_y1 - p_y0, 3))
axes.add(dirn % 2)
if len(axes) == 2:
dirn = .5
else:
dirn = .95 if axes.pop() == 0 else .1
self.die(dirn)
def die (self, dirn = .5):
self.first_dying = True
self.dying = True
self.dying_counter = conf.DIE_TIME
# particles
pos = list(Rect(self.to_screen(self.player.rect)).center)
self.add_ptcls('die', pos, dirn)
# sound
if self.move_channel is not None:
self.move_channel.pause()
self.game.play_snd('die')
def next_level (self, save = True, progress = True):
if progress:
if self.move_channel is not None:
self.move_channel.pause()
if self.star_channel is not None:
self.star_channel.pause()
i = self.ID
if not conf.COMPLETED and i + 1 in conf.EXISTS:
# there's a next level
if save:
conf.CURRENT_LEVEL = i + 1
if progress:
self.init(i + 1)
else:
if save:
conf.COMPLETED = True
if progress:
self.game.switch_backend(ui.LevelSelect)
def win (self):
if self.winning:
return
self.winning = True
self.next_level(progress = False)
if self.ID not in conf.COMPLETED_LEVELS:
conf.COMPLETED_LEVELS.append(self.ID)
conf.dump()
self.start_fading(lambda: self.next_level(False))
def update (self):
# fade counter
if self.fading:
self.fade_counter -= 1
if self.fade_counter == 0:
self.fading = False
del self.fade_sfc
self.fade_cb()
# move player
if not self.dying:
pl = self.player
pl.update()
# get amount to move window by
w = self.window
self.old_window = w.copy()
x0, y0 = self.centre
if self.paused or self.first:
dx = dy = 0
self.first = False
else:
x, y = pg.mouse.get_pos()
dx, dy = x - x0, y - y0
# don't move too far outside the screen
w_moved = w.move(dx, dy).clamp(self.window_bds)
dx, dy = w_moved[0] - w[0], w_moved[1] - w[1]
pg.mouse.set_pos(x0, y0)
wx0, wy0, ww, wh = self.total_window = w.union(w.move(dx, dy))
# move window
if self.dying:
# just move window
w.move_ip(dx, dy)
self.update_rects()
else:
self.vert_dirn = 3
if dx == dy == 0:
# just handle collisions
self.handle_collisions()
else:
# check if player and window intersect
wx1, wy1 = wx0 + ww, wy0 + wh
r = pl.rect
o_r = pl.old_rect
px0, py0 = min(r[0], o_r[0]), min(r[1], o_r[1])
px1 = max(r[0] + r[2], o_r[0] + o_r[2])
py1 = max(r[1] + r[3], o_r[1] + o_r[3])
if px1 > wx0 and py1 > wy0 and px0 < wx1 and py0 < wy1:
# if so, move window a few pixels at a time
c = conf.WINDOW_MOVE_AMOUNT
for axis, d in ((0, dx), (1, dy)):
dirn = 1 if d > 0 else -1
while d * dirn > 0:
d -= dirn * c
rel = [0, 0]
rel[axis] += c * dirn + (0 if d * dirn > 0 else d)
w.move_ip(rel)
self.update_rects()
if not self.dying:
self.handle_collisions()
else:
# else move it the whole way
w.move_ip(dx, dy)
self.update_rects()
self.handle_collisions()
if self.vert_dirn == 1:
pl.on_ground = conf.ON_GROUND_TIME
# clouds
if self.clouds:
# jitter
jx = conf.CLOUD_JITTER
jy = jx * conf.CLOUD_VERT_SPEED_RATIO
v0 = self.cloud_vel
v0[0] += jx * random0()
v0[1] += jy * random0()
r = conf.RES
for p, v, s in self.clouds:
for i, (i_w, r_w) in enumerate(zip(s, r)):
# move
x = p[i]
x += v0[i] + v[i]
# wrap
if x + i_w < 0:
x = r_w
elif x > r_w:
x = -i_w
p[i] = x
# particles
ptcls = []
rects = []
for k, j, group in self.particles:
g = []
x0, y0 = conf.RES
x1 = y1 = 0
for c, p, v, size, t in group:
x, y = p
# update boundary
if x < x0:
x0 = x
if y < y0:
y0 = y
if x + size > x1:
x1 = x + size
if y + size > y1:
y1 = y + size
t -= 1
if t != 0:
# move
vx, vy = v
x += vx
y += vy
# update boundary
if x < x0:
x0 = x
if y < y0:
y0 = y
if x + size > x1:
x1 = x + size
if y + size > y1:
y1 = y + size
# damp/jitter
vx *= k
vy *= k
vx += j * random0()
vy += j * random0()
g.append((c, (x, y), (vx, vy), size, t))
if g:
ptcls.append((k, j, g))
if x1 > x0 and y1 > y0:
rects.append((int(x0), int(y0), ceil(x1 - x0), ceil(y1 - y0)))
self.particles = ptcls
self.particle_rects = rects
# death counter
if self.dying:
self.dying_counter -= 1
if self.dying_counter == 0:
self.init()
return
# player velocity
pl.update_vel()
# die if OoB
if pl.rect[1] > conf.RES[1]:
self.die()
# win if at goal
p = pl.rect
c = w.clip(self.goal)
if c and self.get_clip(p, c):
self.win()
# check if at checkpoints
for c in self.checkpoints[self.current_cp + 1:]:
if w.clip(c) and self.get_clip(p, c):
self.current_cp += 1
# check if at stars
for i, s in enumerate(self.stars):
if not s.got and w.clip(s.rect) and self.get_clip(p, s.rect):
#self.game.play_snd('collectstar')
if self.star_channel is not None and all(s.got for s in self.stars):
self.star_channel.pause()
s.got = True
conf.STARS.append([self.ID, i])
conf.dump()
def load_graphics (self):
self.imgs = imgs = {}
for img in ('void', 'window', 'rect', 'vrect', 'arect',
'checkpoint-current', 'checkpoint', 'goal') + \
conf.BGS + conf.CLOUDS:
imgs[img] = self.game.img(img + '.png')
self.window_sfc = pg.Surface(conf.WINDOW_SIZE).convert_alpha()
def to_screen (self, rect):
return [ir(x) for x in rect]
def add_ptcls (self, key, pos, dirn = .5):
particles = []
data = conf.PARTICLES[key]
max_speed = data['speed']
max_size = data['size']
k = data['damping']
j = data['jitter']
max_life = data['life']
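        # dirn in [0, 1] picks the burst bias: values near 0 spread particles
        # mostly horizontally, values near 1 mostly vertically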
dirn *= pi / 2
for c, amount in data['colours']:
a, b = divmod(amount, 1)
amount = int(a) + (1 if random() < b else 0)
while amount > 0:
size = randint(1, max_size)
amount -= size
angle = random() * 2 * pi
speed = max_speed * expovariate(5)
v = (speed * cos(dirn) * cos(angle), speed * sin(dirn) * sin(angle))
life = int(random() * max_life)
if life > 0:
particles.append((c, tuple(pos), v, size, life))
self.particles.append((k, j, particles))
def start_fading (self, cb):
if not self.fading:
self.fading = True
self.fade_counter = conf.FADE_TIME
self.fade_sfc = pg.Surface(conf.RES).convert_alpha()
self.fade_cb = cb
def update_jitter (self, jitter):
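        # jitter starts as [max_x, max_y, period]; it is extended in place to
        # [max_x, max_y, period, offset_x, offset_y, countdown], and the
        # offsets are re-randomised whenever the countdown reaches zero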
if len(jitter) == 3:
jx, jy, t0 = jitter
t = t0
ox, oy = randint(0, jx), randint(0, jy)
jitter += [ox, oy, t]
else:
jx, jy, t0, ox, oy, t = jitter
if t == 0:
ox, oy = randint(0, jx), randint(0, jy)
jitter[3] = ox
jitter[4] = oy
jitter[5] = t0
jitter[5] -= 1
def draw (self, screen):
# don't draw on last frame
#if not self.game.running:
#return False
imgs = self.imgs
w = self.window
pl = self.player
pl.pre_draw()
# background
jitter = self.void_jitter
self.update_jitter(jitter)
ox, oy = jitter[3], jitter[4]
img = imgs['void']
draw_all = jitter[5] == conf.VOID_JITTER_T - 1 or self.fading or self.paused
if self.paused:
self.paused = False
if draw_all:
tile(screen, img, (0, 0) + screen.get_size(), ox, oy)
else:
draw_rects = self.particle_rects + [self.total_window, self.goal_img]
if self.first_dying or not self.dying:
draw_rects.append(pl.rect_img.union(pl.old_rect_img))
for r in draw_rects:
tile(screen, img, r, ox, oy, (0, 0))
# vrects
img = imgs['vrect']
for r in self.all_vrects:
tile(screen, img, r)
# window
offset = (-w[0], -w[1])
w_sfc = self.window_sfc
# window background: static images
for img in self.bgs:
if isinstance(img, str):
pos = (0, 0)
else:
img, pos = img
w_sfc.blit(imgs[img], Rect(pos + (0, 0)).move(offset))
# clouds
for c, (p, v, s) in zip(conf.CLOUDS, self.clouds):
w_sfc.blit(imgs[c], Rect(self.to_screen(p + [0, 0])).move(offset))
# rects in window
img = imgs['rect']
for r, r_full in zip(self.rects, self.draw_rects):
tile(w_sfc, img, r.move(offset), full = r_full.move(offset))
# checkpoints
for i, r in enumerate(self.checkpoints):
img = imgs['checkpoint' + ('-current' if i == self.current_cp else '')]
w_sfc.blit(img, r.move(offset))
# window border
w_sfc.blit(imgs['window'], (0, 0), None, pg.BLEND_RGBA_MULT)
# copy window area to screen
screen.blit(w_sfc, w)
# arects
img = imgs['arect']
for r in self.arects:
tile(screen, img, r)
# goal
screen.blit(imgs['goal'], self.goal_img)
# stars
for s in self.stars:
if not s.got:
s.draw(screen, (0, 0))
# player
if not self.dying:
pl.draw(screen)
# particles
for k, j, g in self.particles:
for c, p, v, size, t in g:
screen.fill(c, p + (size, size))
# fadeout
if self.fading:
t = conf.FADE_TIME - self.fade_counter
alpha = conf.FADE_RATE * float(t) / conf.FADE_TIME
alpha = min(255, ir(alpha))
self.fade_sfc.fill((0, 0, 0, alpha))
screen.blit(self.fade_sfc, (0, 0))
draw_all = True
if self.first_dying:
self.first_dying = False
if draw_all:
return True
else:
return draw_rects + self.arects
|
ikn/wvoas
|
game/level.py
|
Python
|
gpl-3.0
| 21,546
| 0.003063
|