| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
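Each row stores one source file split into `prefix`, `middle`, and `suffix` segments (a fill-in-the-middle layout), alongside its repository metadata. A minimal sketch, assuming a row is available as a plain Python dict keyed by the column names above (the loading mechanism itself is not specified here), shows how the full file text can be reassembled by concatenating the three segments; the segment values are truncated stand-ins taken from the first row below.

```python
# Minimal sketch: reassemble a source file from its fill-in-the-middle segments.
# Assumes `row` is a plain dict keyed by the column names listed above;
# the prefix/middle/suffix values here are truncated placeholders, not full cells.

def reassemble(row):
    """Concatenate prefix + middle + suffix back into the full file text."""
    return row["prefix"] + row["middle"] + row["suffix"]

row = {
    "repo_name": "pridkett/pyexiv2",
    "path": "test/rational.py",
    "language": "Python",
    "license": "gpl-2.0",
    "size": 3750,
    "score": 0.0,
    "prefix": "# -*- coding: utf-8 -*-\n...\n        except AttributeError:\n",
    "middle": "            pass\n",
    "suffix": "        else:\n            self.fail('Denominator is not read-only.')\n...",
}

source = reassemble(row)
print(row["repo_name"], row["path"], len(source), "chars")
```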
| pridkett/pyexiv2 | test/rational.py | Python | gpl-2.0 | 3,750 | 0 |
# -*- coding: utf-8 -*-
# ******************************************************************************
#
# Copyright (C) 2008-2010 Olivier Tilloy <olivier@tilloy.net>
#
# This file is part of the pyexiv2 distribution.
#
# pyexiv2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# pyexiv2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyexiv2; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, 5th Floor, Boston, MA 02110-1301 USA.
#
# Author: Olivier Tilloy <olivier@tilloy.net>
#
# ******************************************************************************
import unittest
from pyexiv2.utils import Rational
class TestRational(unittest.TestCase):
def test_constructor(self):
r = Rational(2, 1)
self.assertEqual(r.numerator, 2)
self.assertEqual(r.denominator, 1)
self.assertRaises(ZeroDivisionError, Rational, 1, 0)
def test_read_only(self):
r = Rational(3, 4)
try:
r.numerator = 5
except AttributeError:
pass
else:
self.fail('Numerator is not read-only.')
try:
r.denominator = 5
except AttributeError:
pass
else:
self.fail('Denominator is not read-only.')
def test_match_string(self):
self.assertEqual(Rational.match_string('4/3'), (4, 3))
self.assertEqual(Rational.match_string('-4/3'), (-4, 3))
self.assertEqual(Rational.match_string('0/3'), (0, 3))
self.assertEqual(Rational.match_string('0/0'), (0, 0))
self.assertRaises(ValueError, Rational.match_string, '+3/5')
self.assertRaises(ValueError, Rational.match_string, '3 / 5')
self.assertRaises(ValueError, Rational.match_string, '3/-5')
self.assertRaises(ValueError, Rational.match_string, 'invalid')
def test_from_string(self):
self.assertEqual(Rational.from_string('4/3'), Rational(4, 3))
self.assertEqual(Rational.from_string('-4/3'), Rational(-4, 3))
self.assertRaises(ValueError, Rational.from_string, '+3/5')
self.assertRaises(ValueError, Rational.from_string, '3 / 5')
self.assertRaises(ValueError, Rational.from_string, '3/-5')
self.assertRaises(ValueError, Rational.from_string, 'invalid')
self.assertRaises(ZeroDivisionError, Rational.from_string, '1/0')
self.assertRaises(ZeroDivisionError, Rational.from_string, '0/0')
def test_to_string(self):
self.assertEqual(str(Rational(3, 5)), '3/5')
self.assertEqual(str(Rational(-3, 5)), '-3/5')
def test_repr(self):
self.assertEqual(repr(Rational(3, 5)), 'Rational(3, 5)')
self.assertEqual(repr(Rational(-3, 5)), 'Rational(-3, 5)')
self.assertEqual(repr(Rational(0, 3)), 'Rational(0, 3)')
def test_to_float(self):
self.assertEqual(Rational(3, 6).to_float(), 0.5)
self.assertEqual(Rational(11, 11).to_float(), 1.0)
self.assertEqual(Rational(-2, 8).to_float(), -0.25)
self.assertEqual(Rational(0, 3).to_float(), 0.0)
def test_equality(self):
r1 = Rational(2, 1)
r2 = Rational(2, 1)
r3 = Rational(8, 4)
r4 = Rational(3, 2)
self.assertEqual(r1, r2)
self.assertEqual(r1, r3)
self.assertNotEqual(r1, r4)
| pcmoritz/ray-1 | rllib/tests/test_exec_api.py | Python | apache-2.0 | 2,301 | 0 |
import unittest
import ray
from ray.rllib.agents.a3c import A2CTrainer
from ray.rllib.execution.common import STEPS_SAMPLED_COUNTER, \
STEPS_TRAINED_COUNTER
from ray.rllib.utils.test_utils import framework_iterator
class TestDistributedExecution(unittest.TestCase):
"""General tests for the distributed execution API."""
@classmethod
def setUpClass(cls):
ray.init(num_cpus=4)
@classmethod
def tearDownClass(cls):
ray.shutdown()
def test_exec_plan_stats(ray_start_regular):
for fw in framework_iterator(frameworks=("torch", "tf")):
trainer = A2CTrainer(
env="CartPole-v0",
config={
"min_iter_time_s": 0,
"framework": fw,
})
result = trainer.train()
assert isinstance(result, dict)
assert "info" in result
assert "learner" in result["info"]
assert STEPS_SAMPLED_COUNTER in result["info"]
assert STEPS_TRAINED_COUNTER in result["info"]
assert "timers" in result
assert "learn_time_ms" in result["timers"]
assert "learn_throughput" in result["timers"]
assert "sample_time_ms" in result["timers"]
assert "sample_throughput" in result["timers"]
assert "update_time_ms" in result["timers"]
def test_exec_plan_save_restore(ray_start_regular):
for fw in framework_iterator(frameworks=("torch", "tf")):
trainer = A2CTrainer(
env="CartPole-v0",
config={
"min_iter_time_s": 0,
"framework": fw,
})
res1 = trainer.train()
checkpoint = trainer.save()
for _ in range(2):
res2 = trainer.train()
assert res2["timesteps_total"] > res1["timesteps_total"], \
(res1, res2)
trainer.restore(checkpoint)
# Should restore the timesteps counter to the same as res2.
res3 = trainer.train()
assert res3["timesteps_total"] < res2["timesteps_total"], \
(res2, res3)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| funbaker/astropy | astropy/samp/tests/test_standard_profile.py | Python | bsd-3-clause | 8,591 | 0.001048 |
import ssl
import tempfile
import pytest
from ...utils.data import get_pkg_data_filename
from ..hub import SAMPHubServer
from ..integrated_client import SAMPIntegratedClient
from ..errors import SAMPProxyError
# By default, tests should not use the internet.
from .. import conf
from .test_helpers import random_params, Receiver, assert_output, TEST_REPLY
def setup_module(module):
conf.use_internet = False
class TestStandardProfile:
@property
def hub_init_kwargs(self):
return {}
@property
def client_init_kwargs(self):
return {}
@property
def client_connect_kwargs(self):
return {}
def setup_method(self, method):
self.tmpdir = tempfile.mkdtemp()
self.hub = SAMPHubServer(web_profile=False, mode='multiple', pool_size=1,
**self.hub_init_kwargs)
self.hub.start()
self.client1 = SAMPIntegratedClient(**self.client_init_kwargs)
self.client1.connect(hub=self.hub, pool_size=1, **self.client_connect_kwargs)
self.client2 = SAMPIntegratedClient(**self.client_init_kwargs)
self.client2.connect(hub=self.hub, pool_size=1, **self.client_connect_kwargs)
def teardown_method(self, method):
if self.client1.is_connected:
self.client1.disconnect()
if self.client2.is_connected:
self.client2.disconnect()
self.hub.stop()
def test_main(self):
self.client1_id = self.client1.get_public_id()
self.client2_id = self.client2.get_public_id()
self.metadata1 = {"samp.name": "Client 1",
"samp.description.text": "Client 1 Description",
"client.version": "1.1"}
self.metadata2 = {"samp.name": "Client 2",
"samp.description.text": "Client 2 Description",
"client.version": "1.2"}
# Check that the clients are connected
assert self.client1.is_connected
assert self.client2.is_connected
# Check that ping works
self.client1.ping()
self.client2.ping()
# Check that get_registered_clients works as expected.
assert self.client1_id not in self.client1.get_registered_clients()
assert self.client2_id in self.client1.get_registered_clients()
assert self.client1_id in self.client2.get_registered_clients()
assert self.client2_id not in self.client2.get_registered_clients()
# Check that get_metadata works as expected
assert self.client1.get_metadata(self.client1_id) == {}
assert self.client1.get_metadata(self.client2_id) == {}
assert self.client2.get_metadata(self.client1_id) == {}
assert self.client2.get_metadata(self.client2_id) == {}
self.client1.declare_metadata(self.metadata1)
assert self.client1.get_metadata(self.client1_id) == self.metadata1
assert self.client2.get_metadata(self.client1_id) == self.metadata1
assert self.client1.get_metadata(self.client2_id) == {}
assert self.client2.get_metadata(self.client2_id) == {}
self.client2.declare_metadata(self.metadata2)
assert self.client1.get_metadata(self.client1_id) == self.metadata1
assert self.client2.get_metadata(self.client1_id) == self.metadata1
assert self.client1.get_metadata(self.client2_id) == self.metadata2
assert self.client2.get_metadata(self.client2_id) == self.metadata2
# Check that, without subscriptions, sending a notification from one
# client to another raises an error.
message = {}
message['samp.mtype'] = "table.load.votable"
message['samp.params'] = {}
with pytest.raises(SAMPProxyError):
self.client1.notify(self.client2_id, message)
# Check that there are no currently active subscriptions
assert self.client1.get_subscribed_clients('table.load.votable') == {}
assert self.client2.get_subscribed_clients('table.load.votable') == {}
# We now test notifications and calls
rec1 = Receiver(self.client1)
rec2 = Receiver(self.client2)
self.client2.bind_receive_notification('table.load.votable',
rec2.receive_notification)
self.client2.bind_receive_call('table.load.votable',
rec2.receive_call)
self.client1.bind_receive_response('test-tag', rec1.receive_response)
# Check resulting subscriptions
assert self.client1.get_subscribed_clients('table.load.votable') == {self.client2_id: {}}
assert self.client2.get_subscribed_clients('table.load.votable') == {}
assert 'table.load.votable' in self.client1.get_subscriptions(self.client2_id)
assert 'table.load.votable' in self.client2.get_subscriptions(self.client2_id)
# Once we have finished with the calls and notifications, we will
# check the data got across correctly.
# Test notify
params = random_params(self.tmpdir)
self.client1.notify(self.client2.get_public_id(),
{'samp.mtype': 'table.load.votable',
'samp.params': params})
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
params = random_params(self.tmpdir)
self.client1.enotify(self.client2.get_public_id(),
"table.load.votable", **params)
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
# Test notify_all
params = random_params(self.tmpdir)
self.client1.notify_all({'samp.mtype': 'table.load.votable',
'samp.params': params})
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
params = random_params(self.tmpdir)
self.client1.enotify_all("table.load.votable", **params)
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
# Test call
params = random_params(self.tmpdir)
self.client1.call(self.client2.get_public_id(), 'test-tag',
{'samp.mtype': 'table.load.votable',
'samp.params': params})
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
params = random_params(self.tmpdir)
self.client1.ecall(self.client2.get_public_id(), 'test-tag',
"table.load.votable", **params)
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
# Test call_all
params = random_params(self.tmpdir)
self.client1.call_all('tag1',
{'samp.mtype': 'table.load.votable',
'samp.params': params})
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
params = random_params(self.tmpdir)
self.client1.ecall_all('tag2',
"table.load.votable", **params)
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
# Test call_and_wait
params = random_params(self.tmpdir)
result = self.client1.call_and_wait(self.client2.get_public_id(),
{'samp.mtype': 'table.load.votable',
'samp.params': params}, timeout=5)
assert result == TEST_REPLY
assert_output('table.load.votable', self.client2.get_private_key(),
self.client1_id, params, timeout=60)
params = random_params(self.t
| Rene90/dl4nlp | hw6_babi_qa/babi2.py | Python | mit | 6,138 | 0.006191 |
#!/usr/bin/python
#
# author:
#
# date:
# description:
#
'''Trains a memory network on the bAbI dataset.
References:
- Jason Weston, Antoine Bordes, Sumit Chopra, Tomas Mikolov, Alexander M. Rush,
"Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks",
http://arxiv.org/abs/1502.05698
- Sainbayar Sukhbaatar, Arthur Szlam, Jason Weston, Rob Fergus,
"End-To-End Memory Networks",
http://arxiv.org/abs/1503.08895
Reaches 98.6% accuracy on task 'single_supporting_fact_10k' after 120 epochs.
Time per epoch: 3s on CPU (core i7).
'''
from __future__ import print_function
from keras.models import Sequential
from keras.layers.embeddings import Embedding
from keras.layers import Activation, Dense, Merge, Permute, Dropout
from keras.layers import LSTM, SimpleRNN, Input
from keras.layers.core import Flatten
from keras.utils.data_utils import get_file
from functools import reduce
import tarfile
from data import get_stories, vectorize_stories
path = 'babi-tasks-v1-2.tar.gz'
#origin='http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2.tar.gz')
tar = tarfile.open(path)
challenges = {
# QA1 with 10,000 samples
'single_supporting_fact_10k': 'tasks_1-20_v1-2/en-10k/qa1_single-supporting-fact_{}.txt',
# QA2 with 10,000 samples
'two_supporting_facts_10k': 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_{}.txt',
}
challenge_type = 'single_supporting_fact_10k'
challenge = challenges[challenge_type]
print('Extracting stories for the challenge:', challenge_type)
train_stories = get_stories(tar.extractfile(challenge.format('train')))
test_stories = get_stories(tar.extractfile(challenge.format('test')))
vocab = sorted(reduce(lambda x, y: x | y, (set(story + q + [a1]) for story, q, a1 in train_stories + test_stories)))
# Reserve 0 for masking via pad_sequences
vocab_size = len(vocab) + 1
story_maxlen = max(map(len, (x for x, _, _ in train_stories + test_stories)))
query_maxlen = max(map(len, (x for _, x, _ in train_stories + test_stories)))
print('-')
print('Vocab size:', vocab_size, 'unique words')
print('Story max length:', story_maxlen, 'words')
print('Query max length:', query_maxlen, 'words')
print('Number of training stories:', len(train_stories))
print('Number of test stories:', len(test_stories))
print('-')
print('Here\'s what a "story" tuple looks like (input, query, a1):')
print(train_stories[0])
print('-')
print('Vectorizing the word sequences...')
word_idx = dict((c, i + 1) for i, c in enumerate(vocab))
inputs_train, queries_train, a1s_train = vectorize_stories(train_stories, word_idx, story_maxlen, query_maxlen)
inputs_test, queries_test, a1s_test = vectorize_stories(test_stories, word_idx, story_maxlen, query_maxlen)
print('-')
print('inputs: integer tensor of shape (samples, max_length)')
print('inputs_train shape:', inputs_train.shape)
print('inputs_test shape:', inputs_test.shape)
print('-')
print('queries: integer tensor of shape (samples, max_length)')
print('queries_train shape:', queries_train.shape)
print('queries_test shape:', queries_test.shape)
print('-')
print('a1s: binary (1 or 0) tensor of shape (samples, vocab_size)')
print('a1s_train shape:', a1s_train.shape)
print('a1s_test shape:', a1s_test.shape)
print('-')
print('Compiling...')
print(inputs_train.shape)
print(queries_train.shape)
X = Input(shape=(story_maxlen,), dtype="int32")
Q = Input(shape=(query_maxlen,), dtype="int32")
embedding_dim = story_maxlen
# embed the input sequence into a sequence of vectors
m1 = Sequential()
m1.add(Embedding(input_dim=vocab_size,
output_dim=embedding_dim,
input_length=story_maxlen)(X))
# output: (samples, story_maxlen, embedding_dim)
# embed the question into a sequence of vectors
u1 = Sequential()
u1.add(Embedding(input_dim=vocab_size,
output_dim=embedding_dim,
input_length=query_maxlen)(Q))
# output: (samples, query_maxlen, embedding_dim)
# compute a 'w1' between input sequence elements (which are vectors)
# and the question vector sequence
w1 = Sequential()
w1.add(Merge([m1, u1], mode='dot', dot_axes=[2, 2]))
#w1.add(Activation('softmax'))
# output: (samples, story_maxlen, query_maxlen)
# embed the input into a single vector with size = story_maxlen:
c1 = Sequential()
c1.add(Embedding(input_dim=vocab_size,
output_dim=query_maxlen,
input_length=story_maxlen)(X))
# output: (samples, story_maxlen, query_maxlen)
# sum the w1 vector with the input vector:
o1 = Sequential()
o1.add(Merge([w1, c1], mode='sum'))
# output: (samples, story_maxlen, query_maxlen)
o1.add(Permute((2, 1))) # output: (samples, query_maxlen, story_maxlen)
#u2 = Sequential()
#u2.add(Merge([o1, u1], mode='sum'))
#m2 = Sequential()
#m2.add(Embedding(input_dim=vocab_size,
#output_dim=embedding_dim,
#input_length=story_maxlen))
#w2 = Sequential()
#w2.add(Merge([m2, u2], mode='dot', dot_axes=[2, 2]))
#c2 = Sequential()
#c2.add(Embedding(input_dim=vocab_size,
#output_dim=query_maxlen,
#input_length=story_maxlen))
#o2 = Sequential()
#o2.add(Merge([w2, c2], mode='sum'))
#o2.add(Permute((2, 1)))
# concatenate the w1 vector with the question vector,
# and do logistic regression on top
a1 = Sequential()
a1.add(Merge([o1, u1], mode='sum'))
a1.add(Flatten()) # why not in original format?
# one regularization layer -- more would probably be needed.
a1.add(Dense(vocab_size))
# we output a probability distribution over the vocabulary
a1.add(Activation('softmax'))
a1.compile(optimizer='adam', loss='categorical_crossentropy',
metrics=['accuracy'])
# Note: you could use a Graph model to avoid repeat the input twice
a1.fit([inputs_train, queries_train], a1s_train,
batch_size=512,
nb_epoch=10,
validation_data=([inputs_test, queries_test], a1s_test))
from keras.utils.visualize_util import plot
if __name__ == "__main__" and False:
plot(a1, to_file='model.png')
json_model = a1.to_json()
with open("model.json", "w") as fh:
fh.write(json_model)
a1.save_weights("rnn_weights.h5")
| yast/yast-python-bindings | examples/Heading2.py | Python | gpl-2.0 | 361 | 0.01662 |
# encoding: utf-8
from yast import import_module
import_module('UI')
from yast import *
class Heading2Client:
def main(self):
UI.OpenDialog(
VBox(
Heading("This Is a Heading."),
Label("This is a Label."),
PushButton("&OK")
)
)
UI.UserInput()
UI.CloseDialog()
Heading2Client().main()
| bdacode/hoster | hoster/junocloud_me.py | Python | gpl-3.0 | 3,651 | 0.003561 |
# -*- coding: utf-8 -*-
"""Copyright (C) 2013 COLDWELL AG
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import time
from ... import hoster
@hoster.host
class this:
model = hoster.HttpPremiumHoster
name = 'junocloud.me'
patterns = [
hoster.Matcher('https?', '*.junocloud.me', '!/<id>'),
]
max_filesize_free = hoster.GB(2)
max_filesize_premium = hoster.GB(2)
url_template = 'http://junocloud.me/{id}'
login_url = 'http://junocloud.me/login.html'
account_url = 'http://junocloud.me/account.html'
def boot_account(account):
account.set_user_agent()
account.cookies["lang"] = "english"
if account.username is None:
return
data = {
'op': 'login',
'redirect': this.account_url,
'login': account.username,
'password': account.password,
'loginFormSubmit': 'Login',
}
resp = account.post(this.login_url, data=data)
if resp.url != this.account_url:
account.login_failed()
return
return resp
def on_initialize_account(account):
resp = boot_account(account)
if resp:
status = resp.soup.find('div', text=lambda a: 'Status:' in a if a else False).find_next('div').find('strong').text.strip()
if status != 'Premium':
account.premium = False
return
raise NotImplementedError('premium is not implemented')
def check_errors(ctx, resp):
if 'The origin web server timed out responding to this request.' in resp.text:
ctx.maintenance(180)
h1 = resp.soup.find('h1')
if h1:
if 'File Not Found' in h1.text or '404 Not Found' in h1.text:
ctx.set_offline()
def on_check_http(file, resp):
check_errors(file, resp)
name = resp.soup.find('input', attrs={'name': 'fname'}).get('value').strip()
size = resp.soup.find('p', 'request_filesize').text.strip().split(' ', 1)[1].strip()
file.set_infos(name=name, size=size)
def on_download_premium(chunk):
raise NotImplementedError('premium is untested')
def on_download_free(chunk):
resp = chunk.account.get(chunk.url, use_cache=True)
check_errors(chunk, resp)
resp = hoster.xfilesharing_download(resp, 1)[0]()
check_errors(chunk, resp)
m = re.search('You have to wait (.*?) till next download', resp.text)
if m:
wait = hoster.parse_seconds2(m.group(1)) + time.time()
if wait > 300:
chunk.ip_blocked(wait)
submit, data = hoster.xfilesharing_download(resp, 2)
wait = resp.soup.find('span', id='uglrto')
if wait:
wait = int(wait.text.strip().rsplit(' ', 1)[1]) + time.time()
for result, challenge in chunk.solve_captcha('recaptcha', parse=resp.text, retries=5):
data['recaptcha_challenge_field'] = challenge
data['recaptcha_response_field'] = result
if wait and wait - time.time() > 0:
chunk.wait(wait - time.time())
resp = submit(allow_redirects=False)
if resp.status_code == 302:
return resp.headers['Location']
check_errors(chunk, resp)
| matt-deboer/marathon-lb | zdd_exceptions.py | Python | apache-2.0 | 2,721 | 0 |
""" Exit Status 1 is already used in the script.
Zdd returns with exit status 1 when app is not force
deleted either through argument or through prompt.
Exit Status 2 is used for Unknown Exceptions.
"""
class InvalidArgException(Exception):
""" This exception indicates invalid combination of arguments
passed to zdd"""
def __init__(self, msg):
super(InvalidArgException, self).__init__(msg)
self.error = msg
self.zdd_exit_status = 3
class MissingFieldException(Exception):
""" This exception indicates required fields which are missing
in JSON payload passed to zdd"""
def __init__(self, msg, field):
super(MissingFieldException, self).__init__(msg)
self.error = msg
self.missing_field = field
self.zdd_exit_status = 4
class MarathonLbEndpointException(Exception):
""" This exception indicates an issue with one of the marathonlb
endpoints specified as argument to Zdd"""
def __init__(self, msg, url, error):
super(MarathonLbEndpointException, self).__init__(msg)
self.msg = msg
self.url = url
self.error = error
self.zdd_exit_status = 5
class MarathonEndpointException(Exception):
""" This exception indicates an issue with the marathon endpoint
specified as argument to Zdd"""
def __init__(self, msg, url, error):
super(MarathonEndpointException, self).__init__(msg)
self.msg = msg
self.url = url
self.error = error
self.zdd_exit_status = 6
class AppCreateException(Exception):
""" This exception indicates there was an error while creating the
new App and hence it was not created."""
def __init__(self, msg, url, payload, error):
super(AppCreateException, self).__init__(msg)
self.msg = msg
self.error = error
self.url = url
self.payload = payload
self.zdd_exit_status = 7
class AppDeleteException(Exception):
""" This exception indicates there was an error while deleting the
old App and hence it was not deleted """
def __init__(self, msg, url, appid, error):
super(AppDeleteException, self).__init__(msg)
self.msg = msg
self.error = error
self.url = url
self.zdd_exit_status = 8
class AppScaleException(Exception):
""" This exception indicates there was an error while either scaling up
new app or while scaling down old app"""
def __init__(self, msg, url, payload, error):
super(AppScaleException, self).__init__(msg)
self.msg = msg
self.error = error
self.url = url
self.payload = payload
self.zdd_exit_status = 9
| dadasoz/dj-translate | autotranslate/views.py | Python | mit | 22,363 | 0.002862 |
from django.conf import settings
from django.contrib.auth.decorators import user_passes_test
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.shortcuts import render
from django.utils.encoding import iri_to_uri
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.cache import never_cache
from django.utils.encoding import force_text
from django.contrib import messages
from microsofttranslator import Translator, TranslateApiException
from autotranslate.conf import settings as autotranslate_settings
from polib import pofile
from autotranslate.poutil import find_pos, pagination_range, timestamp_with_timezone
from autotranslate.signals import entry_changed, post_save
from autotranslate.storage import get_storage
from autotranslate.access import can_translate, can_translate_language
import json
import re
import autotranslate
import unicodedata
import hashlib
import os
import six
@never_cache
@user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)
def home(request):
"""
Displays a list of messages to be translated
"""
def fix_nls(in_, out_):
"""Fixes submitted translations by filtering carriage returns and pairing
newlines at the beginning and end of the translated string with the original
"""
if 0 == len(in_) or 0 == len(out_):
return out_
if "\r" in out_ and "\r" not in in_:
out_ = out_.replace("\r", '')
if "\n" == in_[0] and "\n" != out_[0]:
out_ = "\n" + out_
elif "\n" != in_[0] and "\n" == out_[0]:
out_ = out_.lstrip()
if 0 == len(out_):
pass
elif "\n" == in_[-1] and "\n" != out_[-1]:
out_ = out_ + "\n"
elif "\n" != in_[-1] and "\n" == out_[-1]:
out_ = out_.rstrip()
return out_
def _request_request(key, default=None):
if key in request.GET:
return request.GET.get(key)
elif key in request.POST:
return request.POST.get(key)
return default
storage = get_storage(request)
query = ''
if storage.has('autotranslate_i18n_fn'):
autotranslate_i18n_fn = storage.get('autotranslate_i18n_fn')
autotranslate_i18n_app = get_app_name(autotranslate_i18n_fn)
autotranslate_i18n_lang_code = storage.get('autotranslate_i18n_lang_code')
autotranslate_i18n_lang_bidi = autotranslate_i18n_lang_code.split('-')[0] in settings.LANGUAGES_BIDI
autotranslate_i18n_write = storage.get('autotranslate_i18n_write', True)
if autotranslate_i18n_write:
autotranslate_i18n_pofile = pofile(autotranslate_i18n_fn, wrapwidth=autotranslate_settings.POFILE_WRAP_WIDTH)
for entry in autotranslate_i18n_pofile:
entry.md5hash = hashlib.md5(
(six.text_type(entry.msgid) +
six.text_type(entry.msgstr) +
six.text_type(entry.msgctxt or "")).encode('utf8')
).hexdigest()
else:
autotranslate_i18n_pofile = storage.get('autotranslate_i18n_pofile')
if 'filter' in request.GET:
if request.GET.get('filter') in ('untranslated', 'translated', 'fuzzy', 'all'):
filter_ = request.GET.get('filter')
storage.set('autotranslate_i18n_filter', filter_)
return HttpResponseRedirect(reverse('autotranslate-home'))
autotranslate_i18n_filter = storage.get('autotranslate_i18n_filter', 'all')
if '_next' in request.POST:
rx = re.compile(r'^m_([0-9a-f]+)')
rx_plural = re.compile(r'^m_([0-9a-f]+)_([0-9]+)')
file_change = False
for key, value in request.POST.items():
md5hash = None
plural_id = None
if rx_plural.match(key):
md5hash = str(rx_plural.match(key).groups()[0])
# polib parses .po files into unicode strings, but
# doesn't bother to convert plural indexes to int,
# so we need unicode here.
plural_id = six.text_type(rx_plural.match(key).groups()[1])
# Above no longer true as of Polib 1.0.4
if plural_id and plural_id.isdigit():
plural_id = int(plural_id)
elif rx.match(key):
md5hash = str(rx.match(key).groups()[0])
if md5hash is not None:
entry = autotranslate_i18n_pofile.find(md5hash, 'md5hash')
# If someone did a makemessage, some entries might
# have been removed, so we need to check.
if entry:
old_msgstr = entry.msgstr
if plural_id is not None:
plural_string = fix_nls(entry.msgid_plural, value)
entry.msgstr_plural[plural_id] = plural_string
else:
entry.msgstr = fix_nls(entry.msgid, value)
is_fuzzy = bool(request.POST.get('f_%s' % md5hash, False))
old_fuzzy = 'fuzzy' in entry.flags
if old_fuzzy and not is_fuzzy:
entry.flags.remove('fuzzy')
elif not old_fuzzy and is_fuzzy:
entry.flags.append('fuzzy')
file_change = True
if old_msgstr != value or old_fuzzy != is_fuzzy:
entry_changed.send(sender=entry,
user=request.user,
old_msgstr=old_msgstr,
old_fuzzy=old_fuzzy,
pofile=autotranslate_i18n_fn,
language_code=autotranslate_i18n_lang_code,
)
else:
storage.set('autotranslate_last_save_error', True)
if file_change and autotranslate_i18n_write:
try:
autotranslate_i18n_pofile.metadata['Last-Translator'] = unicodedata.normalize('NFKD', u"%s %s <%s>" % (
getattr(request.user, 'first_name', 'Anonymous'),
getattr(request.user, 'last_name', 'User'),
getattr(request.user, 'email', 'anonymous@user.tld')
)).encode('ascii', 'ignore')
autotranslate_i18n_pofile.metadata['X-Translated-Using'] = u"dj-translate %s" % autotranslate.get_version(False)
autotranslate_i18n_pofile.metadata['PO-Revision-Date'] = timestamp_with_timezone()
except UnicodeDecodeError:
pass
try:
autotranslate_i18n_pofile.save()
po_filepath, ext = os.path.splitext(autotranslate_i18n_fn)
if autotranslate_settings.AUTO_COMPILE:
save_as_mo_filepath = po_filepath + '.mo'
autotranslate_i18n_pofile.save_as_mofile(save_as_mo_filepath)
post_save.send(sender=None, language_code=autotranslate_i18n_lang_code, request=request)
# Try auto-reloading via the WSGI daemon mode reload mechanism
if autotranslate_settings.WSGI_AUTO_RELOAD and \
'mod_wsgi.process_group' in request.environ and \
request.environ.get('mod_wsgi.process_group', None) and \
'SCRIPT_FILENAME' in request.environ and \
int(request.environ.get('mod_wsgi.script_reloading', '0')):
try:
os.utime(request.environ.get('SCRIPT_FILENAME'), None)
except OSE
| sjperkins/tensorflow | tensorflow/contrib/__init__.py | Python | apache-2.0 | 3,181 | 0 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""contrib module containing volatile or experimental code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Add projects here, they will show up under tf.contrib.
from tensorflow.contrib import bayesflow
from tensorflow.contrib import cloud
from tensorflow.contrib import compiler
from tensorflow.contrib import copy_graph
from tensorflow.contrib import crf
from tensorflow.contrib import cudnn_rnn
from tensorflow.contrib import data
from tensorflow.contrib import deprecated
from tensorflow.contrib import distributions
from tensorflow.contrib import factorization
from tensorflow.contrib import framework
from tensorflow.contrib import graph_editor
from tensorflow.contrib import grid_rnn
from tensorflow.contrib import image
from tensorflow.contrib import input_pipeline
from tensorflow.contrib import integrate
from tensorflow.contrib import keras
from tensorflow.contrib import kernel_methods
from tensorflow.contrib import labeled_tensor
from tensorflow.contrib import layers
from tensorflow.contrib import learn
from tensorflow.contrib import legacy_seq2seq
from tensorflow.contrib import linalg
from tensorflow.contrib import linear_optimizer
from tensorflow.contrib import lookup
from tensorflow.contrib import losses
from tensorflow.contrib import memory_stats
from tensorflow.contrib import metrics
from tensorflow.contrib import nccl
from tensorflow.contrib import nn
from tensorflow.contrib import opt
from tensorflow.contrib import quantization
from tensorflow.contrib import rnn
from tensorflow.contrib import saved_model
from tensorflow.contrib import seq2seq
from tensorflow.contrib import signal
from tensorflow.contrib import slim
from tensorflow.contrib import solvers
from tensorflow.contrib import sparsemax
from tensorflow.contrib import staging
from tensorflow.contrib import stat_summarizer
from tensorflow.contrib import stateless
from tensorflow.contrib import tensor_forest
from tensorflow.contrib import tensorboard
from tensorflow.contrib import testing
from tensorflow.contrib import tfprof
from tensorflow.contrib import training
from tensorflow.contrib import util
from tensorflow.contrib.ndlstm import python as ndlstm
from tensorflow.contrib.specs import python as specs
from tensorflow.python.util.lazy_loader import LazyLoader
ffmpeg = LazyLoader("ffmpeg", globals(),
"tensorflow.contrib.ffmpeg")
del LazyLoader
del absolute_import
del division
del print_function
| studiawan/pygraphc | pygraphc/abstraction/AbstractionUtility.py | Python | mit | 2,712 | 0.001475 |
import json
class AbstractionUtility(object):
@staticmethod
def read_json(json_file):
# read json data
with open(json_file, 'r') as f:
data = json.load(f)
# change json key string to int
converted_data = {}
for key, value in data.iteritems():
converted_data[int(key)] = value
return converted_data
@staticmethod
def write_perabstraction(final_abstraction, log_file, perabstraction_file):
# read log file
with open(log_file, 'r') as f:
logs = f.readlines()
# write logs per abstraction to file
f_perabstraction = open(perabstraction_file, 'w')
for abstraction_id, abstraction in final_abstraction.iteritems():
f_perabstraction.write('Abstraction #' + str(abstraction_id) + ' ' + abstraction['abstraction'] + '\n')
for line_id in abstraction['original_id']:
f_perabstraction.write(str(line_id) + ' ' + logs[line_id])
f_perabstraction.write('\n')
f_perabstraction.close()
@staticmethod
def write_perline(final_abstraction, log_file, perline_file):
# read log file
with open(log_file, 'r') as f:
logs = f.readlines()
# get line id and abstraction id
abstraction_label = {}
for abstraction_id, abstraction in final_abstraction.iteritems():
for line_id in abstraction['original_id']:
abstraction_label[line_id] = abstraction_id
# write log per line with abstraction id
f_perline = open(perline_file, 'w')
for line_id, log in enumerate(logs):
f_perline.write(str(abstraction_label[line_id]) + '; ' + log)
f_perline.close()
@staticmethod
def get_abstractionid_from_groundtruth(logid_abstractionid_file, abstractions):
# read ground truth
abstraction_groundtruth = AbstractionUtility.read_json(logid_abstractionid_file)
groundtruth_length = len(abstraction_groundtruth.keys())
abstractions_edited_id = {}
for abstraction_id, abstraction in abstractions.iteritems():
# if abstraction exist in ground truth, get id from dictionary key
if abstraction['abstraction'] in abstraction_groundtruth.values():
new_id = \
abstraction_groundtruth.keys()[abstraction_groundtruth.values().index(abstraction['abstraction'])]
# if not exist, new id is dictionary length + 1
else:
new_id = groundtruth_length
groundtruth_length += 1
abstractions_edited_id[new_id] = abstraction
return abstractions_edited_id
| christianurich/VIBe2UrbanSim | 3rdparty/opus/src/sanfrancisco/zone/SSS_travel_time_to_DDD.py | Python | gpl-2.0 | 1,871 | 0.017103 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from urbansim.abstract_variables.abstract_travel_time_variable_for_non_interaction_dataset import abstract_travel_time_variable_for_non_interaction_dataset
class SSS_travel_time_to_DDD(abstract_travel_time_variable_for_non_interaction_dataset):
"""Travel time by mode SSS to the zone whose ID is the DDD.
"""
default_value = 999
origin_zone_id = 'zone.zone_id'
def __init__(self, mode, number):
self.travel_data_attribute = "travel_data.%s" % mode
self.destination_zone_id = "destination_zone_id=%s+0*zone.zone_id" % number
abstract_travel_time_variable_for_non_interaction_dataset.__init__(self)
from opus_core.tests import opus_unittest
from numpy import array, arange
from opus_core.tests.utils.variable_tester import VariableTester
class Tests(opus_unittest.OpusTestCase):
def do(self,sss, ddd, should_be):
tester = VariableTester(
__file__,
package_order=['urbansim'],
test_data={
"zone":{
"zone_id":array([1,3])},
"travel_data":{
"from_zone_id":array([3,3,1,1]),
"to_zone_id":array([1,3,1,3]),
sss:array([1.1, 2.2, 3.3, 4.4])}
}
)
instance_name = "sanfrancisco.zone.%s_travel_time_to_%s" % (sss, ddd)
tester.test_is_close_for_family_variable(self, should_be, instance_name)
def test_to_1(self):
should_be = array([3.3, 1.1])
self.do('hwy', 1, should_be)
def test_to_3(self):
should_be = array([4.4, 2.2])
self.do('bart', 3, should_be)
if __name__=='__main__':
opus_unittest.main()
| llambeau/finitio.py | tests/type/set_type/test_low.py | Python | isc | 619 | 0.001616 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_equality
----------------------------------
Tests for the `SetType` low() method
"""
import unittest
from finitio.types import SetType, BuiltinType, Type
builtin_string = BuiltinType(str)
class TestSetTypeLow(unittest.TestCase):
class HighType(Type):
def low(self):
return builtin_string
subject = SetType(HighType(""))
def test_equals_itself(self):
expected = SetType(builtin_string)
self.assertEqual(self.subject.low(), expected)
if __name__ == '__main__':
import sys
sys.exit(unittest.main())
| strogo/djpcms | djpcms/models.py | Python | bsd-3-clause | 354 | 0.019774 |
from djpcms import sites
if sites.settings.CMS_ORM == 'django':
from djpcms.core.cmsmodels._django import *
elif sites.settings.CMS_ORM == 'stdnet':
from djpcms.core.cmsmodels._stdnet import *
else:
raise NotImplementedError('Object Relational Mapper {0} not available for CMS models'.format(sites.settings.CMS_ORM))
| ahhz/raster | benchmarks/benchmark_3_layers_arcpy.py | Python | mit | 478 | 0.004184 |
import time
import arcpy
from arcpy import env
from arcpy.sa import *
# Set environment settings
env.workspace = "" # set your workspace
arcpy.env.overwriteOutput = True
# Check out the ArcGIS Spatial Analyst extension license
arcpy.CheckOutExtension("Spatial")
tic = time.clock()
a_file = "random_a.tif"
b_file = "random_b.tif"
c_file = "random_c.tif"
out_file = "output.tif"
a = Raster(a_file)
b = Raster(b_file)
c = Raster(c_file)
out = 3 * a + b * c
out.save(out_file)
| emineKoc/WiseWit | wisewit_front_end/node_modules/pygmentize-bundled/vendor/pygments/pygments/styles/vs.py | Python | gpl-3.0 | 1,073 | 0 |
# -*- coding: utf-8 -*-
"""
pygments.styles.vs
~~~~~~~~~~~~~~~~~~
Simple style with MS Visual Studio colors.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Operator, Generic
class VisualStudioStyle(Style):
background_color = "#ffffff"
default_style = ""
styles = {
Comment: "#008000",
Comment.Preproc: "#0000ff",
Keyword: "#0000ff",
Operator.Word: "#0000ff",
Keyword.Type: "#2b91af",
Name.Class: "#2b91af",
String: "#a31515",
Generic.Heading: "bold",
Generic.Subheading: "bold",
Generic.Emph: "italic",
Generic.Strong: "bold",
Generic.Prompt: "bold",
Error: "border:#FF0000"
}
| haizawa/odenos | src/test/python/org/o3project/odenos/core/component/network/flow/basic/test_flow_action.py | Python | apache-2.0 | 1,561 | 0.001281 |
# -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation.
#
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at
#
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
from org.o3project.odenos.core.component.network.flow.basic.flow_action\
import FlowAction
import unittest
class FlowActionTest(unittest.TestCase):
Type = "FlowActionOutput"
def setUp(self):
self.target = FlowAction(self.Type)
def tearDown(self):
self.target = None
def test_constructor(self):
self.assertEqual(self.target._body[self.target.TYPE], self.Type)
def test_type(self):
self.assertEqual(self.target.type, self.Type)
if __name__ == '__main__':
unittest.main()
| acsone/multi-company | purchase_sale_inter_company/models/purchase_order.py | Python | agpl-3.0 | 9,730 | 0.000103 |
# -*- coding: utf-8 -*-
# © 2013-Today Odoo SA
# © 2016 Chafique DELLI @ Akretion
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import api, models, _, fields
from openerp.exceptions import Warning as UserError
class PurchaseOrder(models.Model):
_inherit = 'purchase.order'
invoice_method = fields.Selection(
selection_add=[('intercompany', 'Based on intercompany invoice')])
@api.multi
def wkf_confirm_order(self):
""" Generate inter company sale order base on conditions."""
res = super(PurchaseOrder, self).wkf_confirm_order()
for purchase_order in self:
# get the company from partner then trigger action of
# intercompany relation
dest_company = self.env['res.company']._find_company_from_partner(
purchase_order.partner_id.id)
if dest_company:
purchase_order.sudo().\
with_context(force_company=dest_company.id).\
_inter_company_create_sale_order(dest_company.id)
return res
@api.multi
def _get_user_domain(self, dest_company):
self.ensure_one()
group_purchase_user = self.env.ref('purchase.group_purchase_user')
return [
('id', '!=', 1),
('company_id', '=', dest_company.id),
('id', 'in', group_purchase_user.users.ids),
]
@api.multi
def _check_intercompany_product(self, dest_company):
domain = self._get_user_domain(dest_company)
dest_user = self.env['res.users'].search(domain, limit=1)
if dest_user:
for purchase_line in self.order_line:
try:
purchase_line.product_id.sudo(dest_user).read(
['default_code'])
except:
raise UserError(_(
"You cannot create SO from PO because product '%s' "
"is not intercompany") % purchase_line.product_id.name)
@api.multi
def _inter_company_create_sale_order(self, dest_company_id):
""" Create a Sale Order from the current PO (self)
Note: this method should be called in sudo with the proper
destination company in the context
:param company : the company of the created PO
:rtype company : res.company record
"""
self.ensure_one()
dest_company = self.env['res.company'].browse(dest_company_id)
# check intercompany product
self._check_intercompany_product(dest_company)
# Accessing to selling partner with selling user, so data like
# property_account_position can be retrieved
company_partner = self.company_id.partner_id
# check pricelist currency should be same with PO/SO document
if self.pricelist_id.currency_id.id != (
company_partner.property_product_pricelist.currency_id.id):
raise UserError(_(
'You cannot create SO from PO because '
'sale price list currency is different from '
'purchase price list currency.'))
# create the SO and generate its lines from the PO lines
sale_order_data = self._prepare_sale_order_data(
self.name, company_partner, dest_company,
self.dest_address_id and self.dest_address_id.id or False)
sale_order = self.env['sale.order'].create(sale_order_data)
for purchase_line in self.order_line:
sale_line_data = self._prepare_sale_order_line_data(
purchase_line, dest_company, sale_order)
self.env['sale.order.line'].create(sale_line_data)
# write supplier reference field on PO
if not self.partner_ref:
self.partner_ref = sale_order.name
# write invoice method field on PO
if self.invoice_method != 'intercompany':
self.invoice_method = 'intercompany'
# Validation of sale order
if dest_company.sale_auto_validation:
sale_order.signal_workflow('order_confirm')
@api.multi
def _prepare_sale_order_data(self, name, partner, dest_company,
direct_delivery_address):
""" Generate the Sale Order values from the PO
:param name : the origin client reference
:rtype name : string
:param partner : the partner representing the company
:rtype partner : res.partner record
:param company : the company of the created SO
:rtype company : res.company record
:param direct_delivery_address : the address of the SO
:rtype direct_delivery_address : res.partner record
"""
self.ensure_one()
partner_addr = partner.address_get(['default',
'invoice',
'delivery',
'contact'])
# find location and warehouse, pick warehouse from company object
warehouse = (
dest_company.warehouse_id and
dest_company.warehouse_id.company_id.id == dest_company.id and
dest_company.warehouse_id or False)
if not warehouse:
raise UserError(_(
'Configure correct warehouse for company (%s) in '
'Menu: Settings/companies/companies' % (dest_company.name)))
partner_shipping_id = (
self.picking_type_id.warehouse_id and
self.picking_type_id.warehouse_id.partner_id and
self.picking_type_id.warehouse_id.partner_id.id or False)
return {
'name': (
self.env['ir.sequence'].next_by_code('sale.order') or '/'
),
'company_id': dest_company.id,
'client_order_ref': name,
'partner_id': partner.id,
'warehouse_id': warehouse.id,
'pricelist_id': partner.property_product_pricelist.id,
'partner_invoice_id': partner_addr['invoice'],
'date_order': self.date_order,
'fiscal_position': (partner.property_account_position and
partner.property_account_position.id or False),
'user_id': False,
'auto_purchase_order_id': self.id,
'partner_shipping_id': (direct_delivery_address or
partner_shipping_id or
partner_addr['delivery']),
'note': self.notes
}
@api.model
def _prepare_sale_order_line_data(
self, purchase_line, dest_company, sale_order):
""" Generate the Sale Order Line values from the PO line
:param line : the origin Purchase Order Line
:rtype line : purchase.order.line record
:param company : the company of the created SO
:rtype company : res.company record
:param sale_order : the Sale Order
"""
context = self._context.copy()
context['company_id'] = dest_company.id
# get sale line data from product onchange
sale_line_obj = self.env['sale.order.line'].browse(False)
sale_line_data = sale_line_obj.with_context(
context).product_id_change_with_wh(
pricelist=sale_order.pricelist_id.id,
product=(purchase_line.product_id and
purchase_line.product_id.id or False),
qty=purchase_line.product_qty,
uom=(purchase_line.product_id and
purchase_line.product_id.uom_id.id or False),
qty_uos=0,
uos=False,
name='',
partner_id=sale_order.partner_id.id,
lang=False,
update_tax=True,
date_order=sale_order.date_order,
packaging=False,
fiscal_position=sale_order.fiscal_position.id,
flag=False,
warehouse_id=sale_order.warehouse_id.id)
sale_line_data['value']['product_id']
| tpainter/df_everywhere | df_everywhere/util/consoleInput.py | Python | gpl-2.0 | 1,794 | 0.007246 |
#from: http://stackoverflow.com/questions/10361820/simple-twisted-echo-client
#and
#from: http://stackoverflow.com/questions/510357/python-read-a-single-character-from-the-user
from twisted.internet.threads import deferToThread as _deferToThread
from twisted.internet import reactor
class ConsoleInput(object):
def __init__(self, stopFunction, reconnectFunction):
self.stopFunction = stopFunction
self.reconnectFunction = reconnectFunction
def start(self):
self.terminator = 'q'
self.restart = 'r'
self.getKey = _Getch()
self.startReceiving()
def startReceiving(self, s = ''):
if s == self.terminator:
self.stopFunction()
elif s == self.restart:
self.reconnectFunction()
_deferToThread(self.getKey).addCallback(self.startReceiving)
else:
_deferToThread(self.getKey).addCallback(self.startReceiving)
class _Getch:
"""
Gets a single character from standard input. Does not echo to the screen.
"""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
self.impl = _GetchUnix()
def __call__(self): return self.impl()
class _GetchUnix:
def __init__(self):
import tty, sys
def __call__(self):
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows:
def __init__(self):
import msvcrt
def __call__(self):
import msvcrt
return msvcrt.getch()
| atiqueahmedziad/addons-server | src/olympia/bandwagon/tests/test_serializers.py | Python | bsd-3-clause | 6,482 | 0 |
# -*- coding: utf-8 -*-
import mock
from rest_framework import serializers
from waffle.testutils import override_switch
from olympia.amo.tests import (
BaseTestCase, addon_factory, collection_factory, TestCase, user_factory)
from olympia.bandwagon.models import CollectionAddon
from olympia.bandwagon.serializers import (
CollectionAddonSerializer, CollectionAkismetSpamValidator,
CollectionSerializer, CollectionWithAddonsSerializer)
from olympia.lib.akismet.models import AkismetReport
class TestCollectionAkismetSpamValidator(TestCase):
def setUp(self):
self.validator = CollectionAkismetSpamValidator(
('name', 'description'))
serializer = mock.Mock()
serializer.instance = collection_factory(
name='name', description='Big Cheese')
request = mock.Mock()
request.user = user_factory()
request.META = {}
serializer.context = {'request': request}
self.validator.set_context(serializer)
self.data = {
'name': {'en-US': 'Collection', 'fr': u'Collection'},
'description': {'en-US': 'Big Cheese', 'fr': u'une gránd fromagé'},
'random_data': {'en-US': 'to ignore'},
'slug': 'cheese'}
@override_switch('akismet-spam-check', active=False)
@mock.patch('olympia.lib.akismet.models.AkismetReport.comment_check')
def test_waffle_off(self, comment_check_mock):
self.validator(self.data)
# No Akismet checks
assert AkismetReport.objects.count() == 0
comment_check_mock.assert_not_called()
@override_switch('akismet-spam-check', active=True)
@mock.patch('olympia.lib.akismet.models.AkismetReport.comment_check')
def test_ham(self, comment_check_mock):
comment_check_mock.return_value = AkismetReport.HAM
self.validator(self.data)
# Akismet check is there
assert AkismetReport.objects.count() == 2
name_report = AkismetReport.objects.first()
# name will only be there once because it's duplicated.
assert name_report.comment_type == 'collection-name'
assert name_report.comment == self.data['name']['en-US']
summary_report = AkismetReport.objects.last()
# en-US description won't be there because it's an existing description
assert summary_report.comment_type == 'collection-description'
assert summary_report.comment == self.data['description']['fr']
assert comment_check_mock.call_count == 2
@override_switch('akismet-spam-check', active=True)
@mock.patch('olympia.lib.akismet.models.AkismetReport.comment_check')
def test_spam(self, comment_check_mock):
comment_check_mock.return_value = AkismetReport.MAYBE_SPAM
with self.assertRaises(serializers.ValidationError):
self.validator(self.data)
# Akismet check is there
assert AkismetReport.objects.count() == 2
name_report = AkismetReport.objects.first()
# name will only be there once because it's duplicated.
assert name_report.comment_type == 'collection-name'
assert name_report.comment == self.data['name']['en-US']
summary_report = AkismetReport.objects.last()
# en-US description won't be there because it's an existing description
assert summary_report.comment_type == 'collection-description'
assert summary_report.comment == self.data['description']['fr']
# After the first comment_check was spam, additional ones are skipped.
assert comment_check_mock.call_count == 1
class TestCollectionSerializer(BaseTestCase):
serializer = CollectionSerializer
def setUp(self):
super(TestCollectionSerializer, self).setUp()
self.user = user_factory()
self.collection = collection_factory()
self.collection.update(author=self.user)
def serialize(self):
return self.serializer(self.collection).data
def test_basic(self):
data = self.serialize()
assert data['id'] == self.collection.id
assert data['uuid'] == self.collection.uuid
assert data['name'] == {'en-US': self.collection.name}
assert data['description'] == {'en-US': self.collection.description}
assert data['url'] == self.collection.get_abs_url()
assert data['addon_count'] == self.collection.addon_count
assert data['modified'] == (
self.collection.modified.replace(microsecond=0).isoformat() + 'Z')
assert data['author']['id'] == self.user.id
assert data['slug'] == self.collection.slug
assert data['public'] == self.collection.listed
assert data['default_locale'] == self.collection.default_locale
class TestCollectionAddonSerializer(BaseTestCase):
def setUp(self):
self.collection = collection_factory()
self.addon = addon_factory()
self.collection.add_addon(self.addon)
self.item = CollectionAddon.objects.get(addon=self.addon,
collection=self.collection)
self.item.comments = u'Dis is nice'
self.item.save()
def serialize(self):
return CollectionAddonSerializer(self.item).data
def test_basic(self):
data = self.serialize()
assert data['addon']['id'] == self.collection.addons.all()[0].id
assert data['notes'] == {'en-US': self.item.comments}
class TestCollectionWithAddonsSerializer(TestCollectionSerializer):
serializer = CollectionWithAddonsSerializer
def setUp(self):
super(TestCollectionWithAddonsSerializer, self).setUp()
self.addon = addon_factory()
self.collection.add_addon(self.addon)
def serialize(self):
mock_viewset = mock.MagicMock()
collection_addons = CollectionAddon.objects.filter(
addon=self.addon, collection=self.collection)
mock_viewset.get_addons_queryset.return_value = collection_addons
return self.serializer(
self.collection, context={'view': mock_viewset}).data
def test_basic(self):
super(TestCollectionWithAddonsSerializer, self).test_basic()
collection_addon = CollectionAddon.objects.get(
addon=self.addon, collection=self.collection)
data = self.serialize()
assert data['addons'] == [
CollectionAddonSerializer(collection_addon).data
]
assert data['addons'][0]['addon']['id'] == self.addon.id
| MachineandMagic/django-avatar | tests/urls.py | Python | bsd-3-clause | 108 | 0 |
from django.conf.urls import include, url
urlpatterns = [
url(r'^avatar/', include('avatar.urls')),
]
| electronic-structure/sirius | python_module/setup.py | Python | bsd-2-clause | 499 | 0.002004 |
import setuptools
setuptools.setup(
name="sirius",
version="0.5",
author="",
author_email="simon.pintarelli@cscs.ch",
description="pySIRIUS",
url="https://github.com/electronic_structure/SIRIUS",
packages=['sirius'],
install_requires=['mpi4py', 'voluptuous', 'numpy', 'h5py', 'scipy', 'PyYAML'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| nkoech/csacompendium | csacompendium/locations/api/precipitation/precipitationviews.py | Python | mit | 2,016 | 0.002976 |
from csacompendium.locations.models import Precipitation
from csacompendium.utils.pagination import APILimitOffsetPagination
from csacompendium.utils.permissions import IsOwnerOrReadOnly
from csacompendium.utils.viewsutils import DetailViewUpdateDelete, CreateAPIViewHook
from rest_framework.filters import DjangoFilterBackend
from rest_framework.generics import ListAPIView
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from .filters import PrecipitationListFilter
from csacompendium.locations.api.precipitation.precipitationserializers import precipitation_serializers
def precipitation_views():
"""
Precipitation views
:return: All precipitation views
:rtype: Object
"""
precipitation_serializer = precipitation_serializers()
class PrecipitationCreateAPIView(CreateAPIViewHook):
"""
Creates a single record.
"""
queryset = Precipitation.objects.all()
serializer_class = precipitation_serializer['PrecipitationDetailSerializer']
permission_classes = [IsAuthenticated]
class PrecipitationListAPIView(ListAPIView):
"""
API list view. Gets all records API.
"""
queryset = Precipitation.objects.all()
serializer_class = precipitation_serializer['PrecipitationListSerializer']
filter_backends = (DjangoFilterBackend,)
filter_class = PrecipitationListFilter
pagination_class = APILimitOffsetPagination
class PrecipitationDetailAPIView(DetailViewUpdateDelete):
"""
Updates a record.
"""
queryset = Precipitation.objects.all()
serializer_class = precipitation_serializer['PrecipitationDetailSerializer']
permission_classes = [IsAuthenticated, IsAdminUser]
lookup_field = 'pk'
return {
'PrecipitationListAPIView': PrecipitationListAPIView,
'PrecipitationDetailAPIView': PrecipitationDetailAPIView,
'PrecipitationCreateAPIView': PrecipitationCreateAPIView
}
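# Usage sketch (not part of the original module): the factory above returns
# plain view classes, so a project urls.py can wire them up roughly as below.
# The URL patterns and names are hypothetical, chosen only for illustration.
#
#   from django.conf.urls import url
#   views = precipitation_views()
#   urlpatterns = [
#       url(r'^$', views['PrecipitationListAPIView'].as_view(), name='precipitation_list'),
#       url(r'^create/$', views['PrecipitationCreateAPIView'].as_view(), name='precipitation_create'),
#       url(r'^(?P<pk>[0-9]+)/$', views['PrecipitationDetailAPIView'].as_view(), name='precipitation_detail'),
#   ]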
JohnUrban/poreminion | poreminion/poreminion_main.py | Python | mit | 59,570 | 0.012641
#!/usr/bin/env python
import os.path
import sys
import argparse
from poretools.Fast5File import *
#logger
import logging
logger = logging.getLogger('poreminion')
# poreminion imports
import poreminion.version
def run_subtool(parser, args):
if args.command == 'uncalled':
import findUncalled as submodule
elif args.command == 'timetest':
import findTimeErrors as submodule
elif args.command == 'fragstats':
import fragstats as submodule
elif args.command == 'fragsummary':
import fragsummary as submodule
elif args.command == 'fragrobust':
import robust as submodule
elif args.command == 'nx':
import nX as submodule
elif args.command == 'pct2d':
import pct2D as submodule
elif args.command == 'has2d':
import has2D as submodule
elif args.command == 'numevents':
import numevents as submodule
elif args.command == 'events':
import get_events as submodule
elif args.command == 'staypos':
import staypos as submodule
elif args.command == 'info':
import info as submodule
elif args.command == 'g4' or args.command == 'regex':
import quadparsersuite as submodule
elif args.command == 'seqlen':
import seqlen as submodule
elif args.command == 'dataconc':
import dataconc as submodule
elif args.command == 'qualpos':
import qual_v_pos as submodule
elif args.command == 'kmer':
import kmer as submodule
elif args.command == 'kmerplot':
import kmerplot as submodule
elif args.command == 'kmerdiff':
import kmerdiff as submodule
## elif args.command == 'align':
## import align as submodule
elif args.command == 'winner':
import winner as submodule
elif args.command == 'qualdist':
import qualdist as submodule
# run the chosen submodule.
submodule.run(parser, args)
class ArgumentParserWithDefaults(argparse.ArgumentParser):
def __init__(self, *args, **kwargs):
super(ArgumentParserWithDefaults, self).__init__(*args, **kwargs)
self.add_argument("-q", "--quiet", help="Do not output warnings to stderr",
action="store_true",
dest="quiet")
def main():
logging.basicConfig()
#########################################
# create the top-level parser
#########################################
parser = argparse.ArgumentParser(prog='poreminion', description=""" Poreminion - additional tools for analyzing nanopore sequencing data.""", formatter_class=argparse.RawTextHelpFormatter)#ArgumentDefaultsHelpFormatter)
parser.add_argument("-v", "--version", help="Installed poreminion version",
action="version",
version="%(prog)s " + str(poreminion.version.__version__))
subparsers = parser.add_subparsers(title='[sub-commands]', dest='command', parser_class=ArgumentParserWithDefaults)
#########################################
# create the individual tool parsers
#########################################
##########
# find uncalled (not basecalled) files
##########
parser_uncalled = subparsers.add_parser('uncalled',
help='Find Fast5 files that were not base-called.')
parser_uncalled.add_argument('files', metavar='FILES', nargs='+',
help='The input FAST5 files.')
parser_uncalled.add_argument('--outprefix', "-o",
type=str, required=True,
help='Uses this as basename for the following output files: (1) list of files not basecalled because template events not found, (2) list of files not basecalled because too few events found, (3) list of files not basecalled because too many events found. (4) event stats on each.')
parser_uncalled.add_argument('--move', "-m",
action='store_true', default=False,
help='''If specified, will move each non-basecalled file type to an approp labeled dir
inside same dir that has the dir reads with reads in it (e.g. downloads --> pass,
downloads --> fail, downloads --> "notemplate", etc).
Still writes out stats file.''')
parser_uncalled.set_defaults(func=run_subtool)
##########
    # findTimeErrors
##########
parser_timetest = subparsers.add_parser('timetest',
help='Find Fast5 files that have event times that are earlier than event times before it suggesting malfunction/erroneous read.')
    parser_timetest.add_argument('files', metavar='FILES', nargs='+',
help='The input FAST5 files.')
parser_timetest.add_argument('--outprefix', "-o",
type=str, default=False,
help='Uses this as basename for file containing list of files with time errors.')
parser_timetest.add_argument('--move', "-m",
action='store_true', default=False,
help='''If specified, will move files with time error dir labeled time_errors
inside same dir that has the dir with reads in it (e.g. downloads --> pass,
downloads --> fail, downloads --> "time_errors", etc).
Still writes out list file above.''')
parser_timetest.add_argument('--verbose', "-v",
action='store_true', default=False,
help='''Will print to stderr info about how far along it is in process.''')
parser_timetest.set_defaults(func=run_subtool)
##########
# fragstats
##########
parser_fragstats = subparsers.add_parser('fragstats',
help='''Run this on set of base-called fast5 files.
Returns tab-delimited table with columns:
1 = readname,
2 = estimated molecule/fragment size,
3 = number input events,
4 = if complement detected,
5 = if 2D detected,
6 = num template events,
7 = num complement events,
8 = length of 2D sequence,
9 = length of template sequence,
10 = length of complement sequence,
11 = mean qscore of 2D sequence,
12 = mean qscore of template sequence,
13 = mean qscore of complement,
14 = ratio of number template events to number complement events,
15 = channel number molecule traversed
16 = heat sink temperature while molecule traversed
17 = num called template events (after events pruned during base-calling)
18 = num called complement events (after events pruned during base-calling)
19 = num skips in template (is actually number 0 moves found in extensive analysis)
20 = num skips in complement (is actually number 0 moves found in extensive analysis)
21 = num stays in template (is actually number 2 moves found in extensive analysis, any 3,4,5 moves not counted here)
22 = num stays in complement (is actually number 2 moves found in extensive analysis, any 3,4,5 moves not counted here)
23 = strand score template
24 = strand score complement
25 = num stutters in template
26 = num stutters in complement
If --extensive used:
27 = starttime,
28 = endtime,
29 = slope across all events,
30 = mean duration across all events,
31 = median duration across all events,
32 = sd of all event durations,
33 = min event duration,
34 = max event duration,
35-40 = num temp events with 0,1,2,3,4,5 moves from base-caller,
41-46 = num comp events with 0,1,2,3,4,5 moves from base caller.
If -g4/--quadruplex used:
Final+1 = number of G4 motifs in 2D read: '([gG]{3,}\w{1,7}){3,}[gG]{3,}'
Final+2 = number of G4 motifs in template read
Final+3 = number of G4 motifs in complement read
Final+4 = number of G4 complement motifs in 2D reads: '([cC]{3,}\w{1,7}){3,}[cC]{3,}'
Final+5 = number of G4 complement motifs in template read (i.e. inferred complement strand count given template read)
Final+6 = number of G4 complement motifs in complement read (i.
zhangg/trove | trove/guestagent/strategies/restore/experimental/couchbase_impl.py | Python | apache-2.0 | 9,593 | 0
# Copyright (c) 2014 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import json
import os.path
import time
from oslo_log import log as logging
from trove.common import exception
from trove.common.i18n import _
from trove.common import utils
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.couchbase import service
from trove.guestagent.datastore.experimental.couchbase import system
from trove.guestagent.strategies.restore import base
LOG = logging.getLogger(__name__)
class CbBackup(base.RestoreRunner):
"""
Implementation of Restore Strategy for Couchbase.
"""
__strategy_name__ = 'cbbackup'
base_restore_cmd = 'sudo tar xpPf -'
def __init__(self, *args, **kwargs):
super(CbBackup, self).__init__(*args, **kwargs)
def pre_restore(self):
try:
operating_system.remove(system.COUCHBASE_DUMP_DIR, force=True)
except exception.ProcessExecutionError:
LOG.exception(_("Error during pre-restore phase."))
raise
def post_restore(self):
try:
# Root enabled for the backup
pwd_file = system.COUCHBASE_DUMP_DIR + system.SECRET_KEY
if os.path.exists(pwd_file):
with open(pwd_file, "r") as f:
pw = f.read().rstrip("\n")
root = service.CouchbaseRootAccess()
root.set_password(pw)
# Get current root password
root = service.CouchbaseRootAccess()
root_pwd = root.get_password()
# Iterate through each bucket config
buckets_json = system.COUCHBASE_DUMP_DIR + system.BUCKETS_JSON
with open(buckets_json, "r") as f:
out = f.read()
if out == "[]":
# No buckets or data to restore. Done.
return
d = json.loads(out)
for i in range(len(d)):
bucket_name = d[i]["name"]
bucket_type = d[i]["bucketType"]
if bucket_type == "membase":
bucket_type = "couchbase"
ram = int(utils.to_mb(d[i]["quota"]["ram"]))
auth_type = d[i]["authType"]
password = d[i]["saslPassword"]
port = d[i]["proxyPort"]
replica_number = d[i]["replicaNumber"]
replica_index = 1 if d[i]["replicaIndex"] else 0
threads = d[i]["threadsNumber"]
flush = 1 if "flush" in d[i]["controllers"] else 0
# cbrestore requires you to manually create dest buckets
create_bucket_cmd = ('curl -X POST -u root:' + root_pwd +
' -d name="' +
bucket_name + '"' +
' -d bucketType="' +
bucket_type + '"' +
' -d ramQuotaMB="' +
str(ram) + '"' +
' -d authType="' +
auth_type + '"' +
' -d saslPassword="' +
password + '"' +
' -d proxyPort="' +
str(port) + '"' +
' -d replicaNumber="' +
str(replica_number) + '"' +
' -d replicaIndex="' +
str(replica_index) + '"' +
' -d threadsNumber="' +
str(threads) + '"' +
' -d flushEnabled="' +
str(flush) + '" ' +
system.COUCHBASE_REST_API +
'/pools/default/buckets')
utils.execute_with_timeout(create_bucket_cmd,
shell=True, timeout=300)
if bucket_type == "memcached":
continue
# Wait for couchbase (membase) bucket creation to complete
# (follows same logic as --wait for couchbase-cli)
timeout_in_seconds = 120
start = time.time()
bucket_exist = False
while ((time.time() - start) <= timeout_in_seconds and
not bucket_exist):
url = (system.COUCHBASE_REST_API +
'/pools/default/buckets/')
outfile = system.COUCHBASE_DUMP_DIR + '/buckets.all'
utils.execute_with_timeout('curl -u root:' + root_pwd +
' ' + url + ' > ' + outfile,
shell=True, timeout=300)
with open(outfile, "r") as file:
out = file.read()
buckets = json.loads(out)
for bucket in buckets:
if bucket["name"] == bucket_name:
bucket_exist = True
break
if not bucket_exist:
time.sleep(2)
if not bucket_exist:
raise base.RestoreError("Failed to create bucket '%s' "
"within %s seconds"
% (bucket_name,
timeout_in_seconds))
# Query status
# (follows same logic as --wait for couchbase-cli)
healthy = False
while ((time.time() - start) <= timeout_in_seconds):
url = (system.COUCHBASE_REST_API +
'/pools/default/buckets/' +
bucket_name)
outfile = system.COUCHBASE_DUMP_DIR + '/' + bucket_name
utils.execute_with_timeout('curl -u root:' + root_pwd +
' ' + url + ' > ' + outfile,
shell=True, timeout=300)
all_node_ready = True
with open(outfile, "r") as file:
out = file.read()
bucket = json.loads(out)
for node in bucket["nodes"]:
if node["status"] != "healthy":
all_node_ready = False
break
if not all_node_ready:
time.sleep(2)
else:
healthy = True
break
if not healthy:
raise base.RestoreError("Bucket '%s' is created but "
"not ready to use within %s "
"seconds"
|
michaelpacer/scikit-image | skimage/feature/_hog.py | Python | bsd-3-clause | 7,022 | 0.000142
from __future__ import division
import numpy as np
from .._shared.utils import assert_nD
from . import _hoghistogram
def hog(image, orientations=9, pixels_per_cell=(8, 8),
cells_per_block=(3, 3), visualise=False, normalise=False):
"""Extract Histogram of Oriented Gradients (HOG) for a given image.
Compute a Histogram of Oriented Gradients (HOG) by
1. (optional) global image normalisation
2. computing the gradient image in x and y
3. computing gradient histograms
4. normalising across blocks
5. flattening into a feature vector
Parameters
----------
image : (M, N) ndarray
Input image (greyscale).
orientations : int
Number of orientation bins.
pixels_per_cell : 2 tuple (int, int)
Size (in pixels) of a cell.
cells_per_block : 2 tuple (int,int)
Number of cells in each block.
visualise : bool, optional
Also return an image of the HOG.
normalise : bool, optional
Apply power law compression to normalise the image before
processing.
Returns
-------
newarr : ndarray
HOG for the image as a 1D (flattened) array.
hog_image : ndarray (if visualise=True)
A visualisation of the HOG image.
References
----------
* http://en.wikipedia.org/wiki/Histogram_of_oriented_gradients
* Dalal, N and Triggs, B, Histograms of Oriented Gradients for
Human Detection, IEEE Computer Society Conference on Computer
Vision and Pattern Recognition 2005 San Diego, CA, USA
"""
image = np.atleast_2d(image)
"""
The first stage applies an optional global image normalisation
equalisation that is designed to reduce the influence of illumination
effects. In practice we use gamma (power law) compression, either
computing the square root or the log of each colour channel.
Image texture strength is typically proportional to the local surface
illumination so this compression helps to reduce the effects of local
shadowing and illumination variations.
"""
assert_nD(image, 2)
if normalise:
image = np.sqrt(image)
"""
The second stage computes first order image gradients. These capture
contour, silhouette and some texture information, while providing
further resistance to illumination variations. The locally dominant
colour channel is used, which provides colour invariance to a large
extent. Variant methods may also include second order image derivatives,
which act as primitive bar detectors - a useful feature for capturing,
e.g. bar like structures in bicycles and limbs in humans.
"""
if image.dtype.kind == 'u':
# convert uint image to float
# to avoid problems with subtracting unsigned numbers in np.diff()
image = image.astype('float')
gx = np.empty(image.shape, dtype=np.double)
gx[:, 0] = 0
gx[:, -1] = 0
gx[:, 1:-1] = image[:, 2:] - image[:, :-2]
gy = np.empty(image.shape, dtype=np.double)
gy[0, :] = 0
gy[-1, :] = 0
gy[1:-1, :] = image[2:, :] - image[:-2, :]
"""
The third stage aims to produce an encoding that is sensitive to
local image content while remaining resistant to small changes in
pose or appearance. The adopted method pools gradient orientation
information locally in the same way as the SIFT [Lowe 2004]
feature. The image window is divided into small spatial regions,
called "cells". For each cell we accumulate a local 1-D histogram
of gradient or edge orientations over all the pixels in the
cell. This combined cell-level 1-D histogram forms the basic
"orientation histogram" representation. Each orientation histogram
divides the gradient angle range into a fixed number of
predetermined bins. The gradient magnitudes of the pixels in the
cell are used to vote into the orientation histogram.
"""
sy, sx = image.shape
cx, cy = pixels_per_cell
bx, by = cells_per_block
n_cellsx = int(np.floor(sx // cx)) # number of cells in x
n_cellsy = int(np.floor(sy // cy)) # number of cells in y
# compute orientations integral images
orientation_histogram = np.zeros((n_cellsy, n_cellsx, orientations))
_hoghistogram.hog_histograms(gx, gy, cx, cy, sx, sy, n_cellsx, n_cellsy,
orientations, orientation_histogram)
# now for each cell, compute the histogram
hog_image = None
if visualise:
from .. import draw
radius = min(cx, cy) // 2 - 1
orientations_arr = np.arange(orientations)
dx_arr = radius * np.cos(orientations_arr / orientations * np.pi)
dy_arr = radius * np.sin(orientations_arr / orientations * np.pi)
cr2 = cy + cy
cc2 = cx + cx
hog_image = np.zeros((sy, sx), dtype=float)
for x in range(n_cellsx):
for y in range(n_cellsy):
for o, dx, dy in zip(orientations_arr, dx_arr, dy_arr):
centre = tuple([y * cr2 // 2, x * cc2 // 2])
rr, cc = draw.line(int(centre[0] - dx),
int(centre[1] + dy),
int(centre[0] + dx),
int(centre[1] - dy))
hog_image[rr, cc] += orientation_histogram[y, x, o]
"""
The fourth stage computes normalisation, which takes local groups of
cells and contrast normalises their overall responses before passing
to next stage. Normalisation introduces better invariance to illumination,
shadowing, and edge contrast. It is performed by accumulating a measure
of local histogram "energy" over local groups of cells that we call
"blocks". The result is used to normalise each cell in the block.
Typically each individual cell is shared between several blocks, but
its normalisations are block dependent and thus different. The cell
thus appears several times in the final output vector with different
normalisations. This may seem redundant but it improves the performance.
We refer to the normalised block descriptors as Histogram of Oriented
Gradient (HOG) descriptors.
"""
n_blocksx = (n_cellsx - bx) + 1
n_blocksy = (n_cellsy - by) + 1
normalised_blocks = np.zeros((n_blocksy, n_blocksx,
by, bx, orientations))
for x in range(n_blocksx):
for y in range(n_blocksy):
block = orientation_histogram[y:y + by, x:x + bx, :]
eps = 1e-5
            normalised_blocks[y, x, :] = block / np.sqrt(block.sum() ** 2 + eps)
"""
The final step collects the HOG descriptors from all blocks of a dense
overlapping grid of blocks covering the detection window into a combined
feature vector for use in the window classifier.
"""
if visualise:
return normalised_blocks.ravel(), hog_image
else:
return normalised_blocks.ravel()
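# Usage sketch (illustrative only, not part of scikit-image): extracting HOG
# features from a sample greyscale image with the defaults documented above;
# skimage.data.camera() is assumed to be available in this version.
#
#   from skimage import data
#   features, hog_image = hog(data.camera(), orientations=9,
#                             pixels_per_cell=(8, 8), cells_per_block=(3, 3),
#                             visualise=True)
#   # features is the flattened, block-normalised histogram vector;
#   # hog_image is returned only because visualise=True.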
olivierlemasle/murano | murano/common/i18n.py | Python | apache-2.0 | 1,149 | 0
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""oslo.i18n integration module.
See http://docs.openstack.org/developer/oslo.i18n/usage.html
"""
import oslo_i18n
_translators = oslo_i18n.TranslatorFactory(domain='murano')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
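# Usage sketch (not part of the module): other murano modules import these
# helpers to mark strings for translation; the message text, LOG object and
# exception below are example values only.
#
#   from murano.common.i18n import _, _LE
#   LOG.error(_LE("Unable to deploy environment %s"), environment_id)
#   raise ValueError(_("Environment not found"))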
makinacorpus/ionyweb | ionyweb/website/models.py | Python | bsd-3-clause | 9,120 | 0.004715
# -*- coding: utf-8 -*-
" WebSite models "
import os
import shutil
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.db import connection
from django.db import models
from django.db.utils import IntegrityError
from django.utils.translation import ugettext_lazy as _
from django.template.loader import get_template
from ionyweb.file_manager.models import FileManager, Directory
# Sites
class WebSite(models.Model):
''' WebSite
New contract of WebSite.
Everything is linked to an instance of this model.
(Pages, Files, ...)
'''
slug = models.SlugField(_(u"url"),
max_length=100,
unique=True)
title = models.CharField(_(u"title"),
max_length=50)
logo = models.ImageField(_(u"Logo
|
"),
upload_to='media_root',
# TEMP -> pbs with PIL...
blank=True)
ndds = models.ManyToManyField(Site,
related_name="website")
owners = models.ManyToManyField(User,
through='WebSiteOwner')
domain = models.ForeignKey(Site,
related_name="website_set",
unique=True,
on_delete=models.PROTECT,
help_text=_(u"Represents the main domain of the "
"website."))
analytics_key = models.CharField(_("Analytics key"), max_length=20, blank=True, null=True,
#regex=r'UA-[0-9]{7}-[0-9]{1}',
help_text=u'e.g. "UA-2456069-3"')
main_menu_levels = models.PositiveIntegerField(_("Main menu levels"), default=1)
meta_keywords = models.CharField(_(u"META Keywords"),
max_length="255", blank=True)
meta_description = models.TextField(_(u"META Description"), blank=True)
theme = models.CharField(_(u'Theme slug'),
max_length=100)
default_template = models.CharField(_(u'Default template'),
max_length=100, blank=True)
default_layout = models.CharField(_(u'Default layout'),
max_length=100)
# Warning, please use directory() to access the Files Library object
files_library = models.ForeignKey(FileManager,
related_name="website",
blank=True,
null=True,
help_text=_(u"Files Library"))
in_maintenance = models.BooleanField(_(u'Maintenance mode'), default=False, blank=True)
class Meta:
verbose_name = _(u"website")
verbose_name_plural = _(u'websites')
def __unicode__(self):
return u'%s' % self.title
def delete(self, *args, **kwargs):
""" Delete this domain names linked to it and the files """
for ndd in self.ndds.all():
if ndd != self.domain:
ndd.delete()
save_ndd = self.domain
#shutil.rmtree(self.media_root())
super(WebSite, self).delete(*args, **kwargs)
# The domain name is protected until the website is deleted successfully
save_ndd.delete()
def get_theme(self):
if len(self.theme.split('/')) <= 1:
return "%s/default" % self.theme
return self.theme
def file_manager(self):
if self.files_library:
return self.files_library
else:
# Create root directory
root = Directory.objects.create(name=self.slug)
self.files_library = FileManager.objects.create(root=root)
self.save()
try:
os.makedirs(self.media_root())
except OSError:
pass
# Create
try:
os.makedirs(os.path.join(self.media_root(), 'storage'))
except OSError:
pass
return self.files_library
def media_root(self):
"Get the filemanager site root"
return os.path.join('websites', self.slug, 'storage')
def get_size(self):
"Give the size used for quota in bytes"
return folder_size(self.media_root())
def get_screenshot(self):
"Return the url of the screenshot or None for the default image"
return None
def get_absolute_url(self):
if getattr(settings, 'SERVER_PORT', 80) != 80:
return u'http://%s:%d' % (self.domain.domain,
settings.SERVER_PORT)
else:
return u'http://%s' % self.domain.domain
def get_medias(self):
# medias_list = []
# # Add css file of the template
# medias_list.append(
# u'<link href="http://%s%s" type="text/css" media="all" rel="stylesheet" />' % (
# self.domain.domain, self.skin.template.css_file ))
# # Add css file of the skin
# medias_list.append(
# u'<link href="http://%s%s" type="text/css" media="all" rel="stylesheet" />' % (
# self.domain.domain, self.skin.css_file ))
# return u"\n".join(medias_list)
return ""
medias = property(get_medias)
def _get_layout(self, layout_name=None):
if layout_name is not None:
return 'layouts/%s' % layout_name
else:
return ''
def get_default_layout(self):
return self._get_layout(self.default_layout)
layout = property(get_default_layout)
# def get_header_layout(self):
# return self._get_layout(self.header_layout)
# def get_footer_layout(self):
# return self._get_layout(self.footer_layout)
# def render_header(self, request):
# """
# Returns the header rendering of website.
# """
# return render_plugins_header_or_footer(
# request,
# plugins_list=self.header_plugins.order_by('plugin_order'),
# layout=self.get_header_layout())
# def render_footer(self, request):
# """
# Returns the footer rendering of website.
# """
# return render_plugins_header_or_footer(
# request,
# plugins_list=self.footer_plugins.order_by('plugin_order'),
# layout=self.get_footer_layout())
def get_url_home_page(self):
return u'/'
class WebSiteOwner(models.Model):
website = models.ForeignKey(WebSite, related_name='websites_owned')
user = models.ForeignKey(User, related_name='websites_owned')
is_superuser = models.BooleanField(_('superuser status'),
default=False,
help_text=_("Designates that this user "
"has all permissions without "
"explicitly assigning them."))
def __unicode__(self):
return u'%s owns %d (%s)' % (self.user, self.website.id, self.is_superuser)
def delete(self, *args, **kwargs):
number_of_owners = self.website.websites_owned.filter(is_superuser=True).count()
if number_of_owners <= 1 and self.is_superuser:
raise IntegrityError('This user is the only superuser of this website')
else:
super(WebSiteOwner, self).delete(*args, **kwargs)
# SIGNALS
def catch_wrong_deletion_of_user(sender, instance, **kwargs):
''' Verify that if we delete the website owner, it will still have
no orphans websites
'''
cursor = connection.cursor()
cursor.execute("""
SELECT ws.title, COUNT(*) as owners FROM website_website ws
INNER JOIN website_websiteowner wso
ON ws.id = wso.website_id
AND wso.is_superuser = TRUE
AND ws.id IN (SELECT website_id
clebergnu/avocado-vt | virttest/virsh.py | Python | gpl-2.0 | 152,004 | 0.000329
"""
Utility classes and functions to handle connection to a libvirt host system
The entire contents of callables in this module (minus the names defined in
NOCLOSE below), will become methods of the Virsh and VirshPersistent classes.
A Closure class is used to wrap the module functions, lambda does not
properly store instance state in this implementation.
Because none of the methods have a 'self' parameter defined, the classes
are defined to be dict-like, and get passed in to the methods as a the
special ``**dargs`` parameter. All virsh module functions _MUST_ include a
special ``**dargs`` (variable keyword arguments) to accept non-default
keyword arguments.
The standard set of keyword arguments to all functions/modules is declared
in the VirshBase class. Only the 'virsh_exec' key is guaranteed to always
be present, the remainder may or may not be provided. Therefore, virsh
functions/methods should use the dict.get() method to retrieve with a default
for non-existent keys.
:copyright: 2012 Red Hat Inc.
"""
import signal
import logging
import re
import weakref
import time
import select
import locale
import base64
import aexpect
from avocado.utils import path
from avocado.utils import process
from six.moves import urllib
from virttest import propcan
from virttest import remote
from virttest import utils_misc
# list of symbol names NOT to wrap as Virsh class methods
# Everything else from globals() will become a method of Virsh class
NOCLOSE = list(globals().keys()) + [
'NOCLOSE', 'SCREENSHOT_ERROR_COUNT', 'VIRSH_COMMAND_CACHE',
'VIRSH_EXEC', 'VirshBase', 'VirshClosure', 'VirshSession', 'Virsh',
'VirshPersistent', 'VirshConnectBack', 'VIRSH_COMMAND_GROUP_CACHE',
'VIRSH_COMMAND_GROUP_CACHE_NO_DETAIL',
]
# Needs to be in-scope for Virsh* class screenshot method and module function
SCREENSHOT_ERROR_COUNT = 0
# Cache of virsh commands, used by help_command_group() and help_command_only()
# TODO: Make the cache into a class attribute on VirshBase class.
VIRSH_COMMAND_CACHE = None
VIRSH_COMMAND_GROUP_CACHE = None
VIRSH_COMMAND_GROUP_CACHE_NO_DETAIL = False
# This is used both inside and outside classes
try:
VIRSH_EXEC = path.find_command("virsh")
except path.CmdNotFoundError:
logging.warning("Virsh executable not set or found on path, "
"virsh module will not function normally")
VIRSH_EXEC = '/bin/true'
class VirshBase(propcan.PropCanBase):
"""
Base Class storing libvirt Connection & state to a host
"""
__slots__ = ('uri', 'ignore_status', 'debug', 'virsh_exec', 'readonly')
def __init__(self, *args, **dargs):
"""
Initialize instance with virsh_exec always set to something
"""
init_dict = dict(*args, **dargs)
init_dict['virsh_exec'] = init_dict.get('virsh_exec', VIRSH_EXEC)
init_dict['uri'] = init_dict.get('uri', None)
init_dict['debug'] = init_dict.get('debug', False)
init_dict['ignore_status'] = init_dict.get('ignore_status', False)
init_dict['readonly'] = init_dict.get('readonly', False)
super(VirshBase, self).__init__(init_dict)
def get_uri(self):
"""
Accessor method for 'uri' property that must exist
"""
        # self.get() would call get_uri() recursively
try:
return self.__dict_get__('uri')
except KeyError:
return None
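# Illustrative note (not part of the module): because every module-level virsh
# wrapper accepts **dargs, callers can pass the VirshBase-style keywords
# directly; the command and domain name below are example values only.
#
#   result = virsh.dominfo("vm1", uri="qemu:///system",
#                          ignore_status=True, debug=True)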
class VirshSession(aexpect.ShellSession):
"""
A virsh shell session, used with Virsh instances.
"""
# No way to get virsh sub-command "exit" status
# Check output against list of known error-status strings
ERROR_REGEX_LIST = ['error:\s*.+$', '.*failed.*']
def __init__(self, virsh_exec=None, uri=None, a_id=None,
prompt=r"virsh\s*[\#\>]\s*", remote_ip=None,
remote_user=None, remote_pwd=None,
ssh_remote_auth=False, readonly=False,
unprivileged_user=None,
auto_close=False, check_libvirtd=True):
"""
Initialize virsh session server, or client if id set.
:param virsh_exec: path to virsh executable
:param uri: uri of libvirt instance to connect to
:param id: ID of an already running server, if accessing a running
server, or None if starting a new one.
:param prompt: Regular expression describing the shell's prompt line.
:param remote_ip: Hostname/IP of remote system to ssh into (if any)
:param remote_user: Username to ssh in as (if any)
:param remote_pwd: Password to use, or None for host/pubkey
:param auto_close: Param to init ShellSession.
:param ssh_remote_auth: ssh to remote first.(VirshConnectBack).
Then execute virsh commands.
Because the VirshSession is designed for class VirshPersistent, so
the default value of auto_close is False, and we manage the reference
to VirshSession in VirshPersistent manually with counter_increase and
counter_decrease. If you really want to use it directly over VirshPe-
rsistent, please init it with auto_close=True, then the session will
be closed in __del__.
* session = VirshSession(virsh.VIRSH_EXEC, auto_close=True)
"""
self.uri = uri
self.remote_ip = remote_ip
self.remote_user = remote_user
self.remote_pwd = remote_pwd
# Special handling if setting up a remote session
if ssh_remote_auth: # remote to remote
if remote_pwd:
pref_auth = "-o PreferredAuthentications=password"
else:
pref_auth = "-o PreferredAuthentications=hostbased,publickey"
# ssh_cmd is not None flags this as remote session
ssh_cmd = ("ssh -o UserKnownHostsFile=/dev/null %s -p %s %s@%s"
% (pref_auth, 22, self.remote_user, self.remote_ip))
if uri:
self.virsh_exec = ("%s \"%s -c '%s'\""
% (ssh_cmd, virsh_exec, self.uri))
else:
self.virsh_exec = ("%s \"%s\"" % (ssh_cmd, virsh_exec))
else: # setting up a local session or re-using a session
self.virsh_exec = virsh_exec
if self.uri:
self.virsh_exec += " -c '%s'" % self.uri
ssh_cmd = None # flags not-remote session
if readonly:
self.virsh_exec += " -r"
if unprivileged_user:
self.virsh_exec = "su - %s -c '%s'" % (unprivileged_user,
self.virsh_exec)
# aexpect tries to auto close session because no clients connected yet
aexpect.ShellSession.__init__(self, self.virsh_exec, a_id,
prompt=prompt, auto_close=auto_close)
# Handle remote session prompts:
# 1.remote to remote with ssh
# 2.local to remote with "virsh -c uri"
if ssh_remote_auth or self.uri:
# Handle ssh / password prompts
remote.handle_prompts(self, self.remote_user, self.remote_pwd,
prompt, debug=True)
# fail if libvirtd is not running
if check_libvirtd:
if self.cmd_status('list', timeout=60) != 0:
logging.debug("Persistent virsh session is not responding, "
"libvirtd may be dead.")
self.auto_close = True
raise aexpect.ShellStatusError(virsh_exec, 'list')
def cmd_status_output(self, cmd, timeout=60, internal_timeout=None,
print_func=None, safe=False):
"""
Send a virsh command and return its exit status and output.
:param cmd: virsh command to send (must not contain newline characters)
:param timeout: The duration (in seconds) to wait for the prompt to
return
:param internal_timeout: The timeout to pass to read_nonblocking
:param print_func: A function to be used to print the data being read
(should take a string paramete
|
frossigneux/blazar | climate/tests/db/test_utils.py | Python | apache-2.0 | 741 | 0
# -*- coding: utf-8 -*-
#
# Author: François Rossigneux <francois.rossigneux@inria.fr>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from climate import tests
class DBUtilsTestCase(tests.TestCase):
"""Test case for DB Utils."""
pass
Bolt64/my_code | Code Snippets/domain_coloring.py | Python | mit | 3,030 | 0.022112
#!/home/bolt/.python_compiled/bin/python3
import math
from PIL import Image
def complex_wrapper(func, scale_factor=1):
"""
Modifies a complex function that takes a complex
argument and returns a complex number to take a
tuple and return a tuple.
"""
def inner(real, imag):
complex_num=complex(real, imag)
return_value=func(complex_num/scale_factor)
return return_value.real, return_value.imag
return inner
def decorate_atan(func):
"""
A decorator to modify the range of atan from -pi to pi to 0 to 2*pi
"""
def inner(y, x):
return_val=func(y, x)
if return_val>=0:
return return_val
else:
return 2*math.pi+return_val
return inner
atan=decorate_atan(math.atan2)
def assign_color_shade(position):
"""
This function assigns a unique color shade to each angle in [0, 2*pi)
"""
x,y=position
if (x,y)==(0,0):
return (255, 255, 255)
angle=atan(y,x)
mod_angle=angle%(2/3*math.pi)
mixing=mod_angle/(2/3*math.pi)*255
if angle<=2/3*math.pi:
return (255-mixing, mixing, 0)
elif 2/3*math.pi<angle<=4/3*math.pi:
return (0, 255-mixing, mixing)
else:
return (mixing, 0, 255-mixing)
def color_intensity(position, radius, gradient):
"""
    This function assigns an intensity based on the radial distance and the gradient
"""
x,y=position
shade_tuple=assign_color_shade(position)
if x**2+y**2<radius**2:
r,b,g=shade_tuple
ratio=((x**2+y**2)/(radius**2))**gradient
r_new,b_new,g_new=255-ratio*(255-r),255-ratio*(255-b),255-ratio*(255-g)
return r_new,b_new,g_new
else:
ratio=((radius**2)/(x**2+y**2))**gradient
r,b,g=shade_tuple
return r*ratio,b*ratio,g*ratio
def colorize_point(position, radius, gradient=1):
"""
    This function combines the last 2 functions and returns the shade of each point
"""
r,b,g=color_intensity(position, radius, gradient)
return round(r), round(b), round(g)
def generate_plane_image(x_size, y_size, radius, gradient):
"""
This function generates the domain plane
"""
image=Image.new('RGB', (x_size, y_size))
x_c,y_c=x_size//2, y_size//2
for x in range(x_size):
for y in range(y_size):
image.putpixel((x,y), colorize_point((x-x_c, y-y_c), radius, gradient))
return image
def map_function(plane_image, func, radius, gradient):
"""
This function maps the function on the domain plane
"""
image=Image.new('RGB', plane_image.size)
x_size,y_size=plane_image.size
x_c,y_c=x_size//2, y_size//2
for x in range(x_size):
for y in range(y_size):
x_new,y_new=func(x-x_c, y-y_c)
try:
new_colors=plane_image.getpixel((x_new+x_c, y_new+y_c))
except IndexError:
new_colors=colorize_point((x_new, y_new), radius, gradient)
image.putpixel((x,y), new_colors)
return image
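# Usage sketch (not in the original script): render a domain-coloured image of
# a simple function; sizes, radius, gradient and the filename are arbitrary
# example values.
#
#   plane = generate_plane_image(400, 400, radius=100, gradient=0.5)
#   squared = map_function(plane, complex_wrapper(lambda z: z ** 2, scale_factor=50),
#                          radius=100, gradient=0.5)
#   squared.save("z_squared.png")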
sanyaade-mobiledev/clusto | src/clusto/commands/console.py | Python | bsd-3-clause | 2,107 | 0.003322
#!/usr/bin/env python
# -*- mode: python; sh-basic-offset: 4; indent-tabs-mode: nil; coding: utf-8 -*-
# vim: tabstop=4 softtabstop=4 expandtab shiftwidth=4 fileencoding=utf-8
#
# Shell command
# Copyright 2010, Jeremy Grosser <synack@digg.com>
import argparse
import os
import sys
import clusto
from clusto import script_helper
class Console(script_helper.Script):
'''
Use clusto's hardware port mappings to console to a remote server
using the serial console.
'''
def __init__(self):
script_helper.Script.__init__(self)
def _add_arguments(self, parser):
user = os.environ.get('USER')
parser.add_argument('--user', '-u', default=user,
help='SSH User (you can also set this in clusto.conf too'
'in console.user: --user > clusto.conf:console.user > "%s")' % user)
parser.add_argument('server', nargs=1,
help='Object to console to (IP or name)')
def add_subparser(self, subparsers):
parser = self._setup_subparser(subparsers)
self._add_arguments(parser)
def run(self, args):
try:
server = clusto.get(args.server[0])
if not server:
raise LookupError('Object "%s" does not exist' % args.server)
except Exception as e:
self.debug(e)
self.error('No object like "%s" was found' % args.server)
return 1
server = server[0]
if not hasattr(server, 'console'):
self.error('The object %s lacks a console method' % server.name)
return 2
user = os.environ.get('USER')
if args.user:
self.debug('Grabbing user from parameter')
user = args.user
else:
self.debug('Grabbing user from config file or default')
user = self.get_conf('console.user', user)
self.debug('User is "%s"' % user)
return(server.console(ssh_user=user))
def main():
console, args = script_helper.init_arguments(Console)
return(console.run(args))
if __name__ == '__main__':
sys.exit(main())
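# Usage sketch (an assumption, not taken from the file): the Console class is
# registered through clusto's script_helper, so it is normally reached via the
# clusto command dispatcher rather than run directly, e.g. something like:
#
#   clusto console myserver01 --user admin
#
# The object name and user above are made-up example values.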
kschoelz/abacuspb | test/test_data.py | Python | gpl-2.0 | 5,915 | 0.005748
from datetime import datetime
#####################
# Account Test Data #
#####################
account = {
'name': 'Test Account Name',
'type': 'Checking',
'bank_name': 'Bank of Catonsville',
'account_num': '1234567890'
}
account_put = {
'name': 'Savings Account',
'type': 'Savings'
}
db_account = {
'id': 'acct_testaccountname',
'name': 'Test Account Name',
'type': 'Checking',
'bank_name': 'Bank of Catonsville',
'account_num': '1234567890',
'bal_uncleared': 2635.63,
'bal_cleared': -40.92,
'bal_reconciled': 1021.61,
'budget_monitored': True
}
db_account_2 = {
'id': 'acct_toaccountname',
'name': 'To Account Name',
'type': 'Savings',
'bank_name': 'Bank of Catonsville',
'account_num': '0987654321',
'bal_uncleared': 100.00,
'bal_cleared': 100.00,
'bal_reconciled': 200.00,
'budget_monitored': False
}
db_account_3 = {
'id': 'acct_to2accountname',
'name': 'To 2 Account Name',
'type': 'Savings',
    'bank_name': 'Bank of Catonsville',
'account_num': '0987654320',
'bal_uncleared': 500.00,
'bal_cleared': 500.00,
'bal_reconciled': 600.00,
'budget_monitored': False
}
#########################
# Transaction Test Data #
#########################
transaction = {
'date': '2014-08-10',
'type': 'EFT',
'payee': 'Giant',
# need: category/account, split -> consider fields.Nested
'reconciled': '',
'amount': -52.08,
'memo': ''
}
transaction_transfer = {
'date': '2014-08-10',
'type': 'XFER',
'payee': 'Move to Savings',
'reconciled': '',
'amount': -100.00,
'memo': '',
'cat_or_acct_id': 'acct_toaccountname'
}
transaction_put_amount = { # id = 53f69e77137a001e344259cb (Amazon.com)
'amount': -14.01,
'memo': 'Birthday present'
}
transaction_put_reconciled = { # id = 53f69e77137a001e344259cb (Amazon.com)
'reconciled': 'C'
}
transaction_put_amountreconciled = { # id = 53f69e77137a001e344259cb (Amazon.com)
'amount': -14.01,
'reconciled': 'C'
}
db_transactions= [
{
'id': '53f69e77137a001e344259c7',
'date': datetime(2014,7,31),
'type': 'DEP',
'payee': 'Sandy Spring Bank',
'reconciled': 'R',
'amount': 1145.06,
'memo': 'Sandy\'s Salary',
'cat_or_acct_id': '1'
},
{
'id': '53f69e77137a001e344259c8',
'date': datetime(2014,8,1),
'type': 'EFT',
'payee': 'Costco',
'reconciled': 'R',
'amount': -123.45,
'memo': 'Test transaction memo',
'cat_or_acct_id': '2'
},
{
'id': '53f69e77137a001e344259c9',
'date': datetime(2014,8,6),
'type': 'EFT',
'payee': 'Exxon',
'reconciled': 'C',
'amount': -40.92,
'memo': '',
'cat_or_acct_id': '2'
},
{
'id': '53f69e77137a001e344259ca',
'date': datetime(2014,8,18),
'type': 'DEP',
'payee': 'U.S. Government',
'reconciled': '',
'amount': 2649.52,
'memo': 'Kyle\'s Salary',
'cat_or_acct_id': '1'
},
{
'id': '53f69e77137a001e344259cb',
'date': datetime(2014,8,12),
'type': 'EFT',
'payee': 'Amazon.com',
'reconciled': '',
'amount': -13.89,
'memo': '',
'cat_or_acct_id': '2'
}
]
db_transfer_transactions_fromAcct= [
{
'id': '53f69e77137a001e344259c7',
'date': datetime(2014,7,31),
'type': 'XFER',
'payee': 'To Savings',
'reconciled': 'C',
'amount': -100.00,
'memo': '',
'cat_or_acct_id': 'acct_toaccountname'
},
{
'id': '53f69e77137a001e344259c8',
'date': datetime(2014,7,31),
'type': 'XFER',
'payee': 'To Savings',
'reconciled': 'C',
'amount': -100.00,
'memo': '',
'cat_or_acct_id': 'somecategoryidstring'
}
]
db_transfer_transactions_toAcct= [
{
'id': '53f69e77137a001e344259c7',
'date': datetime(2014,7,31),
'type': 'XFER',
'payee': 'To Savings',
'reconciled': 'R',
'amount': 100.00,
'memo': '',
'cat_or_acct_id': 'acct_testaccountname'
}
]
###################
# Payee Test Data #
###################
payee = { 'name': 'Costco' }
payee_put = { 'name': 'Newegg.com' }
db_payees = [
{
'id': '53f69e77137a001e344259f1',
'name': 'Costco'
},
{
'id': '53f69e77137a001e344259f2',
'name': 'Amazon.com'
},
{
'id': '53f69e77137a001e344259f3',
'name': 'U.S. Government'
},
{
'id': '53f69e77137a001e344259f4',
'name': 'Exxon'
},
{
'id': '53f69e77137a001e344259f5',
'name': 'Sandy Spring Bank'
}
]
######################
# Category Test Data #
######################
category_1 = {
'name': 'Tithe',
'parent_id': None
}
category_2 = {
'name': 'Gas & Electric',
'parent_id': '1234567890'
}
category_put = { 'parent_id': '1234567890' }
db_categories = [
{
'id': '53f69e77137a001e344259f1',
'name': 'Auto',
'budget_tracked': False,
'parent_id': None
},
{
'id': '53f69e77137a001e344259fa',
'name': 'Gas',
'budget_tracked': True,
'parent_id': '53f69e77137a001e344259f1' # Parent = Auto
},
{
'id': '53f69e77137a001e344259fb',
'name': 'Service',
'budget_tracked': True,
'parent_id': '53f69e77137a001e344259f1' # Parent = Auto
},
{
'id': '53f69e77137a001e344259f2',
'name': 'Dining & Entertainment',
'budget_tracked': True,
'parent_id': None
},
{
'id': '53f69e77137a001e344259f3',
'name': 'Tithe',
'budget_tracked': True,
'parent_id': None
}
]
adobe-research/spark-cluster-deployment | application-deployment-fabfile.py | Python | apache-2.0 | 4,180 | 0.013636
# fabfile.py
# TODO - Description.
#
###########################################################################
##
## Copyright (c) 2014 Adobe Systems Incorporated. All rights reserved.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###########################################################################
from fabric.api import *
from textwrap import dedent, wrap
import io
import re
import pickle
import sys
import os
import yaml
script_dir = os.path.dirname(__file__)
with open(script_dir+"/config.yaml", "r") as f:
config = yaml.load(f)
if os.path.isfile('config.yaml'):
with open('config.yaml', 'r') as f:
config.update(yaml.load(f))
else:
print("Error: Current directory must have local application config.")
sys.exit(-1)
env.roledefs['master'] = config['master']
env.roledefs['workers'] = config['workers']
env.roledefs['all'] = config['all']
env.use_ssh_config = True
@task
def assembly():
local("sbt assembly &> assembly.log")
@task
def sync():
# put(config['local_jar_dir'] + '/' + config['jar'], config['remote_jar_dir'])
for server in config['all']:
local("rsync -azrv --progress {}/{} {}:/{}".format(
config['local_jar_dir'],
config['jar'],
server,
            config['remote_jar_dir']
))
@task
@roles('master')
def start():
outIO = io.BytesIO(); errIO = io.BytesIO()
sudo(' '.join([
config['remote_spark_dir'] + '/bin/spark-submit ',
'--class', config['main_class'], '--master', config['spark_master'],
'--deploy-mode', 'cluster', config['remote_jar_dir'] + '/' + config['jar']
]), stdout=outIO, stderr=errIO)
outIO.seek(0); errIO.seek(0)
    outStr = outIO.read()
driverRe = re.search("State of (driver-\d*-\d*) is (\S*)", outStr)
driverId = driverRe.group(1)
status = driverRe.group(2)
print(" DriverID: " + driverId)
print(" Status: " + status)
if status == "ERROR":
msg = """
The error state occurs when the Spark Master rejects the job,
which is likely due to a misconfiguration in the Spark context
of your application.
Once checking your Spark context for accuracy, next ssh into the node
that failed and go to Spark work directory, which contains
        the output for Spark applications and drivers.
Check stderr and stdout in the driver and application directories.
"""
print(dedent(msg))
elif status == "RUNNING":
driverServerRe = re.search("Driver running on (\S*):\d* ", outStr)
driverServer = driverServerRe.group(1)
print(" DriverServer: " + driverServer)
with open('lastJobStarted.pickle', 'wb') as f:
pickle.dump({
'driverId': driverId,
'driverServer': driverServer
}, f)
else:
print(status)
@task
@roles('master')
def kill(driverId=None):
if not driverId:
try:
with open('lastJobStarted.pickle', 'rb') as f:
m = pickle.load(f)
except IOError as e:
print("Unable to open lastJobStarted.pickle")
driverId = m['driverId']
sudo(' '.join([
config['remote_spark_dir'] + '/bin/spark-class ',
"org.apache.spark.deploy.Client kill",
config['spark_master'],
driverId
]))
@task
def getOutput(driverId=None,driverServer=None):
if not driverId:
try:
with open('lastJobStarted.pickle', 'rb') as f:
m = pickle.load(f)
except IOError as e:
print("Unable to open lastJobStarted.pickle")
sys.exit(-1)
driverId = m['driverId']
driverServer = m['driverServer']
local("scp " +
driverServer + ":" + config['spark_work'] + "/" + driverId +
"/stdout " + "stdout.txt")
local("scp " +
driverServer + ":" + config['spark_work'] + "/" + driverId +
"/stderr " + "stderr.txt")
earwig/mwparserfromhell | src/mwparserfromhell/definitions.py | Python | mit | 3,915 | 0
# Copyright (C) 2012-2020 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Contains data about certain markup, like HTML tags and external links.
When updating this file, please also update the C tokenizer version:
- mwparserfromhell/parser/ctokenizer/definitions.c
- mwparserfromhell/parser/ctokenizer/definitions.h
"""
__all__ = [
"get_html_tag",
"is_parsable",
"is_visible",
"is_single",
"is_single_only",
"is_scheme",
]
URI_SCHEMES = {
# [wikimedia/mediawiki.git]/includes/DefaultSettings.php @ 5c660de5d0
"bitcoin": False,
"ftp": True,
"ftps": True,
"geo": False,
"git": True,
"gopher": True,
"http": True,
"https": True,
"irc": True,
"ircs": True,
"magnet": False,
"mailto": False,
"mms": True,
"news": False,
"nntp": True,
"redis": True,
"sftp": True,
"sip": False,
"sips": False,
"sms": False,
"ssh": True,
"svn": True,
"tel": False,
"telnet": True,
"urn": False,
"worldwind": True,
"xmpp": False,
}
PARSER_BLACKLIST = [
# https://www.mediawiki.org/wiki/Parser_extension_tags @ 2020-12-21
"categorytree",
"ce",
"chem",
"gallery",
"graph",
"hiero",
"imagemap",
"inputbox",
"math",
"nowiki",
"pre",
"score",
"section",
"source",
"syntaxhighlight",
"templatedata",
"timeline",
]
INVISIBLE_TAGS = [
# https://www.mediawiki.org/wiki/Parser_extension_tags @ 2020-12-21
"categorytree",
"gallery",
"graph",
"imagemap",
"inputbox",
"math",
"score",
"section",
"templatedata",
"timeline",
]
# [wikimedia/mediawiki.git]/includes/parser/Sanitizer.php @ 95e17ee645
SINGLE_ONLY = ["br", "wbr", "hr", "meta", "link", "img"]
SINGLE = SINGLE_ONLY + ["li", "dt", "dd", "th", "td", "tr"]
MARKUP_TO_HTML = {
"#": "li",
"*": "li",
";": "dt",
":": "dd",
}
def get_html_tag(markup):
"""Return the HTML tag associated with the given wiki-markup."""
return MARKUP_TO_HTML[markup]
def is_parsable(tag):
"""Return if the given *tag*'s contents should be passed to the parser."""
return tag.lower() not in PARSER_BLACKLIST
def is_visible(tag):
"""Return whether or not the given *tag* contains visible text."""
return tag.lower() not in INVISIBLE_TAGS
def is_single(tag):
"""Return whether or not the given *tag* can exist without a close tag."""
return tag.lower() in SINGLE
def is_single_only(tag):
"""Return whether or not the given *tag* must exist without a close tag."""
return tag.lower() in SINGLE_ONLY
def is_scheme(scheme, slashes=True):
"""Return whether *scheme* is valid for external links."""
scheme = scheme.lower()
if slashes:
return scheme in URI_SCHEMES
return scheme in URI_SCHEMES and not URI_SCHEMES[scheme]
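# Usage sketch (illustrative only): how the predicates above are typically
# queried by the tokenizer and builder code.
#
#   assert get_html_tag("*") == "li"
#   assert is_single_only("br") and not is_single_only("li")
#   assert is_parsable("b") and not is_parsable("nowiki")
#   assert is_scheme("https")                     # https://... is valid
#   assert not is_scheme("https", slashes=False)  # but not slashless https:...
#   assert is_scheme("mailto", slashes=False)     # mailto: is valid without slashes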
fabioz/PyDev.Debugger | tests_python/resources/_debugger_case_wait_for_attach.py | Python | epl-1.0 | 174 | 0.005747
if __name__ == '__main__':
# We want to call _enable_attach inside an import to make sure that it works properly that way.
    import _debugger_case_wait_for_attach_impl
Alex-Diez/python-tdd-katas | old-katas/sort-kata/day-1.py | Python | mit | 2,549 | 0.002354
# -*- coding: utf-8 -*-
def bubble_sort(to_sort):
index = 0
while index < len(to_sort):
offset = index
while offset > 0 and to_sort[offset - 1] > to_sort[offset]:
temp = to_sort[offset]
to_sort[offset] = to_sort[offset - 1]
to_sort[offset - 1] = temp
offset -= 1
index += 1
return to_sort
def quick_sort(to_sort):
result = []
if to_sort:
eq = to_sort[0]
lt, gt = _split_by(to_sort, eq)
for e in quick_sort(lt):
result.append(e)
result.append(eq)
for e in quick_sort(gt):
result.append(e)
return result
def _split_by(to_sort, eq):
lt = []
gt = []
for e in to_sort[1:]:
if e < eq:
            lt.append(e)
if e > eq:
gt.append(e)
return (lt, gt)
import unittest
class BubbleSortTest(unittest.TestCase):
def test_sorts_empty_list(self):
        self.assertEqual([], bubble_sort([]))
def test_sorts_single_element_list(self):
self.assertEqual([1], bubble_sort([1]))
def test_sorts_two_elements_sorted_list(self):
self.assertEqual([1, 2], bubble_sort([1, 2]))
def test_sorts_two_elements_unsorted_list(self):
self.assertEqual([1, 2], bubble_sort([2, 1]))
def test_sorts_three_elements_sorted_list(self):
self.assertEqual([1, 2, 3], bubble_sort([1, 2, 3]))
def test_sorts_2_1_3_list(self):
self.assertEqual([1, 2, 3], bubble_sort([2, 1, 3]))
def test_sorts_1_3_2_list(self):
self.assertEqual([1, 2, 3], bubble_sort([1, 3, 2]))
def test_sorts_3_2_1_list(self):
self.assertEqual([1, 2, 3], bubble_sort([3, 2, 1]))
class QuickSortTest(unittest.TestCase):
def test_sorts_an_empty_list(self):
self.assertEqual([], quick_sort([]))
def test_sorts_single_element_list(self):
self.assertEqual([1], quick_sort([1]))
def test_sorts_two_elements_sorted_list(self):
self.assertEqual([1, 2], quick_sort([1, 2]))
def test_sorts_two_elements_unsorted_list(self):
self.assertEqual([1, 2], quick_sort([2, 1]))
def test_sorts_three_elements_sorted_list(self):
self.assertEqual([1, 2, 3], quick_sort([1, 2, 3]))
def test_sorts_2_1_3_list(self):
self.assertEqual([1, 2, 3], quick_sort([2, 1, 3]))
def test_sorts_1_3_2_list(self):
self.assertEqual([1, 2, 3], quick_sort([1, 3, 2]))
def test_sorts_3_2_1_list(self):
self.assertEqual([1, 2, 3], quick_sort([3, 2, 1]))
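# Entry point added for convenience (not in the original kata file): lets the
# suite run directly with `python day-1.py`.
if __name__ == '__main__':
    unittest.main()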
alfa-jor/addon | plugin.video.alfa/channels/peliculasyseries.py | Python | gpl-3.0 | 12,489 | 0.008091
# -*- coding: utf-8 -*-
# -*- Channel PeliculasySeries -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
import base64
from channelselector import get_thumb
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from lib import jsunpack
from core.item import Item
from channels import filtertools
from channels import autoplay
from platformcode import config, logger
IDIOMAS = {'la': 'Latino', 'lat':'Latino', 'cas':'Castellano','es': 'Castellano', 'vs': 'VOSE', 'vos':'VOSE', 'vo':'VO',
'ori':'VO', 'so':'VOS', 'sor':'VOS'}
list_language = IDIOMAS.values()
list_quality = ['TS','Screener','DVDRip','HDRip', 'HDTV', 'micro720', 'micro1080']
list_servers = ['openload', 'rapidvideo', 'powvideo', 'gamovideo', 'streamplay', 'flashx', 'clipwatching', 'vidoza',
'thevideome']
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'peliculasyseries')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'peliculasyseries')
host = 'https://peliculasyseries.org/'
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel, title='Peliculas', action='menu_movies',
thumbnail= get_thumb('movies', auto=True)))
itemlist.append(Item(channel=item.channel, title='Series', url=host+'series', action='list_all', type='tvshows',
thumbnail= get_thumb('tvshows', auto=True)))
itemlist.append(
item.clone(title="Buscar", action="search", url=host + 'buscar/q/', thumbnail=get_thumb("search", auto=True),
extra='movie'))
itemlist = filtertools.show_option(itemlist, item.channel, list_language, list_quality)
autoplay.show_option(item.channel, itemlist)
return itemlist
def menu_movies(item):
logger.info()
itemlist=[]
    itemlist.append(Item(channel=item.channel, title='Todas', url=host + 'movies', action='list_all',
thumbnail=get_thumb('all', auto=True), type='movies'))
itemlist.append(Item(channel=item.channel, title='Genero', action='section',
thumbnail=get_thumb('genres', auto=True), type='movies'))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
    data = re.sub(r'\n|\r|\t| |<br>|\s{2,}', "", data)
return data
def get_language(lang_data):
logger.info()
language = []
lang_data = lang_data.replace('language-ES', '').replace('medium', '').replace('serie', '').replace('-','')
if 'class' in lang_data:
lang_list = scrapertools.find_multiple_matches(lang_data, 'class=" ([^"]+)"')
else:
return lang_data.strip()
for lang in lang_list:
if lang not in IDIOMAS:
lang = 'VOS'
if lang not in language:
language.append(IDIOMAS[lang])
return language
def section(item):
logger.info()
itemlist=[]
duplicados=[]
data = get_source(host)
data = scrapertools.find_single_match(data, 'data-toggle="dropdown">Géneros.*?multi-column-dropdown">.*?"clearfix"')
if 'Genero' in item.title:
patron = '<li><a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle
if title not in duplicados:
itemlist.append(Item(channel=item.channel, url=scrapedurl, title=title, action='list_all',
type=item.type))
duplicados.append(title)
return itemlist
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
if item.type == 'movies':
patron = '<div class="col-md-2 w3l-movie-gride-agile"><a href="([^"]+)" class=".*?">'
patron += '<img src="([^"]+)" title="([^"]+)" class="img-responsive".*?'
patron += '<div class="calidad" >([^<]+)</div> <div class="audio-info">'
patron += '(.*?)<div class="w3l-action-icon">.*?<p>([^<]+)</p>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, quality, lang_data, year in matches:
title = '%s [%s] [%s]' % (scrapedtitle, year, quality)
if 'screener' in quality.lower():
quality = 'Screener'
contentTitle = scrapedtitle
thumbnail = scrapedthumbnail
url = scrapedurl
language = get_language(lang_data)
itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentTitle=contentTitle,
language=language,
quality=quality,
infoLabels={'year':year}))
elif item.type == 'tvshows':
patron = '<div class="col-md-2 w3l-movie-gride-agile"><a href="([^"]+)" class=".*?">'
patron += '<img src="([^"]+)" title="([^"]+)" class="img-responsive".*?<p>([^<]+)</p>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
title = scrapedtitle
contentSerieName = scrapedtitle
thumbnail = scrapedthumbnail
url = scrapedurl
itemlist.append(item.clone(action='seasons',
title=title,
url=url,
thumbnail=thumbnail,
contentSerieName=contentSerieName,
context=filtertools.context(item, list_language, list_quality),
infoLabels={'year':year}))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
# Paginación
url_next_page = scrapertools.find_single_match(data,"<a class='last' href='([^']+)'>»</a>")
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
return itemlist
def seasons(item):
logger.info()
itemlist=[]
data=get_source(item.url)
patron='<a href="([^"]+)"><img class="thumb-item" src="([^"]+)" alt="[^"]+" >'
patron += '<div class="season-item">Temporada (\d+)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedurl, scrapedthumbnail, season in matches:
infoLabels['season']=season
title = 'Temporada %s' % season
itemlist.append(Item(channel=item.channel, title=title, url=scrapedurl, action='episodesxseasons',
thumbnail=scrapedthumbnail, infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseasons(tempitem)
return itemlist
def episodesxseasons(item):
logger.info()
itemlist = []
data=get_source(item.url)
patron ='class="row-serie-item"><a href="([^"]+)">.*?<img class="episode-thumb-item" src="([^"]+)" alt="([^"]+)" >'
patron += '<divclass="audio-info-series">(.*?)<div class="episode-item">%s+x(\d+)</div>' % item.infoLabels['season']
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedurl, scrapedthumbnail, scrapedtitle, la
|
eventql/eventql
|
deps/3rdparty/spidermonkey/mozjs/build/subconfigure.py
|
Python
|
agpl-3.0
| 14,193
| 0.000705
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# This script is used to capture the content of config.status-generated
# files and subsequently restore their timestamp if they haven't changed.
import argparse
import errno
import itertools
import os
import re
import subprocess
import sys
import pickle
import mozpack.path as mozpath
class Pool(object):
def __new__(cls, size):
try:
import multiprocessing
size = min(size, multiprocessing.cpu_count())
return multiprocessing.Pool(size)
except:
return super(Pool, cls).__new__(cls)
def imap_unordered(self, fn, iterable):
return itertools.imap(fn, iterable)
def close(self):
pass
def join(self):
pass
class File(object):
def __init__(self, path):
self._path = path
self._content = open(path, 'rb').read()
stat = os.stat(path)
self._times = (stat.st_atime, stat.st_mtime)
@property
def path(self):
return self._path
@property
def mtime(self):
return self._times[1]
@property
def modified(self):
'''Returns whether the file was modified since the instance was
created. Result is memoized.'''
if hasattr(self, '_modified'):
return self._modified
modified = True
if os.path.exists(self._path):
if open(self._path, 'rb').read() == self._content:
modified = False
self._modified = modified
return modified
def update_time(self):
'''If the file hasn't changed since the instance was created,
restore its old modification time.'''
if not self.modified:
os.utime(self._path, self._times)
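# Illustrative usage sketch (not part of the original script): snapshot a
# config.status-generated file, let the tool possibly rewrite it, then restore
# its timestamp when the content did not actually change.
#
#   f = File('config.status')   # capture content and (atime, mtime)
#   ...                         # re-run whatever may regenerate the file
#   f.update_time()             # no-op if the content was modified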
# As defined in the various sub-configures in the tree
PRECIOUS_VARS = set([
'build_alias',
'host_alias',
'target_alias',
'CC',
'CFLAGS',
'LDFLAGS',
'LIBS',
'CPPFLAGS',
'CPP',
'CCC',
'CXXFLAGS',
'CXX',
'CCASFLAGS',
'CCAS',
])
CONFIGURE_DATA = 'configure.pkl'
# Autoconf, in some of the sub-configures used in the tree, likes to error
# out when "precious" variables change in value. The solution it gives to
# straighten things is to either run make distclean or remove config.cache.
# There's no reason not to do the latter automatically instead of failing,
# doing the cleanup (which, on buildbots means a full clobber), and
# restarting from scratch.
def maybe_clear_cache(data):
env = dict(data['env'])
for kind in ('target', 'host', 'build'):
arg = data[kind]
if arg is not None:
env['%s_alias' % kind] = arg
# configure can take variables assignments in its arguments, and that
# overrides whatever is in the environment.
for arg in data['args']:
if arg[:1] != '-' and '=' in arg:
key, value = arg.split('=', 1)
env[key] = value
comment = re.compile(r'^\s+#')
cache = {}
with open(data['cache-file']) as f:
for line in f:
if not comment.match(line) and '=' in line:
key, value = line.rstrip(os.linesep).split('=', 1)
# If the value is quoted, unquote it
if value[:1] == "'":
value = value[1:-1].replace("'\\''", "'")
cache[key] = value
for precious in PRECIOUS_VARS:
# If there is no entry at all for that precious variable, then
# its value is not precious for that particular configure.
if 'ac_cv_env_%s_set' % precious not in cache:
continue
is_set = cache.get('ac_cv_env_%s_set' % precious) == 'set'
value = cache.get('ac_cv_env_%s_value' % precious) if is_set else None
if value != env.get(precious):
print 'Removing %s because of %s value change from:' \
% (data['cache-file'], precious)
print ' %s' % (value if value is not None else 'undefined')
print 'to:'
print ' %s' % env.get(precious, 'undefined')
os.remove(data['cache-file'])
return True
return False
def split_template(s):
"""Given a "file:template" string, returns "file", "template". If the string
is of the form "file" (without a template), returns "file", "file.in"."""
if ':' in s:
return s.split(':', 1)
return s, '%s.in' % s
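# Illustrative examples (not from the original source):
#   split_template('config.h')                         -> ('config.h', 'config.h.in')
#   split_template('out/Makefile:templates/Makefile.in') -> ('out/Makefile', 'templates/Makefile.in')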
def get_config_files(data):
config_status = mozpath.join(data['objdir'], 'config.status')
if not os.path.exists(config_status):
return [], []
configure = mozpath.join(data['srcdir'], 'configure')
config_files = []
command_files = []
# Scan the config.status output for information about configuration files
# it generates.
config_status_output = subprocess.check_output(
[data['shell'], '-c', '%s --help' % config_status],
stderr=subprocess.STDOUT).splitlines()
state = None
for line in config_status_output:
if line.startswith('Configuration') and line.endswith(':'):
if line.endswith('commands:'):
state = 'commands'
else:
state = 'config'
elif not line.strip():
state = None
elif state:
for f, t in (split_template(couple) for couple in line.split()):
f = mozpath.join(data['objdir'], f)
t = mozpath.join(data['srcdir'], t)
if state == 'commands':
command_files.append(f)
else:
config_files.append((f, t))
return config_files, command_files
def prepare(srcdir, objdir, shell, args):
parser = argparse.ArgumentParser()
parser.add_argument('--target', type=str)
parser.add_argument('--host', type=str)
parser.add_argument('--build', type=str)
parser.add_argument('--cache-file', type=str)
# The --srcdir argument is simply ignored. It's a useless autoconf feature
    # that we don't support well anyway. Parsing it here strips it from `others`
    # and lets us skip setting it when calling the subconfigure (configure
    # will take it from the configure path anyway).
parser.add_argument('--srcdir', type=str)
data_file = os.path.join(objdir, CONFIGURE_DATA)
previous_args = None
if os.path.exists(data_file):
with open(data_file, 'rb') as f:
data = pickle.load(f)
previous_args = data['args']
# Msys likes to break environment variables and command line arguments,
# so read those from stdin, as they are passed from the configure script
# when necessary (on windows).
# However, for some reason, $PATH is not handled like other environment
    # variables, and msys remangles it even when it is already given as a msys
    # $PATH. Fortunately, the mangling/demangling is just fine for $PATH, so
# we can just take the value from the environment. Msys will convert it
# back properly when calling subconfigure.
input = sys.stdin.read()
if input:
data = {a: b for [a, b] in eval(input)}
environ = {a: b for a, b in data['env']}
        environ['PATH'] = os.environ['PATH']
args = data['args']
else:
environ = os.environ
args, others = parser.parse_known_args(args)
data = {
'target': args.target,
'host': args.host,
'build': args.build,
'args': others,
'shell': shell,
'srcdir': srcdir,
'env': environ,
}
if args.cache_file:
data['cache-file'] = mozpath.normpath(mozpath.join(os.getcwd(),
args.cache_file))
else:
data['cache-file'] = mozpath.join(objdir, 'config.cache')
if previous_args is not None:
data['previous-args'] = previous_args
try:
os.makedirs(objdir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
with open(data_file, 'wb') as f:
pickle.dump(data, f)
def prefix_lines(text, prefix):
return ''
|
mate-desktop/python-mate-desktop
|
tests/runtests.py
|
Python
|
lgpl-2.1
| 722
| 0.00554
|
#!/usr/bin/env python
import glob
import os
import sys
import unittest
import common
if len(sys.argv) > 1:
builddir = sys.argv[1]
no_import_hooks = True
else:
builddir = '..'
no_import_hooks = False
common.run_import_tests(builddir, no_import_hooks)
SKIP_FILES = ['common', 'runtests']
dir = os.path.split(os.path.abspath(__file__))[0]
os.chdir(dir)
def gettestnames():
files = [fname[:-3] for fname in glob.glob('test*.py')
             if fname not in SKIP_FILES]
return files
suite = unittest.TestSuite()
loader = unittest.TestLoader()
for name in gettestnames():
suite.addTest(loader.loadTestsFromName(name))
testRunner = unittest.TextTestRunner()
testRunner.run(suite)
|
Fullbiter/EECS-293
|
pa12-13/airville/src/passenger.py
|
Python
|
gpl-3.0
| 1,260
| 0.000794
|
# Kevin Nash (kjn33)
# EECS 293
# Assignment 12
from entity import Entity
from random import randint
class Passenger(Entity):
""" Entities that need to be checked in fol
|
lowing queueing """
def __init__(self):
"""
        Passengers follow Entity initialization and
        are randomly given special parameters.
"""
super(Passenger, self).__init__()
# 50% chance of being a frequent flyer
self.frequent = randint(1, 2) % 2 == 0
# 10% chance of having a given special condition
self.oversize = randint(1, 10) % 10 == 0
        self.rerouted = randint(1, 10) % 10 == 0
self.overbook = randint(1, 10) % 10 == 0
self.time = 2
self.calc_time()
def __str__(self):
""" Represent Passenger by name, ID, and flyer type """
flyer_type = "regular"
if self.frequent:
flyer_type = "frequent"
return "%s %d (%s)" % (self.__class__.__name__, self.id, flyer_type)
def calc_time(self):
""" Set the time required for check in based on special parameters """
if self.oversize:
self.time += 2
if self.rerouted:
self.time += 2
if self.overbook:
self.time += 2
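        # Illustrative note (not in the original source): a passenger flagged as
        # both oversize and rerouted would check in with time = 2 + 2 + 2 = 6.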
|
stivosaurus/rpi-snippets
|
reference_scripts/basic_pygame.py
|
Python
|
unlicense
| 266
| 0.015038
|
import pygame
pygame.init()
screen = pygame.display.set_mode((400, 300))
done = False
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
pygame.display.flip()
|
walterfan/snippets
|
python/exam/EchoServer.py
|
Python
|
apache-2.0
| 296
| 0.013514
|
from twisted.internet import protocol, reactor
class Echo(protocol.Protocol):
def dataReceived(self, data):
self.transport.write(data)
class EchoFactory(protocol.Factory):
def buildProtocol(self, addr):
return Echo()
reactor.listenTCP(1234, EchoFactory())
reactor.run()
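# Illustrative usage note (not part of the original snippet): once running, the
# server echoes back whatever a client sends, e.g. `nc localhost 1234`.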
|
huku-/pyrsistence
|
tests/em_dict_basic.py
|
Python
|
bsd-2-clause
| 766
| 0
|
#!/usr/bin/env python
'''em_dict_basic.py - Basic benchmark for external memory dictionary.'''
__author__ = 'huku <huku@grhack.net>'
import sys
import shutil
import random
import time
import util
import pyrsistence
def main(argv):
# Initialize new external memory dictionary.
util.msg('Populating external memory dictionary')
t1 = time.time()
dirname = util.make_temp_name('em_dict')
em_dict = pyrsistence.EMDict(dirname)
for i in util.xrange(0x1000000):
em_dict[i] = i
t2 = time.time()
|
util.msg('Done in %d sec.' % (t2 - t1))
    # Close and remove external memory dictionary from disk.
em_dict.close()
shutil.rmtree(dirname)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
# EOF
|
Lyleo/OmniMarkupPreviewer
|
OmniMarkupLib/Renderers/libs/python2/docutils/parsers/rst/languages/zh_tw.py
|
Python
|
mit
| 5,129
| 0.001366
|
# -*- coding: utf-8 -*-
# $Id$
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Traditional Chinese language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
'attention (translation required)': 'attention',
'caution (translation required)': 'caution',
'code (translation required)': 'code',
'danger (translation required)': 'danger',
'error (translation required)': 'error',
'hint (translation required)': 'hint',
'important (translation required)': 'important',
'note (translation required)': 'note',
'tip (translation required)': 'tip',
'warning (translation required)': 'warning',
'admonition (translation required)': 'admonition',
'sidebar (translation required)': 'sidebar',
'topic (translation required)': 'topic',
'line-block (translation required)': 'line-block',
'parsed-literal (translation required)': 'parsed-literal',
'rubric (translation required)': 'rubric',
'epigraph (translation required)': 'epigraph',
'highlights (translation required)': 'highlights',
'pull-quote (translation required)': 'pull-quote',
'compound (translation required)': 'compound',
u'container (translation required)': 'container',
#'questions (translation required)': 'questions',
'table (translation required)': 'table',
'csv-table (translation required)': 'csv-table',
'list-table (translation required)': 'list-table',
#'qa (translation required)': 'questions',
#'faq (translation required)': 'questions',
'meta (translation required)': 'meta',
'math (translation required)': 'math',
#'imagemap (translation required)': 'imagemap',
'image (translation required)': 'image',
'figure (translation required)': 'figure',
'include (translation required)': 'include',
'raw (translation required)': 'raw',
'replace (translation required)': 'replace',
'unicode (translation required)': 'unicode',
u'日期': 'date',
'class (translation required)': 'class',
'role (translation required)': 'role',
u'default-role (translation required)': 'default-role',
u'title (translation required)': 'title',
'contents (translation required)': 'contents',
'sectnum (translation required)': 'sectnum',
'section-numbering (translation required)': 'sectnum',
u'header (translation required)': 'header',
u'footer (translation required)': 'footer',
#'footnotes (translation required)': 'footnotes',
#'citations (translation required)': 'citations',
'target-notes (translation required)': 'target-notes',
'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Traditional Chinese name to registered (in directives/__init__.py)
directive name mapping."""
roles = {
# language-dependent: fixed
'abbreviation (translation required)': 'abbreviation',
'ab (translation required)': 'abbreviation',
'acronym (translation required)': 'acronym',
'ac (translation required)': 'acronym',
u'code (translation required)': 'code',
'index (translation required)': 'index',
'i (translation required)': 'index',
'subscript (translation required)': 'subscript',
'sub (translation required)': 'subscript',
    'superscript (translation required)': 'superscript',
'sup (translation required)': 'superscript',
|
'title-reference (translation required)': 'title-reference',
'title (translation required)': 'title-reference',
't (translation required)': 'title-reference',
'pep-reference (translation required)': 'pep-reference',
'pep (translation required)': 'pep-reference',
'rfc-reference (translation required)': 'rfc-reference',
'rfc (translation required)': 'rfc-reference',
'emphasis (translation required)': 'emphasis',
'strong (translation required)': 'strong',
'literal (translation required)': 'literal',
'math (translation required)': 'math',
'named-reference (translation required)': 'named-reference',
'anonymous-reference (translation required)': 'anonymous-reference',
'footnote-reference (translation required)': 'footnote-reference',
'citation-reference (translation required)': 'citation-reference',
'substitution-reference (translation required)': 'substitution-reference',
'target (translation required)': 'target',
'uri-reference (translation required)': 'uri-reference',
'uri (translation required)': 'uri-reference',
'url (translation required)': 'uri-reference',
'raw (translation required)': 'raw',}
"""Mapping of Traditional Chinese role names to canonical role names for
interpreted text."""
|
tsujamin/digi-approval
|
src/digiapproval_project/digiapproval_project/apps/digiapproval/migrations/0009_add_last_read_mm_auto.py
|
Python
|
gpl-3.0
| 8,804
| 0.007383
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding M2M table for field last_read_by on 'Message'
m2m_table_name = db.shorten_name(u'digiapproval_message_last_read_by')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('message', models.ForeignKey(orm[u'digiapproval.message'], null=False)),
('user', models.ForeignKey(orm[u'auth.user'], null=False))
))
db.create_unique(m2m_table_name, ['message_id', 'user_id'])
def backwards(self, orm):
# Removing M2M table for field last_read_by on 'Message'
db.delete_table(db.shorten_name(u'digiapproval_message_last_read_by'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'digiapproval.customeraccount': {
'Meta': {'object_name': 'CustomerAccount'},
'account_type': ('django.db.models.fields.CharField', [], {'default': "'CUSTOMER'", 'max_length': '16'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent_accounts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'sub_accounts'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['digiapproval.CustomerAccount']"}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'digiapproval.message': {
'Meta': {'object_name': 'Message'},
'_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_read_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'last_read'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'message': ('django.db.models.fields.TextField', [], {}),
'posted': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['digiapproval.Workflow']"})
},
u'digiapproval.task': {
'Meta': {'object_name': 'Task'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'task': ('jsonfield.fields.JSONField', [], {}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': "'36'"}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['digiapproval.Workflow']"})
},
u'digiapproval.userfile': {
'Meta': {'object_name': 'UserFile'},
'_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'virus_status': ('django.db.models.fields.CharField', [], {'default': "'UNSCANNED'", 'max_length': '16'})
},
u'digiapproval.workflow': {
'Meta': {'object_name': 'Workflow'},
'approver': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'workflow_approver'", 'to': u"orm['auth.User']"}),
'completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'customer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'workflow_customer'", 'to': u"orm['digiapproval.CustomerAccount']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'spec': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['digiapproval.WorkflowSpec']"}),
'state': ('django.db.models.fields.CharField', [], {'default': "'STARTED'", 'max_length': '10'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'70558195da6a4488b22d6e8749f86580'", 'max_length': '36'}),
'workflow': ('digiapproval_project.apps.digiapproval.fields.WorkflowField', [], {})
},
u'digiapproval.workflowspec': {
'Meta': {'object_name': 'WorkflowSpec'},
'approvers': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'workflowspecs_approvers'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.Group']"}),
'delegators': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'workflowspecs_delegators'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.Group']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('dja
|
stormi/tsunami
|
src/secondaires/auberge/commandes/auberge/liste.py
|
Python
|
bsd-3-clause
| 3,069
| 0.000979
|
# -*-coding:Utf-8 -*
# Copyright (c) 2013 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'auberge liste'."""
from primaires.interpreteur.masque.parametre import Parametre
class PrmListe(Parametre):
"""Commande 'auberge liste'"""
def __init__(self):
"""Constructeur du paramètre."""
Parametre.__init__(self, "liste", "list")
self.aide_courte = "affiche les auberges existantes"
self.aide_longue = \
"Cette commande permet de lister les auberges existantes."
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande."""
auberges = sorted([a for a in importeur.auberge.auberges.values()],
key=lambda a: a.cle)
if auberges:
en_tete = "+-" + "-" * 15 + "-+-" + "-" * 25 + "-+-" + \
"-" * 8 + "-+-" + "-" * 6 + "-+"
msg = en_tete + "\n"
msg += "| Clé | Salle | " \
|
"Chambres | Occupé |\n"
msg += en_tete
for auberge in auberges:
cle = auberge.cle
ident = auberge.ident_comptoir
                nb_chambres = len(auberge.chambres)
pct_occupation = auberge.pct_occupation
msg += "\n| {:<15} | {:<25} | {:>8} | {:>5}% |".format(
cle, ident, nb_chambres, pct_occupation)
msg += "\n" + en_tete
personnage << msg
else:
personnage << "Aucune auberge n'existe pour l'heure."
|
waltBB/neutron_read
|
neutron/agent/metadata_agent.py
|
Python
|
apache-2.0
| 1,584
| 0
|
# Copyright 2015 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo_config import cfg
from oslo_log import log as logging
from neutron.agent.common import config as agent_conf
from neutron.agent.metadata import agent
from neutron.agent.metadata import config as metadata_conf
from neutron.common import config
from neutron.common import utils
from neutron.openstack.common.cache import cache
LOG = logging.getLogger(__name__)
def main():
cfg.CONF.register_opts(metadata_conf.UNIX_DOMAIN_METADATA_PROXY_OPTS)
cfg.CONF.register_opts(metadata_conf.METADATA_PROXY_HANDLER_OPTS)
cache.register_oslo_configs(cfg.CONF)
cfg.CONF.set_default(name='cache_url', default='memory://?default_ttl=5')
agent_conf.register_agent_state_opts_helper(cfg.CONF)
config.init(sys.argv[1:])
config.setup_logging()
utils.log_opt_values(LOG)
# metadata agent need not connect DB
cfg.CONF.set_override("connection", "", "database")
proxy = agent.UnixDomainMetadataProxy(cfg.CONF)
proxy.run()
|
CN-UPB/OpenBarista
|
components/sandman-pasta/sandman_pasta/sandman_pasta.py
|
Python
|
mpl-2.0
| 14,698
| 0.004763
|
"""
sandman_pasta reimplements the behaviour of decaf-masta, but instead evaluates all calls to deployable heat templates
"""
import json
from decaf_storage.json_base import StorageJSONEncoder
from decaf_storage import Endpoint
from decaf_utils_components.base_daemon import daemonize
import yaml
import time
import urllib
from decaf_utils_components import BasePlugin, In, Out
import base64
import sys
import math
import traceback
__author__ = "Banana PG-SANDMAN"
__date__ = "$01-jun-2016$"
TMPDIR = "/tmp/decaf/"
class Pasta(BasePlugin):
__version__ = "0.1-dev01"
datacenters = dict()
config = None
logger = None
def __init__(self, logger=None, config=None):
super(Pasta, self).__init__(logger=logger, config=config)
with open('/etc/decaf/pastad.cfg') as file:
self.config = yaml.safe_load(file)
if self.config is None:
self.logger.error("No configuration file found or not in yaml format.")
sys.exit(1)
try:
self.datacenters = self.config["datacenters"]
except KeyError as e:
self.logger.error("Please check the configuration. There is no datacenter defined.")
sys.exit(1)
self.logger.debug('Configuration seems sane.')
def _before_connect(self, url=None, rpc=None, routing_key=None):
pass
# same behaviour as masta
def _after_connect(self):
self.rpc.set_json_encoder(StorageJSONEncoder)
self.storage = Endpoint(self.rpc, self.logger)
# Check if all the datacenters are also registered in Storage, if not, register them
storage_datacenters = self.storage.get('datacenter', options=[], filters={})
def connect(self, url=None, rpc=None, routing_key=None):
# fake being masta, so we don't have to change other code
super(Pasta, self).connect(self.config["rpc"]["url"], None, "decaf_masta")
@In("datacenter_id", int)
@Out("success_code", int)
def initialize_datacenter(self, datacenter_config):
"""
Reimplemented method of decaf_masta
:param datacenter_config: A DatacenterConfig object describing the datacenter to be added.
:return: The id of the new entry.
"""
self.logger.info("Call to initialize_datacenter")
return 0
@In("keystone_credentials", dict)
@Out("keystone_id", int)
def create_keystone_credentials(self, keystone_credentials):
self.logger.info("Call to create_keystone_credentials")
return 0
@In("keystone_id", int)
@Out("keystone_credentials", dict)
def get_keystone_credentials(self, keystone_id):
"""
Gets a keystone entry from the database.
:param keystone_id: The id of the database entry.
:return: The data of the keystone entry with the given id, or an error code if not found.
"""
return 400
@Out("keystone_list", list)
def get_keystones(self):
"""
Get keystone entries contained in the database.
:return: A list of keystone entries currently existing in the Masta database.
"""
return None
# ----------------------------------------------------------
# DATACENTERS
# Every datacenter has a respective set of keystone credentials and a region.
# Keystone does not have to be installed on the actual datacenter, but could.
# ----------------------------------------------------------
@In("datacenter", dict)
@Out("datacenter_id", int)
    def create_datacenter(self, datacenter):
"""
Adds a datacenter entry to the database.
:param datacenter: A Datacenter dictionary containing information of the datacenter.
        :return: The id of the new entry in the database.
"""
return int(datacenter.datacenter_id)
@Out("datacenter_list", list)
def get_datacenters(self):
"""
Get datacenter entries contained in the database.
:return: A list of datacenter entries currently existing in the Masta database.
"""
return [datacenter.to_dict() for datacenter in self.datacenters]
@In("datacenter_id", int)
@Out("datacenter_stats", dict)
def get_datacenter_stats(self, datacenter_id):
"""
Returns information about the datacenter.
:param datacenter_id: The id of the datacenter.
        :return: A dictionary of statistics for the given datacenter.
"""
return datacenter_stats
@In("datacenter_id", int)
@Out("ip_namespace", str)
def get_datacenter_ip_namespace(self, datacenter_id):
"""
Returns the name of the IP namespace of the router on the given datacenter.
:param datacenter_id: The masta id of the datacenter.
:return: IP namespace name.
"""
ip_namespace = "qrouter-1"
return ip_namespace
# ----------------------------------------------------------
# DEPLOY SCENARIO
# A scenario is deployed in two steps: First, the edges are created.
# Secondly, the nodes are created.
# If the process fails at one step, MaSta will rollback the deployment.
# ----------------------------------------------------------
@In("instance_graph", dict)
@Out("instance_graph", dict)
def deploy_scenario(self, instance_graph):
"""
Deploy scenario on the infrastructure.
:param instance_graph: An object of type InstanceGraph to be deployed.
:return: The modified instance graph with ips and keynames, if successful.
"""
return instance_graph
# ----------------------------------------------------------
# DESTROY SCENARIO
# Deletes all the nodes and edges and removes
# the scenario from the database.
# ----------------------------------------------------------
@In("scenario_instance_id", str)
@Out("success_code", int)
def destroy_scenario(self, scenario_instance_id):
"""
Destroy scenario by deleting all its nodes and removing from database.
:param scenario_instance_id: The id of the scenario instance.
:return: 200, if successful. 404, if not found.
"""
return 200
@Out("success_code", int)
def destroy_all_scenarios(self):
"""
Destroys all scenarios in the MaSta database.
:return: 200, if successful.
"""
return 200
# ----------------------------------------------------------
# ALTER SCENARIO
# Methods to change a running scenario.
# ----------------------------------------------------------
@In("instance_graph", dict)
@Out("instance_graph", dict)
def extend_scenario(self, instance_graph):
"""
Method to extend an existing scenario.
:param instance_graph: An InstanceGraph with all the nodes and edges to add.
:return: 200, if successful.
"""
return 200
@In("shrink_graph", dict)
@Out("success_code", int)
def shrink_scenario(self, shrink_graph):
"""
Method to shrink an existing scenario.
:param shrink_graph: An object of type InstanceGraph that lists all the nodes and edges to delete.
:return: 200, if successful.
"""
return 200
# ----------------------------------------------------------
# INTERNAL SCENARIO METHODS
# Internal methods for creation and deletion
# of nodes and edges.
# ----------------------------------------------------------
def create_nodes(self, instance_graph, session):
"""
Internal method to create nodes in database and deploy the nodes on the infrastructure.
:param instance_graph: The graph of the scenario.
:param session: The session object.
:return:
"""
pass
def create_edges(self, instance_graph, session):
"""
Internal method to create edges in the database and set up the networks in OpenStack.
:param instance_graph: The graph of the scenario.
:param session: The session object.
:return:
"""
pass
|
hal0x2328/neo-python
|
neo/Storage/Common/CloneCache.py
|
Python
|
mit
| 828
| 0
|
from neo.Storage.Common.DataCache import DataCache
class CloneCache(DataCache):
def __init__(self, innerCache):
super(CloneCache, self).__init__()
self.innerCache = innerCache
def AddInternal(self, key, value):
self.innerCache.Add(key, value)
def DeleteInternal(self, key):
self.innerCache.Delete(key)
def FindInternal(self, key_prefix):
for k, v in self.innerCache.Find(key_prefix):
yield k, v.Clone()
def GetInternal(self, key):
        return self.innerCache[key].Clone()
def TryGetInternal(self, key):
res = self.innerCache.TryGet(key)
if res is None:
return None
else:
            return res.Clone()
def UpdateInternal(self, key, value):
self.innerCache.GetAndChange(key).FromReplica(value)
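# Illustrative sketch (assumptions: `backing` is some concrete DataCache instance,
# and the DataCache base exposes TryGet/Add wrappers around the *Internal methods):
#
#   clone = CloneCache(backing)
#   item = clone.TryGet(some_key)   # a Clone() of the backing item, or None
#
# Reads through the CloneCache never hand out the backing objects themselves.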
|
bretlowery/snakr
|
lib/PyOpenGraph/PyOpenGraph.py
|
Python
|
bsd-3-clause
| 3,011
| 0.016274
|
#!/usr/bin/env python
#Copyright (c) 2010 Gerson Minichiello
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import urllib
from HTMLParser import HTMLParser
class PyOpenGraph(object):
types = {'activity':['activity', 'sport'],
'business':['bar', 'company', 'cafe', 'hotel', 'restaurant'],
             'group':['cause', 'sports_league', 'sports_team'],
'organization':['band', 'government', 'non_profit', 'school', 'university'],
'person':['actor', 'athlete', 'author', 'director', 'musician', 'politician', 'public_figure'],
'place':['city', 'country', 'landmark', 'state_province'],
'product':['album', 'book', 'drink', 'food', 'game', 'isbn', 'movie', 'product', 'song', 'tv_show', 'upc'],
'website':['article', 'blog', 'website']}
def __init__(self, url):
f = urllib.urlopen(url)
contents = f.read()
f.close()
p = PyOpenGraphParser()
p.feed(contents)
p.close()
self.metadata = p.properties
def is_valid(self):
required = set(['title', 'type', 'image', 'url'])
        if (set(self.metadata.keys()).intersection(required)) == required:
return True
else:
return False
    def __str__(self):
return self.metadata['title']
class PyOpenGraphParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.properties = {}
def handle_starttag(self, tag, attrs):
if tag == 'meta':
attrdict = dict(attrs)
if attrdict.has_key('property') and attrdict['property'].startswith('og:') and attrdict.has_key('content'):
self.properties[attrdict['property'].replace('og:', '')] = attrdict['content']
def handle_endtag(self, tag):
pass
def error(self, msg):
pass
if __name__ == '__main__':
# Usage
og = PyOpenGraph('http://www.rottentomatoes.com/m/10011268-oceans/')
print og.metadata
print og.metadata['title']
|
energyPATHWAYS/energyPATHWAYS
|
energyPATHWAYS/_obsolete/temp.py
|
Python
|
mit
| 11,014
| 0.003087
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 06 13:07:14 2015
@author: Ryan Jones
"""
class DemandTechnology:
def __init__(self, drivers, ID, **kwargs):
self.ID = ID
self.drivers = drivers
for col, att in util.object_att_from_table('DemandTechs', ID):
setattr(self, col, att)
self.stocks = {}
def add_stock(self, GAU=None, DAU=None):
if (GAU, DAU) in self.stocks:
# ToDo note that a technology was added twice
return
self.stocks[GAU, DAU] = DemandStock()
def tech_efficiency(self, ID, efficiency, start=None, end=None):
"""
returns function parameters based on identification of efficiency type - main or aux
"""
vintage_start = int(cfgfile.get('vintage', 'start_year'))
vintage_end = int(cfgfile.get('vintage', 'end_year'))
vintages = np.arange(vintage_start, vintage_end + 1)
model_start_year = int(cfgfile.get('case', 'model_start_year'))
model_end_year = int(cfgfile.get('case', 'model_end_year'))
years = np.arange(model_start_year, model_end_year + 1)
# years = np.arange (start, end)
# vintages = np.arange (start, end)
stock = self.stock
if efficiency == "main":
efficiency_key = 'main_energy_efficiency'
ref_ID = stock.techs[ID].reference_main_efficiency_id
decay = 'main_energy_efficiency_decay'
else:
efficiency_key = 'aux_energy_efficiency'
ref_ID = stock.techs[ID].reference_aux_efficiency_id
decay = 'aux_energy_efficiency_decay'
eff_def = stock.techs[ID].efficiency_definition
if eff_def == "absolute":
ref_ID = ID
else:
ref_eff_def = stock.techs[ref_ID].efficiency_definition
if ref_eff_def == "relative":
error_text = "reference technology for technology %s not defined in absolute terms" % ID
raise ValueError(error_text)
else:
pass
# units to convert efficiency values to
sd_unit_type = self.service_demand.unit_type
sd_unit = self.service_demand.unit_base
energy_unit = cfgfile.get('case', 'energy_unit')
# converts efficiency values of technologies that are defined in
# absolute terms ex. miles/gallon for subsectors with inputs defined
# in energy service terms ex. kilometers to consistent efficiency
# units of energy_unit/service_demand_unit ex. gigajoule/kilometer
if eff_def == 'absolute' and sd_unit_type == 'service':
eff = util.efficiency_convert(getattr(stock.techs[ID], efficiency_key),
stock.techs[ID].efficiency_numerator_unit,
stock.techs[ID].efficiency_denominator_unit,
energy_unit, sd_unit)
eff = TimeSeries.clean(eff, extrapolation_method="nearest", newindex=vintages)
clean_eff_numerator_unit = energy_unit
clean_eff_denominator_unit = sd_unit
# no conversion is used if the service_demand unit is energy, as
# the efficiency values will be normalized in later calculations
elif eff_def == 'absolute' and sd_unit_type == 'energy':
eff = getattr(stock.techs[ID], efficiency_key)
eff = TimeSeries.clean(eff, extrapolation_method="nearest", newindex=vintages)
clean_eff_numerator_unit = stock.techs[ID].efficiency_numerator_unit
clean_eff_denominator_unit = stock.techs[ID].efficiency_denominator_unit
# converts efficiency values for reference technologies
# that are defined in absolute terms ex. miles/gallon for
# subsectors with inputs defined in energy service terms ex.
# kilometers to consistent efficiency units of
# energy_unit/service_demand_unit ex. gigajoule/kilometer
elif eff_def == "relative" and sd_unit_type == 'service':
ref_eff = util.efficiency_convert(
getattr(stock.techs[ref_ID], efficiency_key),
stock.techs[ref_ID].efficiency_numerator_unit,
stock.techs[ref_ID].efficiency_denominator_unit, energy_unit,
sd_unit)
ref_eff = TimeSeries.clean(ref_eff, extrapolation_method="nearest", newindex=vintages)
eff = getattr(stock.techs[ID], efficiency_key)
eff = TimeSeries.clean(eff, extrapolation_method="nearest", newindex=vintages)
eff *= ref_eff
clean_eff_numerator_unit = energy_unit
clean_eff_denominator_unit = sd_unit
# no conversion is used if the service_demand unit is energy, as
# the efficiency values will be normalized in later calculations.
# efficiency values are multiplied by reference technology efficiencies
else:
ref_eff = getattr(stock.techs[ref_ID], efficiency_key)
ref_eff = TimeSeries.clean(ref_eff, extrapolation_method="nearest", newindex=vintages)
eff = getattr(stock.techs[ID], efficiency_key)
eff = TimeSeries.clean(eff, extrapolation_method="nearest",
newindex=vintages)
eff *= ref_eff
clean_eff_numerator_unit = stock.techs[ref_ID].efficiency_numerator_unit
clean_eff_denominator_unit = stock.techs[ref_ID].efficiency_denominator_unit
decay_df = stockrollover.vintage_age(years, vintages)
        decay_df *= stockrollover.vintage_exist(years, vintages)
if eff_def == "absolute":
decay_df = 1 - (decay_df * getattr(stock.techs[ID], decay))
else:
decay_df = 1 - (decay_df * getattr(stock.techs[ref_ID], decay))
eff = eff.transpose()
eff = (decay_df.values * eff.values, years, vintages)
setattr(stock.techs[ID], 'clean_%s_efficiency' % efficiency, eff)
        setattr(stock.techs[ID], 'clean_%s_efficiency_numerator_unit' % efficiency, clean_eff_numerator_unit)
setattr(stock.techs[ID], 'clean_%s_efficiency_denominator_unit' % efficiency, clean_eff_denominator_unit)
def stock_efficiency(self):
sd_unit_type = self.service_demand.unit_type
if sd_unit_type == 'energy':
# ==============================================================================
# in order to calculate a normalized efficiency for a stock, which is
# used when the service demand is defined in energy terms, all
# absolute efficiency values must be in the same units. This code converts
# all efficiency values to the same units.
# ==============================================================================
primary_key = self.stock.techs[min(self.stock.techs.keys())]
setattr(self.stock, 'primary_efficiency_ID', primary_key)
setattr(self.stock, 'primary_efficiency_numerator_unit',
stock.techs[primary_key].clean_main_efficiency_numerator_unit)
setattr(self.stock, 'primary_efficiency_denominator_unit',
stock.techs[primary_key].clean_main_efficiency_denominator_unit)
for key in self.stock.techs:
for eff_type in ['main', 'aux']:
data = getattr(self.stock.techs[key],
'clean_%s_efficiency' % eff_type)
unit_from_denominator = getattr(self.stock.techs[key],
'clean_%s_efficiency_denominator_unit' % eff_type)
unit_from_numerator = getattr(self.stock.techs[key],
'clean_%s_efficiency_numerator_unit' % eff_type)
unit_to_denominator = getattr(self.stock, 'primary_efficiency_denominator_unit')
unit_to_numerator = getattr(self.stock, 'primary_efficiency_numerator_unit')
eff = util.efficiency_convert(data, unit_from_numerator, unit_from_denominator, unit_to_numerat
|
dyoung418/tensorflow
|
tensorflow/examples/image_retraining/retrain_test.py
|
Python
|
apache-2.0
| 4,548
| 0.005057
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-bad-import-order,unused-import
"""Tests the graph freezing tool."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import os
from tensorflow.examples.image_retraining import retrain
from tensorflow.python.framework import test_util
class ImageRetrainingTest(test_util.TensorFlowTestCase):
def dummyImageLists(self):
return {'label_one': {'dir': 'somedir', 'training': ['image_one.jpg',
'image_two.jpg'],
'testing': ['image_three.jpg', 'image_four.jpg'],
'validation': ['image_five.jpg', 'image_six.jpg']},
'label_two': {'dir': 'otherdir', 'training': ['image_one.jpg',
'image_two.jpg'],
'testing': ['image_three.jpg', 'image_four.jpg'],
'validation': ['image_five.jpg', 'image_six.jpg']}}
  def testGetImagePath(self):
image_lists = self.dummyImageLists()
self.assertEqual('image_dir/somedir/image_one.jpg', retrain.get_image_path(
image_lists, 'label_one', 0, 'image_dir', 'training'))
self.assertEqual('image_dir/otherdir/image_four.jpg',
retrain.get_image_path(image_lists, 'label_two', 1,
'image_dir', 'testing'))
  def testGetBottleneckPath(self):
image_lists = self.dummyImageLists()
self.assertEqual('bottleneck_dir/somedir/image_five.jpg_imagenet_v3.txt',
retrain.get_bottleneck_path(
image_lists, 'label_one', 0, 'bottleneck_dir',
'validation', 'imagenet_v3'))
def testShouldDistortImage(self):
self.assertEqual(False, retrain.should_distort_images(False, 0, 0, 0))
self.assertEqual(True, retrain.should_distort_images(True, 0, 0, 0))
self.assertEqual(True, retrain.should_distort_images(False, 10, 0, 0))
self.assertEqual(True, retrain.should_distort_images(False, 0, 1, 0))
self.assertEqual(True, retrain.should_distort_images(False, 0, 0, 50))
def testAddInputDistortions(self):
with tf.Graph().as_default():
with tf.Session() as sess:
retrain.add_input_distortions(True, 10, 10, 10, 299, 299, 3, 128, 128)
self.assertIsNotNone(sess.graph.get_tensor_by_name('DistortJPGInput:0'))
self.assertIsNotNone(sess.graph.get_tensor_by_name('DistortResult:0'))
@tf.test.mock.patch.object(retrain, 'FLAGS', learning_rate=0.01)
def testAddFinalTrainingOps(self, flags_mock):
with tf.Graph().as_default():
with tf.Session() as sess:
bottleneck = tf.placeholder(
tf.float32, [1, 1024],
name='bottleneck')
retrain.add_final_training_ops(5, 'final', bottleneck, 1024)
self.assertIsNotNone(sess.graph.get_tensor_by_name('final:0'))
def testAddEvaluationStep(self):
with tf.Graph().as_default():
final = tf.placeholder(tf.float32, [1], name='final')
gt = tf.placeholder(tf.float32, [1], name='gt')
self.assertIsNotNone(retrain.add_evaluation_step(final, gt))
def testAddJpegDecoding(self):
with tf.Graph().as_default():
jpeg_data, mul_image = retrain.add_jpeg_decoding(10, 10, 3, 0, 255)
self.assertIsNotNone(jpeg_data)
self.assertIsNotNone(mul_image)
def testCreateModelInfo(self):
did_raise_value_error = False
try:
retrain.create_model_info('no_such_model_name')
except ValueError:
did_raise_value_error = True
self.assertTrue(did_raise_value_error)
model_info = retrain.create_model_info('inception_v3')
self.assertIsNotNone(model_info)
self.assertEqual(299, model_info['input_width'])
if __name__ == '__main__':
tf.test.main()
|
AsherBond/MondocosmOS
|
grass_trunk/raster/r.gwflow/valid_calc_excavation.py
|
Python
|
agpl-3.0
| 1,561
| 0.012172
|
#!/usr/bin/env python
# Shellscript to verify r.gwflow calculation, this calculation is based on
# the example at page 167 of the following book:
# author = "Kinzelbach, W. and Rausch, R.",
# title = "Grundwassermodellierung",
# publisher = "Gebr{\"u}der Borntraeger (Berlin, Stuttgart)",
# year = "1995"
#
import sys
import os
import grass.script as grass
# Overwrite existing maps
grass.run_command("g.gisenv", set="OVERWRITE=1")
grass.message(_("Set the region"))
# The area is 2000m x 1000m with a cell size of 25m x 25m
grass.run_command("g.region", res=50, n=950, s=0, w=0, e=2000)
grass.run_command("r.mapcalc", expression="phead= if(row() == 19, 5, 3)")
grass.run_command("r.mapcalc", expression="status=if((col() == 1 && row() == 13) ||\
(col() == 1 && row() == 14) ||\
(col() == 2 && row() == 13) ||\
(col() == 2 && row() == 14) ||\
(row() == 19), 2, 1)")
grass.run_command("r.mapcalc", expression="hydcond=0.001")
grass.run_command("r.mapcalc", expression="recharge=0.000000006")
grass.run_command("r.mapcalc", expression="top=20")
grass.run_command("r.mapcalc", expression="bottom=0")
grass.run_command("r.mapcalc", expression="syield=0.001")
grass.run_command("r.mapcalc", expression="null
|
=0.0")
#compute a steady state groundwater flow
grass.run_command("r.gwflow", "f", solver="cholesky", top="top", bottom="bottom", phead="phead", \
status="status", hc_x="hydcond", hc_y="hydcond", s="syield", \
recharge="recharge", output="gwresult", dt=864000000000, type="unconfined", budget="water_budget")
|
courtneypattison/second-response
|
secondresponse/database/dbconnect.py
|
Python
|
mit
| 483
| 0.00207
|
""" Module summary:
Variables:
db_session - A connection to the farmfinder database.
"""
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from dbsetup import Base
############################################################################
# Connect to database and create database session:
engine = create_engine("sqlite:///secondresponse/database/sr.db")
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
db_session = DBSession()
|
youtube/cobalt
|
third_party/web_platform_tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_static.py
|
Python
|
bsd-3-clause
| 2,576
| 0.001553
|
import unittest
from cStringIO import StringIO
from ..backends import static
# There aren't many tests here because it turns out to be way more convenient to
# use test_serializer for the majority of cases
class TestStatic(unittest.TestCase):
def compile(self, input_text, input_data):
return static.compile(input_text, input_data)
def test_get_0(self):
data = """
key: value
[Heading 1]
other_key:
        if a == 1: value_1
if a == 2: value_2
value_3
"""
manifest = self.compile(data, {"a": 2})
self.assertEquals(manifest.get("key"), "value")
children = list(item for item in manifest.iterchildren())
        self.assertEquals(len(children), 1)
section = children[0]
self.assertEquals(section.name, "Heading 1")
self.assertEquals(section.get("other_key"), "value_2")
self.assertEquals(section.get("key"), "value")
def test_get_1(self):
data = """
key: value
[Heading 1]
other_key:
if a == 1: value_1
if a == 2: value_2
value_3
"""
manifest = self.compile(data, {"a": 3})
children = list(item for item in manifest.iterchildren())
section = children[0]
self.assertEquals(section.get("other_key"), "value_3")
def test_get_3(self):
data = """key:
if a == "1": value_1
if a[0] == "ab"[0]: value_2
"""
manifest = self.compile(data, {"a": "1"})
self.assertEquals(manifest.get("key"), "value_1")
manifest = self.compile(data, {"a": "ac"})
self.assertEquals(manifest.get("key"), "value_2")
def test_get_4(self):
data = """key:
if not a: value_1
value_2
"""
manifest = self.compile(data, {"a": True})
self.assertEquals(manifest.get("key"), "value_2")
manifest = self.compile(data, {"a": False})
self.assertEquals(manifest.get("key"), "value_1")
def test_api(self):
data = """key:
if a == 1.5: value_1
value_2
key_1: other_value
"""
manifest = self.compile(data, {"a": 1.5})
self.assertFalse(manifest.is_empty)
self.assertEquals(manifest.root, manifest)
self.assertTrue(manifest.has_key("key_1"))
self.assertFalse(manifest.has_key("key_2"))
self.assertEquals(set(manifest.iterkeys()), set(["key", "key_1"]))
self.assertEquals(set(manifest.itervalues()), set(["value_1", "other_value"]))
def test_is_empty_1(self):
data = """
[Section]
[Subsection]
"""
manifest = self.compile(data, {})
self.assertTrue(manifest.is_empty)
|
NoSmartNoMan/algorithm-1
|
lib/queue.py
|
Python
|
gpl-2.0
| 833
| 0.002466
|
#!/usr/bin/env python
# -*- coding:UTF-8
__author__ = 'shenshijun'
import copy
class Queue(object):
"""
使用Python的list快速实现一个队列
"""
def __init__(self, *arg):
super(Queue, self).__init__()
        self.__queue = list(copy.copy(arg))
self.__size = len(self.__queue)
def enter(self, value):
self.__size += 1
self.__queue.append(value)
def exit(self):
if self.__size <= 0:
return None
else:
value = self.__queue[0]
self.__size -= 1
del self.__queue[0]
return value
    def __len__(self):
        return self.__size
def empty(self):
return self.__size <= 0
def __str__(self):
return "".join(["Queue(list=", str(self.__queue), ",size=", str(self.__size)])
|
benoitfragit/VOXGenerator
|
setup.py
|
Python
|
gpl-2.0
| 1,170
| 0.026496
|
from distutils.core import setup
setup(
name = 'voxgenerator',
packages = ['voxgenerator',
'voxgenerator.core',
'voxgenerator.plugin',
'voxgenerator.pipeline',
'voxgenerator.generator',
'voxgenerator.service',
'voxgenerator.control'],
    version = '1.0.3',
    description = 'Vox generator',
url = 'https://github.com/benoitfragit/VOXGenerator/tree/master/voxgenerator',
author = 'Benoit Franquet',
author_email = 'benoitfraubuntu@gmail.com',
scripts = ['run_voxgenerator.py', 'run_voxgenerator', 'run_voxgenerator_gui.py'],
keywords = ['voice', 'control', 'pocketsphinx'],
classifiers = ["Programming Language :: Python",
"Development Status :: 4 - Beta",
"Environment :: Other Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules"]
)
|
zstackorg/zstack-woodpecker
|
integrationtest/vm/multiclusters/data_migration/test_migrate_migrated_vm.py
|
Python
|
apache-2.0
| 791
| 0.006321
|
'''
New Integration Test for migrate between clusters
@author: Legion
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
test_obj_dict = test_state.TestStateDict()
test_stub = test_lib.lib_get_test_stub()
data_migration = test_stub.DataMigration()
def test():
data_migration.create_vm()
data_migration.migrate_vm()
    test_stub.migrate_vm_to_random_host(data_migration.vm)
data_migration.vm.check()
data_migration.vm.destroy()
test_util.test_pass('Migrate migrated VM Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
if data_migration.vm:
try:
data_migration.vm.destroy()
except:
pass
|
django-danceschool/django-danceschool
|
danceschool/financial/migrations/0002_auto_20170425_0010.py
|
Python
|
bsd-3-clause
| 3,541
| 0.003106
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-25 00:10
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import filer.fields.file
class Migration(migrations.Migration):
initial = True
dependencies = [
('core', '0001_initial'),
('filer', '0007_auto_20161016_1055'),
('vouchers', '0001_initial'),
('financial', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.AddField(
model_name='revenueitem',
name='purchasedVoucher',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='vouchers.Voucher', verbose_name='Purchased voucher/gift certificate'),
),
migrations.AddField(
model_name='revenueitem',
name='registration',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Registration'),
),
migrations.AddField(
model_name='revenueitem',
name='submissionUser',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='revenuessubmittedby', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='expenseitem',
name='attachment',
field=filer.fields.file.FilerFileField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='expense_attachment', to='filer.File', verbose_name='Attach File (optional)'),
),
migrations.AddField(
model_name='expenseitem',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='financial.ExpenseCategory'),
),
migrations.AddField(
model_name='expenseitem',
name='event',
field=models.ForeignKey(blank=True, help_text='If this item is associated with an Event, enter it here.', null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Event'),
),
migrations.AddField(
model_name='expenseitem',
name='eventstaffmember',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.EventStaffMember'),
),
migrations.AddField(
model_name='expenseitem',
name='eventvenue',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='venueexpense', to='core.Event'),
),
migrations.AddField(
model_name='expenseitem',
name='payToLocation',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Location'),
),
migrations.AddField(
model_name='expenseitem',
name='payToUser',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='payToUser', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='expenseitem',
name='submissionUser',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='expensessubmittedby', to=settings.AUTH_USER_MODEL),
),
]
|
Jumpscale/jumpscale_core8
|
lib/JumpScale/tools/objectinspector/ObjectInspector.py
|
Python
|
apache-2.0
| 17,296
| 0.003296
|
import os
import inspect
import types
from collections import OrderedDict
import json
from JumpScale import j
# api codes
# 4 function with params
# 7 ???
# 8 property
class Arg:
"""
Wrapper for argument
"""
def __init__(self, name, defaultvalue):
self.name = name
self.defaultvalue = defaultvalue
def __str__(self):
out = ""
if self.defaultvalue is not None:
out += "- %s = %s\n" % (self.name, self.defaultvalue)
else:
out += "- %s\n" % (self.name)
return out
def __repr__(self):
return self.__str__()
def attrib(name, type, doc=None, objectpath=None, filepath=None, extra=None):
"""
Helper function for codecompletion tree.
"""
return (name, type, doc, objectpath, filepath, extra)
class MethodDoc:
"""
Method documentation
"""
def __init__(self, method, name, classdoc):
self.classdoc = classdoc
self.params = []
inspected = inspect.getargspec(method)
if inspected.defaults is not None:
counter = len(inspected.defaults) - len(inspected.args)
else:
counter = -99999
for param in inspected.args:
            if inspected.defaults is not None and counter > -1:
defval = inspected.defaults[counter]
if j.data.types.string.check(defval):
defval = "'%s'" % defval
else:
defval = None
counter += 1
if param != "self":
self.params.append(Arg(param, defval))
if inspected.varargs is not None:
self.params.append(Arg("*%s" % inspected.varargs, None))
        if inspected.keywords is not None:
self.params.append(Arg("**%s" % inspected.keywords, None))
self.comments = inspect.getdoc(method)
if self.comments is None:
self.comments = ""
self.comments = j.data.text.strip(self.comments)
self.comments = j.data.text.wrap(self.comments, 90)
self.linenr = inspect.getsourcelines(method)[1]
self.name = name
# self.methodline=inspect.getsourcelines(method)[0][0].strip().replace("self, ","").replace("self,","").replace("self","").replace(":","")
def __str__(self):
"""
Markdown representation of the method and its arguments
"""
out = ""
param_s = ""
if len(self.params) > 0:
param_s = ", ".join([str(arg.name) + "=" + str(arg.defaultvalue)
if arg.defaultvalue else arg.name for arg in self.params])
param_s = "*%s*" % param_s
out += "#### %s(%s) \n\n" % (self.name, param_s)
if self.comments is not None and self.comments.strip() != "":
out += "```\n" + self.comments + "\n```\n\n"
return out
def __repr__(self):
return self.__str__()
class ClassDoc:
def __init__(self, classobj, location):
self.location = location
self.methods = {}
self.comments = inspect.getdoc(classobj)
module = inspect.getmodule(classobj)
self.path = inspect.getabsfile(module)
self.errors = ""
self.properties = []
for key, val in classobj.__dict__.items():
if key.startswith("_"):
continue
self.properties.append(key)
def getPath(self):
for method in self.methods:
return inspect.getabsfile(method)
def addMethod(self, name, method):
try:
source = inspect.getsource(method)
except:
self.errors += '#### Error trying to add %s source in %s.\n' % (name, self.location)
print("ADD METHOD:%s %s" % (self.path, name))
md = MethodDoc(method, name, self)
self.methods[name] = md
return source, md.params
def undersore_location(self):
return self.location.replace(".", "_")
def write(self, dest):
dest2 = j.sal.fs.joinPaths(dest, self.location.split(".")[1], "%s.md" % self.undersore_location())
destdir = j.sal.fs.getDirName(dest2)
j.sal.fs.createDir(destdir)
content = str(self)
content = content.replace("\n\n\n", "\n\n")
content = content.replace("\n\n\n", "\n\n")
content = content.replace("\n\n\n", "\n\n")
# ugly temp hack, better to do with regex
content = content.replace("\{", "$%[")
content = content.replace("\}", "$%]")
content = content.replace("{", "\{")
content = content.replace("}", "\}")
content = content.replace("$%]", "\}")
content = content.replace("$%[", "\{")
j.sal.fs.writeFile(filename=dest2, contents=content)
return dest2
def __str__(self):
C = "<!-- toc -->\n"
C += "## %s\n\n" % self.location
C += "- %s\n" % self.path
if self.properties != []:
C += "- Properties\n"
for prop in self.properties:
C += " - %s\n" % prop
C += "\n### Methods\n"
C += "\n"
if self.comments is not None:
C += "\n%s\n\n" % self.comments
keys = sorted(self.methods.keys())
for key in keys:
method = self.methods[key]
C2 = str(method)
C += C2
return C
def __repr__(self):
return self.__str__()
class ObjectInspector:
"""
    Functionality to inspect an object structure and generate the API file
    and the pickled OrderedDict used for code completion.
"""
def __init__(self):
self.__jslocation__ = "j.tools.objectinspector"
self.apiFileLocation = j.sal.fs.joinPaths(j.dirs.cfgDir, "codecompletionapi", "jumpscale.api")
# j.sal.fs.createDir(j.sal.fs.joinPaths(j.dirs.cfgDir, "codecompletionapi"))
self.classDocs = {}
self.visited = []
self.root = None
self.manager = None
self.logger = j.logger.get('j.tools.objectinspector')
self.jstree = OrderedDict() # jstree['j.sal']={'unix': unixobject, 'fs': fsobject}
def importAllLibs(self, ignore=[], base="%s/lib/JumpScale/" % j.dirs.base):
self.base = os.path.normpath(base)
towalk = j.sal.fs.listDirsInDir(base, recursive=False, dirNameOnly=True, findDirectorySymlinks=True)
errors = "### errors while trying to import libraries\n\n"
for item in towalk:
path = "%s/%s" % (base, item)
for modname in j.sal.fs.listDirsInDir(path, False, True, True):
if modname not in ignore:
toexec = "import JumpScale.%s.%s" % (item, modname)
try:
exec(toexec)
except Exception as e:
self.logger.error(("COULD NOT IMPORT %s" % toexec))
errors += "**%s**\n\n" % toexec
errors += "%s\n\n" % e
return errors
def raiseError(self, errormsg):
self.logger.error("ERROR:%s" % errormsg)
errormsg = errormsg.strip()
errormsg = errormsg.strip("-")
errormsg = errormsg.strip("*")
errormsg = errormsg.strip()
errormsg = "* %s\n" % errormsg
j.sal.fs.writeFile(filename="%s/errors.md" % self.dest, contents=errormsg, append=True)
def generateDocs(self, dest, ignore=[], objpath="j"):
"""
        Generates documentation of objpath in destination directory dest
@param dest: destination directory to write documentation.
@param objpath: object path
@param ignore: modules list to be ignored during the import.
"""
self.dest = dest
self.apiFileLocation = "%s/jumpscale.api" % self.dest
j.sal.fs.writeFile("%s/errors.md" % dest, "")
j.sal.fs.createDir(self.dest)
self.errors = self.importAllLibs(ignore=ignore)
#self.errors = ''
objectLocationPath = objpath
# extract the object name (j.sal.unix ) -> unix to make a stub out of it.
objname = ''
filepath = ''
if '.' in objpath:
objname = objpath.split(".")[-1]
el
|
afbarnard/barnapy
|
barnapy/test/numpy_utils_test.py
|
Python
|
mit
| 1,182
| 0
|
"""Tests `numpy_utils.py`."""
# Copyright (c) 2021 Aubrey Barnard.
#
# This is free, open software released under the MIT license. See
# `LICENSE` for details.
import random
import unittest
import numpy.random
from .. import numpy_utils
class NumpyAsStdlibPrngTest(unittest.TestCase):
def test_random_floats(self):
seed = 0xdeadbeeffeedcafe
n_samples = 10
orig_prng = numpy.random.default_rng(seed)
expected = [orig_prng.random() for _ in range(n_samples)]
        wrap_prng = numpy_utils.NumpyAsStdlibPrng(
numpy.random.default_rng(seed))
actual = [wrap_prng.random() for _ in range(n_samples)]
self.assertEqual(expected, actual)
class NumpyBitGeneratorTest(unittest.TestCase):
def test_random_floats(self):
seed = 0xdeadbeeffeedcafe
n_samples = 10
old_prng = random.Random(seed)
expected = [old_prng.random() for _ in range(n_samples)]
new_prng = numpy.random.Generator(
numpy_utils.numpy_bit_generator(
random.Random(seed)))
actual = [new_prng.random() for _ in range(n_samples)]
self.assertEqual(expected, actual)
|
TouK/vumi
|
vumi/tests/test_connectors.py
|
Python
|
bsd-3-clause
| 14,757
| 0
|
from twisted.internet.defer import inlineCallbacks, returnValue
from vumi.connectors import (
BaseConnector, ReceiveInboundConnector, ReceiveOutboundConnector,
IgnoreMessage)
from vumi.tests.utils import LogCatcher
from vumi.worker import BaseWorker
from vumi.message import TransportUserMessage
from vumi.middleware.tests.utils import RecordingMiddleware
from vumi.tests.helpers import VumiTestCase, MessageHelper, WorkerHelper
class DummyWorker(BaseWorker):
def setup_connectors(self):
pass
def setup_worker(self):
pass
def teardown_worker(self):
pass
class BaseConnectorTestCase(VumiTestCase):
connector_class = None
def setUp(self):
self.msg_helper = self.add_helper(MessageHelper())
self.worker_helper = self.add_helper(WorkerHelper())
@inlineCallbacks
def mk_connector(self, worker=None, connector_name=None,
                     prefetch_count=None, middlewares=None, setup=False):
if worker is None:
            worker = yield self.worker_helper.get_worker(DummyWorker, {})
if connector_name is None:
connector_name = "dummy_connector"
connector = self.connector_class(worker, connector_name,
prefetch_count=prefetch_count,
middlewares=middlewares)
if setup:
yield connector.setup()
returnValue(connector)
@inlineCallbacks
def mk_consumer(self, *args, **kwargs):
conn = yield self.mk_connector(*args, **kwargs)
consumer = yield conn._setup_consumer('inbound', TransportUserMessage,
lambda msg: None)
returnValue((conn, consumer))
class TestBaseConnector(BaseConnectorTestCase):
connector_class = BaseConnector
@inlineCallbacks
def test_creation(self):
conn = yield self.mk_connector(connector_name="foo")
self.assertEqual(conn.name, "foo")
self.assertTrue(isinstance(conn.worker, BaseWorker))
@inlineCallbacks
def test_middlewares_consume(self):
worker = yield self.worker_helper.get_worker(DummyWorker, {})
middlewares = [RecordingMiddleware(
str(i), {'consume_priority': 0, 'publish_priority': 0}, worker)
for i in range(3)]
conn, consumer = yield self.mk_consumer(
worker=worker, connector_name='foo', middlewares=middlewares)
consumer.unpause()
msgs = []
conn._set_default_endpoint_handler('inbound', msgs.append)
msg = self.msg_helper.make_inbound("inbound")
yield self.worker_helper.dispatch_inbound(msg, 'foo')
record = msgs[0].payload.pop('record')
self.assertEqual(record,
[(str(i), 'inbound', 'foo')
for i in range(3)])
@inlineCallbacks
def test_middlewares_publish(self):
worker = yield self.worker_helper.get_worker(DummyWorker, {})
middlewares = [RecordingMiddleware(
str(i), {'consume_priority': 0, 'publish_priority': 0}, worker)
for i in range(3)]
conn = yield self.mk_connector(
worker=worker, connector_name='foo', middlewares=middlewares)
yield conn._setup_publisher('outbound')
msg = self.msg_helper.make_outbound("outbound")
yield conn._publish_message('outbound', msg, 'dummy_endpoint')
msgs = self.worker_helper.get_dispatched_outbound('foo')
record = msgs[0].payload.pop('record')
self.assertEqual(record,
[[str(i), 'outbound', 'foo']
for i in range(2, -1, -1)])
@inlineCallbacks
    def test_prefetch_count(self):
conn, consumer = yield self.mk_consumer(prefetch_count=10)
self.assertEqual(consumer.channel.qos_prefetch_count, 10)
@inlineCallbacks
def test_setup_raises(self):
conn = yield self.mk_connector()
self.assertRaises(NotImplementedError, conn.setup)
@inlineCallbacks
def test_teardown(self):
conn, consumer = yield self.mk_consumer()
self.assertTrue(consumer.keep_consuming)
yield conn.teardown()
self.assertFalse(consumer.keep_consuming)
@inlineCallbacks
def test_paused(self):
conn, consumer = yield self.mk_consumer()
consumer.pause()
self.assertTrue(conn.paused)
consumer.unpause()
self.assertFalse(conn.paused)
@inlineCallbacks
def test_pause(self):
conn, consumer = yield self.mk_consumer()
consumer.unpause()
self.assertFalse(consumer.paused)
conn.pause()
self.assertTrue(consumer.paused)
@inlineCallbacks
def test_unpause(self):
conn, consumer = yield self.mk_consumer()
consumer.pause()
self.assertTrue(consumer.paused)
conn.unpause()
self.assertFalse(consumer.paused)
@inlineCallbacks
def test_setup_publisher(self):
conn = yield self.mk_connector(connector_name='foo')
publisher = yield conn._setup_publisher('outbound')
self.assertEqual(publisher.routing_key, 'foo.outbound')
@inlineCallbacks
def test_setup_consumer(self):
conn, consumer = yield self.mk_consumer(connector_name='foo')
self.assertTrue(consumer.paused)
self.assertEqual(consumer.routing_key, 'foo.inbound')
self.assertEqual(consumer.message_class, TransportUserMessage)
@inlineCallbacks
def test_set_endpoint_handler(self):
conn, consumer = yield self.mk_consumer(connector_name='foo')
consumer.unpause()
msgs = []
conn._set_endpoint_handler('inbound', msgs.append, 'dummy_endpoint')
msg = self.msg_helper.make_inbound("inbound")
msg.set_routing_endpoint('dummy_endpoint')
yield self.worker_helper.dispatch_inbound(msg, 'foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_set_none_endpoint_handler(self):
conn, consumer = yield self.mk_consumer(connector_name='foo')
consumer.unpause()
msgs = []
conn._set_endpoint_handler('inbound', msgs.append, None)
msg = self.msg_helper.make_inbound("inbound")
yield self.worker_helper.dispatch_inbound(msg, 'foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_set_default_endpoint_handler(self):
conn, consumer = yield self.mk_consumer(connector_name='foo')
consumer.unpause()
msgs = []
conn._set_default_endpoint_handler('inbound', msgs.append)
msg = self.msg_helper.make_inbound("inbound")
yield self.worker_helper.dispatch_inbound(msg, 'foo')
self.assertEqual(msgs, [msg])
@inlineCallbacks
def test_publish_message_with_endpoint(self):
conn = yield self.mk_connector(connector_name='foo')
yield conn._setup_publisher('outbound')
msg = self.msg_helper.make_outbound("outbound")
yield conn._publish_message('outbound', msg, 'dummy_endpoint')
msgs = self.worker_helper.get_dispatched_outbound('foo')
self.assertEqual(msgs, [msg])
class TestReceiveInboundConnector(BaseConnectorTestCase):
connector_class = ReceiveInboundConnector
@inlineCallbacks
def test_setup(self):
conn = yield self.mk_connector(connector_name='foo')
yield conn.setup()
conn.unpause()
with LogCatcher() as lc:
msg = self.msg_helper.make_inbound("inbound")
yield self.worker_helper.dispatch_inbound(msg, 'foo')
[msg_log] = lc.messages()
self.assertTrue(msg_log.startswith("No inbound handler for 'foo'"))
with LogCatcher() as lc:
event = self.msg_helper.make_ack()
yield self.worker_helper.dispatch_event(event, 'foo')
[event_log] = lc.messages()
self.assertTrue(event_log.startswith("No event handler for 'foo'"))
msg = self.msg_helper.make_outbound("outbound")
yield conn.publish_outbound(msg)
msgs = self.worker_helper.get_dispatched_outbound('foo')
self.assertEqual(msgs, [ms
|
hpfn/charcoallog
|
charcoallog/investments/apps.py
|
Python
|
gpl-3.0
| 265
| 0
|
from django.apps import AppConfig
class InvestmentsConfig(AppConfig):
name = 'charcoallog.investments'
def ready(self):
# using @receiver decorator
        # do not optimize import !!!
import charcoallog.investments.signals # noqa: F401
|
edisonlz/fruit
|
web_project/base/site-packages/docutils/frontend.py
|
Python
|
apache-2.0
| 33,065
| 0.000726
|
# $Id: frontend.py 6154 2009-10-05 19:08:10Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Command-line and common processing for Docutils front-end tools.
Exports the following classes:
* `OptionParser`: Standard Docutils command-line processing.
* `Option`: Customized version of `optparse.Option`; validation support.
* `Values`: Runtime settings; objects are simple structs
(``object.attribute``). Supports cumulative list settings (attributes).
* `ConfigParser`: Standard Docutils config file processing.
Also exports the following functions:
* Option callbacks: `store_multiple`, `read_config_file`.
* Setting validators: `validate_encoding`,
`validate_encoding_error_handler`,
`validate_encoding_and_error_handler`, `validate_boolean`,
`validate_threshold`, `validate_colon_separated_string_list`,
`validate_dependency_file`.
* `make_paths_absolute`.
"""
__docformat__ = 'reStructuredText'
import os
import os.path
import sys
import warnings
import ConfigParser as CP
import codecs
import docutils
import docutils.utils
import docutils.nodes
import optparse
from optparse import SUPPRESS_HELP
def store_multiple(option, opt, value, parser, *args, **kwargs):
"""
Store multiple values in `parser.values`. (Option callback.)
Store `None` for each attribute named in `args`, and store the value for
each key (attribute name) in `kwargs`.
"""
for attribute in args:
setattr(parser.values, attribute, None)
for key, value in kwargs.items():
setattr(parser.values, key, value)
def read_config_file(option, opt, value, parser):
"""
Read a configuration file during option processing. (Option callback.)
"""
try:
new_settings = parser.get_config_file_settings(value)
except ValueError, error:
parser.error(error)
parser.values.update(new_settings, parser)
def validate_encoding(setting, value, option_parser,
config_parser=None, config_section=None):
try:
codecs.lookup(value)
except LookupError:
raise (LookupError('setting "%s": unknown encoding: "%s"'
% (setting, value)),
None, sys.exc_info()[2])
return value
def validate_encoding_error_handler(setting, value, option_parser,
config_parser=None, config_section=None):
try:
codecs.lookup_error(value)
except AttributeError: # TODO: remove (only needed prior to Python 2.3)
if value not in ('strict', 'ignore', 'replace', 'xmlcharrefreplace'):
raise (LookupError(
'unknown encoding error handler: "%s" (choices: '
'"strict", "ignore", "replace", or "xmlcharrefreplace")' % value),
None, sys.exc_info()[2])
except LookupError:
raise (LookupError(
'unknown encoding error handler: "%s" (choices: '
'"strict", "ignore", "replace", "backslashreplace", '
'"xmlcharrefreplace", and possibly others; see documentation for '
'the Python ``codecs`` module)' % value),
None, sys.exc_info()[2])
return value
def validate_encoding_and_error_handler(
setting, value, option_parser, config_parser=None, config_section=None):
"""
Side-effect: if an error handler is included in the value, it is inserted
into the appropriate place as if it was a separate setting/option.
"""
if ':' in value:
encoding, handler = value.split(':')
validate_encoding_error_handler(
setting + '_error_handler', handler, option_parser,
config_parser, config_section)
if config_parser:
config_parser.set(config_section, setting + '_error_handler',
handler)
else:
setattr(option_parser.values, setting + '_error_handler', handler)
else:
encoding = value
validate_encoding(setting, encoding, option_parser,
config_parser, config_section)
return encoding
def validate_boolean(setting, value, option_parser,
config_parser=None, config_section=None):
if isinstance(value, unicode):
try:
return option_parser.booleans[value.strip().lower()]
except KeyError:
raise (LookupError('unknown boolean value: "%s"' % value),
None, sys.exc_info()[2])
return value
def validate_nonnegative_int(setting, value, option_parser,
config_parser=None, config_section=None):
value = int(value)
if value < 0:
raise ValueError('negative value; must be positive or zero')
return value
def validate_threshold(setting, value, option_parser,
config_parser=None, config_section=None):
try:
return int(value)
except ValueError:
try:
return option_parser.thresholds[value.lower()]
except (KeyError, AttributeError):
raise (LookupError('unknown threshold: %r.' % value),
                   None, sys.exc_info()[2])
def validate_colon_separated_string_list(
setting, value, option_parser, config_parser=None, config_section=None):
if isinstance(value, unicode):
value = value.split(':')
else:
last = value.pop()
value.extend(last.split(':'))
return value
def validate_url_trailing_slash(
setting, value, option_parser, config_parser=None, config_section=None):
if not value:
return './'
elif value.endswith('/'):
return value
else:
return value + '/'
def validate_dependency_file(setting, value, option_parser,
config_parser=None, config_section=None):
try:
return docutils.utils.DependencyList(value)
except IOError:
return docutils.utils.DependencyList(None)
def validate_strip_class(setting, value, option_parser,
config_parser=None, config_section=None):
if config_parser: # validate all values
class_values = value
else: # just validate the latest value
class_values = [value[-1]]
for class_value in class_values:
normalized = docutils.nodes.make_id(class_value)
if class_value != normalized:
raise ValueError('invalid class value %r (perhaps %r?)'
% (class_value, normalized))
return value
def make_paths_absolute(pathdict, keys, base_path=None):
"""
Interpret filesystem path settings relative to the `base_path` given.
Paths are values in `pathdict` whose keys are in `keys`. Get `keys` from
`OptionParser.relative_path_settings`.
"""
if base_path is None:
base_path = os.getcwd()
for key in keys:
if key in pathdict:
value = pathdict[key]
if isinstance(value, list):
value = [make_one_path_absolute(base_path, path)
for path in value]
elif value:
value = make_one_path_absolute(base_path, value)
pathdict[key] = value
def make_one_path_absolute(base_path, path):
return os.path.abspath(os.path.join(base_path, path))
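# Illustrative usage sketch of the two path helpers above (added comments only;
# the dictionary keys below are arbitrary examples, not required settings):
#     settings = {'stylesheet_path': 'docs/style.css', 'warnings': 'warn.log'}
#     make_paths_absolute(settings, ('stylesheet_path',), base_path='/project')
#     # settings['stylesheet_path'] -> '/project/docs/style.css';
#     # 'warnings' is untouched because its key is not listed in `keys`.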
class Values(optparse.Values):
"""
Updates list attributes by extension rather than by replacement.
Works in conjunction with the `OptionParser.lists` instance attribute.
"""
def __init__(self, *args, **kwargs):
optparse.Values.__init__(self, *args, **kwargs)
if (not hasattr(self, 'record_dependencies')
or self.record_dependencies is None):
# Set up dependency list, in case it is needed.
self.record_dependencies = docutils.utils.DependencyList()
def update(self, other_dict, option_parser):
if isinstance(other_dict, Values):
other_dict = other_dict.__dict__
other_dict = other_dict.copy()
for setting in option_parser.lists.keys():
if (hasattr(self, setting) and setting in other_dict):
value =
|
ankutty/OCR-Tesseract
|
ocr_app.py
|
Python
|
apache-2.0
| 1,764
| 0.00907
|
import os
from flask import Flask, render_template, request
from PIL import Image
import sys
import pyocr
import pyocr.builders
import re
import json
__author__ = 'K_K_N'
app = Flask(__name__)
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
def ocr(image_file):
tools = pyocr.get_available_tools()
if len(tools) == 0:
print("No OCR tool found")
sys.exit(1)
# The tools are returned in the recommended order of usage
tool = tools[0]
#print("Will use tool '%s'" % (tool.get_name()))
# Ex: Will use tool 'libtesseract'
langs = tool.get_available_languages()
#print("Available languages: %s" % ", ".join(langs))
lang = langs[1]
#print("Will use lang '%s'" % (lang))
txt = tool.image_to_string(
Image.open(image_file),
lang=lang,
builder=pyocr.builders.TextBuilder()
)
ektp_no = re.search( r'[?:nik\s*:\s*](\d{1,20})\s*', txt, re.I)
#print ektp_no
#if ektp_no:
# print "ektp_no.group() : ", ektp_no.group()
data = {}
data['ektp'] = ektp_no.group().strip()
    return json.dumps(data)
@app.route("/")
def index():
return render_template("upload.html")
@app.route("/upload", methods=['POST'])
def upload():
target = os.path.join(APP_ROOT, 'images/')
print(target)
if not os.path.isdir(target):
os.mkdir(target)
    for file in request.files.getlist("file"):
print(file)
filename = file.filename
destination = "/".join([target, filename])
print(destination)
file.save(destination)
#Return JSON
#print txt
#file.delete(destination)
return ocr(destination)
#return json.dumps(txt)
if __name__ == "__main__":
app.run(port=4555, debug=True)
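# Hypothetical client call for the /upload endpoint above (illustrative only):
#     curl -F "file=@ektp-card.jpg" http://localhost:4555/upload
# The response is the JSON string produced by ocr(), e.g. {"ektp": "<digits>"}.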
|
shishaochen/TensorFlow-0.8-Win
|
third_party/grpc/src/python/grpcio/tests/unit/_links/_lonely_invocation_link_test.py
|
Python
|
apache-2.0
| 3,549
| 0.001691
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A test of invocation-side code unconnected to an RPC server."""
import unittest
from grpc._adapter import _intermediary_low
from grpc._links import invocation
from grpc.framework.interfaces.links import links
from tests.unit.framework.common import test_constants
from tests.unit.framework.interfaces.links import test_cases
from tests.unit.framework.interfaces.links import test_utilities
_NULL_BEHAVIOR = lambda unused_argument: None
class LonelyInvocationLinkTest(unittest.TestCase):
def testUpAndDown(self):
channel = _intermediary_low.Channel('nonexistent:54321', None)
invocation_link = invocation.invocation_link(
channel, 'nonexistent', None, {}, {})
invocation_link.start()
invocation_link.stop()
def _test_lonely_invocation_with_termination(self, termination):
test_operation_id = object()
test_group = 'test package.Test Service'
test_method = 'test method'
invocation_link_mate = test_utilities.RecordingLink()
channel = _intermediary_low.Channel('nonexistent:54321', None)
invocation_link = invocation.invocation_link(
channel, 'nonexistent', None, {}, {})
invocation_link.join_link(invocation_link_mate)
invocation_link.start()
ticket = links.Ticket(
test_operation_id, 0, test_group, test_method,
links.Ticket.Subscription.FULL, test_constants.SHORT_TIMEOUT, 1, None,
None, None, None, None, termination, None)
invocation_link.accept_ticket(ticket)
invocation_link_mate.block_until_tickets_satisfy(test_cases.terminated)
invocation_link.stop()
self.assertIsNot(
invocation_link_mate.tickets()[-1].termination,
links.Ticket.Termination.COMPLETION)
def testLonelyInvocationLinkWithCommencementTicket(self):
self._test_lonely_invocation_with_termination(None)
def testLonelyInvocationLinkWithEntireTicket(self):
self._test_lonely_invocation_with_termination(
links.Ticket.Termination.COMPLETION)
if __name__ == '__main__':
unittest.main()
|
seba-1511/nnexp
|
examples/mnist_simple.py
|
Python
|
apache-2.0
| 293
| 0.006826
|
#!/usr/bin/env python
import numpy as np
import torch as th
from torchvision import datasets, transforms
from nnexp import learn
if __name__ == '__main__':
dataset = datasets.MNIST('./data', train=True, download=True, transform=transforms.ToTensor())
    learn('mnist_simple', dataset)
|
piratica/ptf
|
vulnerability-analysis/arachni.py
|
Python
|
gpl-3.0
| 614
| 0.016287
|
#!/usr/bin/env python
#########################################
# Installation module for arachni
#########################################
# AUTHOR OF MODULE NAME
AUTHOR="Nathan Underwood (sai nate)"
# DESCRIPTION OF THE MODULE
DESCRIPTION="Website / webapp vulnerability scanner."
# INSTALLATION TYPE
# OPTIONS GIT, SVN, FILE, DOWNLOAD
INSTALL_TYPE="GIT"
#LOCATION OF THE FILE OR GIT / SVN REPOSITORY
REPOSITORY_LOCATION="https://github.com/Arachni/arachni.git"
# WHERE DO YOU WANT TO INSTALL IT
INSTALL_LOCATION="arachni"
# DEPENDS FOR DEBIAN INSTALLS
DEBIAN=""
#COMMANDS TO RUN AFTER
AFTER_COMMANDS=""
|
SebastianoF/LabelsManager
|
nilabels/definitions.py
|
Python
|
mit
| 1,452
| 0.006198
|
import os
__version__ = 'v0.0.7' # update also in setup.py
root_dir = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
info = {
"name": "NiLabels",
"version": __version__,
"description": "",
"repository": {
"type": "git",
"url": ""
},
"author": "Sebastiano Ferraris",
"dependencies": {
# requirements.txt automatically generated using pipreqs
"python requirements" : "{0}/requirements.txt".format(root_dir)
}
}
definition_template = """ A template is the average, computed with a chosen protocol, of a series of image acquisitions
of the same anatomy, or in general of different objects that share common features.
"""
definition_atlas = """ An atlas is the segmentation of the template, obtained by averaging, with a chosen protocol,
the series of segmentations corresponding to the series of image acquisitions that generates the template.
"""
definition_label = """ A segmentation assigns each region a label, and labels
are represented as subsets of voxels with the same positive integer value.
"""
nomenclature_conventions = """ pfi_xxx = path to file xxx, \npfo_xxx = path to folder xxx,
\nin_xxx = input data structure xxx, \nout_xxx = output data structure xxx, \nz_ : prefix to temporary files and folders,
\nfin_ : file name.
"""
|
gisce/primestg
|
primestg/ziv_service.py
|
Python
|
agpl-3.0
| 959
| 0.005214
|
from requests import post
import io
import base64
class ZivService(object):
def __init__(self, cnc_url, user=None, password=None, sync=True):
self.cnc_url = cnc_url
self.sync = sync
self.auth = None
if user and password:
self.auth = (user,password)
def send_cycle(self, filename, cycle_filedata):
"""Send a cycle file to the concentrator service
Keyword arguments:
        filename -- the name of our file (doesn't matter)
cycle_filedata -- the file to send, encoded as a base64 string
"""
filecontent = base64.b64decode(cycle_filedata)
url = self.cnc_url + ('/' if (self.cnc_url[-1] != '/') else '') +'cct/cycles/'
result = None
if self.auth:
result = post(url, files={'file': (filename, filecontent)}, auth=self.auth)
else:
result = post(url, files={'file': (filename, filecontent)})
return result
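# Minimal usage sketch (illustrative only; the URL, credentials and file name
# below are placeholders, not part of the original module):
#     svc = ZivService('http://cnc.example.org', user='user', password='secret')
#     with open('cycle.xml', 'rb') as f:
#         payload = base64.b64encode(f.read())
#     response = svc.send_cycle('cycle.xml', payload)
#     print(response.status_code)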
|
rmcintosh/ipy
|
setup.py
|
Python
|
mit
| 155
| 0
|
from setuptools import setup
setup(
name='ipy',
packages=['ipy'],
    include_package_data=True,
install_requires=[
'flask'
],
)
|
k0001/python-libmemcached
|
setup.py
|
Python
|
bsd-3-clause
| 517
| 0.040619
|
#!/usr/bin/env python
from setuptools import setup, Extension
setup(
name = "python-libmemcached",
    version = "0.17.0",
description="python memcached client wrapped on libmemcached",
maintainer="subdragon",
maintainer_email="subdragon@gmail.com",
requires = ['pyrex'],
# This assumes that libmemcache is installed with base /usr/local
ext_modules=[Extension('cmemcached', ['cmemcached.pyx'],
libraries=['memcached'],
)],
test_suite="cmemcached_test",
)
| |
ijat/Hotspot-PUTRA-Auto-login
|
PyInstaller-3.2/PyInstaller/hooks/hook-sysconfig.py
|
Python
|
gpl-3.0
| 1,238
| 0.002423
|
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2016, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# The 'sysconfig' module requires Makefile and pyconfig.h files from
# Python installation. 'sysconfig' parses these files to get some
# information from them.
# TODO Verify that bundling Makefile and pyconfig.h is still required for Python 3.
import sysconfig
import os
from PyInstaller.utils.hooks import relpath_to_config_or_make
_CONFIG_H = sysconfig.get_config_h_filename()
if hasattr(sysconfig, 'get_makefile_filename'):
# sysconfig.get_makefile_filename is missing in Python < 2.7.9
_MAKEFILE = sysconfig.get_makefile_filename()
else:
_MAKEFILE = sysconfig._get_makefile_filename()
datas = [(_CONFIG_H, relpath_to_config_or_make(_CONFIG_H))]
# The Makefile does not exist on all platforms, eg. on Windows
if os.path.exists(_MAKEFILE):
datas.append((_MAKEFILE, relpath_to_config_or_make(_MAKEFILE)))
|
mick-d/nipype
|
nipype/interfaces/fsl/tests/test_auto_Level1Design.py
|
Python
|
bsd-3-clause
| 1,023
| 0.012708
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..model import Level1Design
def test_Level1Design_inputs():
input_map = dict(bases=dict(mandatory=True,
),
contrasts=dict(),
ignore_exception=dict(nohash=True,
usedefault=True,
),
interscan_interval=dict(mandatory=True,
),
model_serial_correlations=dict(mandatory=True,
),
orthogonalization=dict(),
session_info=dict(mandatory=True,
),
)
inputs = Level1Design.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_Level1Design_outputs():
output_map = dict(ev_files=dict(),
fsf_files=dict(),
)
outputs = Level1Design.output_spec()
    for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
jbowens/taboo
|
wordgen/data-importer.py
|
Python
|
mit
| 1,307
| 0.007651
|
#!/usr/bin/env python
import sys, json, psycopg2, argparse
parser = argparse.ArgumentParser(description='Imports word data into the taboo database.')
parser.add_argument('--verified', dest='verified', action='store_true', help='include if these words are verified as good quality')
parser.add_argument('--source', dest='source', help='include to set the source of these imported words')
args = parser.parse_args()
CONN_STR = 'dbname=prod user=prod'
data_str = '\n'.join(sys.stdin.readlines())
data = json.loads(data_str)
conn = psycopg2.connect(CONN_STR)
conn.autocommit = True
cur = conn.cursor()
count = 0
for word in data:
try:
cur.execute("INSERT INTO words (word, skipped, correct, status, source) VALUES(%s, %s, %s, %s, %s) RETURNING wid",
(word, 0, 0, 'approved' if args.verified == True else 'unverified', args.source))
wordid = cur.fetchone()[0]
prohibited_count = 0
for prohibited in data[word]:
prohibited_count = prohibited_count + 1
cur.execute("INSERT INTO prohibited_words (wid, word, rank) VALUES(%s, %s, %s)",
(wordid, prohibited, prohibited_count))
count = count + 1
except Exception as e:
print e
cur.close()
conn.close()
print 'Inserted ' + str(count) + ' words'
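# Hypothetical invocation (illustrative only; file name and source are placeholders):
#     cat words.json | ./data-importer.py --verified --source wiktionary
# where words.json maps each word to its list of prohibited words, e.g.
#     {"volcano": ["lava", "eruption", "mountain"]}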
|
ponty/psidialogs
|
psidialogs/examples/choice.py
|
Python
|
bsd-2-clause
| 109
| 0
|
import psidialogs
s = psidialogs.choice(["1", "2", "3"], "Choose a number!")
if s is not None:
print(s)
| ||
jeremiedecock/pyarm
|
pyarm/model/muscle/fake_muscle_model.py
|
Python
|
mit
| 1,107
| 0.00543
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010 Jérémie DECOCK (http://www.jdhp.org)
import numpy as np
from pyarm import fig
class MuscleModel:
"Muscle model."
# CONSTANTS ###############################################################
name = 'Fake'
###########################################################################
def __init__(self):
# Init datas to plot
fig.subfig('command',
title='Command',
xlabel='time (s)',
ylabel='Command',
ylim=[-0.1, 1.1])
#legend=('shoulder +', 'shoulder -',
# 'elbow +', 'elbow -'))
    def compute_torque(self, angles, velocities, command):
"Compute the torque"
torque = np.zeros(2)
if len(command) > 2:
torque[0] = (command[0] - command[1])
torque[1] = (command[2] - command[3])
fig.append('command', command[0:4])
else:
torque = np.array(command)
fig.append('command', command[0:2])
return torque
|
regardscitoyens/nosdeputes.fr
|
batch/hemicycle/parse_hemicycle.py
|
Python
|
agpl-3.0
| 4,977
| 0.005231
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import sys
import bs4
import json
import re
def xml2json(s):
global timestamp
timestamp = 0
s = s.replace(u'\xa0', u' ')
soup = bs4.BeautifulSoup(s, features="lxml")
intervention_vierge = {"intervenant": "", "contexte": ""}
intervention_vierge["source"] = "https://www.assemblee-nationale.fr/dyn/15/comptes-rendus/seance/"+soup.uid.string
m = soup.metadonnees
dateseance = str(m.dateseance.string)
intervention_vierge["date"] = "%04d-%02d-%02d" % (int(dateseance[0:4]), int(dateseance[4:6]), int(dateseance[6:8]))
intervention_vierge["heure"] = "%02d:%02d" % (int(dateseance[8:10]), int(dateseance[10:12]))
intervention_vierge["session"] = str(m.session.string)[-9:].replace('-', '')
contextes = ['']
numeros_lois = None
intervenant2fonction = {}
last_titre = ''
for p in soup.find_all(['paragraphe', 'point']):
intervention = intervention_vierge.copy()
        # Handle titles/contexts and law numbers
if p.name == "point" and p.texte and p.texte.get_text() and int(p['nivpoint']) < 4:
contextes = contextes[:int(p['nivpoint']) -1 ]
if not contextes:
contextes = []
contextes.append(p.texte.get_text().replace('\n', ''))
if p['valeur'] and p['valeur'][0:9] == ' (n[[o]] ':
numeros_lois = p['valeur'][9:-1].replace(' ', '')
if len(contextes) > 1:
intervention["contexte"] = contextes[0] + " > " + contextes[-1]
elif len(contextes) == 1:
intervention["contexte"] = contextes[0]
if p.name == "point":
intervention['intervention'] = "<p>"+contextes[-1]+"</p>"
if (last_titre != contextes[-1]):
printintervention(intervention)
last_titre = contextes[-1]
continue
        # Handle interventions
if numeros_lois:
intervention['numeros_loi'] = numeros_lois
intervention["source"] += "#"+p['id_syceron']
if len(p.orateurs):
intervention["intervenant"] = p.orateurs.orateur.nom.get_text()
if p['id_mandat'] and p['id_mandat'] != "-1":
intervention["intervenant_url"] = "http://www2.assemblee-nationale.fr/deputes/fiche/OMC_"+p['id_acteur']
intervention["intervenant"] = p['id_acteur']
if p.orateurs.orateur.qualite and p.orateurs.orateur.qualite.string:
intervention['fonction'] = p.orateurs.orateur.qualite.get_text()
if not intervenant2fonction.get(intervention["intervenant"]) and intervention['fonction']:
intervenant2fonction[intervention["intervenant"]] = intervention['fonction']
elif intervention["intervenant"] == "Mme la présidente":
intervention['fonction'] = "présidente"
intervention["intervenant"] = '';
elif intervention["intervenant"] == "M le président":
intervention['fonction'] = "président"
intervention["intervenant"] = '';
else:
intervention['fonction'] = intervenant2fonction.get(intervention["intervenant"], "")
texte = "<p>"
isdidascalie = False
texte_didascalie = ""
t_string = str(p.texte)
t_string = t_string.replace('>\n', '> ')
t_string = re.sub(r' ?<\/?texte> ?', '', t_string)
t_string = t_string.replace('<italique>', '<i>')
t_string = t_string.replace('</italique>', '</i>')
t_string = t_string.replace('n<exposant>o</exposant>', 'n°')
t_string = t_string.replace('n<exposant>os</exposant>', 'n°')
t_string = t_string.replace('</i> <i>', ' ')
t_string = t_string.replace('<br/>', '</p><p>')
texte += t_string
texte += "</p>"
i = 0;
for i in re.split(' ?(<i>\([^<]*\)</i> ?)', texte):
if i[0] == ' ':
i = i[1:]
if i[-1] == ' ':
i = i[:-1]
if (i[0:3] != '<p>'):
i = '<p>' + i
if (i[-4:] != '</p>'):
i = i + '</p>'
if i.find('<p><i>') == 0:
didasc = intervention_vierge
didasc["intervention"] = i
didasc["contexte"] = intervention["contexte"]
printintervention(didasc)
else:
intervention["intervention"] = i
printintervention(intervention)
def printintervention(i):
global timestamp
if i['intervention'] == '<p></p>' or i['intervention'] == '<p> </p>':
return
intervenants = i['intervenant'].split(' et ')
timestamp += 10
for intervenant in intervenants:
i['timestamp'] = str(timestamp)
i['intervenant'] = intervenant
print(json.dumps(i))
content_file = sys.argv[1]
with open(content_file, encoding='utf-8') as f:
xml2json(f.read())
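# Typical invocation (illustrative only; the input file name is a placeholder):
#     python parse_hemicycle.py compte-rendu-seance.xml > interventions.jsonl
# Each printed line is a single JSON object describing one intervention.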
|
zerothi/sisl
|
toolbox/siesta/minimizer/_minimize.py
|
Python
|
mpl-2.0
| 13,374
| 0.002094
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from hashlib import sha256
from abc import abstractmethod
from pathlib import Path
from numbers import Real
import warnings
import logging
import numpy as np
from scipy.optimize import minimize, dual_annealing
from sisl._dispatcher import AbstractDispatch
from sisl._dispatcher import ClassDispatcher
from sisl.io import tableSile
from sisl.utils import PropertyDict
__all__ = ["BaseMinimize", "LocalMinimize", "DualAnnealingMinimize",
"MinimizeToDispatcher"]
_log = logging.getLogger("sisl_toolbox.siesta.minimize")
def _convert_optimize_result(minimizer, result):
""" Convert optimize result to conform to the scaling procedure performed """
# reverse optimized value
# and also store the normalized values (to match the gradients etc)
if minimizer.norm[0] in ("none", "identity"):
# We don't need to do anything
# We haven't scaled anything
return result
result.x_norm = result.x
result.x = minimizer.reverse_normalize(result.x)
    if hasattr(result, "jac"):
# transform the jacobian
# The jacobian is dM / dx with dx possibly being scaled
        # So here we multiply by dx / dv
result.jac_norm = result.jac.copy()
result.jac /= minimizer.reverse_normalize(np.ones(len(minimizer)),
with_offset=False)
return result
class BaseMinimize:
# Basic minimizer basically used for figuring out whether
# to use a local or global minimization strategy
def __init__(self, variables=(), out="minimize.dat", norm='identity'):
# ensure we have an ordered dict, for one reason or the other
self.variables = []
if variables is not None:
for v in variables:
self.add_variable(v)
self.reset(out, norm)
def reset(self, out=None, norm=None):
""" Reset data table to be able to restart """
# While this *could* be a named-tuple, we would not be able
# to override the attribute, hence we use a property dict
# same effect.
self.data = PropertyDict(x=[], y=[], hash=[])
# log
log = ""
if not out is None:
log += f" out={str(out)}"
self.out = Path(out)
if not norm is None:
log += f" norm={str(norm)}"
if isinstance(norm, str):
self.norm = (norm, 1.)
elif isinstance(norm, Real):
self.norm = ("l2", norm)
else:
self.norm = norm
_log.info(f"{self.__class__.__name__} resetting{log}")
def normalize(self, variables, with_offset=True):
if isinstance(variables, str):
# this means we grab the variable name from the attributes
# of each variable
out = np.empty(len(self.variables))
for i, v in enumerate(self.variables):
out[i] = v.normalize(v.attrs[variables], self.norm, with_offset=with_offset)
else:
out = np.empty_like(variables)
for i, v in enumerate(variables):
out[i] = self.variables[i].normalize(v, self.norm, with_offset=with_offset)
return out
def normalize_bounds(self):
return [v.normalize(v.bounds, self.norm) for v in self.variables]
def reverse_normalize(self, variables, with_offset=True):
# ensures numpy array
out = np.empty_like(variables)
for i, v in enumerate(variables):
out[i] = self.variables[i].reverse_normalize(v, self.norm, with_offset=with_offset)
return out
def __getitem__(self, key):
return self.variables[key]
@staticmethod
def get_hash(data):
return sha256(data.view(np.uint8)).hexdigest()
def add_variable(self, variable):
if self.variables.count(variable.name) != 0:
raise ValueError(f"Multiple variables with same name {variable.name}")
self.variables.append(variable)
@property
def names(self):
return [v.name for v in self.variables]
@property
def values(self):
return np.array([v.value for v in self.variables], np.float64)
def update(self, variables):
""" Update internal variables for the values """
for var, v in zip(self.variables, variables):
var.update(v)
def dict_values(self):
""" Get all vaules in a dictionary table """
return {v.name: v.value for v in self.variables}
# Define a dispatcher for converting Minimize data to some specific data
# BaseMinimize().to.skopt() will convert to an skopt.OptimizationResult structure
to = ClassDispatcher("to",
obj_getattr=lambda obj, key:
(_ for _ in ()).throw(
AttributeError((f"{obj}.to does not implement '{key}' "
f"dispatcher, are you using it incorrectly?"))
)
)
def __enter__(self):
""" Open the file and fill with stuff """
_log.debug(f"__enter__ {self.__class__.__name__}")
# check if the file exists
if self.out.is_file():
# read in previous data
# this will be "[variables, runs]"
data, header = tableSile(self.out).read_data(ret_header=True)
else:
data = np.array([])
# check if the file exists
if self.out.is_file() and data.size > 0:
nvars = data.shape[0] - 1
if nvars != len(self):
raise ValueError(f"Found old file {self.out} which contains previous data for another number of parameters, please delete or move file")
# now parse header
*header, _ = header[1:].split()
idx = []
for name in self.names:
# find index in header
for i, head in enumerate(header):
if head == name:
idx.append(i)
break
if nvars != len(idx):
print(header)
print(self.names)
print(idx)
raise ValueError(f"Found old file {self.out} which contains previous data with some variables being renamed, please correct header or move file")
# add functional value, no pivot
idx.append(len(self))
# re-arrange data (in case user swapped order of variables)
data = np.ascontiguousarray(data[idx].T)
x, y = data[:, :-1], data[:, -1]
# We populate with hashes without the functional
# That would mean we can't compare hashes between input arguments
# only make the first index a list (x.tolist() makes everything a list)
self.data.x = [xi for xi in x]
self.data.y = [yi for yi in y]
self.data.hash = list(map(self.get_hash, self.data.x))
# Re-open file (overwriting it)
# First output a few things in this file
comment = f"Created by sisl '{self.__class__.__name__}'."
header = self.names + ["metric"]
if len(self.data.x) == 0:
self._fh = tableSile(self.out, 'w').__enter__()
self._fh.write_data(comment=comment, header=header)
else:
comment += f" The first {len(self.data)} lines contains prior content."
data = np.column_stack((self.data.x, self.data.y))
self._fh = tableSile(self.out, 'w').__enter__()
self._fh.write_data(data.T, comment=comment, header=header, fmt='20.17e')
self._fh.flush()
return self
def __exit__(self, *args, **kwargs):
""" Exit routine """
self._fh.__exit__(*args, **kwargs)
# clean-up
del self._fh
def __len__(self):
return len(self.variables)
@abstractmethod
def __call__(self, variables, *args):
""" Actual running code tha
|
philpep/testinfra
|
testinfra/modules/iptables.py
|
Python
|
apache-2.0
| 2,936
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from testinfra.modules.base import InstanceModule
class Iptables(InstanceModule):
"""Test iptables rule exists"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# support for -w argument (since 1.6.0)
# https://git.netfilter.org/iptables/commit/?id=aaa4ace72b
        # centos 6 has no support
# centos 7 has 1.4 patched
self._has_w_argument = None
def _iptables_command(self, version):
if version == 4:
iptables = "iptables"
elif version == 6:
iptables = "ip6tables"
else:
raise RuntimeError("Invalid version: %s" % version)
if self._has_w_argument is False:
return iptables
else:
return "{} -w 90".format(iptables)
def _run_iptables(self, version, cmd, *args):
ipt_cmd = "{} {}".format(self._iptables_command(version), cmd)
if self._has_w_argument is None:
result = self.run_expect([0, 2], ipt_cmd, *args)
if result.rc == 2:
self._has_w_argument = False
return self._run_iptables(version, cmd, *args)
else:
self._has_w_argument = True
return result.stdout.rstrip('\r\n')
else:
return self.check_output(ipt_cmd, *args)
def rules(self, table='filter', chain=None, version=4):
"""Returns list of iptables rules
        Based on output of `iptables -t TABLE -S CHAIN` command
        optionally takes the following arguments:
- table: defaults to `filter`
- chain: defaults to all chains
- version: default 4 (iptables), optionally 6 (ip6tables)
>>> host.iptables.rules()
[
'-P INPUT ACCEPT',
'-P FORWARD ACCEPT',
'-P OUTPUT ACCEPT',
'-A INPUT -i lo -j ACCEPT',
'-A INPUT -j REJECT'
'-A FORWARD -j REJECT'
]
>>> host.iptables.rules("nat", "INPUT")
['-P PREROUTING ACCEPT']
"""
cmd, args = "-t %s -S", [table]
if chain:
cmd += " %s"
args += [chain]
rules = []
for line in self._run_iptables(version, cmd, *args).splitlines():
line = line.replace("\t", " ")
rules.append(line)
return rules
|
VitalLabs/gcloud-python
|
gcloud/storage/blob.py
|
Python
|
apache-2.0
| 37,138
| 0
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create / interact with Google Cloud Storage blobs."""
import base64
import copy
import hashlib
from io import BytesIO
import json
import mimetypes
import os
import time
import httplib2
import six
from six.moves.urllib.parse import quote
from gcloud._helpers import _rfc3339_to_datetime
from gcloud._helpers import _to_bytes
from gcloud._helpers import _bytes_to_unicode
from gcloud.credentials import generate_signed_url
from gcloud.exceptions import NotFound
from gcloud.exceptions import make_exception
from gcloud.storage._helpers import _PropertyMixin
from gcloud.storage._helpers import _scalar_property
from gcloud.storage.acl import ObjectACL
from gcloud.streaming.http_wrapper import Request
from gcloud.streaming.http_wrapper import make_api_request
from gcloud.streaming.transfer import Download
from gcloud.streaming.transfer import RESUMABLE_UPLOAD
from gcloud.streaming.transfer import Upload
_API_ACCESS_ENDPOINT = 'https://storage.googleapis.com'
class Blob(_PropertyMixin):
"""A wrapper around Cloud Storage's concept of an ``Object``.
:type name: string
:param name: The name of the blob. This corresponds to the
unique path of the object in the bucket.
:type bucket: :class:`gcloud.storage.bucket.Bucket`
    :param bucket: The bucket to which this blob belongs.
:type chunk_size: integer
    :param chunk_size: The size of a chunk of data whenever iterating (1 MB).
This must be a multiple of 256 KB per the API
specification.
"""
_chunk_size = None # Default value for each instance.
_CHUNK_SIZE_MULTIPLE = 256 * 1024
"""Number (256 KB, in bytes) that must divide the chunk size."""
def __init__(self, name, bucket, chunk_size=None, generation=None):
super(Blob, self).__init__(name=name)
self.chunk_size = chunk_size # Check that setter accepts value.
self.bucket = bucket
self._acl = ObjectACL(self)
self.generation = generation
@property
def chunk_size(self):
"""Get the blob's default chunk size.
:rtype: integer or ``NoneType``
:returns: The current blob's chunk size, if it is set.
"""
return self._chunk_size
@chunk_size.setter
def chunk_size(self, value):
"""Set the blob's default chunk size.
:type value: integer or ``NoneType``
:param value: The current blob's chunk size, if it is set.
:raises: :class:`ValueError` if ``value`` is not ``None`` and is not a
multiple of 256 KB.
"""
if value is not None and value % self._CHUNK_SIZE_MULTIPLE != 0:
raise ValueError('Chunk size must be a multiple of %d.' % (
self._CHUNK_SIZE_MULTIPLE,))
self._chunk_size = value
@staticmethod
def path_helper(bucket_path, blob_name):
"""Relative URL path for a blob.
:type bucket_path: string
:param bucket_path: The URL path for a bucket.
:type blob_name: string
:param blob_name: The name of the blob.
:rtype: string
:returns: The relative URL path for ``blob_name``.
"""
return bucket_path + '/o/' + quote(blob_name, safe='')
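        # For example (illustrative values, not part of the original module):
        #     Blob.path_helper('/b/my-bucket', 'photos/img 1.png')
        #     -> '/b/my-bucket/o/photos%2Fimg%201.png'
        # quote(..., safe='') percent-encodes every reserved character,
        # including '/', so the blob name stays a single URL path segment.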
@property
def acl(self):
"""Create our ACL on demand."""
return self._acl
def __repr__(self):
if self.bucket:
bucket_name = self.bucket.name
else:
bucket_name = None
return '<Blob: %s, %s>' % (bucket_name, self.name)
@property
def path(self):
"""Getter property for the URL path to this Blob.
:rtype: string
:returns: The URL path to this Blob.
"""
if not self.name:
raise ValueError('Cannot determine path without a blob name.')
return self.path_helper(self.bucket.path, self.name)
@property
def path_with_params(self):
"""Getter property for the URL path to this Blob, with version.
:rtype: tuple of ``path`` (a string) and ``params`` (a dictionary)
:returns: the URL path to this blob and a dictionary with the
generation that can be used in query_params for
connection.api_request
"""
params = {}
if self.generation is not None:
params = {'generation': self.generation}
return (self.path, params)
@property
def client(self):
"""The client bound to this blob."""
return self.bucket.client
@property
def public_url(self):
"""The public URL for this blob's object.
:rtype: `string`
:returns: The public URL for this blob.
"""
return '{storage_base_url}/{bucket_name}/{quoted_name}'.format(
storage_base_url='https://storage.googleapis.com',
bucket_name=self.bucket.name,
quoted_name=quote(self.name, safe=''))
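# Illustrative example (editor's sketch): for a bucket named 'my-bucket' and a
# blob named 'photos/cat.png', public_url evaluates to
# 'https://storage.googleapis.com/my-bucket/photos%2Fcat.png'.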
def generate_signed_url(self, expiration, method='GET',
content_type=None,
generation=None, response_disposition=None,
response_type=None, client=None, credentials=None):
"""Generates a signed URL for this blob.
.. note::
If you are on Google Compute Engine, you can't generate a signed
URL. Follow `Issue 922`_ for updates on this. If you'd like to
be able to generate a signed URL from GCE, you can use a standard
service account from a JSON file rather than a GCE service account.
.. _Issue 922: https://github.com/GoogleCloudPlatform/\
gcloud-python/issues/922
If you have a blob that you want to allow access to for a set
amount of time, you can use this method to generate a URL that
is only valid within a certain time period.
This is particularly useful if you don't want publicly
accessible blobs, but don't want to require users to explicitly
log in.
:type expiration: int, long, datetime.datetime, datetime.timedelta
:param expiration: When the signed URL should expire.
:type method: str
:param method: The HTTP verb that will be used when requesting the URL.
:type content_type: str
:param content_type: (Optional) The content type of the object
referenced by ``resource``.
:type generation: str
:param generation: (Optional) A value that indicates which generation
of the resource to fetch.
:type response_disposition: str
:param response_disposition: (Optional) Content disposition of
responses to requests for the signed URL.
For example, to have the signed URL
initiate a download saved as ``blob.png``,
use the value
``'attachment; filename=blob.png'``.
:type response_type: str
:param response_type: (Optional) Content type of responses to requests
for the signed URL. Used to over-ride the content
type of the underlying blob/object.
:type client: :class:`gcloud.storage.client.Client` or ``NoneType``
:param client: (Optional) The client to use. If not passed, falls back
to the ``client`` stored on the blob's bucket.
:type credentials: :
|
TWAtGH/pilot2
|
pilot.py
|
Python
|
apache-2.0
| 4,338
| 0.001844
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Mario Lassnig, mario.lassnig@cern.ch, 2016-2017
# - Daniel Drizhuk, d.drizhuk@gmail.com, 2017
import argparse
import logging
import sys
import threading
from pilot.util.constants import SUCCESS, FAILURE, ERRNO_NOJOBS
from pilot.util.https import https_setup
from pilot.util.information import set_location
VERSION = '2017-04-04.001'
def main():
logger = logging.getLogger(__name__)
logger.info('pilot startup - version %s' % VERSION)
args.graceful_stop = threading.Event()
https_setup(args, VERSION)
if not set_location(args):
return False
logger.info('workflow: %s' % args.workflow)
workflow = __import__('pilot.workflow.%s' % args.workflow, globals(), locals(), [args.workflow], -1)
return workflow.run(args)
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-d',
dest='debug',
action='store_true',
default=False,
help='enable debug logging messages')
# the choices must match in name the python module in pilot/workflow/
arg_parser.add_argument('-w',
dest='workflow',
default='generic',
choices=['generic', 'generic_hpc',
'production', 'production_hpc',
'analysis', 'analysis_hpc',
'eventservice', 'eventservice_hpc'],
help='pilot workflow (default: generic)')
# gracefully stop pilot process after hard limit
arg_parser.add_argument('-l',
dest='lifetime',
default=10,
type=int,
help='pilot lifetime seconds (default: 10)')
# set the appropriate site and queue
arg_parser.add_argument('-q',
dest='queue',
required=True,
help='MANDATORY: queue name (e.g., AGLT2_TEST-condor)')
# job prod/source label
arg_parser.add_argument('-j',
dest='job_label',
default='mtest',
help='job prod/source label (default: mtest)')
# SSL certificates
arg_parser.add_argument('--cacert',
dest='cacert',
default=None,
help='CA certificate to use with HTTPS calls to server, commonly X509 proxy',
metavar='path/to/your/certificate')
arg_parser.add_argument('--capath',
dest='capath',
default=None,
help='CA certificates path',
metavar='path/to/certificates/')
args = arg_parser.parse_args()
console = logging.StreamHandler(sys.stdout)
if args.debug:
logging.basicConfig(filename='pilotlog.txt', level=logging.DEBUG,
format='%(asctime)s | %(levelname)-8s | %(threadName)-10s | %(name)-32s | %(funcName)-32s | %(message)s')
console.setLevel(logging.DEBUG)
console.setFormatter(logging.Formatter('%(asctime)s | %(levelname)-8s | %(threadName)-10s | %(name)-32s | %(funcName)-32s | %(message)s'))
else:
logging.basicConfig(filename='pilotlog.txt', level=logging.INFO,
format='%(asctime)s | %(levelname)-8s | %(message)s')
console.setLevel(logging.INFO)
console.setFormatter(logging.Formatter('%(asctime)s | %(levelname)-8s | %(message)s'))
logging.getLogger('').addHandler(console)
trace = main()
logging.shutdown()
if not trace:
logging.getLogger(__name__).critical('pilot startup did not succeed -- aborting')
sys.exit(FAILURE)
elif trace.pilot['nr_jobs'] > 0:
sys.exit(SUCCESS)
else:
sys.exit(ERRNO_NOJOBS)
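# Illustrative invocation (editor's note; the queue name is only an example):
# python pilot.py -q AGLT2_TEST-condor -w generic -l 3600 -d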
|
arugifa/website
|
website/deployment/__init__.py
|
Python
|
gpl-3.0
| 51
| 0
|
"""Colle
|
ction of helpers for online deployment."""
| |
mick-d/nipype
|
nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py
|
Python
|
bsd-3-clause
| 1,633
| 0.021433
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..brainsresample import BRAINSResample
def test_BRAINSResample_inputs():
input_map = dict(args=dict(argstr='%s',
),
defaultValue=dict(argstr='--defaultValue %f',
),
deformationVolume=dict(argstr='--deformationVolume %s',
),
environ=dict(nohash=True,
usedefault=True,
),
gridSpacing=dict(argstr='--gridSpacing %s',
sep=',',
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
inputVolume=dict(argstr='--inputVolume %s',
),
interpolationMode=dict(argstr='--interpolationMode %s',
),
inverseTransform=dict(argstr='--inverseTransform ',
),
numberOfThreads=dict(argstr='--numberOfThreads %d',
),
outputVolume=dict(argstr='--outputVolume %s',
hash_files=False,
),
pixelType=dict(argstr='--pixelType %s',
),
referenceVolume=dict(argstr='--referenceVolume %s',
),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
warpTransform=dict(argstr='--warpTransform %s',
),
)
inputs = BRAINSResample.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_BRAINSResample_outputs():
output_map = dict(outputVolume=dict(),
)
outputs = BRAINSResample.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
RedHatQE/cfme_tests
|
cfme/fixtures/v2v_fixtures.py
|
Python
|
gpl-2.0
| 22,048
| 0.002812
|
import json
from collections import namedtuple
import fauxfactory
import pytest
from riggerlib import recursive_update
from widgetastic.utils import partial_match
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.fixtures.provider import setup_or_skip
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.version import Version
from cfme.utils.version import VersionPicker
from cfme.v2v.infrastructure_mapping import InfrastructureMapping as InfraMapping
FormDataVmObj = namedtuple("FormDataVmObj", ["infra_mapping_data", "vm_list"])
V2vProviders = namedtuple("V2vProviders", ["vmware_provider", "rhv_provider", "osp_provider"])
@pytest.fixture(scope="module")
def v2v_provider_setup(request, appliance, source_provider, provider):
""" Fixture to setup providers """
vmware_provider, rhv_provider, osp_provider = None, None, None
for v2v_provider in [source_provider, provider]:
if v2v_provider.one_of(VMwareProvider):
vmware_provider = v2v_provider
setup_or_skip(request, vmware_provider)
elif v2v_provider.one_of(RHEVMProvider):
rhv_provider = v2v_provider
setup_or_skip(request, rhv_provider)
elif v2v_provider.one_of(OpenStackProvider):
osp_provider = v2v_provider
setup_or_skip(request, osp_provider)
else:
pytest.skip("Provider {} is not a valid provider for v2v tests".format(provider.name))
v2v_providers = V2vProviders(vmware_provider=vmware_provider,
rhv_provider=rhv_provider,
osp_provider=osp_provider)
# Transformation method can be vddk or ssh
if hasattr(request, "param") and request.param == "SSH":
transformation_method = "SSH"
else:
transformation_method = "VDDK"
# set host credentials for Vmware and RHEV hosts
host_credentials(appliance, transformation_method, v2v_providers)
yield v2v_providers
for v2v_provider in v2v_providers:
if v2v_provider is not None:
v2v_provider.delete_if_exists(cancel=False)
def host_credentials(appliance, transformation_method, v2v_providers):
""" Sets up host credentials for vmware and rhv providers
for RHEV migration.
For migration with OSP only vmware(source) provider
host credentials need to be added.
These credentials are automatically removed once the
provider is deleted in clean up.
Args:
appliance
transformation_method : vddk or ssh to be used in configuring conversion host
v2v_providers: vmware (and rhev in case of RHV migration ) , osp not needed.
"""
provider_list = [v2v_providers.vmware_provider]
rhv_hosts = None
if v2v_providers.rhv_provider is not None:
rhv_hosts = v2v_providers.rhv_provider.hosts.all()
provider_list.append(v2v_providers.rhv_provider)
try:
for v2v_provider in provider_list:
hosts = v2v_provider.hosts.all()
for host in hosts:
host_data = [data for data in v2v_provider.data['hosts']
if data['name'] == host.name]
if not host_data:
pytest.skip("No host data")
host.update_credentials_rest(credentials=host_data[0]['credentials'])
except Exception:
logger.exception("Exception when trying to add the host credentials.")
pytest.skip("No data for hosts in providers, failed to retrieve hosts and add creds.")
# Configure conversion host for RHEV migration
if rhv_hosts is not None:
set_conversion_instance_for_rhev(appliance, transformation_method, rhv_hosts)
if v2v_providers.osp_provider is not None:
set_conversion_instance_for_osp(appliance, v2v_providers.osp_provider,
transformation_method)
def _tag_cleanup(host_obj, tag1, tag2):
"""
Clean Up Tags
Returns: True if all tags were removed/cleaned,
or False if all required tags are already present on the host.
"""
def extract_tag(tag):
# Following strip will remove extra asterisk from tag assignment
return tag.category.display_name.strip(" *"), tag.display_name
valid_tags = {extract_tag(tag1), extract_tag(tag2)}
tags = host_obj.get_tags()
tags_set = set(map(extract_tag, tags))
# we always need 2 tags for migration; if the total is fewer than 2,
# don't bother checking which tag it was, just remove it and
# then add all required tags via add_tags(). If the tags on the host
# are not a subset of the valid tags, we still remove them.
if len(tags_set) < 2 or not tags_set.issubset(valid_tags):
host_obj.remove_tags(tags=tags)
return True
return False
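# Illustrative example (editor's sketch, hypothetical tag values): if the host
# currently carries only ('V2V - Transformation Host', 't'), len(tags_set) < 2,
# so _tag_cleanup removes the tag and returns True; if it already carries both
# required tags and nothing else, the function returns False and no tags change.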
def create_tags(appliance, transformation_method):
"""
Create tags V2V - Transformation Host * and V2V - Transformation Method
Args:
appliance:
transformation_method: VDDK/SSH
"""
# t is for True in V2V - Transformation Host * tag
tag1 = appliance.collections.categories.instantiate(
display_name="V2V - Transformation Host *"
).collections.tags.instantiate(display_name="t")
tag2 = appliance.collections.categories.instantiate(
display_name="V2V - Transformation Method"
).collections.tags.instantiate(display_name=transformation_method)
return tag1, tag2
def set_conversion_instance_for_rhev(appliance, transformation_method, rhev_hosts):
"""Assigning tags to conversion host.
In 5.10 rails console commands are run to configure all the rhev hosts.
Args:
appliance:
transformation_method : vddk or ssh as per test requirement
rhev_hosts: hosts in rhev to configure for conversion
"""
for host in rhev_hosts:
# set conversion host via rails console
# Delete all prior conversion hosts otherwise it creates duplicate entries
delete_hosts = appliance.ssh_client.run_rails_command("'ConversionHost.delete_all'")
if not delete_hosts.success:
pytest.skip("Failed to delete all conversion hosts:".format(delete_hosts.output))
set_conv_host = appliance.ssh_client.run_rails_command(
"'r = Host.find_by(name:{host});\
c_host = ConversionHost.create(name:{host},resource:r);\
c_host.{method}_transport_supported = true;\
c_host.save'".format(host=json.dumps(host.name),
method=transformation_method.lower())
)
if not set_conv_host.success:
pytest.skip("Failed to set conversion hosts:".format(set_conv_host.output))
def set_conversion_instance_for_osp(appliance, osp_provider, transformation_method='vddk'):
"""
Rails console command
====================
res = Vm.find_by(name: 'my_osp_instance')
conversion_host = ConversionHost.create(name: res.name, resource: res)
conversion_host.vddk_transport_supported = true
conversion_host.save
Args:
appliance
transformation_method: vddk or ssh
osp_provider: OSP
"""
# Delete all prior conversion hosts otherwise it creates duplicate entries
delete_hosts = appliance.ssh_client.run_rails_command("'ConversionHost.delete_all'")
if not delete_hosts.success:
pytest.skip("Failed to delete all conversion hosts:".format(delete_hosts.output))
# transformation method needs to be lower case always
trans_method = transformation_method.lower()
try:
conversion_instances = osp_provider.data['conversion_instances'][trans_method]
except KeyError:
pytest.skip("No conversion instance on provider.")
for instance in conversion_instances:
set_conv_host = appliance.ssh_client.run_rails_command(
"'r = Vm.find_by(name:{vm});\
c_host = ConversionHost.create(name:r.name, resource: r);\
c_host.{method}_transport_supported = true;\
|
RaJiska/Warband-PW-Punishments-Manager
|
scripts/process_dialogs.py
|
Python
|
gpl-3.0
| 3,283
| 0.018581
|
import process_common as pc
import process_operations as po
import module_dialogs
import module_info
from header_dialogs import *
start_states = []
end_states = []
def compile_dialog_states(processor, dialog_file):
global start_states
global end_states
unique_state_list = ["start", "party_encounter", "prisoner_liberated", "enemy_defeated", "party_relieved",
"event_triggere
|
d", "close_window", "trade", "exchange_members", "trade_prisoners", "buy_mercenaries",
"view_char", "training", "member_chat", "prisoner_chat"]
unique_state_usages = [1 for i in unique_state_list]
unique_states = dict((k, i) for i, k in enumerate(unique_state_list))
last_index = len(unique_state_list)
for entry in module_dialogs.dialogs:
end_state = entry[5]
index = unique_states.setdefault(end_state, last_index)
if index == last_index:
last_index += 1
unique_state_list.append(end_state)
unique_state_usages.append(0)
end_states.append(index)
for entry in module_dialogs.dialogs:
start_state = entry[2]
try:
index = unique_states[start_state]
unique_state_usages[index] += 1
start_states.append(index)
except KeyError:
pc.ERROR("starting dialog state '%s' has no matching ending state" % start_state)
for state, usages in zip(unique_state_list, unique_state_usages):
if not usages:
pc.ERROR("ending dialog state '%s' is not used" % state)
with open(module_info.export_path("dialog_states.txt"), "wb") as state_file:
state_file.write("".join("%s\r\n" % e for e in unique_state_list))
dialog_names = {}
def get_dialog_name(start_state, end_state, text):
global dialog_names
name = "dlga_%s:%s" % (pc.convert_to_identifier(start_state), pc.convert_to_identifier(end_state))
text_list = dialog_names.setdefault(name, [])
for i, existing_text in enumerate(text_list):
if text == existing_text:
name = "%s.%d" % (name, i + 1)
break
else:
text_list.append(text)
return name
def process_entry(processor, txt_file, entry, index):
name = get_dialog_name(entry[start_state_pos], entry[end_state_pos], entry[text_pos])
trp_pt = entry[speaker_pos]
flags = entry[flags_pos]
speaker = 0
if flags & other:
speaker = processor.process_id(trp_pt[1], "trp") << other_bits
flags ^= other
trp_pt = trp_pt[0]
if flags & party_tpl:
speaker |= processor.process_id(trp_pt, "pt")
else:
speaker |= processor.process_id(trp_pt, "trp")
speaker |= flags
output_list = ["%s %d %d " % (name, speaker, start_states[index])]
output_list.extend(processor.process_block(entry[conditions_pos], "%s conditions" % name))
output_list.append("%s " % pc.replace_spaces(entry[text_pos]) if entry[text_pos] else "NO_TEXT ")
output_list.append(" %d " % end_states[index])
output_list.extend(processor.process_block(entry[consequences_pos], "%s consequences" % name))
output_list.append("%s " % entry[voice_pos] if len(entry) > voice_pos else "NO_VOICEOVER ")
output_list.append("\r\n")
txt_file.write("".join(output_list))
export = po.make_export(data=module_dialogs.dialogs, data_name="dialogs", file_name="conversation",
header_format="dialogsfile version 2\r\n%d\r\n", process_entry=process_entry, process_list=compile_dialog_states)
|
google/megalista
|
megalista_dataflow/uploaders/google_ads/customer_match/contact_info_uploader.py
|
Python
|
apache-2.0
| 1,678
| 0.005364
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import apache_beam as beam
import logging
from typing import Dict, Any, List
from uploaders.google_ads.customer_match.abstract_uploader import GoogleAdsCustomerMatchAbstractUploaderDoFn
from uploaders import utils
from models.execution import DestinationType, AccountConfig
class GoogleAdsCustomerMatchContactInfoUploaderDoFn(GoogleAdsCustomerMatchAbstractUploaderDoFn):
def get_list_definition(self, account_config: AccountConfig, destination_metadata: List[str]) -> Dict[str, Any]:
list_name = destination_metadata[0]
return {
'membership_status': 'OPEN',
'name': list_name,
'description': 'List created automatically by Megalista',
'membership_life_span': 10000,
'crm_based_user_list': {
'upload_key_type': 'CONTACT_INFO', #CONTACT_INFO, CRM_ID, MOBILE_ADVERTISING_ID
'data_source_type': 'FIRST_PARTY',
}
}
def get_row_keys(self) -> List[str]:
return ['hashed_email', 'address_info', 'hashed_phone_number']
def get_action_type(self) -> DestinationType:
return DestinationType.ADS_CUSTOMER_MATCH_CONTACT_INFO_UPLOAD
|
shteeven/conference
|
holder/test/test_appengine_api.py
|
Python
|
apache-2.0
| 1,694
| 0.002361
|
import unittest
import urllib
import logging
from google.appengine.ext import testbed
from google.appengine.api import urlfetch
from conference import ConferenceApi
from models import ConferenceForm
from models import ConferenceForms
from models import ConferenceQueryForm
from models import ConferenceQueryForms
from protorpc.remote import protojson
def init_stubs(tb):
tb.init_urlfetch_stub()
tb.init_app_identity_stub()
tb.init_blobstore_stub()
tb.init_capability_stub()
tb.init_channel_stub()
tb.init_datastore_v3_stub()
tb.init_files_stub()
# tb.init_mail_stub()
tb.init_memcache_stub()
tb.init_taskqueue_stub(root_path='tests/resources')
tb.init_user_stub()
tb.init_xmpp_stub()
return tb
class AppEngineAPITest(unittest.TestCase):
def setUp(self):
logging.getLogger().setLevel(logging.DEBUG)
tb = testbed.Testbed()
tb.setup_env(current_version_id='testbed.version')
tb.activate()
self.testbed = init_stubs(tb)
def testUrlfetch(self):
# response = urlfetch.fetch('http://www.google.com')
url = 'http://localhost:9000/_ah/api/conference/v1/conference'
# form_fields = {
# "name": "Albert"
# }
form_fields = ConferenceForm(name='steven')
form_data = protojson.encode_message(form_fields)
# form_data = urllib.urlencode(form_fields)
response = urlfetch.fetch(url=url, payload=form_data, method=urlfetch.POST,
headers={'Content-Type': 'application/json'})
print(dir(response))
print(response.content)
self.assertEquals(200, response.status_code)
|
jeremiah-c-leary/vhdl-style-guide
|
vsg/tests/architecture/test_rule_016.py
|
Python
|
gpl-3.0
| 2,048
| 0.003418
|
import os
import unittest
from vsg.rules import architecture
from vsg import vhdlFile
from vsg.tests import utils
sTestDir = os.path.dirname(__file__)
lFile, eError = vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir, 'rule_016_test_input.vhd'))
lExpected_require_blank = []
lExpected_require_blank.append('')
utils.read_file(os.path.join(sTestDir, 'rule_016_test_input.fixed_require_blank.vhd'), lExpected_require_blank)
lExpected_no_blank = []
lExpected_no_blank.append('')
utils.read_file(os.path.join(sTestDir, 'rule_016_test_input.fixed_no_blank.vhd'), lExpected_no_blank)
class test_architecture_rule(unittest.TestCase):
def setUp(self):
self.oFile = vhdlFile.vhdlFile(lFile)
self.assertIsNone(eError)
def test_rule_016_require_blank(self):
oRule = architecture.rule_016()
self.assertTrue(oRule)
self.assertEqual(oRule.name, 'architecture')
self.assertEqual(oRule.identifier, '016')
lExpected = [7, 12, 17]
oRule.analyze(self.oFile)
self.assertEqual(lExpected, utils.extract_violation_lines_from_violation_object(oRule.violations))
def test_fix_rule_016_require_blank(self):
oRule = architecture.rule_016()
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpected_require_blank, lActual)
oRule.analyze(self.oFile)
self.assertEqual(oRule.violations, [])
def test_rule_016_no_blank(self):
oRule = architecture.rule_016()
oRule.style = 'no_blank_line'
lExpected = [23]
oRule.analyze(self.oFile)
self.assertEqual(lExpected, utils.extract_violation_lines_from_violation_object(oRule.violations))
def test_fix_rule_016_no_blank(self):
oRule = architecture.rule_016()
oRule.style = 'no_blank_line'
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpected_no_blank, lActual)
oRule.analyze(self.oFile)
self.assertEqual(oRule.violations, [])
|
starforgelabs/py-korad-serial
|
koradserial.py
|
Python
|
mit
| 10,720
| 0.001679
|
""" Serial communication with Korad KA3xxxP power supplies.
The intent is to give easy access to the power supply as Python objects, eliminating the need to know
special codes.
The object supports the python `with` statement to release the serial port automatically:
from koradserial import KoradSerial
with KoradSerial('/dev/tty.usbmodemfd121') as device:
print "Model: ", device.model
print "Status: ", device.status
LICENSE: MIT
RESOURCES:
http://www.eevblog.com/forum/testgear/power-supply-ps3005d-ka3005d-rs232-protocol/
http://www.eevblog.com/forum/testgear/korad-ka3005p-io-commands/
http://sigrok.org/wiki/Velleman_PS3005D
https://gist.github.com/k-nowicki/5379272
"""
from __future__ import print_function, unicode_literals
from enum import Enum
from time import sleep
import serial
__all__ = ['KoradSerial', 'ChannelMode', 'OnOffState', 'Tracking']
class ChannelMode(Enum):
""" Represents channel modes.
These values should correspond to the values returned by the ``STATUS?`` command.
"""
constant_current = 0
constant_voltage = 1
class OnOffState(Enum):
""" Represents on/off states.
This could just as easily be done as a Boolean, but is explicit.
"""
off = 0
on = 1
class Tracking(Enum):
""" Tracking state for a multi-channel power supply.
These values should correspond to the values returned by the ``STATUS?`` command.
There seems to be conflicting information about these values.
The other values I've seen are:
* 0 - independent
* 1 - series
* 2 - parallel
* 3 - symmetric
However, I don't have a multi-channel power supply to test these.
"""
independent = 0
series = 1
parallel = 3
class Status(object):
""" Decode the KoradSerial status byte.
It appears that the firmware is a little wonky here.
SOURCE:
Taken from http://www.eevblog.com/forum/testgear/korad-ka3005p-io-commands/
Contains 8 bits in the following format
Bit Item Description
0 CH1 0=CC mode, 1=CV mode
1 CH2 0=CC mode, 1=CV mode
2, 3 Tracking 00=Independent, 01=Tracking series,11=Tracking parallel
4 Beep 0=Off, 1=On
5 Lock 0=Lock, 1=Unlock
6 Output 0=Off, 1=On
7 N/A N/A
"""
def __init__(self, status):
""" Initialize object with a KoradSerial status character.
:param status: Status value
:type status: int
"""
super(Status, self).__init__()
self.raw = status
self.channel1 = ChannelMode(status & 1)
self.channel2 = ChannelMode((status >> 1) & 1)
self.tracking = Tracking((status >> 2) & 3)
self.beep = OnOffState((status >> 4) & 1)
self.lock = OnOffState((status >> 5) & 1)
self.output = OnOffState((status >> 6) & 1)
def __repr__(self):
return "{0}".format(self.raw)
def __str__(self):
return "Channel 1: {0}, Channel 2: {1}, Tracking: {2}, Beep: {3}, Lock: {4}, Output: {5}".format(
self.channel1.name,
self.channel2.name,
self.tracking.name,
self.beep.name,
self.lock.name,
self.output.name,
)
def __unicode__(self):
return self.__str__()
def float_or_none(value):
try:
return float(value)
except (TypeError, ValueError):
return None
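# Illustrative example (added by the editor, not part of the original file):
# decoding a raw status byte of 0x51 (0b0101_0001) with the Status class above
# yields channel1=constant_voltage, channel2=constant_current,
# tracking=independent, beep=on, lock=off, output=on.
# _example = Status(0x51)
# assert _example.output is OnOffState.on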
class KoradSerial(object):
""" Wrapper for communicating with a programmable KoradSerial KA3xxxxP power supply as a serial interface.
"""
class Channel(object):
""" Wrap a channel. """
def __init__(self, serial_, channel_number):
"""
:type serial_: KoradSerial.Serial
:type channel_number: int
"""
super(KoradSerial.Channel, self).__init__()
self.__serial = serial_
self.number = channel_number
@property
def current(self):
result = self.__serial.send_receive("ISET{0}?".format(self.number), fixed_length=6)
# There's a bug that returns a 6th character of the previous output.
# This has to be read and discarded, otherwise it will be prepended to the next output.
return float_or_none(result[:5])
@current.setter
def current(self, value):
self.__serial.send("ISET{0}:{1:05.3f}".format(self.number, value))
@property
def voltage(self):
return float_or_none(self.__serial.send_receive("VSET{0}?".format(self.number), fixed_length=5))
@voltage.setter
def voltage(self, value):
self.__serial.send("VSET{0}:{1:05.2f}".format(self.number, value))
@property
def output_current(self):
""" Retrieve this channel's current current output.
:return: Amperes
:rtype: float or None
"""
result = self.__serial.send_receive("IOUT{0}?".format(self.number), fixed_length=5)
return float_or_none(result)
@property
def output_voltage(self):
""" Retrieve this channel's current current voltage.
:return: Volts
:rtype: float or None
"""
result = self.__serial.send_receive("VOUT{0}?".format(self.number), fixed_length=5)
return float_or_none(result)
class Memory(object):
""" Wrap a memory setting. """
def __init__(self, serial_, memory_number):
super(KoradSerial.Memory, self).__init__()
self.__serial = serial_
self.number = memory_number
def recall(self):
""" Recall this memory's settings. """
self.__serial.send("RCL{0}".format(self.number))
def save(self):
""" Save the current voltage and current to this memory. """
self.__serial.send("SAV{0}".format(self.number))
class OnOffButton(object):
""" Wrap an off/off button. """
def __init__(self, serial_, on_command, off_command):
super(KoradSerial.OnOffButton, self).__init__()
self.__serial = serial_
self._on = on_command
self._off = off_command
def on(self):
self.__serial.send(self._on)
def off(self):
self.__serial.send(self._off)
class Serial(object):
""" Serial operations.
There are some quirky things in communication. They go here.
"""
def __init__(self, port, debug=False):
super(KoradSerial.Serial, self).__init__()
self.debug = debug
self.port = serial.Serial(port, 9600, timeout=1)
def read_character(self):
c = self.port.read(1).decode('ascii')
if self.debug:
if len(c) > 0:
print("read: {0} = '{1}'".format(ord(c), c))
else:
print("read: timeout")
return c
def read_string(self, fixed_length=None):
""" Read a string.
It appears that the KoradSerial PSU returns zero-terminated strings.
:return: str
"""
result = []
c = self.read_character()
while len(c) > 0 and ord(c) != 0:
result.append(c)
if fixed_length is not None and len(result) == fixed_length:
break
c = self.read_character()
return ''.join(result)
def send(self, text):
if self.debug:
print("_send: ", text)
sleep(0.1)
self.port.write(text.encode('ascii'))
def send_receive(self, text, fixed_length=None):
self.send(text)
return self.read_string(fixed_length)
def __init__(self, port, debug=False):
super(KoradSerial, self).__init__()
self.__serial = KoradSerial.Serial(port, debug)
# Channels: adjust voltage and current, discover current output voltage.
self.channels = [KoradSerial.Chan
|
zdlm/conext
|
manage.py
|
Python
|
mit
| 305
| 0.003279
|
#!/usr/bin/env python
import os, sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "conext.settings")
from django.core.management import execute_from_command_line
import conext.startup as startup
startup.run()
execute_from_command_line(sys.argv)
pass
|
dslackw/slpkg
|
slpkg/health.py
|
Python
|
gpl-3.0
| 3,641
| 0.000824
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# health.py file is part of slpkg.
# Copyright 2014-2021 Dimitris Zlatanidis <d.zlatanidis@gmail.com>
# All rights reserved.
# Slpkg is a user-friendly package manager for Slackware installations
# https://gitlab.com/dslackw/slpkg
# Slpkg is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from slpkg.messages import Msg
from slpkg.__metadata__ import MetaData as _meta_
from slpkg.pkg.find import find_package
class PackageHealth:
"""Health check installed packages
"""
def __init__(self, mode):
self.mode = mode
self.meta = _meta_
self.green = _meta_.color["GREEN"]
self.red = _meta_.color["RED"]
self.yellow = _meta_.color["YELLOW"]
self.endc = _meta_.color["ENDC"]
self.msg = Msg()
self.pkg_path = _meta_.pkg_path
self.installed = []
self.cn = 0
def packages(self):
"""Get all installed packages from /var/log/packages/ path
"""
self.installed = find_package("", self.pkg_path)
def check(self, line, pkg):
line = line.replace("\n", "")
try:
if (not line.endswith("/") and
not line.endswith(".new") and
not line.startswith("dev/") and
not line.startswith("install/") and
"/incoming/" not in line):
if not os.path.isfile(r"/" + line):
self.cn += 1
print(f"Not installed: {self.red}/{line}{self.endc} --> {pkg}")
elif not self.mode:
print(line)
except IOError:
print()
raise SystemExit()
def test(self):
"""Get started test each package and read file list
"""
self.packages()
self.cf = 0
for pkg in self.installed:
if os.path.isfile(f"{self.meta.pkg_path}{pkg}"):
self.lf = 0
with open(self.pkg_path + pkg, "r") as fopen:
for line in fopen:
if "\0" in line:
print(f"Null: {line}")
break
self.cf += 1 # count all files
self.lf += 1 # count each package files
if self.lf > 19:
self.check(line, pkg)
self.results()
def results(self):
"""Print results
"""
print()
per = int(round((float(self.cf) / (self.cf + self.cn)) * 100))
if per > 90:
color = self.green
elif per > 60:
color = self.yellow
else:
color = self.red
health = f"{color}{str(per)}%{self.endc}"
self.msg.template(78)
print(f"| Total files{' ' * 7}Not installed{' ' * 40}Health")
self.msg.template(78)
print(f"| {self.cf}{' ' * (18-len(str(self.cf)))}{self.cn}{' ' * (55-len(str(self.cn)))}{health:>4}")
self.msg.template(78)
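# Worked example (editor's sketch with made-up counts): with self.cf == 950
# files found and self.cn == 50 missing, per == round(950 / 1000 * 100) == 95,
# so the health column is printed in green as "95%".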
|
darthbhyrava/pywikibot-local
|
pywikibot/daemonize.py
|
Python
|
mit
| 2,017
| 0
|
# -*- coding: utf-8 -*-
"""Module to daemonize the current process on Unix."""
#
# (C) Pywikibot team, 2007-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
import codecs
import os
import sys
is_daemon = False
def daemonize(close_fd=True, chdir=True, write_pid=False, redirect_std=None):
"""
Daemonize the current process.
Only works on POSIX compatible operating systems.
The process will fork to the background and return control to terminal.
@param close_fd: Close the standard streams and replace them by /dev/null
@type close_fd: bool
@param chdir: Change the current working directory to /
@type chdir: bool
@param write_pid: Write the pid to sys.argv[0] + '.pid'
@type write_pid: bool
@param redirect_std: Filename to redirect stdout and stdin to
@type redirect_std: str
"""
# Fork away
if not os.fork():
# Become session leader
os.setsid()
# Fork again to prevent the process from acquiring a
# controlling terminal
pid = os.fork()
if not pid:
global is_daemon
is_daemon = True
if close_fd:
os.close(0)
os.close(1)
os.close(2)
os.open('/dev/null', os.O_RDWR)
if redirect_std:
os.open(redirect_std,
os.O_WRONLY | os.O_APPEND | os.O_CREAT)
else:
os.dup2(0, 1)
os.dup2(1, 2)
if chdir:
os.chdir('/')
return
else:
# Write out the pid
path = os.path.basename(sys.argv[0]) + '.pid'
with codecs.open(path, 'w', 'utf-8') as f:
f.write(str(pid))
os._exit(0)
else:
# Exit to return control to the terminal
# os._exit to prevent the cleanup to run
os._exit(0)
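# Illustrative usage (editor's sketch, not part of the original module):
# from pywikibot import daemonize
# daemonize.daemonize(redirect_std='bot.log')  # fork to the background; output is appended to bot.log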
|
oguayasa/tobii_pro_wrapper
|
tobii_pro_wrapper/__init__.py
|
Python
|
apache-2.0
| 34
| 0
|
from .tobii_pro_wrapper import *
|
pasinskim/integration
|
backend-tests/tests/test_devauth_v2.py
|
Python
|
apache-2.0
| 45,297
| 0.003378
|
# Copyright 2018 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import random
import time
from api.client import ApiClient
from common import mongo, clean_mongo
from infra.cli import CliUseradm, CliDeviceauth, CliTenantadm
import api.deviceauth as deviceauth_v1
import api.deviceauth_v2 as deviceauth_v2
import api.useradm as useradm
import api.tenantadm as tenantadm
import api.deployments as deployments
import api.inventory as inventory
import util.crypto
from common import User, Device, Authset, Tenant, \
create_user, create_tenant, create_tenant_user, \
create_random_authset, create_authset, \
get_device_by_id_data, change_authset_status
@pytest.yield_fixture(scope='function')
def clean_migrated_mongo(clean_mongo):
deviceauth_cli = CliDeviceauth()
useradm_cli = CliUseradm()
deviceauth_cli.migrate()
useradm_cli.migrate()
yield clean_mongo
@pytest.yield_fixture(scope='function')
def clean_migrated_mongo_mt(clean_mongo):
deviceauth_cli = CliDeviceauth()
useradm_cli = CliUseradm()
for t in ['tenant1', 'tenant2']:
deviceauth_cli.migrate(t)
useradm_cli.migrate(t)
yield clean_mongo
@pytest.yield_fixture(scope="function")
def user(clean_migrated_mongo):
yield create_user('user-foo@acme.com', 'correcthorse')
@pytest.yield_fixture(scope="function")
def devices(clean_migrated_mongo, user):
uc = ApiClient(useradm.URL_MGMT)
r = uc.call('POST',
useradm.URL_LOGIN,
auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
devices = []
for _ in range(5):
aset = create_random_authset(utoken)
dev = Device(aset.did, aset.id_data, aset.pubkey)
devices.append(dev)
yield devices
@pytest.yield_fixture(scope="function")
def tenants_users(clean_migrated_mongo_mt):
cli = CliTenantadm()
api = ApiClient(tenantadm.URL_INTERNAL)
names = ['tenant1', 'tenant2']
tenants=[]
for n in names:
tenants.append(create_tenant(n))
for t in tenants:
for i in range(2):
user = create_tenant_user(i, t)
t.users.append(user)
yield tenants
@pytest.yield_fixture(scope="function")
def tenants_users_devices(clean_migrated_mongo_mt, tenants_users):
uc = ApiClient(useradm.URL_MGMT)
for t in tenants_users:
user = t.users[0]
r = uc.call('POST',
useradm.URL_LOGIN,
auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
for _ in range(5):
aset = create_random_authset(utoken, t.tenant_token)
dev = Device(aset.did, aset.id_data, aset.pubkey, t.tenant_token)
t.devices.append(dev)
yield tenants_users
class TestPreauthBase:
def do_test_ok(self, user, tenant_token=''):
useradmm = ApiClient(useradm.URL_MGMT)
devauthm = ApiClient(deviceauth_v2.URL_MGMT)
devauthd = ApiClient(deviceauth_v1.URL_DEVICES)
# log in user
r = useradmm.call('POST',
useradm.URL_LOGIN,
auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
# preauth device
priv, pub = util.crypto.rsa_get_keypair()
id_data = {'mac': 'pretenditsamac'}
body = deviceauth_v2.preauth_req(
id_data,
pub)
r = devauthm.with_auth(utoken).call('POST',
deviceauth_v2.URL_DEVICES,
body)
assert r.status_code == 201
# device appears in device list
r = devauthm.with_auth(utoken).call('GET',
deviceauth_v2.URL_DEVICES)
assert r.status_code == 200
api_devs = r.json()
assert len(api_devs) == 1
api_dev = api_devs[0]
assert api_dev['status'] == 'preauthorized'
assert api_dev['identity_data'] == id_data
assert len(api_dev['auth_sets']) == 1
aset = api_dev['auth_sets'][0]
assert aset['identity_data'] == id_data
assert util.crypto.rsa_compare_keys(aset['pubkey'], pub)
assert aset['status'] == 'preauthorized'
# actual device can obtain auth token
body, sighdr = deviceauth_v1.auth_req(id_data,
pub,
priv,
tenant_token)
r = devauthd.call('POST',
deviceauth_v1.URL_AUTH_REQS,
body,
headers=sighdr)
assert r.status_code == 200
# device and authset changed status to 'accepted'
r = devauthm.with_auth(utoken).call('GET',
deviceauth_v2.URL_DEVICES,
path_params={'id': api_dev['id']})
api_devs = r.json()
assert len(api_devs) == 1
api_dev = api_devs[0]
assert api_dev['status'] == 'accepted'
assert len(api_dev['auth_sets']) == 1
aset = api_dev['auth_sets'][0]
assert aset['status'] == 'accepted'
def do_test_fail_duplicate(self, user, devices):
useradmm = ApiClient(useradm.URL_MGMT)
devauthm = ApiClient(deviceauth_v2.URL_MGMT)
# log in user
r = useradmm.call('POST',
useradm.URL_LOGIN,
auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
# preauth duplicate device
priv, pub = util.crypto.rsa_get_keypair()
id_data = devices[0].id_data
body = deviceauth_v2.preauth_req(
id_data,
pub)
r = devauthm.with_auth(utoken).call('POST',
deviceauth_v2.URL_DEVICES,
body)
assert r.status_code == 409
# device list is unmodified
r = devauthm.with_auth(utoken).call('GET',
deviceauth_v2.URL_DEVICES)
assert r.status_code == 200
api_devs = r.json()
assert len(api_devs) == len(devices)
# existing device has no new auth sets
existing = [d for d in api_devs if d['identity_data'] == id_data]
assert len(existing) == 1
existing = existing[0]
assert len(existing['auth_sets']) == 1
aset = existing['auth_sets'][0]
assert util.crypto.rsa_compare_keys(aset['pubkey'], devices[0].pubkey)
assert aset['status'] == 'pending'
class TestPreauth(TestPreauthBase):
def test_ok(self, user):
self.do_test_ok(user)
def test_fail_duplicate(self, user, devices):
self.do_test_fail_duplicate(user, devices)
def test_fail_bad_request(self, user):
useradmm = ApiClient(useradm.URL_MGMT)
devauthm = ApiClient(deviceauth_v2.URL_MGMT)
# log in user
r = useradmm.call('POST',
useradm.URL_LOGIN,
auth=(user.name, user.pwd))
assert r.status_code == 200
utoken = r.text
# id data not json
priv, pub = util.crypto.rsa_get_keypair()
id_data = '{\"mac\": \"foo\"}'
body = deviceauth_v2.preauth_req(
id_data,
pub)
r = devauthm.with_auth(utoken).call('POST',
|
mjwtom/swift
|
test/dedupe/bin/remakerings.py
|
Python
|
apache-2.0
| 1,973
| 0.003548
|
#!/home/mjwtom/install/python/bin/python
# -*- coding: utf-8 -*-
import os
import subprocess
from nodes import storage_nodes as ips
def generate_rings():
print (os.environ["PATH"])
os.environ["PATH"] = '/home/mjwtom/install/python/bin' + ":" + os.environ["PATH"]
print (os.environ["PATH"])
dev = 'sdb1'
ETC_SWIFT='/etc/swift'
if not os.path.exists(ETC_SWIFT):
os.makedirs(ETC_SWIFT)
if os.path.exists(ETC_SWIFT+'/backups'):
cmd = ['rm',
'-rf',
'%s/backups' % ETC_SWIFT]
subprocess.call(cmd)
print 'current work path:%s' % os.getcwd()
os.chdir(ETC_SWIFT)
print 'change work path to:%s' % os.getcwd()
files = os.listdir(ETC_SWIFT)
for file in files:
path = ETC_SWIFT + '/' + file
if os.path.isdir(path):
continue
shortname, extension = os.path.splitext(file)
if (extension == '.builder') or (extension == '.gz'):
try:
os.remove(path)
except Exception as e:
print e
for builder, port in [('object.builder', 6000),
('object-1.builder', 6000),
('object-2.builder', 6000),
('container.builder', 6001),
('account.builder', 6002)]:
cmd = ['swift-ring-builder',
'%s' % builder,
'create',
'10',
'3',
'1']
subprocess.call(cmd)
i = 1
for ip in ips:
cmd = ['swift-ring-builder',
'%s' % builder,
'add',
'r%dz%d-%s:%d/%s' % (i, i, ip, port, dev),
'1']
subprocess.call(cmd)
i += 1
cmd = ['swift-ring-builder',
'%s' % builder,
'rebalance']
subprocess.call(cmd)
if __name__ == '__main__':
generate_rings()
|
pakodekker/oceansar
|
oceansar/surfaces/balancer.py
|
Python
|
gpl-3.0
| 13,512
| 0.001998
|
from mpi4py import MPI
import numpy as np
from oceansar import utils
class OceanSurfaceBalancer(object):
""" Ocean Surface Balancer class
This class is used to access a surface from
different MPI processes so that each one is
assigned an azimuth (y) portion of the surface and
also gives access to common properties
:param surface: Full ocean surface (only owned by root process)
:param dt: Interpolation differential
:param t0: Initialization time
:param root: Rank number of surface owner
"""
def __init__(self, surface, dt, t0=0., root=0):
# MPI
self.comm = MPI.COMM_WORLD
self.size, self.rank = self.comm.Get_size(), self.comm.Get_rank()
self.root = root
# Surface
if self.rank == self.root:
if not surface:
raise ValueError('Surface is needed by root process')
self.surface = surface
# Prepare surface properties for broadcasting
surface_prop = {'Lx': self.surface.Lx,
'Ly': self.surface.Ly,
'dx': self.surface.dx,
'dy': self.surface.dy,
'Nx': self.surface.Nx,
'Ny': self.surface.Ny,
'x': self.surface.x,
'y': self.surface.y,
'wind_dir': self.surface.wind_dir,
'wind_dir_eff': self.surface.wind_dir_eff,
'wind_fetch': self.surface.wind_fetch,
'wind_U': self.surface.wind_U,
'wind_U_eff': self.surface.wind_U_eff,
'current_mag': self.surface.current_mag,
'current_dir': self.surface.current_dir,
'compute': self.surface.compute}
else:
surface_prop = None
# Broadcast & save properties
surface_prop = self.comm.bcast(surface_prop, root=self.root)
self.Lx = surface_prop['Lx']
self.Ly = surface_prop['Ly']
self.dx = surface_prop['dx']
self.dy = surface_prop['dy']
self.Nx = surface_prop['Nx']
self.Ny_full = surface_prop['Ny']
self.x = surface_prop['x']
self.y_full = surface_prop['y']
self.wind_dir = surface_prop['wind_dir']
self.wind_dir_eff = surface_prop['wind_dir_eff']
self.wind_fetch = surface_prop['wind_fetch']
self.wind_U = surface_prop['wind_U']
self.wind_U_eff = surface_prop['wind_U_eff']
self.current_mag = surface_prop['current_mag']
self.current_dir = surface_prop['current_dir']
self.compute = surface_prop['compute']
# Setup balancing (counts, displacements) for 2-D matrixes [Ny,Nx]
self.counts, self.displ = utils.balance_elements(
self.Ny_full, self.size)
self.counts *= self.Nx
self.displ *= self.Nx
# Process-dependent properties
self.Ny = np.int(self.counts[self.rank] / self.Nx)
self.y = np.empty(self.Ny, dtype=np.float32)
if self.rank == self.root:
y = (np.ascontiguousarray(surface.y),
(self.counts / self.Nx, self.displ / self.Nx), MPI.FLOAT)
else:
y = None
self.comm.Scatterv(y, (self.y, MPI.FLOAT), root=self.root)
# INITIALIZE SURFACE
# Memory allocation (LOW (0) / HIGH (1) dt values)
if 'D' in self.compute:
self._Dx = (np.empty(2 * int(self.counts[self.rank]), dtype=np.float32).
reshape(2, int(self.counts[self.rank] / self.Nx), int(self.Nx)))
self._Dy = np.empty_like(self._Dx, dtype=np.float32)
self._Dz = np.empty_like(self._Dx, dtype=np.float32)
if 'Diff' in self.compute:
self._Diffx = (np.empty(2 * int(self.counts[self.rank]), dtype=np.float32).
reshape(2, int(self.counts[self.rank] / self.Nx), int(self.Nx)))
self._Diffy = np.empty_like(self._Diffx)
if 'Diff2' in self.compute:
self._Diffxx = (np.empty(2 * int(self.counts[self.rank]), dtype=np.float32).
reshape(2, int(self.counts[self.rank] / self.Nx), int(self.Nx)))
self._Diffyy = np.empty_like(self._Diffxx)
self._Diffxy = np.empty_like(self._Diffxx)
if 'V' in self.compute:
self._Vx = (np.empty(2 * int(self.counts[self.rank]), dtype=np.float32).
reshape(2, int(self.counts[self.rank] / self.Nx), int(self.Nx)))
self._Vy = np.empty_like(self._Vx)
self._Vz = np.empty_like(self._Vx)
if 'A' in self.compute:
self._Ax = (np.empty(2 * int(self.counts[self.rank]), dtype=np.float32).
reshape(2, int(self.counts[self.rank] / self.Nx), int(self.Nx)))
self._Ay = np.empty_like(self._Ax)
self._Az = np.empty_like(self._Ax)
if 'hMTF' in self.compute:
self._hMTF = (np.empty(2 * int(self.counts[self.rank]), dtype=np.float32).
reshape(2, int(self.counts[self.rank] / self.Nx), int(self.Nx)))
self.dt = dt
self.t_l_last = -1.
self.t_h_last = -1.
self.t = t0
@property
def t(self):
return self._t
@t.setter
def t(self, value):
self._t = np.float32(value)
# Update low/high times
t_l = np.float32(np.floor(self._t / self.dt) * self.dt)
t_h = t_l + self.dt
if (t_l != self.t_l_last) or (t_h != self.t_h_last):
# Only update t_h if 'going up'
if t_l == self.t_h_last:
if 'D' in self.compute:
self._Dx[0] = self._Dx[1]
self._Dy[0] = self._Dy[1]
self._Dz[0] = self._Dz[1]
if 'Diff' in self.compute:
self._Diffx[0] = self._Diffx[1]
self._Diffy[0] = self._Diffy[1]
if 'Diff2' in self.compute:
self._Diffxx[0] = self._Diffxx[1]
self._Diffyy[0] = self._Diffyy[1]
self._Diffxy[0] = self._Diffxy[1]
if 'V' in self.compute:
self._Vx[0] = self._Vx[1]
self._Vy[0] = self._Vy[1]
self._Vz[0] = self._Vz[1]
if 'A' in self.compute:
self._Ax[0] = self._Ax[1]
self._Ay[0] = self._Ay[1]
self._Az[0] = self._Az[1]
if 'hMTF' in self.compute:
self._hMTF[0] = self._hMTF[1]
t_update = np.array([[1, t_h]])
else:
t_update = np.array([[0, t_l], [1, t_h]])
# Initialize surface properties
for t_i in t_update:
if self.rank == self.root:
self.surface.t = t_i[1]
if 'D' in self.compute:
Dx_f = (self.surface.Dx,
(self.counts, self.displ), MPI.FLOAT)
Dy_f = (self.surface.Dy,
(self.counts, self.displ), MPI.FLOAT)
Dz_f = (self.surface.Dz,
(self.counts, self.displ), MPI.FLOAT)
if 'Diff' in self.compute:
Diffx_f = (self.surface.Diffx,
(self.counts, self.displ), MPI.FLOAT)
Diffy_f = (self.surface.Diffy,
(self.counts, self.displ), MPI.FLOAT)
if 'Diff2' in self.compute:
Diffxx_f = (self.surface.Diffxx,
(self.counts, self.displ), MPI.FLOAT)
Diffyy_f = (self.surface.Diffyy,
(self.counts, self.displ), MPI.FLOAT)
Diffxy_f = (self.surface.Diffxy,
(se
|
vgrem/SharePointOnline-REST-Python-Client
|
tests/outlook_case.py
|
Python
|
mit
| 1,082
| 0.003697
|
from unittest import TestCase
from settings import settings
from office365.outlookservices.outlook_client import OutlookClient
from office365.runtime.auth.authentication_context import AuthenticationContext
class OutlookClientTestCase(TestCase):
"""SharePoint specific test case base class"""
@classmethod
def setUpClass(cls):
# Due to Outlook REST API v1.0 BasicAuth Deprecation
# (refer https://developer.microsoft.com/en-us/office/blogs/outlook-rest-api-v1-0-basicauth-deprecation/)
# NetworkCredentialContext class should be no longer utilized
# ctx_auth = NetworkCredentialContext(username=settings['user_credentials']['username'],
# password=settings['user_credentials']['password'])
ctx_auth = AuthenticationContext(url=settings['tenant'])
ctx_auth.acquire_token_password_grant(client_credentials=settings['client_credentials'],
user_credentials=settings['user_credentials'])
cls.client = OutlookClient(ctx_auth)
|
freedomsponsors/www.freedomsponsors.org
|
djangoproject/bitcoin_frespo/models.py
|
Python
|
agpl-3.0
| 1,943
| 0.001544
|
from django.db import models
from django.utils import timezone
class ReceiveAddress(models.Model):
address = models.CharField(max_length=128, blank=True)
available = models.BooleanField(default=True)
@classmethod
def newAddress(cls, address):
receive_address = cls()
receive_address.address = address
receive_address.available = True
return receive_address
def use(self):
self.available = False
self.save()
class MoneySent(models.Model):
from_address = models.CharField(max_length=128)
to_address = models.CharField(max_length=128)
value = models.DecimalField(max_digits=16, decimal_places=8)
transaction_hash = models.CharField(max_length=128, null=True)
status = models.CharField(max_length=30)
creationDate = models.DateTimeField()
lastChangeDate = models.DateTimeField()
CREATED = 'CREATED'
SENT = 'SENT'
CONFIRMED_IPN = 'CONFIRMED_IPN'
CONFIRMED_TRN = 'CONFIRMED_TRN'
@classmethod
def newMoneySent(cls, from_address, to_address, value):
money_sent = cls()
money_sent.from_address = from_address
money_sent.to_address = to_address
money_sent.value = value
money_sent.status = MoneySent.CREATED
money_sent.creationDate = timezone.now()
money_sent.lastChangeDate = money_sent.creationDate
return money_sent
def touch(self):
self.lastChangeDate = timezone.now()
def sent(self, transaction_hash):
self.status = MoneySent.SENT
self.transaction_hash = transaction_hash
self.touch()
self.save()
def confirm_ipn(self):
if self.status == MoneySent.CREATED or self.status == MoneySent.SENT:
self.status = MoneySent.CONFIRMED_IPN
self.touch()
self.save()
def confirm_trn(self):
self.status = MoneySent.CONFIRMED_TRN
self.touch()
self.save()
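# Illustrative lifecycle (editor's note): a MoneySent row is expected to move
# CREATED -> SENT (via sent(tx_hash)) -> CONFIRMED_IPN and/or CONFIRMED_TRN;
# confirm_ipn() only promotes rows still in the CREATED or SENT state.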
|
ikn/wvoas
|
game/level.py
|
Python
|
gpl-3.0
| 21,546
| 0.003063
|
from math import cos, sin, pi, ceil
from random import randint, random, expovariate, shuffle
import pygame as pg
from pygame import Rect
from ext import evthandler as eh
from conf import conf
from obj import Player, Star
from util import ir
import ui
random0 = lambda: 2 * random() - 1
def tile (screen, img, rect, ox = 0, oy = 0, full = None):
# get offset
if full is not None:
ox += rect[0] - full[0]
oy += rect[1] - full[1]
# draw
i_w, i_h = img.get_size()
ox %= i_w
oy %= i_h
x0, y0, w0, h0 = rect
x1, y1 = x0 + w0, y0 + h0
x = x0
while x < x1:
this_ox = ox if x == x0 else 0
w = min(i_w - this_ox, x1 - x)
y = y0
while y < y1:
this_oy = oy if y == y0 else 0
h = min(i_h - this_oy, y1 - y)
screen.blit(img, (x, y), (this_ox, this_oy, w, h))
y += h
x += w
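# Illustrative note (editor's sketch): tiling a 32x32 image over rect
# (100, 100, 80, 50) blits the image repeatedly, clipping the right/bottom
# edges so exactly the 80x50 region is covered; ox/oy shift the pattern origin.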
class Level (object):
def __init__ (self, game, event_handler = None, ID = 0, cp = -1):
self.game = game
# input
if event_handler is not None:
event_handler.add_event_handlers({
pg.KEYDOWN: self.skip,
pg.MOUSEBUTTONDOWN: self.skip
})
event_handler.add_key_handlers([
(conf.KEYS_BACK, self.pause, eh.MODE_ONDOWN),
(conf.KEYS_RESET, self.reset, eh.MODE_ONDOWN),
(conf.KEYS_JUMP, self.jump, eh.MODE_ONDOWN_REPEAT, 1, 1)
] + [
(ks, [(self.move, (i,))], eh.MODE_HELD)
for i, ks in enumerate((conf.KEYS_LEFT, conf.KEYS_RIGHT))
])
w, h = conf.RES
self.centre = (w / 2, h / 2)
ww, wh = conf.WINDOW_SIZE
border = (2 * (ww + 5), 2 * (wh + 5))
self.window_bds = pg.Rect(0, 0, w, h).inflate(border)
self.clouds = []
self.load_graphics()
if event_handler is not None:
self.move_channel = game.move_channel
self.star_channel = game.star_channel
else:
self.move_channel = None
self.star_channel = None
# load first level
self.ID = None
self.init(ID, cp)
def init (self, ID = None, cp = None):
self.paused = False
self.dying = False
self.first_dying = False
self.winning = False
self.fading = False
self.particles = []
self.particle_rects = []
self.void_jitter = [conf.VOID_JITTER_X, conf.VOID_JITTER_Y, conf.VOID_JITTER_T]
self.first = True
# get level/current checkpoint
if ID is None:
# same level
ID = self.ID
if ID != self.ID:
# new level
self.ID = ID
self.current_cp = cp if cp is not None else -1
# clouds: randomise initial positions and velocities
self.clouds = cs = []
w, h = conf.RES
imgs = self.imgs
vx0 = conf.CLOUD_SPEED
vy0 = vx0 * conf.CLOUD_VERT_SPEED_RATIO
self.cloud_vel = [vx0 * random0(), vy0 * random0()]
vx = conf.CLOUD_MOD_SPEED_RATIO
vy = vx * conf.CLOUD_VERT_SPEED_RATIO
for c in conf.CLOUDS:
c_w, c_h = imgs[c].get_size()
s = (c_w, c_h)
c_w /= 2
c_h /= 2
pos = [randint(-c_w, w - c_w), randint(-c_h, h - c_h)]
vel = [vx * random0(), vy * random0()]
cs.append((pos, vel, s))
elif cp is not None:
self.current_cp = cp
data = conf.LEVELS[ID]
# background
self.bgs = data.get('bgs', conf.DEFAULT_BGS)
# player
if self.current_cp >= 0:
p = list(data['checkpoints'][self.current_cp][:2])
s_p, s_c = conf.PLAYER_SIZE, conf.CHECKPOINT_SIZE
for i in (0, 1):
p[i] += float(s_c[i] - s_p[i]) / 2
else:
p = data['player_pos']
self.player = Player(self, p)
# window
x, y = Rect(self.to_screen(self.player.rect)).center
w, h = conf.HALF_WINDOW_SIZE
self.window = Rect(x - w, y - h, 2 * w, 2 * h)
self.old_window = self.window.copy()
# checkpoints
s = conf.CHECKPOINT_SIZE
self.checkpoints = [Rect(p + s) for p in data.get('checkpoints', [])]
# goal
self.goal = Rect(data['goal'] + conf.GOAL_SIZE)
self.goal_img = self.goal.move(conf.GOAL_OFFSET)
self.goal_img.size = self.imgs['goal'].get_size()
# stars
self.stars = [Star(self, p, [ID, i] in conf.STARS)
for i, p in enumerate(data.get('stars', []))]
if self.star_channel is not None and not all(s.got for s in self.stars):
self.star_channel.unpause()
# rects
self.all_rects = [Rect(r) for r in data.get('rects', [])]
self.all_vrects = [Rect(r) for r in data.get('vrects', [])]
self.arects = [Rect(r) for r in data.get('arects', [])]
self.update_rects()
def skip (self, evt):
if self.dying and self.dying_counter < conf.DIE_SKIP_THRESHOLD and \
not (evt.type == pg.KEYDOWN and evt.key in conf.KEYS_BACK) and \
not self.winning:
self.init()
elif conf.DEBUG and evt.type == pg.MOUSEBUTTONDOWN:
r = self.player.rect
c = self.window.center
print 'moving to', c
for i in (0, 1):
r[i] = c[i] - (r[i + 2] / 2)
self.player.old_rect = r
def pause (self, *args):
if self.move_channel is not None:
self.move_channel.pause()
if self.star_channel is not None:
self.star_channel.pause()
self.game.start_backend(ui.Paused, self)
self.paused = True
def reset (self, *args):
if not self.winning:
self.init()
def jump (self, key, mode, mods):
self.player.jump(mode == 0)
def move (self, key, mode, mods, i):
self.player.move(i)
def update_window (self):
w = self.window
wp0 = w.topleft
wp1 = w.bottomright
s = conf.RES
self.inverse_win = rs = []
for px in (0, 1, 2):
for py in (0, 1, 2):
if px == py == 1:
continue
r = [0, 0, 0, 0]
for i, p in enumerate((px, py)):
if p == 0:
r[i + 2] = wp0[i]
if p == 1:
r[i] = wp0[i]
r[i + 2] = wp1[i] - wp0[i]
elif p == 2:
r[i] = wp1[i]
r[i + 2] = s[i] - wp1[i]
if r[2] > 0 and r[3] > 0:
rs.append(Rect(r))
def get_clip (self, r1, r2, err = 0):
x01, y01, w, h = r1
x11, y11 = x01 + w, y01 + h
x02, y02, w, h = r2
x12, y12 = x02 + w, y02 + h
x0, y0 = max(x01, x02), max(y01, y02)
x1, y1 = min(x11, x12), min(y11, y12)
w, h = x1 - x0, y1 - y0
if w > err and h > err:
return (x0, y0, w, h)
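# Illustrative example (editor's sketch): get_clip((0, 0, 10, 10), (5, 5, 10, 10))
# returns (5, 5, 5, 5), while rects that merely touch return None because the
# overlap is not strictly greater than err.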
def update_rects (self):
self.update_window()
# rects
self.rects = rects = []
self.draw_rects = draw = []
w = self.window
for r in self.all_rects:
c = w.clip(r)
if c:
rects.append(c)
draw.append(r)
# vrects
self.vrects = rects = []
ws = self.inverse_win
for r in self.all_vrects:
for w in ws:
c = w.clip(r)
if c:
rects.append(c)
def handle_collisions (self):
get_clip = self.get_clip
p = self.player.rect
p0 = list(p)
for r in self.rects + self.vrects + self.arects:
if get_clip(r, p):
r_x0, r_y0, w, h = r
r_x1, r_y1 = r_x0 + w, r_y0 + h
p_x0, p_y0, w, h = p
p_x1, p_y1 = p_x0 + w, p_y0 + h
x, dirn = min((p_x1 - r_x0, 0), (p_y1 - r_y0, 1),
|