code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# Packaging script for the python3-indy wrapper around the Indy SDK C library.
from distutils.core import setup
import os

# Allow CI to inject the version; fall back to a hard-coded default for
# local/manual builds.
PKG_VERSION = os.environ.get('PACKAGE_VERSION') or '1.11.1'

# Dependencies needed only to run the test suite.
TEST_DEPS = [
    'pytest<3.7', 'pytest-asyncio', 'base58'
]

setup(
    name='python3-indy',
    version=PKG_VERSION,
    packages=['indy'],
    url='https://github.com/hyperledger/indy-sdk',
    license='MIT/Apache-2.0',
    author='Vyacheslav Gudkov',
    author_email='vyacheslav.gudkov@dsr-company.com',
    description='This is the official SDK for Hyperledger Indy (https://www.hyperledger.org/projects), which provides a distributed-ledger-based foundation for self-sovereign identity (https://sovrin.org). The major artifact of the SDK is a c-callable library.',
    install_requires=['base58'],
    tests_require=TEST_DEPS,
    # Makes `pip install python3-indy[test]` pull in the test dependencies.
    extras_require={
        'test': TEST_DEPS
    }
)
| peacekeeper/indy-sdk | wrappers/python/setup.py | Python | apache-2.0 | 794 |
#!/usr/bin/env python
"""Tests for the linalg.isolve.gcrotmk module
"""
from __future__ import division, print_function, absolute_import
from numpy.testing import assert_, assert_allclose, assert_equal
from scipy._lib._numpy_compat import suppress_warnings
import numpy as np
from numpy import zeros, array, allclose
from scipy.linalg import norm
from scipy.sparse import csr_matrix, eye, rand
from scipy.sparse.linalg.interface import LinearOperator
from scipy.sparse.linalg import splu
from scipy.sparse.linalg.isolve import gcrotmk, gmres
# Small nonsymmetric 6x6 sparse test system shared by all tests in this module.
Am = csr_matrix(array([[-2,1,0,0,0,9],
                       [1,-2,1,0,5,0],
                       [0,1,-2,1,0,0],
                       [0,0,1,-2,1,0],
                       [0,3,0,1,-2,1],
                       [1,0,0,0,1,-2]]))
b = array([1,2,3,4,5,6])
# Mutable one-element list so matvec() can record how many matrix-vector
# products the solver performs.
count = [0]


def matvec(v):
    # Count the invocation, then delegate to the sparse matrix product.
    count[0] += 1
    return Am*v


# LinearOperator wrapper around Am, so tests can observe the matvec count.
A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)
def do_solve(**kw):
    """Solve the module-level test system A x = b with gcrotmk.

    Resets the global matvec counter, solves to a tight tolerance, checks
    the residual, and returns (solution, number of matvecs performed).
    Extra keyword arguments are forwarded to gcrotmk.
    """
    count[0] = 0
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, ".*called without specifying.*")
        solution, info = gcrotmk(A, b, x0=zeros(A.shape[0]), tol=1e-14, **kw)
    matvecs = count[0]
    assert_(allclose(A*solution, b, rtol=1e-12, atol=1e-12),
            norm(A*solution-b))
    return solution, matvecs
class TestGCROTMK(object):
    """Behavioral tests for gcrotmk, using the module-level test system
    (A, b) and the matvec-counting helper do_solve()."""

    def test_preconditioner(self):
        # Check that preconditioning works
        pc = splu(Am.tocsc())
        M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype)
        x0, count_0 = do_solve()
        x1, count_1 = do_solve(M=M)
        # With an exact (LU) preconditioner the solver converges in a fixed,
        # small number of matvecs, far fewer than the unpreconditioned run.
        assert_equal(count_1, 3)
        assert_(count_1 < count_0/2)
        assert_(allclose(x1, x0, rtol=1e-14))

    def test_arnoldi(self):
        np.random.seed(1)
        A = eye(2000) + rand(2000, 2000, density=5e-4)
        b = np.random.rand(2000)
        # The inner arnoldi should be equivalent to gmres
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            # k=0 disables outer-subspace recycling, so one gcrotmk cycle of
            # m=15 inner steps should reproduce one gmres restart of 15 steps.
            x0, flag0 = gcrotmk(A, b, x0=zeros(A.shape[0]), m=15, k=0, maxiter=1)
            x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]), restart=15, maxiter=1)
        # Both hit maxiter without converging...
        assert_equal(flag0, 1)
        assert_equal(flag1, 1)
        assert_(np.linalg.norm(A.dot(x0) - b) > 1e-3)
        # ...and produce the same iterate.
        assert_allclose(x0, x1)

    def test_cornercase(self):
        np.random.seed(1234)
        # Rounding error may prevent convergence with tol=0 --- ensure
        # that the return values in this case are correct, and no
        # exceptions are raised
        for n in [3, 5, 10, 100]:
            A = 2*eye(n)
            with suppress_warnings() as sup:
                sup.filter(DeprecationWarning, ".*called without specifying.*")
                b = np.ones(n)
                x, info = gcrotmk(A, b, maxiter=10)
                assert_equal(info, 0)
                assert_allclose(A.dot(x) - b, 0, atol=1e-14)
                # With tol=0 the solver may report non-convergence (info != 0)
                # due to rounding; only check the residual when it claims success.
                x, info = gcrotmk(A, b, tol=0, maxiter=10)
                if info == 0:
                    assert_allclose(A.dot(x) - b, 0, atol=1e-14)
                b = np.random.rand(n)
                x, info = gcrotmk(A, b, maxiter=10)
                assert_equal(info, 0)
                assert_allclose(A.dot(x) - b, 0, atol=1e-14)
                x, info = gcrotmk(A, b, tol=0, maxiter=10)
                if info == 0:
                    assert_allclose(A.dot(x) - b, 0, atol=1e-14)

    def test_nans(self):
        # A NaN in the matrix must surface as non-convergence (info > 0),
        # not as an exception or an infinite loop.
        A = eye(3, format='lil')
        A[1,1] = np.nan
        b = np.ones(3)
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            x, info = gcrotmk(A, b, tol=0, maxiter=10)
        assert_equal(info, 1)

    def test_truncate(self):
        # Both strategies for truncating the recycled subspace must converge.
        np.random.seed(1234)
        A = np.random.rand(30, 30) + np.eye(30)
        b = np.random.rand(30)
        for truncate in ['oldest', 'smallest']:
            with suppress_warnings() as sup:
                sup.filter(DeprecationWarning, ".*called without specifying.*")
                x, info = gcrotmk(A, b, m=10, k=10, truncate=truncate, tol=1e-4,
                                  maxiter=200)
            assert_equal(info, 0)
            assert_allclose(A.dot(x) - b, 0, atol=1e-3)

    def test_CU(self):
        for discard_C in (True, False):
            # Check that C,U behave as expected
            CU = []
            x0, count_0 = do_solve(CU=CU, discard_C=discard_C)
            assert_(len(CU) > 0)
            assert_(len(CU) <= 6)
            if discard_C:
                for c, u in CU:
                    assert_(c is None)
            # should converge immediately
            x1, count_1 = do_solve(CU=CU, discard_C=discard_C)
            if discard_C:
                # Discarded C vectors must be regenerated: one matvec per
                # stored U vector, plus the two baseline matvecs.
                assert_equal(count_1, 2 + len(CU))
            else:
                assert_equal(count_1, 3)
            assert_(count_1 <= count_0/2)
            assert_allclose(x1, x0, atol=1e-14)

    def test_denormals(self):
        # Check that no warnings are emitted if the matrix contains
        # numbers for which 1/x has no float representation, and that
        # the solver behaves properly.
        A = np.array([[1, 2], [3, 4]], dtype=float)
        A *= 100 * np.nextafter(0, 1)
        b = np.array([1, 1])
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            xp, info = gcrotmk(A, b)
        if info == 0:
            assert_allclose(A.dot(xp), b)
| gertingold/scipy | scipy/sparse/linalg/isolve/tests/test_gcrotmk.py | Python | bsd-3-clause | 5,488 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import theano.tensor as T
from recurrent import RecurrentLayer
from deepy.core import neural_computation, env
class PeepholeLSTM(RecurrentLayer):
    """
    Long short-term memory layer with peepholes.

    The gates additionally see a learned projection through the matrix C
    (three gate-sized slices). NOTE(review): classic peephole LSTMs feed the
    cell state c_{t-1}/c_t into the gates, but here C is applied to h_{t-1}
    (see compute_new_state) -- confirm this is intended.
    """

    def __init__(self, hidden_size, init_forget_bias=1, **kwargs):
        """
        :param hidden_size: number of hidden units (also the cell size).
        :param init_forget_bias: initial value of the forget-gate bias; a
            positive value biases the layer toward remembering early on.
        """
        kwargs["hidden_size"] = hidden_size
        super(PeepholeLSTM, self).__init__("PLSTM", ["state", "lstm_cell"], **kwargs)
        # Bug fix: the argument was previously ignored (hard-coded to 1).
        self._init_forget_bias = init_forget_bias

    @neural_computation
    def compute_new_state(self, step_inputs):
        # Unpack the pre-projected gate inputs and the recurrent state.
        # Unpack order now matches the key order ("xc" before "xo"); the old
        # code swapped the xc/xo locals, which merely relabeled learned W
        # columns but made the code misleading.
        xi_t, xf_t, xc_t, xo_t, h_tm1, c_tm1 = map(
            step_inputs.get, ["xi", "xf", "xc", "xo", "state", "lstm_cell"])
        if not xi_t:
            # No sequence input connected: gates see only recurrent terms.
            xi_t, xf_t, xc_t, xo_t = 0, 0, 0, 0

        # LSTM core step
        hs = self.hidden_size
        dot_h = T.dot(h_tm1, self.U)  # recurrent contributions (4 gate slices)
        dot_c = T.dot(h_tm1, self.C)  # "peephole" contributions (3 gate slices)
        i_t = self.gate_activate(xi_t + dot_h[:, :hs] + self.b_i + dot_c[:, :hs])
        f_t = self.gate_activate(xf_t + dot_h[:, hs:hs*2] + self.b_f + dot_c[:, hs:hs*2])
        c_t = f_t * c_tm1 + i_t * self.activate(xc_t + dot_h[:, hs*2:hs*3] + self.b_c)
        o_t = self.gate_activate(xo_t + dot_h[:, hs*3:hs*4] + dot_c[:, hs*2:hs*3] + self.b_o)
        h_t = o_t * self.activate(c_t)

        return {"state": h_t, "lstm_cell": c_t}

    @neural_computation
    def merge_inputs(self, input_var, additional_inputs=None):
        """Concatenate all inputs, project through W, and split per gate."""
        if not additional_inputs:
            additional_inputs = []
        # list() keeps this working on Python 3, where filter() is lazy.
        all_inputs = list(filter(bool, [input_var] + additional_inputs))
        if not all_inputs:
            return {}
        last_dim_id = all_inputs[0].ndim - 1
        merged_input = T.concatenate(all_inputs, axis=last_dim_id)
        dot_input = T.dot(merged_input, self.W)
        merged_inputs = {
            "xi": dot_input[:, :, :self.hidden_size],
            "xf": dot_input[:, :, self.hidden_size:self.hidden_size*2],
            "xc": dot_input[:, :, self.hidden_size*2:self.hidden_size*3],
            "xo": dot_input[:, :, self.hidden_size*3:self.hidden_size*4],
        }
        return merged_inputs

    def prepare(self):
        """Create parameters: W packs the four input projections, U the four
        recurrent projections, and C the three peephole projections."""
        if self._input_type == "sequence":
            all_input_dims = [self.input_dim] + self.additional_input_dims
        else:
            all_input_dims = self.additional_input_dims
        summed_input_dim = sum(all_input_dims, 0)
        self.output_dim = self.hidden_size

        self.W = self.create_weight(summed_input_dim, self.hidden_size * 4, "W", initializer=self.outer_init)
        self.U = self.create_weight(self.hidden_size, self.hidden_size * 4, "U", initializer=self.inner_init)
        self.C = self.create_weight(self.hidden_size, self.hidden_size * 3, "C", initializer=self.inner_init)

        self.b_i = self.create_bias(self.hidden_size, "bi")
        self.b_f = self.create_bias(self.hidden_size, "bf")
        # Bug fix: the old code wrote np.ones((h,) * bias), using the bias as
        # a tuple-repetition factor -- wrong shape for any value other than 1.
        # Initialize the forget bias as a vector filled with init_forget_bias.
        self.b_f.set_value(
            np.ones((self.hidden_size,), dtype=env.FLOATX) * self._init_forget_bias)
        self.b_c = self.create_bias(self.hidden_size, "bc")
        self.b_o = self.create_bias(self.hidden_size, "bo")

        if summed_input_dim > 0:
            self.register_parameters(self.W, self.U, self.C,
                                     self.b_i, self.b_f, self.b_c, self.b_o)
        else:
            self.register_parameters(self.U, self.C,
                                     self.b_i, self.b_f, self.b_c, self.b_o)
PLSTM = PeepholeLSTM | zomux/deepy | deepy/layers/plstm.py | Python | mit | 3,426 |
# Cycle Monitor, Copyright (C) 2016 M.B.Grieve
# This file is part of the Cycle Monitor example application.
# Cycle Monitor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Cycle Monitor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with City Monitor. If not, see <http://www.gnu.org/licenses/>.
# Contact: moray.grieve@me.com
import sys, threading, traceback
from com.jtech.scenario_printer import ScenarioDefinitionFinder
if __name__ == '__main__':
    # Usage: printer.py <port> <data-view name> <comma-separated field list>
    port = int(sys.argv[1])
    dvname = sys.argv[2]
    fields = list(sys.argv[3].split(','))
    try:
        # Connect to the scenario printer on localhost and block until the
        # user presses a key.
        ScenarioDefinitionFinder('localhost', port, dvname, fields)
        r = raw_input("Press any key to stop ...")
    except Exception:
        # Bug fix: the original wrapped print_exc() in a print statement;
        # print_exc() already writes the traceback to stderr and returns
        # None, so that only printed "None". Also narrowed the bare except
        # so Ctrl-C (KeyboardInterrupt) is not swallowed.
        traceback.print_exc()
from my_extension.carray_ext import CarrayOnSteroids
from my_extension.ctable import ctable
| FrancescElies/bcolz_extension_test | my_extension/__init__.py | Python | gpl-2.0 | 92 |
"""Test ModelRunScheduler and JobScheduler
"""
from unittest.mock import Mock, patch
import networkx
from pytest import fixture, raises
from smif.controller.scheduler import JobScheduler, ModelRunScheduler
from smif.model import ModelOperation, ScenarioModel, SectorModel
class EmptySectorModel(SectorModel):
    """Minimal SectorModel stub whose simulate step is the identity."""
    def simulate(self, data):
        # Pass the data handle through unchanged -- just enough behavior to
        # exercise the scheduler.
        return data
class TestModelRunScheduler():
    """Tests for ModelRunScheduler, which launches `smif run` as a
    subprocess per model run and derives status from the process state."""

    @patch('smif.controller.scheduler.subprocess.Popen')
    def test_single_modelrun(self, mock_popen):
        # Adding a model run should shell out to `smif run` with the
        # configured directory (-d) and output format (-i).
        my_scheduler = ModelRunScheduler()
        my_scheduler.add('my_model_run', {
            'directory': 'mock/dir',
            'verbosity': 0,
            'warm_start': False,
            'output_format': 'local_csv'
        })
        mock_popen.assert_called_with(
            'smif run my_model_run -d mock/dir -i local_csv',
            shell=True,
            stderr=-2, stdout=-1
        )

    def test_status_modelrun_never_added(self):
        # Unknown model runs report 'unstarted' rather than raising.
        my_scheduler = ModelRunScheduler()
        status = my_scheduler.get_status('my_model_run')
        assert status['status'] == 'unstarted'

    @patch('smif.controller.scheduler.subprocess.Popen')
    def test_status_model_started(self, mock_popen):
        # poll() returning None indicates the process is still running.
        attrs = {
            'poll.return_value': None,
            'communicate.return_value': (
                "this is a stdout".encode('utf-8'),
            ),
            'returncode': None
        }
        process_mock = Mock(**attrs)
        mock_popen.return_value = process_mock
        my_scheduler = ModelRunScheduler()
        my_scheduler.add('my_model_run', {
            'directory': 'mock/dir',
            'verbosity': 0,
            'warm_start': False,
            'output_format': 'local_csv'
        })
        my_scheduler.lock = True
        status = my_scheduler.get_status('my_model_run')
        assert status['status'] == 'running'

    @patch('smif.controller.scheduler.subprocess.Popen')
    def test_status_model_done(self, mock_popen):
        # poll() returning 0 (clean exit) maps to 'done'.
        attrs = {
            'poll.return_value': 0,
            'communicate.return_value': (
                "this is a stdout".encode('utf-8')
            )
        }
        process_mock = Mock(**attrs)
        mock_popen.return_value = process_mock
        my_scheduler = ModelRunScheduler()
        my_scheduler.add('my_model_run', {
            'directory': 'mock/dir',
            'verbosity': 0,
            'warm_start': False,
            'output_format': 'local_csv'
        })
        my_scheduler.lock = True
        response = my_scheduler.get_status('my_model_run')
        assert response['status'] == 'done'

    @patch('smif.controller.scheduler.subprocess.Popen')
    def test_status_model_failed(self, mock_popen):
        # A nonzero exit code from poll() maps to 'failed'.
        attrs = {
            'poll.return_value': 1,
            'communicate.return_value': (
                "this is a stdout".encode('utf-8'),
            )
        }
        process_mock = Mock(**attrs)
        mock_popen.return_value = process_mock
        my_scheduler = ModelRunScheduler()
        my_scheduler.add('my_model_run', {
            'directory': 'mock/dir',
            'verbosity': 0,
            'warm_start': False,
            'output_format':
                'local_csv'
        })
        my_scheduler.lock = True
        response = my_scheduler.get_status('my_model_run')
        assert response['status'] == 'failed'

    @patch('smif.controller.scheduler.subprocess.Popen')
    def test_status_model_stopped(self, mock_popen):
        # A run that was killed while still running reports 'stopped'.
        attrs = {
            'poll.return_value': None,
            'communicate.return_value': (
                "this is a stdout".encode('utf-8'),
            )
        }
        process_mock = Mock(**attrs)
        mock_popen.return_value = process_mock
        my_scheduler = ModelRunScheduler()
        my_scheduler.add('my_model_run', {
            'directory': 'mock/dir',
            'verbosity': 0,
            'warm_start': False,
            'output_format':
                'local_csv'
        })
        my_scheduler.lock = True
        my_scheduler.kill('my_model_run')
        response = my_scheduler.get_status('my_model_run')
        assert response['status'] == 'stopped'
class TestJobScheduler():
    """Tests for JobScheduler, which runs a networkx DiGraph of model jobs
    (before_model_run / simulate operations) in dependency order."""

    @fixture
    def job_graph(self):
        """Two-node graph: scenario 'a' (before_model_run) feeding the stub
        sector model 'b' (simulate)."""
        G = networkx.DiGraph()
        a_model = ScenarioModel('a')
        G.add_node(
            'a',
            model=a_model,
            operation=ModelOperation.BEFORE_MODEL_RUN,
            modelrun_name='test',
            current_timestep=1,
            timesteps=[1],
            decision_iteration=0
        )
        b_model = EmptySectorModel('b')
        G.add_node(
            'b',
            model=b_model,
            operation=ModelOperation.SIMULATE,
            modelrun_name='test',
            current_timestep=1,
            timesteps=[1],
            decision_iteration=0
        )
        G.add_edge('a', 'b')
        return G

    @fixture
    def scheduler(self, empty_store):
        """JobScheduler backed by a store pre-populated with a minimal model
        run and system-of-systems model configuration."""
        empty_store.write_model_run({
            'name': 'test',
            'narratives': {},
            'scenarios': {},
            'sos_model': 'test_sos_model'
        })
        empty_store.write_sos_model({
            'name': 'test_sos_model',
            'scenario_dependencies': [],
            'model_dependencies': []
        })
        scheduler = JobScheduler()
        scheduler.store = empty_store
        return scheduler

    def test_add(self, job_graph, scheduler):
        # A valid DAG is executed synchronously and completes.
        job_id, err = scheduler.add(job_graph)
        print(err)
        assert err is None
        assert scheduler.get_status(job_id)['status'] == 'done'

    def test_default_status(self):
        # Unknown job ids report 'unstarted' rather than raising.
        scheduler = JobScheduler()
        assert scheduler.get_status(0)['status'] == 'unstarted'

    def test_add_cyclic(self, job_graph, scheduler):
        # Adding the reverse edge creates a cycle, which is unsupported.
        job_graph.add_edge('b', 'a')
        job_id, err = scheduler.add(job_graph)
        assert isinstance(err, NotImplementedError)
        assert scheduler.get_status(job_id)['status'] == 'failed'

    def test_kill_fails(self, job_graph, scheduler):
        # Killing jobs is not implemented for the synchronous scheduler.
        job_id, err = scheduler.add(job_graph)
        assert err is None
        with raises(NotImplementedError):
            scheduler.kill(job_id)

    def test_unknown_operation(self, job_graph, scheduler):
        # A node carrying an unrecognised operation fails the whole job.
        model = EmptySectorModel('c')
        job_graph.add_node(
            'c',
            model=model,
            operation='unknown_operation',
            modelrun_name='test',
            current_timestep=1,
            timesteps=[1],
            decision_iteration=0
        )
        job_id, err = scheduler.add(job_graph)
        assert isinstance(err, ValueError)
        assert scheduler.get_status(job_id)['status'] == 'failed'
| willu47/smif | tests/controller/test_scheduler.py | Python | mit | 6,693 |
"""
Utilites for working with tarfiles
"""
import io
import os
import tarfile
from cStringIO import StringIO
def create_inmemory_tarfile(compression='bz2'):
    """
    Creates a tar archive stored in an in-memory buffer.

    Args:
        compression: tarfile compression suffix ('bz2', 'gz', or '' for none).

    Returns:
        (buffer, tar): the io.BytesIO object backing the archive and the open
        TarFile writing into it. Close `tar` before reading the buffer so
        that all data is flushed.
    """
    # io.BytesIO works on both Python 2 and 3, unlike cStringIO.StringIO.
    tar_bytestr = io.BytesIO()
    tar = tarfile.open(fileobj=tar_bytestr, mode='w:' + compression)
    return tar_bytestr, tar
def archive_dir(path, compression='bz2'):
    """Archive the entries directly under `path`; return the archive bytes."""
    buffer_obj, tar = create_inmemory_tarfile(compression=compression)
    for entry in os.listdir(path):
        tar.add(os.path.join(path, entry), arcname=entry)
    tar.close()
    return buffer_obj.getvalue()
def add_bytes_to_file(tar, bytestr, filename):
    """
    Adds bytestr to tar archive at filename.

    Args:
        tar: an open, writable tarfile.TarFile.
        bytestr: raw contents to store for the new member.
        filename: archive member name for the new entry.
    """
    tarinfo = tarfile.TarInfo(filename)
    tarinfo.size = len(bytestr)
    # io.BytesIO works on both Python 2 and 3, unlike cStringIO.StringIO.
    tar.addfile(tarinfo, io.BytesIO(bytestr))
def extract_tar_from_bytes(bytestr, dest_dir):
    """
    Extracts a tar archive held in memory into dest_dir.

    Args:
        bytestr: raw bytes of a (possibly compressed) tar archive.
        dest_dir: directory to extract into.

    WARNING: extractall() trusts member paths; do not call this on archives
    from untrusted sources without sanitizing member names first.
    """
    # io.BytesIO works on both Python 2 and 3, unlike cStringIO.StringIO.
    tar = tarfile.open(fileobj=io.BytesIO(bytestr), mode='r:*')
    try:
        tar.extractall(dest_dir)
    finally:
        # Always release the decompressor/file handles (the original leaked
        # the open TarFile).
        tar.close()
| suprzer0/python_utils | python_utils/tar.py | Python | mit | 989 |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Install, execute, and parse results from YCSB.
YCSB (the Yahoo! Cloud Serving Benchmark) is a common method of comparing NoSQL
database performance.
https://github.com/brianfrankcooper/YCSB
For PerfKitBenchmarker, we wrap YCSB to:
* Pre-load a database with a fixed number of records.
* Execute a collection of workloads under a staircase load.
* Parse the results into PerfKitBenchmarker samples.
The 'YCSBExecutor' class handles executing YCSB on a collection of client VMs.
Generally, clients just need this class. For example, to run against
HBase 1.0:
>>> executor = ycsb.YCSBExecutor('hbase-10')
>>> samples = executor.LoadAndRun(loader_vms)
By default, this runs YCSB workloads A and B against the database, 32 threads
per client VM, with an initial database size of 1GB (1k records).
Each workload runs for at most 30 minutes.
"""
import bisect
import collections
import copy
import csv
import io
import itertools
import math
import re
import logging
import operator
import os
import posixpath
from perfkitbenchmarker import data
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.packages import maven
from xml.etree import ElementTree
FLAGS = flags.FLAGS

# Pinned YCSB source snapshot, built from source on each client VM.
YCSB_TAR_URL = ('https://github.com/brianfrankcooper/YCSB/archive/'
                '5ab241210adbb2d10fd89e755c433dd99cddb5ba.tar.gz')
YCSB_BUILD_DIR = posixpath.join(vm_util.VM_TMP_DIR, 'ycsb-build')
YCSB_DIR = posixpath.join(vm_util.VM_TMP_DIR, 'ycsb')
YCSB_EXE = posixpath.join(YCSB_DIR, 'bin', 'ycsb')

# Percentiles reported for each latency histogram unless overridden.
_DEFAULT_PERCENTILES = 50, 75, 90, 95, 99, 99.9

# Binary operators to aggregate reported statistics.
# Statistics with operator 'None' will be dropped.
AGGREGATE_OPERATORS = {
    'Operations': operator.add,
    'RunTime(ms)': max,
    'Return=0': operator.add,
    'Return=-1': operator.add,
    'Return=-2': operator.add,
    'Return=-3': operator.add,
    'AverageLatency(ms)': None,  # Requires both average and # of ops.
    'Throughput(ops/sec)': operator.add,
    '95thPercentileLatency(ms)': None,  # Calculated across clients.
    '99thPercentileLatency(ms)': None,  # Calculated across clients.
    'MinLatency(ms)': min,
    'MaxLatency(ms)': max}


flags.DEFINE_boolean('ycsb_histogram', True, 'Include individual '
                     'histogram results from YCSB (will increase sample '
                     'count).')
flags.DEFINE_boolean('ycsb_load_samples', True, 'Include samples '
                     'from pre-populating database.')
flags.DEFINE_boolean('ycsb_include_individual_results', False,
                     'Include results from each client VM, rather than just '
                     'combined results.')
flags.DEFINE_integer('ycsb_client_vms', 1, 'Number of YCSB client VMs.',
                     lower_bound=1)
flags.DEFINE_list('ycsb_workload_files', [],
                  'Path to YCSB workload file to use during *run* '
                  'stage only. Comma-separated list')
flags.DEFINE_list('ycsb_load_parameters', [],
                  'Passed to YCSB during the load stage. Comma-separated list '
                  'of "key=value" pairs.')
# Bug fix: the help text previously said "load stage" (copy-paste from the
# flag above); this flag is consumed during the run stage.
flags.DEFINE_list('ycsb_run_parameters', [],
                  'Passed to YCSB during the run stage. Comma-separated list '
                  'of "key=value" pairs.')
flags.DEFINE_list('ycsb_threads_per_client', ['32'], 'Number of threads per '
                  'loader during the benchmark run. Specify a list to vary the '
                  'number of clients.')
flags.DEFINE_integer('ycsb_preload_threads', 1, 'Number of threads per '
                     'loader during the initial data population stage.')
flags.DEFINE_integer('ycsb_record_count', 1000000, 'Pre-load with a total '
                     'dataset of records total.')
flags.DEFINE_integer('ycsb_operation_count', 1000000, 'Number of operations '
                     '*per client VM*.')
flags.DEFINE_integer('ycsb_timelimit', 1800, 'Maximum amount of time to run '
                     'each workload / client count combination. Set to 0 for '
                     'unlimited time.')
def CreateProxyElement(proxy_type, proxy):
  """Build a Maven settings.xml <proxy> element.

  Args:
    proxy_type: str. 'http' or 'https'.
    proxy: str. Proxy address as 'host:port', optionally prefixed with an
        http:// or https:// scheme (which is stripped).

  Returns:
    xml.etree.ElementTree.Element describing the proxy.
  """
  stripped = re.sub(r'^https?:\/\/', '', proxy)
  host_addr, port_number = stripped.split(":")
  proxy_element = ElementTree.Element("proxy")
  for tag, text in (("active", "true"),
                    ("protocol", proxy_type),
                    ("host", host_addr),
                    ("port", port_number)):
    child = ElementTree.SubElement(proxy_element, tag)
    child.text = text
  return proxy_element
def _GetThreadsPerLoaderList():
  """Returns the list of client thread counts per VM for the staircase load."""
  return list(map(int, FLAGS.ycsb_threads_per_client))
def _GetWorkloadFileList():
  """Returns the list of workload files to run.

  Returns:
    In order of preference:
      * The argument to --ycsb_workload_files.
      * Bundled YCSB workloads A and B.
  """
  workload_files = FLAGS.ycsb_workload_files
  if workload_files:
    return workload_files
  bundled = ('workloada', 'workloadb')
  return [data.ResourcePath(os.path.join('ycsb', name)) for name in bundled]
def CheckPrerequisites():
  """Raises IOError if any requested workload file is missing on disk."""
  missing = [wf for wf in _GetWorkloadFileList() if not os.path.exists(wf)]
  if missing:
    raise IOError('Missing workload file: {0}'.format(missing[0]))
def _Install(vm):
  """Installs the YCSB package on the VM.

  Downloads the pinned YCSB source snapshot, configures Maven proxies from
  the --http_proxy/--https_proxy flags when set, builds YCSB with Maven,
  and unpacks the distribution tarball into YCSB_DIR.
  """
  vm.Install('maven')
  vm.Install('openjdk7')
  vm.Install('curl')
  vm.RemoteCommand(('mkdir -p {0} && curl -L {1} | '
                    'tar -C {0} --strip-components=1 -xzf -').format(
                        YCSB_BUILD_DIR, YCSB_TAR_URL))

  proxy_nodes = []
  if FLAGS.http_proxy:
    proxy_nodes.append(CreateProxyElement('http', FLAGS.http_proxy))
  if FLAGS.https_proxy:
    # Bug fix: the HTTPS proxy entry was previously built from the HTTP
    # proxy flag (FLAGS.http_proxy).
    proxy_nodes.append(CreateProxyElement('https', FLAGS.https_proxy))
  if proxy_nodes:
    # Write a minimal ~/.m2/settings.xml so Maven can reach the network.
    settings_file = ".m2/settings.xml"
    root = ElementTree.Element('settings')
    proxies = ElementTree.SubElement(root, 'proxies')
    proxies.extend(proxy_nodes)
    vm.RemoteCommand("mkdir -p $HOME/.m2")
    vm.RemoteCommand("touch $HOME/%s" % settings_file)
    vm.RemoteCommand("echo -e '%s' | sudo tee %s" % (
        ElementTree.tostring(root), settings_file))
  vm.RemoteCommand(('cd {0} && {1}/bin/mvn clean package '
                    '-DskipTests -Dcheckstyle.skip=true').format(
                        YCSB_BUILD_DIR, maven.MVN_DIR))
  tar = posixpath.join(
      YCSB_BUILD_DIR, 'distribution', 'target', 'ycsb-*.tar.gz')
  vm.RemoteCommand(('mkdir -p {0} && tar --strip-components 1 -C {0} '
                    '-xf {1}').format(YCSB_DIR, tar))
def YumInstall(vm):
  """Installs the YCSB package on the VM (yum/RHEL-family entry point)."""
  _Install(vm)
def AptInstall(vm):
  """Installs the YCSB package on the VM (apt/Debian-family entry point)."""
  _Install(vm)
def ParseResults(ycsb_result_string, data_type='histogram'):
  """Parse YCSB results.

  Example input:

    YCSB Client 0.1
    Command line: -db com.yahoo.ycsb.db.HBaseClient -P /tmp/pkb/workloada
    [OVERALL], RunTime(ms), 1800413.0
    [OVERALL], Throughput(ops/sec), 2740.503428935472
    [UPDATE], Operations, 2468054
    [UPDATE], AverageLatency(us), 2218.8513395574005
    [UPDATE], MinLatency(us), 554
    [UPDATE], MaxLatency(us), 352634
    [UPDATE], 95thPercentileLatency(ms), 4
    [UPDATE], 99thPercentileLatency(ms), 7
    [UPDATE], Return=0, 2468054
    [UPDATE], 0, 398998
    [UPDATE], 1, 1015682
    [UPDATE], 2, 532078
    ...

  Args:
    ycsb_result_string: str. Text output from YCSB.
    data_type: Either 'histogram' or 'timeseries'.

  Returns:
    A dictionary with keys:
      client: containing YCSB version information.
      command_line: Command line executed.
      groups: list of operation group descriptions, each with schema:
        group: group name (e.g., update, insert, overall)
        statistics: dict mapping from statistic name to value
        histogram: list of (ms_lower_bound, count) tuples, e.g.:
          [(0, 530), (19, 1)]
        indicates that 530 ops took between 0ms and 1ms, and 1 took between
        19ms and 20ms. Empty bins are not reported.
  """
  # The first two lines are a fixed header; validate them strictly so that
  # unexpected output formats fail loudly.
  fp = io.BytesIO(ycsb_result_string)
  client_string = next(fp).strip()
  if not client_string.startswith('YCSB Client 0.'):
    raise IOError('Unexpected header: {0}'.format(client_string))
  command_line = next(fp).strip()
  if not command_line.startswith('Command line:'):
    raise IOError('Unexpected second line: {0}'.format(command_line))

  # Some databases print additional output to stdout.
  # YCSB results start with [<OPERATION_NAME>];
  # filter to just those lines.
  def LineFilter(line):
    return re.search(r'^\[[A-Z]+\]', line) is not None

  lines = itertools.ifilter(LineFilter, fp)

  r = csv.reader(lines)
  # Consecutive CSV rows for the same '[OPERATION]' tag form one group.
  by_operation = itertools.groupby(r, operator.itemgetter(0))
  result = collections.OrderedDict([
      ('client', client_string),
      ('command_line', command_line),
      ('groups', collections.OrderedDict())])
  for operation, lines in by_operation:
    # Strip the surrounding brackets: '[UPDATE]' -> 'update'.
    operation = operation[1:-1].lower()
    op_result = {
        'group': operation,
        data_type: [],
        'statistics': {}
    }
    for _, name, val in lines:
      name = name.strip()
      val = val.strip()
      # Drop ">" from ">1000"
      if name.startswith('>'):
        name = name[1:]
      val = float(val) if '.' in val else int(val)
      if name.isdigit():
        # Purely numeric names are histogram/timeseries bins; keep only
        # non-empty bins.
        if val:
          op_result[data_type].append((int(name), val))
      else:
        # Normalize microsecond statistics to milliseconds.
        if '(us)' in name:
          name = name.replace('(us)', '(ms)')
          val /= 1000.0
        op_result['statistics'][name] = val

    result['groups'][operation] = op_result
  return result
def _CumulativeSum(xs):
total = 0
for x in xs:
total += x
yield total
def _WeightedQuantile(x, weights, p):
  """Weighted quantile measurement for an ordered list.

  This method interpolates to the higher value when the quantile is not a
  direct member of the list. This works well for YCSB, since latencies are
  floored.

  Args:
    x: List of values.
    weights: List of numeric weights.
    p: float. Desired quantile in the interval [0, 1].

  Returns:
    float.

  Raises:
    ValueError: When 'x' and 'weights' are not the same length, or 'p' is not
      in the interval [0, 1].
  """
  if len(x) != len(weights):
    raise ValueError('Lengths do not match: {0} != {1}'.format(
        len(x), len(weights)))
  if p < 0 or p > 1:
    raise ValueError('Invalid quantile: {0}'.format(p))

  total_weight = sum(weights)
  target = total_weight * float(p)

  # Running totals of the weights (the _CumulativeSum helper is inlined so
  # this function stands on its own).
  cumulative = []
  running = 0
  for weight in weights:
    running += weight
    cumulative.append(running)

  # Index of the first cumulative weight >= target; clamp to the last value.
  idx = bisect.bisect_left(cumulative, target)
  if idx == len(x):
    return x[-1]
  return x[idx]
def _PercentilesFromHistogram(ycsb_histogram, percentiles=_DEFAULT_PERCENTILES):
  """Calculate percentiles for from a YCSB histogram.

  Args:
    ycsb_histogram: List of (time_ms, frequency) tuples.
    percentiles: iterable of floats, in the interval [0, 100].

  Returns:
    dict, mapping from percentile to value.
  """
  result = collections.OrderedDict()
  ordered_bins = sorted(ycsb_histogram)
  for pct in percentiles:
    if pct < 0 or pct > 100:
      raise ValueError('Invalid percentile: {0}'.format(pct))
    # Render integral percentiles without a trailing '.0' in the label.
    if math.modf(pct)[0] < 1e-7:
      pct = int(pct)
    bin_values, bin_counts = zip(*ordered_bins)
    result['p{0}'.format(pct)] = _WeightedQuantile(
        bin_values, bin_counts, pct * 0.01)
  return result
def _CombineResults(result_list, combine_histograms=True):
  """Combine results from multiple YCSB clients.

  Reduces a list of YCSB results (the output of ParseResults)
  into a single result. Histogram bin counts, operation counts, and throughput
  are summed; RunTime is replaced by the maximum runtime of any result.

  Args:
    result_list: List of ParseResults outputs.
    combine_histograms: If true, histogram bins are summed across results. If
      not, no histogram will be returned. Defaults to True.

  Returns:
    A dictionary, as returned by ParseResults.
  """
  def DropUnaggregated(result):
    """Remove statistics which 'operators' specify should not be combined."""
    drop_keys = {k for k, v in AGGREGATE_OPERATORS.iteritems() if v is None}
    for group in result['groups'].itervalues():
      for k in drop_keys:
        group['statistics'].pop(k, None)

  def CombineHistograms(hist1, hist2):
    # Bin-wise sum of two sparse histograms, keyed by bin lower bound.
    h1 = dict(hist1)
    h2 = dict(hist2)
    keys = sorted(frozenset(h1) | frozenset(h2))
    result = []
    for k in keys:
      result.append((k, h1.get(k, 0) + h2.get(k, 0)))
    return result

  # Start from a deep copy of the first result and fold the rest into it.
  result = copy.deepcopy(result_list[0])
  DropUnaggregated(result)

  for indiv in result_list[1:]:
    for group_name, group in indiv['groups'].iteritems():
      if group_name not in result['groups']:
        logging.warn('Found result group "%s" in individual YCSB result, '
                     'but not in accumulator.', group_name)
        result['groups'][group_name] = copy.deepcopy(group)
        continue

      # Combine reported statistics.
      # If no combining operator is defined, the statistic is skipped.
      # Otherwise, the aggregated value is either:
      # * The value in 'indiv', if the statistic is not present in 'result' or
      # * AGGREGATE_OPERATORS[statistic](result_value, indiv_value)
      for k, v in group['statistics'].iteritems():
        if k not in AGGREGATE_OPERATORS:
          logging.warn('No operator for "%s". Skipping aggregation.', k)
          continue
        elif AGGREGATE_OPERATORS[k] is None:  # Drop
          result['groups'][group_name]['statistics'].pop(k, None)
          continue
        elif k not in result['groups'][group_name]['statistics']:
          logging.warn('Found statistic "%s.%s" in individual YCSB result, '
                       'but not in accumulator.', group_name, k)
          result['groups'][group_name]['statistics'][k] = copy.deepcopy(v)
          continue

        op = AGGREGATE_OPERATORS[k]
        result['groups'][group_name]['statistics'][k] = (
            op(result['groups'][group_name]['statistics'][k], v))

      if combine_histograms:
        result['groups'][group_name]['histogram'] = CombineHistograms(
            result['groups'][group_name]['histogram'],
            group['histogram'])
      else:
        result['groups'][group_name].pop('histogram', None)
    # Concatenate client/command-line metadata and sum any target rates.
    result['client'] = ' '.join((result['client'], indiv['client']))
    result['command_line'] = ';'.join((result['command_line'],
                                       indiv['command_line']))
    if 'target' in result and 'target' in indiv:
      result['target'] += indiv['target']

  return result
def _ParseWorkload(contents):
  """Parse a YCSB workload file.

  YCSB workloads are Java .properties format.
  http://en.wikipedia.org/wiki/.properties

  This function does not support all .properties syntax, in particular escaped
  newlines.

  Args:
    contents: str. Contents of the file.

  Returns:
    dict mapping from property key to property value for each property found in
    'contents'.
  """
  result = {}
  for line in io.BytesIO(contents):
    stripped = line.lstrip()
    # Skip blank lines and both .properties comment markers ('#' and '!').
    if not line.strip() or stripped.startswith('#') or stripped.startswith('!'):
      continue
    key, value = re.split(r'\s*[:=]\s*', line, maxsplit=1)
    result[key] = value.strip()
  return result
def _CreateSamples(ycsb_result, include_histogram=True, **kwargs):
  """Create PKB samples from a YCSB result.

  Args:
    ycsb_result: dict. Result of ParseResults.
    include_histogram: bool. If True, include records for each histogram bin.
    **kwargs: Base metadata for each sample.

  Yields:
    sample.Sample objects (this is a generator).
  """
  # The load phase is identified by the '-load' suffix on the command line.
  stage = 'load' if ycsb_result['command_line'].endswith('-load') else 'run'
  base_metadata = {'command_line': ycsb_result['command_line'],
                   'stage': stage}
  base_metadata.update(kwargs)

  for group_name, group in ycsb_result['groups'].iteritems():
    meta = base_metadata.copy()
    meta['operation'] = group_name
    for statistic, value in group['statistics'].iteritems():
      if value is None:
        continue

      unit = ''
      # Split a trailing unit out of the statistic name, e.g.
      # 'AverageLatency(ms)' -> ('AverageLatency', 'ms').
      m = re.match(r'^(.*) *\((us|ms|ops/sec)\)$', statistic)
      if m:
        statistic = m.group(1)
        unit = m.group(2)
      yield sample.Sample(' '.join([group_name, statistic]), value, unit, meta)

    if group['histogram']:
      # Derive percentile samples from the latency histogram.
      percentiles = _PercentilesFromHistogram(group['histogram'])
      for label, value in percentiles.iteritems():
        yield sample.Sample(' '.join([group_name, label, 'latency']),
                            value, 'ms', meta)
      if include_histogram:
        # Optionally emit one sample per raw histogram bin.
        for time_ms, count in group['histogram']:
          yield sample.Sample(
              '{0}_latency_histogram_{1}_ms'.format(group_name, time_ms),
              count, 'count', meta)
class YCSBExecutor(object):
  """Load data and run benchmarks using YCSB.
  See core/src/main/java/com/yahoo/ycsb/workloads/CoreWorkload.java for
  attribute descriptions.
  Attributes:
    database: str.
    parameters: dict. May contain the following, plus database-specific fields
      (e.g., columnfamily for HBase).
      threads: int.
      target: int.
      fieldcount: int.
      fieldlengthdistribution: str.
      readallfields: boolean.
      writeallfields: boolean.
      readproportion: float.
      updateproportion: float.
      scanproportion: float.
      readmodifywriteproportion: float.
      requestdistribution: str.
      maxscanlength: int. Number of records to scan.
      scanlengthdistribution: str.
      insertorder: str.
      hotspotdatafraction: float.
  """
  # These parameters are passed to the YCSB binary as CLI flags
  # (e.g. "-target N") instead of "-p key=value" properties.
  FLAG_ATTRIBUTES = 'target', 'threads'
  def __init__(self, database, parameter_files=None, **kwargs):
    self.database = database
    self.parameter_files = parameter_files or []
    # All remaining keyword arguments become YCSB properties.
    self.parameters = kwargs.copy()
  def _BuildCommand(self, command_name, parameter_files=None, **kwargs):
    # Build a single shell command string: binary, subcommand, database,
    # then flags, -P parameter files, and -p properties.
    command = [YCSB_EXE, command_name, self.database]
    parameters = self.parameters.copy()
    parameters.update(kwargs)
    # These are passed as flags rather than properties, so they
    # are handled differently.
    for flag in self.FLAG_ATTRIBUTES:
      value = parameters.pop(flag, None)
      if value is not None:
        command.extend(('-{0}'.format(flag), str(value)))
    for param_file in list(self.parameter_files) + list(parameter_files or []):
      command.extend(('-P', param_file))
    for parameter, value in parameters.iteritems():
      command.extend(('-p', '{0}={1}'.format(parameter, value)))
    return ' '.join(command)
  def _Load(self, vm, **kwargs):
    """Execute 'ycsb load' on 'vm'."""
    kwargs.setdefault('threads', FLAGS.ycsb_preload_threads)
    kwargs.setdefault('recordcount', FLAGS.ycsb_record_count)
    # Flag-supplied "key=value" overrides take precedence over defaults.
    for pv in FLAGS.ycsb_load_parameters:
      param, value = pv.split('=', 1)
      kwargs[param] = value
    command = self._BuildCommand('load', **kwargs)
    stdout, _ = vm.RobustRemoteCommand(command)
    return ParseResults(str(stdout))
  def _LoadThreaded(self, vms, workload_file, **kwargs):
    """Runs "Load" in parallel for each VM in VMs.
    Args:
      vms: List of virtual machine instances. client nodes.
      workload_file: YCSB Workload file to use.
      **kwargs: Additional key-value parameters to pass to YCSB.
    Returns:
      List of sample.Sample objects.
    """
    results = []
    remote_path = posixpath.join(vm_util.VM_TMP_DIR,
                                 os.path.basename(workload_file))
    kwargs.setdefault('threads', FLAGS.ycsb_preload_threads)
    kwargs.setdefault('recordcount', FLAGS.ycsb_record_count)
    with open(workload_file) as fp:
      workload_meta = _ParseWorkload(fp.read())
      workload_meta.update(kwargs)
      workload_meta.update(stage='load',
                           clients=len(vms) * FLAGS.ycsb_preload_threads,
                           threads_per_client_vm=FLAGS.ycsb_preload_threads,
                           workload_name=os.path.basename(workload_file))
      # Split the record range across VMs; the first (record_count % n_vms)
      # loaders take one extra record so the total is exact.
      record_count = int(workload_meta.get('recordcount', '1000'))
      n_per_client = long(record_count) // len(vms)
      loader_counts = [n_per_client +
                       (1 if i < (record_count % len(vms)) else 0)
                       for i in xrange(len(vms))]
    def PushWorkload(vm):
      vm.PushFile(workload_file, remote_path)
    vm_util.RunThreaded(PushWorkload, vms)
    kwargs['parameter_files'] = [remote_path]
    def _Load(loader_index):
      # Each loader inserts a disjoint, contiguous slice of the key space.
      start = sum(loader_counts[:loader_index])
      kw = kwargs.copy()
      kw.update(insertstart=start,
                insertcount=loader_counts[loader_index])
      results.append(self._Load(vms[loader_index], **kw))
      logging.info('VM %d (%s) finished', loader_index, vms[loader_index])
    vm_util.RunThreaded(_Load, range(len(vms)))
    if len(results) != len(vms):
      raise IOError('Missing results: only {0}/{1} reported\n{2}'.format(
          len(results), len(vms), results))
    samples = []
    if FLAGS.ycsb_include_individual_results and len(results) > 1:
      for i, result in enumerate(results):
        samples.extend(_CreateSamples(
            result, result_type='individual', result_index=i,
            include_histogram=FLAGS.ycsb_histogram,
            **workload_meta))
    combined = _CombineResults(results)
    samples.extend(_CreateSamples(
        combined, result_type='combined',
        include_histogram=FLAGS.ycsb_histogram,
        **workload_meta))
    return samples
  def _Run(self, vm, **kwargs):
    """Run a single workload from a client vm."""
    # Flag-supplied overrides, mirroring _Load.
    for pv in FLAGS.ycsb_run_parameters:
      param, value = pv.split('=', 1)
      kwargs[param] = value
    command = self._BuildCommand('run', **kwargs)
    stdout, _ = vm.RobustRemoteCommand(command)
    return ParseResults(str(stdout))
  def _RunThreaded(self, vms, **kwargs):
    """Run a single workload using `vms`."""
    target = kwargs.pop('target', None)
    if target is not None:
      # Divide the aggregate ops/sec target across client VMs, distributing
      # the remainder one-per-VM from the front of the list.
      target_per_client = target // len(vms)
      targets = [target_per_client +
                 (1 if i < (target % len(vms)) else 0)
                 for i in xrange(len(vms))]
    else:
      targets = [target for _ in vms]
    results = []
    def _Run(loader_index):
      vm = vms[loader_index]
      kwargs['target'] = targets[loader_index]
      results.append(self._Run(vm, **kwargs))
      logging.info('VM %d (%s) finished', loader_index, vm)
    vm_util.RunThreaded(_Run, range(len(vms)))
    if len(results) != len(vms):
      raise IOError('Missing results: only {0}/{1} reported\n{2}'.format(
          len(results), len(vms), results))
    return results
  def RunStaircaseLoads(self, vms, workloads, **kwargs):
    """Run each workload in 'workloads' in succession.
    A staircase load is applied for each workload file, for each entry in
    ycsb_threads_per_client.
    Args:
      vms: List of VirtualMachine objects to generate load from.
      **kwargs: Additional parameters to pass to each run. See constructor for
        options.
    Returns:
      List of sample.Sample objects.
    """
    all_results = []
    for workload_index, workload_file in enumerate(workloads):
      parameters = {'operationcount': FLAGS.ycsb_operation_count,
                    'recordcount': FLAGS.ycsb_record_count}
      if FLAGS.ycsb_timelimit:
        parameters['maxexecutiontime'] = FLAGS.ycsb_timelimit
      parameters.update(kwargs)
      remote_path = posixpath.join(vm_util.VM_TMP_DIR,
                                   os.path.basename(workload_file))
      with open(workload_file) as fp:
        workload_meta = _ParseWorkload(fp.read())
        workload_meta.update(kwargs)
        workload_meta.update(workload_name=os.path.basename(workload_file),
                             workload_index=workload_index,
                             stage='run')
      def PushWorkload(vm):
        vm.PushFile(workload_file, remote_path)
      vm_util.RunThreaded(PushWorkload, vms)
      parameters['parameter_files'] = [remote_path]
      # One run per thread count: this is the "staircase" of client loads.
      for client_count in _GetThreadsPerLoaderList():
        parameters['threads'] = client_count
        results = self._RunThreaded(vms, **parameters)
        client_meta = workload_meta.copy()
        client_meta.update(clients=len(vms) * client_count,
                           threads_per_client_vm=client_count)
        if FLAGS.ycsb_include_individual_results and len(results) > 1:
          for i, result in enumerate(results):
            all_results.extend(_CreateSamples(
                result,
                result_type='individual',
                result_index=i,
                include_histogram=FLAGS.ycsb_histogram,
                **client_meta))
        combined = _CombineResults(results)
        all_results.extend(_CreateSamples(
            combined, result_type='combined',
            include_histogram=FLAGS.ycsb_histogram,
            **client_meta))
    return all_results
  def LoadAndRun(self, vms, workloads=None, load_kwargs=None, run_kwargs=None):
    """Load data using YCSB, then run each workload/client count combination.
    Loads data using the workload defined by 'workloads', then
    executes YCSB for each workload file in 'workloads', for each
    client count defined in FLAGS.ycsb_threads_per_client.
    Generally database benchmarks using YCSB should only need to call this
    method.
    Args:
      vms: List of virtual machines. VMs to use to generate load.
      workloads: List of strings. Workload files to use. If unspecified,
        _GetWorkloadFileList() is used.
      load_kwargs: dict. Additional arguments to pass to the load stage.
      run_kwargs: dict. Additional arguments to pass to the run stage.
    Returns:
      List of sample.Sample objects.
    """
    workloads = workloads or _GetWorkloadFileList()
    assert workloads, 'no workloads'
    # Note: only the first workload file is used for the load stage.
    load_samples = list(self._LoadThreaded(vms, workloads[0],
                                           **(load_kwargs or {})))
    run_samples = list(self.RunStaircaseLoads(vms, workloads,
                                              **(run_kwargs or {})))
    if FLAGS.ycsb_load_samples:
      return load_samples + run_samples
    else:
      return run_samples
| askdaddy/PerfKitBenchmarker | perfkitbenchmarker/packages/ycsb.py | Python | apache-2.0 | 27,030 |
from nose.tools import eq_, ok_
from whoosh.index import open_dir, exists_in
from whoosh.query import Every
import unittest
import sys
import os
import shutil
import uuid
sys.path.append(os.path.dirname(__file__) + '/../')
from config import Config
from helper import separate_files
from index_manager import IndexManager
from search_manager import Search
test_pdf_name = '../electron/pdfjs/web/compressed.tracemonkey-pldi-09.pdf'
class TestSearch(unittest.TestCase):
    """End-to-end tests for index creation, search and deletion.
    NOTE(review): these tests share one on-disk Whoosh index and depend on
    unittest's alphabetical execution order (add -> confirm -> search ->
    delete). They are not independent; renaming methods can break the suite.
    """
    # NOTE(review): lowercase setup()/teardown() are nose-style names; on a
    # unittest.TestCase subclass the standard runner only calls
    # setUp()/tearDown(), so these may never execute — confirm against the
    # project's actual test runner.
    def setup(self):
        if exists_in(Config.database_dir):
            raise IOError(Config.database_dir + ': data exists. Abort.')
    def test_add_pdf(self):
        # Index the sample PDF under a fresh group id.
        im = IndexManager()
        gid = str(uuid.uuid4())
        im.add_pdf_file(test_pdf_name, gid=gid)
        im.writer.commit()
        im.ix.close()
    def test_add_text(self):
        # Create a one-page text file (plus an empty companion PDF) and
        # index the page.
        test_text = 'abc def'
        test_file = 'test_p1.txt'
        test_file_path = os.path.join(Config.txt_dir, test_file)
        test_pdf_file = 'test.pdf'
        with open(os.path.join(Config.pdf_dir, test_pdf_file), 'w'):
            pass
        with open(test_file_path, 'w') as f:
            f.write(test_text)
        im = IndexManager()
        gid = str(uuid.uuid4())
        im.add_text_page_file(test_file_path, gid=gid)
        im.writer.commit()
        im.ix.close()
    def test_confirm_index(self):
        # Both documents added above should now be in the index.
        ix = open_dir(Config.database_dir)
        query = Every()
        with ix.searcher() as searcher:
            results = searcher.search(query)
            eq_(len(results), 2)  # expect number of records
        ix.close()
    def test_search(self):
        # The text page added in test_add_text should match 'abc'.
        search = Search()
        qstr = 'abc'
        res = search.search(query_str=qstr, sort_field='title')
        eq_(res['rows'][0]['title'], 'test')
    def test_separate_files(self):
        # One PDF with two page-text files should form a single group.
        files = ['test.pdf', 'test_p1.txt', 'test_p2.txt']
        groups = separate_files(files)
        eq_(len(groups), 1)
        eq_(groups[0]['pdf_file'], 'test.pdf')
        eq_(len(groups[0]['text_files']), 2)
    def test_delete_document(self):
        # Delete the first indexed document by its group id.
        im = IndexManager()
        with im.ix.searcher() as searcher:
            results = searcher.search(Every())
            r = results[0]
            gid = r['gid']
        im.delete_document(gid)
    def teardown(self):
        print('Delete test data dir')
        shutil.rmtree(Config.data_dir)
| mknz/mirusan | search/tests/test_search.py | Python | gpl-3.0 | 2,362 |
"""
This plugin gives the power of Selenium to nosetests
by providing a WebDriver object for the tests to use.
"""
from nose.plugins import Plugin
from pyvirtualdisplay import Display
from seleniumbase.core import proxy_helper
from seleniumbase.fixtures import constants
class SeleniumBrowser(Plugin):
    """
    The plugin for Selenium tests. Takes in key arguments and then
    creates a WebDriver object. All arguments are passed to the tests.
    The following command line options are available to the tests:
    self.options.browser -- the browser to use (--browser)
    self.options.server -- the server used by the test (--server)
    self.options.port -- the port used by the test (--port)
    self.options.proxy -- designates the proxy server:port to use. (--proxy)
    self.options.headless -- the option to run headlessly (--headless)
    self.options.demo_mode -- the option to slow down Selenium (--demo_mode)
    self.options.demo_sleep -- Selenium action delay in DemoMode (--demo_sleep)
    self.options.highlights -- # of highlight animations shown (--highlights)
    self.options.message_duration -- Messenger alert time (--message_duration)
    self.options.js_checking_on -- option to check for js errors (--check_js)
    self.options.ad_block -- the option to block some display ads (--ad_block)
    self.options.verify_delay -- delay before MasterQA checks (--verify_delay)
    self.options.timeout_multiplier -- increase defaults (--timeout_multiplier)
    """
    name = 'selenium' # Usage: --with-selenium
    def options(self, parser, env):
        """Register all command-line options with nose's option parser."""
        super(SeleniumBrowser, self).options(parser, env=env)
        parser.add_option(
            '--browser', action='store',
            dest='browser',
            choices=constants.ValidBrowsers.valid_browsers,
            default=constants.Browser.GOOGLE_CHROME,
            help="""Specifies the web browser to use. Default: Chrome.
                    If you want to use Firefox, explicitly indicate that.
                    Example: (--browser=firefox)""")
        parser.add_option(
            '--browser_version', action='store',
            dest='browser_version',
            default="latest",
            help="""The browser version to use. Explicitly select
                    a version number or use "latest".""")
        parser.add_option(
            '--cap_file', action='store',
            dest='cap_file',
            default=None,
            help="""The file that stores browser desired capabilities
                    for BrowserStack or Sauce Labs web drivers.""")
        parser.add_option(
            '--server', action='store', dest='servername',
            default='localhost',
            help="""Designates the Selenium Grid server to use.
                    Default: localhost.""")
        parser.add_option(
            '--port', action='store', dest='port',
            default='4444',
            help="""Designates the Selenium Grid port to use.
                    Default: 4444.""")
        parser.add_option(
            '--proxy', action='store',
            dest='proxy_string',
            default=None,
            help="""Designates the proxy server:port to use.
                    Format: servername:port.  OR
                            username:password@servername:port  OR
                            A dict key from proxy_list.PROXY_LIST
                    Default: None.""")
        parser.add_option(
            '--headless', action="store_true",
            dest='headless',
            default=False,
            help="""Using this makes Webdriver run headlessly,
                    which is required on headless machines.""")
        parser.add_option(
            '--demo_mode', action="store_true",
            dest='demo_mode',
            default=False,
            help="""Using this slows down the automation so that
                    you can see what it's actually doing.""")
        parser.add_option(
            '--demo_sleep', action='store', dest='demo_sleep',
            default=None,
            help="""Setting this overrides the Demo Mode sleep
                    time that happens after browser actions.""")
        parser.add_option(
            '--highlights', action='store',
            dest='highlights', default=None,
            help="""Setting this overrides the default number of
                    highlight animation loops to have per call.""")
        parser.add_option(
            '--message_duration', action="store",
            dest='message_duration', default=None,
            help="""Setting this overrides the default time that
                    messenger notifications remain visible when reaching
                    assert statements during Demo Mode.""")
        parser.add_option(
            '--check_js', action="store_true",
            dest='js_checking_on',
            default=False,
            help="""The option to check for JavaScript errors after
                    every page load.""")
        parser.add_option(
            '--ad_block', action="store_true",
            dest='ad_block_on',
            default=False,
            help="""Using this makes WebDriver block display ads
                    that are defined in ad_block_list.AD_BLOCK_LIST.""")
        parser.add_option(
            '--verify_delay', action='store',
            dest='verify_delay', default=None,
            help="""Setting this overrides the default wait time
                    before each MasterQA verification pop-up.""")
        parser.add_option(
            '--timeout_multiplier', action='store',
            dest='timeout_multiplier',
            default=None,
            help="""Setting this overrides the default timeout
                    by the multiplier when waiting for page elements.
                    Unused when tests overide the default value.""")
    def configure(self, options, conf):
        """Store parsed options and reset any leftover proxy state."""
        super(SeleniumBrowser, self).configure(options, conf)
        self.enabled = True  # Used if test class inherits BaseCase
        self.options = options
        self.headless_active = False  # Default setting
        proxy_helper.remove_proxy_zip_if_present()
    def beforeTest(self, test):
        """Copy each option onto the test instance before it runs."""
        test.test.browser = self.options.browser
        test.test.cap_file = self.options.cap_file
        test.test.headless = self.options.headless
        test.test.servername = self.options.servername
        test.test.port = self.options.port
        test.test.proxy_string = self.options.proxy_string
        test.test.demo_mode = self.options.demo_mode
        test.test.demo_sleep = self.options.demo_sleep
        test.test.highlights = self.options.highlights
        test.test.message_duration = self.options.message_duration
        test.test.js_checking_on = self.options.js_checking_on
        test.test.ad_block_on = self.options.ad_block_on
        test.test.verify_delay = self.options.verify_delay  # MasterQA
        test.test.timeout_multiplier = self.options.timeout_multiplier
        test.test.use_grid = False
        if test.test.servername != "localhost":
            # Use Selenium Grid (Use --server=127.0.0.1 for localhost Grid)
            test.test.use_grid = True
        if self.options.headless:
            # Start a virtual display so the browser has a screen to use.
            self.display = Display(visible=0, size=(1920, 1200))
            self.display.start()
            self.headless_active = True
        # The driver will be received later
        self.driver = None
        test.test.driver = self.driver
    def finalize(self, result):
        """ This runs after all tests have completed with nosetests. """
        proxy_helper.remove_proxy_zip_if_present()
    def afterTest(self, test):
        """Clean up the browser window and virtual display after each test."""
        try:
            # If the browser window is still open, close it now.
            self.driver.quit()
        except AttributeError:
            pass
        except Exception:
            pass
        if self.options.headless:
            if self.headless_active:
                self.display.stop()
| mdmintz/seleniumspot | seleniumbase/plugins/selenium_plugin.py | Python | mit | 7,978 |
# Step 04 - generate Java literals.
#
# Java byte-code has severe restrictions. There is no such thing as
# "array literal" - those are implemented as series of data[x] = y;
# as a consequence N-byte array will use 7N bytes in class, plus N bytes
# in instantiated variable. Also no literal could be longer than 64KiB.
#
# To keep dictionary data compact both in source code and in compiled format
# we use the following tricks:
# * use String as a data container
# * store only lowest 7 bits; i.e. all characters fit ASCII table; this allows
# efficient conversion to byte array; also ASCII characters use only 1 byte
#. of memory (UTF-8 encoding)
# * RLE-compress sequence of 8-th bits
#
# This script generates literals used in Java code.
# Py2/Py3 compatibility: make sure the name `unichr` exists. On Python 3
# there is no `unichr` builtin, so referencing it raises NameError and we
# alias it to `chr` (which already returns unicode characters there).
try:
    unichr # Python 2
except NameError:
    unichr = chr # Python 3
# Read the raw dictionary bytes produced by the previous step.
bin_path = "dictionary.bin"
with open(bin_path, "rb") as raw:
    data = raw.read()

low = []  # low 7 bits of each byte, stored as ASCII characters
hi = []   # run-length encoding of the 8th-bit sequence (see file header)
is_skip = True
skip_flip_offset = 36
cntr = skip_flip_offset
for b in data:
    # Iterating over `bytes` yields ints on Python 3 but 1-char strings on
    # Python 2. The original `ord(b)` raised TypeError on Python 3 (despite
    # the `unichr = chr` shim above); normalize to an int byte value first.
    value = b if isinstance(b, int) else ord(b)
    low.append(chr(value & 0x7F))
    if is_skip:
        # Counting a run of bytes whose high bit is clear ("skip" run).
        if value < 0x80:
            cntr += 1
        else:
            is_skip = False
            hi.append(unichr(cntr))
            cntr = skip_flip_offset + 1
    else:
        # Counting a run of bytes whose high bit is set ("flip" run).
        if value >= 0x80:
            cntr += 1
        else:
            is_skip = True
            hi.append(unichr(cntr))
            cntr = skip_flip_offset + 1
hi.append(unichr(cntr))
# Split the low-bits payload in half so each emitted Java string literal
# stays under the 64KiB class-file limit mentioned in the file header.
low0 = low[0:len(low) // 2]
low1 = low[len(low) // 2:len(low)]
def escape(chars):
    """Convert characters to their Java string-literal escape forms.

    Characters with dedicated escapes (\\r, \\n, \\t, quote, backslash)
    use those; printable ASCII passes through unchanged; everything else
    becomes a \\uXXXX escape. Returns a list of escaped fragments.
    """
    special = {
        "\r": "\\r",
        "\n": "\\n",
        "\t": "\\t",
        "\"": "\\\"",
        "\\": "\\\\",
    }
    escaped = []
    for ch in chars:
        if ch in special:
            escaped.append(special[ch])
        elif 32 <= ord(ch) < 127:
            # Printable ASCII needs no escaping.
            escaped.append(ch)
        else:
            escaped.append("\\u%04X" % ord(ch))
    return escaped
# Assemble the Java field declarations and write them to the include file
# that the Java build splices into DictionaryData.
source_code = [
    " private static final String DATA0 = \"", "".join(escape(low0)), "\";\n",
    " private static final String DATA1 = \"", "".join(escape(low1)), "\";\n",
    " private static final String SKIP_FLIP = \"", "".join(escape(hi)), "\";\n"
]
src_path = "DictionaryData.inc.java"
with open(src_path, "w") as source:
    source.write("".join(source_code))
| google/brotli | scripts/dictionary/step-04-generate-java-literals.py | Python | mit | 2,214 |
class CreateMapping(object):
    """Request holder for a MoneySend card-mapping creation call."""

    def __init__(self):
        # Both fields start empty; the caller populates them before use.
        self.request_id = ""
        self.mapping = ""
# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""""Test the pre_configure entry point."""
from random import random
import pytest
from cylc.flow.exceptions import PluginError
from cylc.flow.parsec.exceptions import ParsecError
from cylc.flow.parsec.fileparse import process_plugins
class EntryPointWrapper:
    """Wraps a method to make it look like an entry point."""

    def __init__(self, fcn):
        # Mimic the entry-point interface used by the code under test:
        # a ``name`` attribute plus a ``resolve()`` method.
        self.fcn = fcn
        self.name = fcn.__name__

    def resolve(self):
        """Return the wrapped callable."""
        return self.fcn
@EntryPointWrapper
def pre_configure_basic(*_, **__):
    """Simple plugin that returns one env var and one template var."""
    result = {'env': {}, 'template_variables': {}}
    result['env']['ANSWER'] = '42'
    result['template_variables']['QUESTION'] = (
        'What do you get if you multiply 7 by 6?')
    return result
@EntryPointWrapper
def pre_configure_templating_detected(*_, **__):
    """Plugin that detects a random templating engine."""
    # A random value guarantees two invocations report different engines,
    # which lets the duplicate-plugin test below provoke a clash.
    return {
        'templating_detected': str(random())
    }
@EntryPointWrapper
def pre_configure_error(*_, **__):
    """Plugin that raises an exception (to test error wrapping)."""
    raise Exception('foo')
def test_pre_configure(monkeypatch):
    """It should call the plugin."""
    # Swap the real entry-point discovery for our single fake plugin.
    monkeypatch.setattr(
        'cylc.flow.parsec.fileparse.iter_entry_points',
        lambda x: [pre_configure_basic]
    )
    extra_vars = process_plugins(None)
    expected = {
        'env': {'ANSWER': '42'},
        'template_variables': {
            'QUESTION': 'What do you get if you multiply 7 by 6?'
        },
        'templating_detected': None,
    }
    assert extra_vars == expected
def test_pre_configure_duplicate(monkeypatch):
    """It should error when plugins clash."""
    # Register the same plugin twice so its results collide.
    monkeypatch.setattr(
        'cylc.flow.parsec.fileparse.iter_entry_points',
        lambda x: [
            pre_configure_basic,
            pre_configure_basic
        ]
    )
    with pytest.raises(ParsecError):
        process_plugins(None)
def test_pre_configure_templating_detected(monkeypatch):
    """It should error when plugins clash (for templating)."""
    # Each invocation "detects" a different random engine, so two copies
    # of the plugin are guaranteed to disagree.
    monkeypatch.setattr(
        'cylc.flow.parsec.fileparse.iter_entry_points',
        lambda x: [
            pre_configure_templating_detected,
            pre_configure_templating_detected
        ]
    )
    with pytest.raises(ParsecError):
        process_plugins(None)
def test_pre_configure_exception(monkeypatch):
    """It should wrap plugin errors in PluginError."""
    monkeypatch.setattr(
        'cylc.flow.parsec.fileparse.iter_entry_points',
        lambda x: [pre_configure_error]
    )
    with pytest.raises(PluginError) as exc_ctx:
        process_plugins(None)
    # the context of the original error should be preserved in the raised
    # exception
    assert exc_ctx.value.entry_point == 'cylc.pre_configure'
    assert exc_ctx.value.plugin_name == 'pre_configure_error'
    assert str(exc_ctx.value.exc) == 'foo'
| cylc/cylc | tests/unit/plugins/test_pre_configure.py | Python | gpl-3.0 | 3,607 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from base import GAETestCase
from badge_app.badge_model import Badge
from routes.badges.home import index, delete
from gaebusiness.business import CommandExecutionException
from gaegraph.model import Node
from mommygae import mommy
from tekton.gae.middleware.redirect import RedirectResponse
class IndexTests(GAETestCase):
    """Tests for the badge home index view."""
    def test_success(self):
        # With one saved Badge, the index page should render.
        mommy.save_one(Badge)
        template_response = index()
        self.assert_can_render(template_response)
class DeleteTests(GAETestCase):
    """Tests for the badge delete view."""
    def test_success(self):
        # Deleting a real badge redirects and removes it from the datastore.
        badge = mommy.save_one(Badge)
        redirect_response = delete(badge.key.id())
        self.assertIsInstance(redirect_response, RedirectResponse)
        self.assertIsNone(badge.key.get())
    def test_non_badge_deletion(self):
        # Deleting a node that is not a Badge must fail and leave it intact.
        non_badge = mommy.save_one(Node)
        self.assertRaises(CommandExecutionException, delete, non_badge.key.id())
        self.assertIsNotNone(non_badge.key.get())
| raphaelrpl/portal | backend/test/badge_tests/badge_home_tests.py | Python | mit | 1,024 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# A singleton map from platform backends to maps of uniquely-identifying
# remote port (which may be the same as local port) to DevToolsClientBackend.
# There is no guarantee that the devtools agent is still alive.
_platform_backends_to_devtools_clients_maps = {}
def _RemoveStaleDevToolsClient(platform_backend):
  """Removes DevTools clients that are no longer connectable."""
  devtools_clients_map = _platform_backends_to_devtools_clients_maps.get(
      platform_backend, {})
  # Keep only the clients that still respond; rebuild the port -> client map.
  devtools_clients_map = {
      port: client
      for port, client in devtools_clients_map.iteritems()
      if client.IsAlive()
  }
  # Write the filtered map back (also creates an entry for new backends).
  _platform_backends_to_devtools_clients_maps[platform_backend] = (
      devtools_clients_map)
def RegisterDevToolsClient(devtools_client_backend, platform_backend):
  """Record a DevTools client under its platform backend and remote port.

  This should only be called from DevToolsClientBackend when it is
  initialized.
  """
  port_key = str(devtools_client_backend.remote_port)
  # Create the per-backend map on first use, then index by remote port.
  clients_map = _platform_backends_to_devtools_clients_maps.setdefault(
      platform_backend, {})
  clients_map[port_key] = devtools_client_backend
def IsSupported(platform_backend):
  """Returns True if any live client on this backend supports Chrome tracing."""
  # Drop dead clients first so we only consult connectable ones.
  _RemoveStaleDevToolsClient(platform_backend)
  devtools_clients_map = _platform_backends_to_devtools_clients_maps.get(
      platform_backend, {})
  for _, devtools_client in devtools_clients_map.iteritems():
    if devtools_client.IsChromeTracingSupported():
      return True
  return False
def GetDevToolsClients(platform_backend):
  """Get DevTools clients including the ones that are no longer connectable."""
  devtools_clients_map = _platform_backends_to_devtools_clients_maps.get(
      platform_backend, {})
  if not devtools_clients_map:
    return []
  return devtools_clients_map.values()
def GetActiveDevToolsClients(platform_backend):
  """Get DevTools clients that are still connectable."""
  # Prune dead clients, then return whatever remains registered.
  _RemoveStaleDevToolsClient(platform_backend)
  return GetDevToolsClients(platform_backend)
| catapult-project/catapult-csm | telemetry/telemetry/internal/platform/tracing_agent/chrome_tracing_devtools_manager.py | Python | bsd-3-clause | 2,272 |
# -*- coding: utf-8 -*-
#
# codimension - graphics python two-way code editor and analyzer
# Copyright (C) 2010-2017 Sergey Satskiy <sergey.satskiy@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""A few constants which do not depend on other project files"""
# Default text encoding, used when:
# - the encoding of a file could not be detected
# - as a safer replacement for plain ascii
DEFAULT_ENCODING = 'utf-8'
# File encoding used for various settings and project files
SETTINGS_ENCODING = 'utf-8'
# Directory to store Codimension settings and projects
CONFIG_DIR = '.codimension3'
| SergeySatskiy/codimension | codimension/utils/config.py | Python | gpl-3.0 | 1,187 |
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import ibis
@pytest.fixture
def pipe_table():
    # Unbound ibis table with three string key columns and one double
    # 'value' column, shared by the pipe() tests below.
    return ibis.table(
        [
            ('key1', 'string'),
            ('key2', 'string'),
            ('key3', 'string'),
            ('value', 'double'),
        ],
        'foo_table',
    )
def test_pipe_positional_args(pipe_table):
    """pipe() forwards extra positional args to the piped function."""
    def add_offset(data, offset, column):
        return data[column] + offset
    piped = pipe_table.pipe(add_offset, 4, 'value')
    assert piped.equals(pipe_table['value'] + 4)
def test_pipe_keyword_args(pipe_table):
    # pipe() should forward extra keyword args to the piped function.
    def my_func(data, foo=None, bar=None):
        return data[bar] + foo
    result = pipe_table.pipe(my_func, foo=4, bar='value')
    expected = pipe_table['value'] + 4
    assert result.equals(expected)
def test_pipe_pass_to_keyword(pipe_table):
    # The (func, 'data') tuple form tells pipe() which keyword receives
    # the table itself.
    def my_func(x, y, data=None):
        return data[x] + y
    result = pipe_table.pipe((my_func, 'data'), 'value', 4)
    expected = pipe_table['value'] + 4
    assert result.equals(expected)
| cpcloud/ibis | ibis/tests/expr/test_pipe.py | Python | apache-2.0 | 1,564 |
"""This file is intentionally left blank."""
| voicecom/pgtool | tests/__init__.py | Python | apache-2.0 | 45 |
"""
Blackboard Analysis Tools
Copyright 2013, Jeroen Doggen, jeroendoggen@gmail.com
"""
import sys
from blackboard_analysis_tools.blackboard import BlackboardAnalysisTools
def run():
    """Instantiate the analyser, run it, and return its exit status."""
    analyser = BlackboardAnalysisTools()
    analyser.run()
    return analyser.exit_value()
# Allow running this module directly as a script.
if __name__ == "__main__":
    sys.exit(run())
| jeroendoggen/blackboard-analysis-tools | blackboard_analysis_tools/main.py | Python | mit | 434 |
'''
Author: leovt (Leonhard Vogt)
License: GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007
Example code for using glsl and vertex buffer objects with pyglet
'''
import sys
import warnings
import pyglet
from pyglet import gl
import ctypes
# Module-level GL objects; populated during window/context setup.
render_program = None
copy_program = None
framebuffer = None
window = None
# Size in pixels of the offscreen framebuffer texture.
FB_WIDTH = 30
FB_HEIGHT = 20
# Maps GL type enums to the matching ctypes types, used to build the
# per-vertex ctypes.Structure in ShaderProgram.
TYPE_NAME_TO_TYPE = {
    gl.GL_FLOAT: gl.GLfloat,
    gl.GL_DOUBLE: gl.GLdouble,
    gl.GL_INT: gl.GLint,
    gl.GL_UNSIGNED_INT: gl.GLuint,
    gl.GL_BYTE: gl.GLbyte,
    gl.GL_UNSIGNED_BYTE: gl.GLubyte,
    gl.GL_SHORT: gl.GLshort,
    gl.GL_UNSIGNED_SHORT: gl.GLushort,
}
def compile_shader(shader_type, shader_source):
    '''
    Compile a shader and print error messages.

    shader_type: gl.GL_VERTEX_SHADER or gl.GL_FRAGMENT_SHADER.
    shader_source: GLSL source as a bytes object.
    Returns the GL shader name; raises ValueError on compile failure.
    '''
    shader_name = gl.glCreateShader(shader_type)
    # glShaderSource expects a char** plus a length array, so wrap the
    # source buffer in the matching ctypes pointer types.
    src_buffer = ctypes.create_string_buffer(shader_source)
    buf_pointer = ctypes.cast(ctypes.pointer(ctypes.pointer(src_buffer)), ctypes.POINTER(ctypes.POINTER(ctypes.c_char)))
    length = ctypes.c_int(len(shader_source) + 1)
    gl.glShaderSource(shader_name, 1, buf_pointer, ctypes.byref(length))
    gl.glCompileShader(shader_name)
    # test if compilation is succesful and print status messages
    success = gl.GLint(0)
    gl.glGetShaderiv(shader_name, gl.GL_COMPILE_STATUS, ctypes.byref(success))
    length = gl.GLint(0)
    gl.glGetShaderiv(shader_name, gl.GL_INFO_LOG_LENGTH, ctypes.byref(length))
    log_buffer = ctypes.create_string_buffer(length.value)
    gl.glGetShaderInfoLog(shader_name, length, None, log_buffer)
    # The info log may contain warnings even on success; always surface it.
    log_message = log_buffer.value[:length.value].decode('ascii').strip()
    if log_message:
        sys.stderr.write(log_message + '\n')
    if not success:
        raise ValueError('Compiling of the shader failed.')
    return shader_name
def link_program(program_name):
    '''
    link a glsl program and print error messages.

    program_name: GL program object with shaders already attached.
    Raises ValueError if linking fails; always echoes the info log.
    '''
    gl.glLinkProgram(program_name)
    success = gl.GLint(0)
    gl.glGetProgramiv(program_name, gl.GL_LINK_STATUS, ctypes.byref(success))
    length = gl.GLint(0)
    gl.glGetProgramiv(program_name, gl.GL_INFO_LOG_LENGTH, ctypes.byref(length))
    log_buffer = ctypes.create_string_buffer(length.value)
    gl.glGetProgramInfoLog(program_name, length, None, log_buffer)
    log_message = log_buffer.value[:length.value].decode('ascii').strip()
    if log_message:
        sys.stderr.write(log_message + '\n')
    if not success:
        raise ValueError('Linking of the shader program failed.')
class ShaderProgram:
    """A linked GLSL program plus a VAO/VBO pair for its vertex data.

    attributes: list of (name, gl_type_enum, component_count) tuples
    describing the vertex layout; used both to declare the ctypes vertex
    struct and to bind the shader's attribute locations.
    Use as a context manager to make the program and VAO current.
    """
    def __init__(self, vertex_shader, fragment_shader, attributes):
        # compile and link
        self.program_name = gl.glCreateProgram()
        gl.glAttachShader(self.program_name, compile_shader(gl.GL_VERTEX_SHADER, vertex_shader))
        gl.glAttachShader(self.program_name, compile_shader(gl.GL_FRAGMENT_SHADER, fragment_shader))
        link_program(self.program_name)
        # vertex type: a ctypes struct with one field per attribute.
        class VERTEX(ctypes.Structure):
            _fields_ = [ (name, TYPE_NAME_TO_TYPE[tname] * size)
                         for (name, tname, size) in attributes ]
        self.VERTEX = VERTEX
        # vertex array and buffer
        self.vertex_array_name = gl.GLuint(0)
        self.vertex_buffer_name = gl.GLuint(0)
        gl.glGenVertexArrays(1, ctypes.byref(self.vertex_array_name))
        gl.glGenBuffers(1, ctypes.byref(self.vertex_buffer_name))
        gl.glBindVertexArray(self.vertex_array_name)
        gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vertex_buffer_name)
        for (name, tname, size) in attributes:
            location = gl.glGetAttribLocation(self.program_name,
                                              ctypes.create_string_buffer(name.encode('ascii')))
            if location < 0:
                # The driver may optimize out unused attributes; warn but
                # keep going so the remaining attributes still bind.
                warnings.warn('Attribute %r is not present.' % name, stacklevel=2)
                continue
            gl.glEnableVertexAttribArray(location)
            # Stride is the full struct size; offset comes from the ctypes
            # field descriptor so it always matches the struct layout.
            gl.glVertexAttribPointer(location, size, tname, False,
                                     ctypes.sizeof(VERTEX),
                                     ctypes.c_void_p(getattr(VERTEX, name).offset))
        gl.glBindVertexArray(0)
        gl.glBindBuffer(gl.GL_ARRAY_BUFFER, 0)
    def __enter__(self):
        # Make this program and its vertex state current.
        gl.glUseProgram(self.program_name)
        gl.glBindVertexArray(self.vertex_array_name)
    def __exit__(self, *unused):
        # Unbind so later GL calls do not accidentally use this program.
        gl.glUseProgram(0)
        gl.glBindVertexArray(0)
    def send_data(self, data):
        # data: sequence of tuples matching the VERTEX field layout.
        data = (self.VERTEX * len(data))(*data)
        gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vertex_buffer_name)
        gl.glBufferData(gl.GL_ARRAY_BUFFER, ctypes.sizeof(data), data, gl.GL_DYNAMIC_DRAW)
        gl.glBindBuffer(gl.GL_ARRAY_BUFFER, 0)
def setup_render_program():
    '''
    Create the glsl program for rendering the colored triangle
    (position + per-vertex color, interpolated in the fragment stage).
    '''
    vertex_shader = b'''
        attribute vec2 position;
        attribute vec4 color;
        varying vec4 var_color;
        void main()
        {
            gl_Position = vec4(position, 0.0, 1.0);
            var_color = color;
        }
    '''
    fragment_shader = b'''
        varying vec4 var_color;
        void main()
        {
            gl_FragColor = var_color;
        }
    '''
    return ShaderProgram(vertex_shader, fragment_shader, [
        ('position', gl.GL_FLOAT, 2),
        ('color', gl.GL_FLOAT, 4),
    ])
def setup_copy_program():
    '''
    Create the glsl copy_program for copying the rendered texture
    to the screen: each vertex carries a position plus a texture
    coordinate sampled in the fragment shader.
    '''
    vertex_shader = b'''
    attribute vec2 position;
    attribute vec2 texcoord;
    varying vec2 var_texcoord;
    void main()
    {
        gl_Position = vec4(position, 0.0, 1.0);
        var_texcoord = texcoord;
    }
    '''
    fragment_shader = b'''
    uniform sampler2D texture;
    varying vec2 var_texcoord;
    void main()
    {
        gl_FragColor = texture2D(texture, var_texcoord);
    }
    '''
    # attribute layout must match the shader declarations above
    return ShaderProgram(vertex_shader, fragment_shader, [
        ('position', gl.GL_FLOAT, 2),
        ('texcoord', gl.GL_FLOAT, 2),
        ])
def draw():
    '''Render the triangle offscreen, then blit the result to the window.'''
    render_to_texture()
    copy_texture_to_screen()
def render_to_texture():
    '''Draw the colored triangle into the offscreen framebuffer texture.'''
    # select the target to draw into
    with framebuffer:
        gl.glViewport(0, 0, FB_WIDTH, FB_HEIGHT)
        # clear the destination
        gl.glClearColor(0.5, 0.6, 0.7, 1.0)
        gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
        # send the vertex data: (position, color) per vertex
        render_program.send_data([
            ((-0.6, -0.5), (1.0, 0.0, 0.0, 1.0)),
            ((0.6, -0.5), (0.0, 1.0, 0.0, 1.0)),
            ((0.0, 0.5), (0.0, 0.0, 1.0, 1.0))])
        # draw using the vertex array for vertex information
        with render_program:
            gl.glDrawArrays(gl.GL_TRIANGLES, 0, 3)
def copy_texture_to_screen():
    '''Blit the rendered texture onto the window as two textured quads.'''
    # clear the destination
    gl.glClearColor(0.4, 0.4, 0.4, 1.0)
    gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
    # send the vertex data: (position, texcoord) per vertex, 2 quads of 4
    copy_program.send_data([
        ((-0.9, -0.9), (0.0, 0.0)),
        ((0.5, -0.9), (1.0, 0.0)),
        ((0.5, 0.5), (1.0, 1.0)),
        ((-0.9, 0.5), (0.0, 1.0)),
        ((0.6, 0.6), (0.0, 1.0)),
        ((1.0, 0.6), (1.0, 1.0)),
        ((1.0, 1.0), (1.0, 0.0)),
        ((0.6, 1.0), (0.0, 0.0))])
    # draw, sampling from the texture rendered by render_to_texture()
    with copy_program, framebuffer.rendered_texture:
        gl.glDrawArrays(gl.GL_QUADS, 0, 8)
class Texture:
    """Own one GL texture object; use as a context manager to bind/unbind."""
    def __init__(self):
        self.name = gl.GLuint(0)
        gl.glGenTextures(1, ctypes.byref(self.name))
    def __enter__(self):
        gl.glBindTexture(gl.GL_TEXTURE_2D, self.name)
    def __exit__(self, *unused):
        gl.glBindTexture(gl.GL_TEXTURE_2D, 0)
class Framebuffer:
    """Offscreen framebuffer with a single RGB color texture attachment.

    Entering the context redirects rendering into the texture; exiting
    restores the default framebuffer and the window viewport.
    """
    def __init__(self):
        self.framebuffer = gl.GLuint(0)
        self.rendered_texture = Texture()
        gl.glGenFramebuffers(1, ctypes.byref(self.framebuffer))
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.framebuffer)
        # Set up the texture as the target for color output
        with self.rendered_texture:
            # allocate FB_WIDTH x FB_HEIGHT RGB storage (no initial pixel data)
            gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGB, FB_WIDTH, FB_HEIGHT, 0, gl.GL_RGB, gl.GL_UNSIGNED_BYTE, 0)
            gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
            gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)
            gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, self.rendered_texture.name, 0)
        if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE:
            raise ValueError('Framebuffer not set up completely')
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
    def __enter__(self):
        # redirect rendering into the offscreen color attachment
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.framebuffer)
        draw_buffers = (gl.GLenum * 1)(gl.GL_COLOR_ATTACHMENT0)
        gl.glDrawBuffers(1, draw_buffers)
        gl.glViewport(0, 0, FB_WIDTH, FB_HEIGHT)
    def __exit__(self, *unused):
        # back to the default framebuffer and the window-sized viewport
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
        gl.glViewport(0, 0, window.width, window.height)
def main():
    '''Create the window and GL resources, then enter the pyglet main loop.'''
    global window
    window = pyglet.window.Window()
    global framebuffer
    framebuffer = Framebuffer()
    global render_program
    render_program = setup_render_program()
    global copy_program
    copy_program = setup_copy_program()
    print('OpenGL Version {}'.format(window.context.get_info().get_version()))
    window.on_draw = draw
    # schedule a no-op callback so pyglet keeps redrawing regularly
    pyglet.clock.schedule_interval(lambda dt: None, 0.01)
    pyglet.app.run()
# run the demo when executed as a script
if __name__ == '__main__':
    main()
| leovt/leovt | glhelper.py | Python | gpl-3.0 | 9,545 |
#!/usr/bin/env python
'''
This file is part of the ASNN eMail Suite.
ASNN-MDA is free software: you can redistribute it and/or modify
it under the terms of the version 2 GNU General Public License
as published by the Free Software Foundation.
ASNN-MDA is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
If you don't know about the GNU General Public License by now,
you can find information about it at http://www.gnu.org/licenses/
'''
"""
Library file for the ASNN email suite, directly runnable for testing
This function will scan an email message contained in a file and return a
string which will be a SMTP result code. If the string starts with '5', then
it is a rejection message that should be presented to the client. If it starts
with a '4', then it's a temporary failure that should be offered to the client.
Built with 'dkimpy' from https://launchpad.net/dkimpy
"""
import re, email, sys, base64
# ------------------------------------------------------------------
# for CLI calls, 'dkimrule' of '1' checks, '2' forces
def scan(fileobj, dkimrule = 0, returnpath = None):
  '''Check the message in fileobj against DKIM.

  dkimrule    -- 0: check only if a domain rule requests it,
                 1: check when a DKIM header is present,
                 2: force the check (absent/failing signature rejects)
  returnpath  -- envelope sender; '<>' marks a bounce message

  Returns an SMTP-style result string: '2xx' accept, '4xx' temporary
  failure, '5xx' rejection to present to the client.

  NOTE(review): relies on the module globals debug/logid/asnn_whois_domain
  being injected by the caller (see the CLI bootstrap below) -- confirm.
  '''
  try:
    import asnn_config
    # Declaring the 'import' where the CLI is starting is not getting it
    # into the namespace properly and it's faulting in this function.
    fileobj.seek(0) # to the top of the file
    domains = [] # 'Return-Path' and 'From' domains
    # create a parsing object that reads the email from the file object
    msgobj = email.message_from_file(fileobj)
    if returnpath == '<>': # the message is a bounce, maybe force DKIM to make
      # sure that it's not a spam masquerading as a bounce
      if asnn_config.DKIM_FORCE_ON_BOUNCE:
        ############# THIS FUNCTIONALITY HAS NOT BEEN TESTED ##################
        dkimrule = 2
    elif returnpath:
      # look up any per-domain DKIM rules for the Return-Path domain
      domain = returnpath.split('@')[1]
      if debug > 5:
        print >> sys.stderr, logid() + "DKIM: 'Return-Path' domain: " + domain
      dobj = asnn_whois_domain.getbasedomain(domain, retobject=True, retupper=True)
      # (By 'returning upper', adding a subdomain to a domain that should be
      # checked will get checked. Domains that don't exist in the DB will
      # end up returning a TLD, but no TLD should have a DKIM rule.)
      if dobj: # don't look unless we found a result
        for line in dobj.whoisdecode.splitlines():
          if line == 'rpskipdkim':
            if debug > 4:
              print >> sys.stderr, logid() + \
                "DKIM: check bypassed per return-path domain: " + dobj.domain
            return '200 DKIM check bypassed'
          if line == 'forcedkim':
            dkimrule = 2
            if debug > 4:
              print >> sys.stderr, logid() + "DKIM: forced check per RP domain: " + dobj.domain
            break
          if line == 'checkdkim' and dkimrule < 2:
            dkimrule = 1
            if debug > 4:
              print >> sys.stderr, logid() + "DKIM: check per RP domain: " + dobj.domain
            break
    if ('from' in msgobj) and ('@' in msgobj['From']):
      # same rule lookup, now for the 'From' header (PRA) domain
      if '<' in msgobj['From']:
        domain = msgobj['From'].split('<')[1].split('@')[1].split('>')[0]
      else:
        domain = msgobj['From'].split('@')[1]
      dobj = asnn_whois_domain.getbasedomain(domain, retobject=True, retupper=True)
      if debug > 5:
        print >> sys.stderr, logid() + "DKIM: 'From' domain: " + domain
      if dobj: # don't look unless we found a result
        for line in dobj.whoisdecode.splitlines():
          if line == 'forcedkim':
            dkimrule = 2
            if debug > 4:
              print >> sys.stderr, logid() + "DKIM: forced check per PRA domain: " + dobj.domain
            break
          if line == 'checkdkim' and dkimrule < 2:
            dkimrule = 1
            if debug > 4:
              print >> sys.stderr, logid() + "DKIM: check per PRA domain: " + dobj.domain
            break
    else:
      if debug > 3:
        print >> sys.stderr, logid() + "DKIM: bad or missing 'From'"
    if not dkimrule:
      if debug > 4:
        print >> sys.stderr, logid() + "DKIM: no DKIM check required"
      return '200 no DKIM check required'
    import dkim
    if 'DKIM-Signature' not in msgobj:
      if debug > 4:
        print >> sys.stderr, logid() + "DKIM: no DKIM header found"
      if dkimrule == 2:
        return asnn_config.REJECT_DKIM
      return '200 no DKIM header found' # i.e. dkimrule == 1
    fileobj.seek(0) # to the top of the file
    dkimresult = dkim.verify(fileobj.read())
    if not dkimresult:
      if debug > 4:
        print >> sys.stderr, logid() + "DKIM: reject per result"
      return asnn_config.REJECT_DKIM
    if debug > 4:
      print >> sys.stderr, logid() + "DKIM: result okay"
    return '200 DKIM check okay'
  except Exception as msg:
    # any unexpected failure defers the message (soft fail), never rejects
    if debug > 0:
      print >> sys.stderr, logid() + "DKIM: exception during check: " + str(msg)
    if debug > 4:
      import traceback
      print >> sys.stderr, traceback.print_exc()
    return asnn_config.SOFTFAIL_DKIM
# ------------------------------------------------------
# catch if this was run directly or imported as a module
# (This is primarily for testing purposes)
# CLI entry point for testing: scans one message file and prints the result.
if __name__ == "__main__" and __package__ is None:
  # command line option parsing
  import argparse, os, time
  parser = argparse.ArgumentParser()
  parser.add_argument("filename", default = None, \
    help="file with the message to parse")
  parser.add_argument("-v", dest='debug', \
    action="count", default=0, \
    help="debug vebosity")
  parser.add_argument("-l", dest='lookup', \
    action="store_true", help="lookup rules for domains in DB")
  args = parser.parse_args()
  def logid():
    # no per-message log prefix when run from the command line
    return ''
  scan.logid = logid
  debug = args.debug
  if debug > 0:
    print >> sys.stderr, "Debug level set to", debug
  # bootstrap Django so the domain-rule lookups can reach the database
  os.environ.setdefault("DJANGO_SETTINGS_MODULE", "asnn.settings")
  from asnn import models
  import django
  django.setup()
  import asnn_whois_domain
  asnn_whois_domain.models = models # explicitly put stuff in its namespace
  asnn_whois_domain.debug = debug
  asnn_whois_domain.logid = logid
  asnn_whois_domain.main.logid = logid # not getting inherited
  asnn_whois_domain.main.debug = debug
  asnn_whois_domain.sys = sys
  asnn_whois_domain.time = time
  asnn_whois_domain.peeripaddr = None
  asnn_whois_domain.checkflags = []
  fileobj = open(args.filename, 'r')
  # '-l' uses the domain rules to decide; otherwise force the DKIM check
  if args.lookup:
    print scan(fileobj)
  else:
    print scan(fileobj, 2)
| marvinglenn/asnn-mda | asnn_lib_dkim.py | Python | gpl-2.0 | 6,823 |
#!/usr/bin/env python
#
# Copyright 2008 Jose Fonseca
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Modification note for cylc: original version is 0.6, 27fbeb8 from
# https://github.com/jrfonseca/xdot.py
#
'''Visualize dot graphs via the xdot format.'''
__author__ = "Jose Fonseca et al"
import os
import sys
import subprocess
import math
import colorsys
import time
import re
import optparse
import gobject
import gtk
import gtk.gdk
import gtk.keysyms
import cairo
import pango
import pangocairo
# See http://www.graphviz.org/pub/scm/graphviz-cairo/plugin/cairo/gvrender_cairo.c
# For pygtk inspiration and guidance see:
# - http://mirageiv.berlios.de/
# - http://comix.sourceforge.net/
class Pen:
    """Mutable bag of drawing attributes: colors, line width, font, dash."""

    def __init__(self):
        # xdot defaults: opaque black, hairline width, 14pt Times
        self.color = (0.0, 0.0, 0.0, 1.0)
        self.fillcolor = (0.0, 0.0, 0.0, 1.0)
        self.linewidth = 1.0
        self.fontsize = 14.0
        self.fontname = "Times-Roman"
        self.dash = ()

    def copy(self):
        """Return an independent clone of this pen."""
        clone = Pen()
        clone.__dict__ = self.__dict__.copy()
        return clone

    def highlighted(self):
        """Return a copy re-colored for highlighting (red on pale red)."""
        clone = self.copy()
        clone.color = (1, 0, 0, 1)
        clone.fillcolor = (1, .8, .8, 1)
        return clone
class Shape:
    """Abstract base class for all the drawing shapes."""

    def __init__(self):
        pass

    def draw(self, cr, highlight=False):
        """Draw this shape with the given cairo context."""
        raise NotImplementedError

    def select_pen(self, highlight):
        # plain pen for normal drawing; lazily build and cache the
        # highlight variant the first time it is requested
        if not highlight:
            return self.pen
        if not hasattr(self, 'highlight_pen'):
            self.highlight_pen = self.pen.highlighted()
        return self.highlight_pen

    def search_text(self, regexp):
        return False
class TextShape(Shape):
    """A justified text label rendered with pango/pangocairo.

    The pango layout is built lazily on the first draw and cached on the
    instance; if the local font metrics make the text wider than the box
    dot reserved for it, the whole layout is scaled down to fit.
    """
    # justification codes as produced by xdot's text operation
    LEFT, CENTER, RIGHT = -1, 0, 1
    def __init__(self, pen, x, y, j, w, t):
        Shape.__init__(self)
        self.pen = pen.copy()
        self.x = x  # anchor x (interpretation depends on self.j)
        self.y = y  # anchor y
        self.j = j  # justification: LEFT / CENTER / RIGHT
        self.w = w  # width dot allotted to the text
        self.t = t  # the text itself
    def draw(self, cr, highlight=False):
        try:
            layout = self.layout
        except AttributeError:
            # first draw: build the pango layout and cache it
            layout = cr.create_layout()
            # set font options
            # see http://lists.freedesktop.org/archives/cairo/2007-February/009688.html
            context = layout.get_context()
            fo = cairo.FontOptions()
            try:
                fo.set_antialias(cairo.ANTIALIAS_DEFAULT)
                fo.set_hint_style(cairo.HINT_STYLE_NONE)
                fo.set_hint_metrics(cairo.HINT_METRICS_OFF)
            except AttributeError:
                # HJO: handle crappy cairo installation on SLES11, e.g:
                # AttributeError: 'cairo.FontOptions' object has no attribute 'set_antialias'
                pass
            try:
                pangocairo.context_set_font_options(context, fo)
            except TypeError:
                # XXX: Some broken pangocairo bindings show the error
                # 'TypeError: font_options must be a cairo.FontOptions or None'
                pass
            except AttributeError:
                # HJO: handle crappy cairo installation on SLES11
                pass
            # set font
            font = pango.FontDescription()
            font.set_family(self.pen.fontname)
            try:
                font.set_absolute_size(self.pen.fontsize*pango.SCALE)
            except Exception:
                # HJO: handle crappy cairo installation on SLES11
                pass
            layout.set_font_description(font)
            # set text
            layout.set_text(self.t)
            # cache it
            self.layout = layout
        else:
            cr.update_layout(layout)
        descent = 2 # XXX get descender from font metrics
        width, height = layout.get_size()
        width = float(width)/pango.SCALE
        height = float(height)/pango.SCALE
        # we know the width that dot thinks this text should have
        # we do not necessarily have a font with the same metrics
        # scale it so that the text fits inside its box
        if width > self.w:
            f = self.w / width
            width = self.w # equivalent to width *= f
            height *= f
            descent *= f
        else:
            f = 1.0
        # place the anchor according to the justification code
        if self.j == self.LEFT:
            x = self.x
        elif self.j == self.CENTER:
            x = self.x - 0.5*width
        elif self.j == self.RIGHT:
            x = self.x - width
        else:
            assert 0
        y = self.y - height + descent
        cr.move_to(x, y)
        cr.save()
        cr.scale(f, f)
        cr.set_source_rgba(*self.select_pen(highlight).color)
        cr.show_layout(layout)
        cr.restore()
        if 0: # DEBUG
            # show where dot thinks the text should appear
            cr.set_source_rgba(1, 0, 0, .9)
            if self.j == self.LEFT:
                x = self.x
            elif self.j == self.CENTER:
                x = self.x - 0.5*self.w
            elif self.j == self.RIGHT:
                x = self.x - self.w
            cr.move_to(x, self.y)
            cr.line_to(x+self.w, self.y)
            cr.stroke()
    def search_text(self, regexp):
        return regexp.search(self.t) is not None
class ImageShape(Shape):
    """An external image file placed at (x0, y0) and scaled to w x h."""
    def __init__(self, pen, x0, y0, w, h, path):
        Shape.__init__(self)
        self.pen = pen.copy()
        self.x0 = x0
        self.y0 = y0
        self.w = w
        self.h = h
        self.path = path  # filesystem path of the image file
    def draw(self, cr, highlight=False):
        cr2 = gtk.gdk.CairoContext(cr)
        pixbuf = gtk.gdk.pixbuf_new_from_file(self.path)
        # scale factors mapping the pixbuf onto the w x h box
        sx = float(self.w)/float(pixbuf.get_width())
        sy = float(self.h)/float(pixbuf.get_height())
        cr.save()
        # shift up by h so the image box ends at y0
        cr.translate(self.x0, self.y0 - self.h)
        cr.scale(sx, sy)
        cr2.set_source_pixbuf(pixbuf, 0, 0)
        cr2.paint()
        cr.restore()
class EllipseShape(Shape):
    """An ellipse centred at (x0, y0) with radii w and h."""
    def __init__(self, pen, x0, y0, w, h, filled=False):
        Shape.__init__(self)
        self.pen = pen.copy()
        self.x0 = x0
        self.y0 = y0
        self.w = w
        self.h = h
        self.filled = filled
    def draw(self, cr, highlight=False):
        cr.save()
        # draw a unit circle under a scale transform to get the ellipse;
        # the transform is restored before stroking so the line width is
        # not distorted
        cr.translate(self.x0, self.y0)
        cr.scale(self.w, self.h)
        cr.move_to(1.0, 0.0)
        cr.arc(0.0, 0.0, 1.0, 0, 2.0*math.pi)
        cr.restore()
        pen = self.select_pen(highlight)
        if self.filled:
            cr.set_source_rgba(*pen.fillcolor)
            cr.fill()
        else:
            cr.set_dash(pen.dash)
            cr.set_line_width(pen.linewidth)
            cr.set_source_rgba(*pen.color)
            cr.stroke()
class PolygonShape(Shape):
    """A closed polygon through self.points, outlined or filled."""
    def __init__(self, pen, points, filled=False):
        Shape.__init__(self)
        self.pen = pen.copy()
        self.points = points
        self.filled = filled
    def draw(self, cr, highlight=False):
        # start at the last point so the full outline is traced before
        # close_path()
        x0, y0 = self.points[-1]
        cr.move_to(x0, y0)
        for x, y in self.points:
            cr.line_to(x, y)
        cr.close_path()
        pen = self.select_pen(highlight)
        if self.filled:
            cr.set_source_rgba(*pen.fillcolor)
            # NOTE(review): fill_preserve() immediately followed by fill()
            # looks redundant -- the second call consumes the preserved
            # path; confirm against upstream xdot before simplifying
            cr.fill_preserve()
            cr.fill()
        else:
            cr.set_dash(pen.dash)
            cr.set_line_width(pen.linewidth)
            cr.set_source_rgba(*pen.color)
            cr.stroke()
class LineShape(Shape):
    """An open polyline through self.points."""
    def __init__(self, pen, points):
        Shape.__init__(self)
        self.pen = pen.copy()
        self.points = points
    def draw(self, cr, highlight=False):
        x0, y0 = self.points[0]
        cr.move_to(x0, y0)
        for x1, y1 in self.points[1:]:
            cr.line_to(x1, y1)
        pen = self.select_pen(highlight)
        cr.set_dash(pen.dash)
        cr.set_line_width(pen.linewidth)
        cr.set_source_rgba(*pen.color)
        cr.stroke()
class BezierShape(Shape):
    """One or more cubic Bezier segments, optionally filled.

    ``points`` holds the start point followed by groups of three control
    points, exactly as produced by xdot's B/b drawing operations.
    """
    def __init__(self, pen, points, filled=False):
        Shape.__init__(self)
        self.pen = pen.copy()
        self.points = points
        self.filled = filled
    def draw(self, cr, highlight=False):
        x0, y0 = self.points[0]
        cr.move_to(x0, y0)
        # consume control points three at a time; range() (instead of the
        # Python-2-only xrange()) behaves identically here and keeps the
        # class importable on Python 3
        for i in range(1, len(self.points), 3):
            x1, y1 = self.points[i]
            x2, y2 = self.points[i + 1]
            x3, y3 = self.points[i + 2]
            cr.curve_to(x1, y1, x2, y2, x3, y3)
        pen = self.select_pen(highlight)
        if self.filled:
            # xdot uses "filled" to mean "filled shape with an outline"
            cr.set_source_rgba(*pen.fillcolor)
            cr.fill_preserve()
            cr.fill()
        else:
            cr.set_dash(pen.dash)
            cr.set_line_width(pen.linewidth)
            cr.set_source_rgba(*pen.color)
            cr.stroke()
class CompoundShape(Shape):
    """A shape composed of sub-shapes, drawn and searched as one unit."""
    def __init__(self, shapes):
        Shape.__init__(self)
        self.shapes = shapes

    def draw(self, cr, highlight=False):
        for child in self.shapes:
            child.draw(cr, highlight=highlight)

    def search_text(self, regexp):
        # true as soon as any sub-shape matches
        return any(child.search_text(regexp) for child in self.shapes)
class Url(object):
    """Associates a clickable URL with the element that carries it."""
    def __init__(self, item, url, highlight=None):
        self.item = item
        self.url = url
        # by default only the owning item is highlighted on hover
        self.highlight = set([item]) if highlight is None else highlight
class Jump(object):
    """A pan target at (x, y) plus the set of items to highlight."""
    def __init__(self, item, x, y, highlight=None):
        self.item = item
        self.x = x
        self.y = y
        # default to highlighting only the item that produced the jump
        self.highlight = set([item]) if highlight is None else highlight
class Element(CompoundShape):
    """Base class for graph nodes and edges."""
    def __init__(self, shapes):
        CompoundShape.__init__(self, shapes)

    def is_inside(self, x, y):
        # hit-testing is opt-in for subclasses
        return False

    def get_url(self, x, y):
        return None

    def get_jump(self, x, y):
        return None
class Node(Element):
    """A laid-out graph node: centre point, bounding box, shapes, URL."""
    def __init__(self, id, x, y, w, h, shapes, url):
        Element.__init__(self, shapes)
        self.id = id
        # centre of the node
        self.x = x
        self.y = y
        # axis-aligned bounding box derived from the width/height
        self.x1 = x - 0.5*w
        self.y1 = y - 0.5*h
        self.x2 = x + 0.5*w
        self.y2 = y + 0.5*h
        self.url = url

    def is_inside(self, x, y):
        return self.x1 <= x <= self.x2 and self.y1 <= y <= self.y2

    def get_url(self, x, y):
        if self.url is not None and self.is_inside(x, y):
            return Url(self, self.url)
        return None

    def get_jump(self, x, y):
        return Jump(self, self.x, self.y) if self.is_inside(x, y) else None

    def __repr__(self):
        return "<Node %s>" % self.id
def square_distance(x1, y1, x2, y2):
    """Return the squared Euclidean distance between two points."""
    dx, dy = x2 - x1, y2 - y1
    return dx * dx + dy * dy
class Edge(Element):
    """A laid-out graph edge with jump hot-spots at its two endpoints."""

    RADIUS = 10  # hit radius (in canvas units) around each endpoint

    def __init__(self, src, dst, points, shapes):
        Element.__init__(self, shapes)
        self.src = src
        self.dst = dst
        self.points = points

    def is_inside_begin(self, x, y):
        return square_distance(x, y, *self.points[0]) <= self.RADIUS*self.RADIUS

    def is_inside_end(self, x, y):
        return square_distance(x, y, *self.points[-1]) <= self.RADIUS*self.RADIUS

    def is_inside(self, x, y):
        # an edge is only "hit" near one of its endpoints
        return self.is_inside_begin(x, y) or self.is_inside_end(x, y)

    def get_jump(self, x, y):
        # clicking one end jumps to (and highlights) the opposite node
        if self.is_inside_begin(x, y):
            return Jump(self, self.dst.x, self.dst.y, highlight=set([self, self.dst]))
        if self.is_inside_end(x, y):
            return Jump(self, self.src.x, self.src.y, highlight=set([self, self.src]))
        return None

    def __repr__(self):
        return "<Edge %s -> %s>" % (self.src, self.dst)
class Graph(Shape):
    """A laid-out graph: canvas size plus background shapes, nodes, edges."""
    def __init__(self, width=1, height=1, shapes=(), nodes=(), edges=()):
        Shape.__init__(self)
        self.width = width
        self.height = height
        self.shapes = shapes  # background/cluster decorations
        self.nodes = nodes
        self.edges = edges
    def get_size(self):
        """Return (width, height) of the drawing in canvas units."""
        return self.width, self.height
    def draw(self, cr, highlight_items=None):
        if highlight_items is None:
            highlight_items = ()
        cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
        cr.set_line_cap(cairo.LINE_CAP_BUTT)
        cr.set_line_join(cairo.LINE_JOIN_MITER)
        # background shapes first, then edges underneath nodes
        for shape in self.shapes:
            shape.draw(cr)
        for edge in self.edges:
            edge.draw(cr, highlight=(edge in highlight_items))
        for node in self.nodes:
            node.draw(cr, highlight=(node in highlight_items))
    def get_element(self, x, y):
        """Return the node (preferred) or edge under (x, y), else None."""
        for node in self.nodes:
            if node.is_inside(x, y):
                return node
        for edge in self.edges:
            if edge.is_inside(x, y):
                return edge
    def get_url(self, x, y):
        """Return the Url under (x, y), if any node carries one there."""
        for node in self.nodes:
            url = node.get_url(x, y)
            if url is not None:
                return url
        return None
    def get_jump(self, x, y):
        """Return a Jump target under (x, y); edge endpoints win over nodes."""
        for edge in self.edges:
            jump = edge.get_jump(x, y)
            if jump is not None:
                return jump
        for node in self.nodes:
            jump = node.get_jump(x, y)
            if jump is not None:
                return jump
        return None
# Bit flags for xdot's "t" (font characteristics) drawing operation.
BOLD = 1
ITALIC = 2
UNDERLINE = 4
SUPERSCRIPT = 8
SUBSCRIPT = 16
STRIKE_THROUGH = 32
OVERLINE = 64
class XDotAttrParser:
    """Parser for xdot drawing attributes.

    Consumes the space-separated opcode stream found in graphviz's
    _draw_/_ldraw_/... attributes and turns it into a list of Shape
    objects (via the handle_* callbacks below).

    See also:
    - http://www.graphviz.org/doc/info/output.html#d:xdot
    """
    def __init__(self, parser, buf):
        self.parser = parser  # owning XDotParser, used for transform()
        self.buf = buf        # raw xdot attribute string
        self.pos = 0          # read cursor into buf
        self.pen = Pen()
        self.shapes = []
    def __nonzero__(self):
        return self.pos < len(self.buf)
    # keep truth-testing (used by parse()'s `while s:`) working on Python 3
    __bool__ = __nonzero__
    def read_code(self):
        """Return the next whitespace-delimited field and skip trailing space."""
        pos = self.buf.find(" ", self.pos)
        res = self.buf[self.pos:pos]
        self.pos = pos + 1
        while self.pos < len(self.buf) and self.buf[self.pos].isspace():
            self.pos += 1
        return res
    def read_int(self):
        return int(self.read_code())
    def read_float(self):
        return float(self.read_code())
    def read_point(self):
        """Read an (x, y) pair and map it into canvas coordinates."""
        x = self.read_float()
        y = self.read_float()
        return self.transform(x, y)
    def read_text(self):
        """Read an xdot counted string of the form 'N -text'."""
        num = self.read_int()
        pos = self.buf.find("-", self.pos) + 1
        self.pos = pos + num
        res = self.buf[pos:self.pos]
        while self.pos < len(self.buf) and self.buf[self.pos].isspace():
            self.pos += 1
        return res
    def read_polygon(self):
        """Read a counted list of points."""
        n = self.read_int()
        p = []
        for i in range(n):
            x, y = self.read_point()
            p.append((x, y))
        return p
    def read_color(self):
        """Return an (r, g, b, a) tuple in [0, 1], or None if unsupported."""
        # See http://www.graphviz.org/doc/info/attrs.html#k:color
        c = self.read_text()
        c1 = c[:1]
        if c1 == '#':
            # "#RRGGBB" or "#RRGGBBAA" hex form
            hex2float = lambda h: float(int(h, 16)/255.0)
            r = hex2float(c[1:3])
            g = hex2float(c[3:5])
            b = hex2float(c[5:7])
            try:
                a = hex2float(c[7:9])
            except (IndexError, ValueError):
                a = 1.0
            return r, g, b, a
        elif c1.isdigit() or c1 == ".":
            # "H,S,V" or "H S V" or "H, S, V" or any other variation
            h, s, v = map(float, c.replace(",", " ").split())
            r, g, b = colorsys.hsv_to_rgb(h, s, v)
            a = 1.0
            return r, g, b, a
        elif c1 == "[":
            sys.stderr.write('warning: color gradients not supported yet\n')
            return None
        else:
            return self.lookup_color(c)
    def lookup_color(self, c):
        """Resolve a named or brewer-scheme color; None (with a warning) if unknown."""
        try:
            color = gtk.gdk.color_parse(c)
        except ValueError:
            pass
        else:
            s = 1.0/65535.0
            r = color.red*s
            g = color.green*s
            b = color.blue*s
            a = 1.0
            return r, g, b, a
        try:
            # "/scheme/index" brewer color references
            dummy, scheme, index = c.split('/')
            r, g, b = brewer_colors[scheme][int(index)]
        except (ValueError, KeyError):
            pass
        else:
            s = 1.0/255.0
            r = r*s
            g = g*s
            b = b*s
            a = 1.0
            return r, g, b, a
        sys.stderr.write("warning: unknown color '%s'\n" % c)
        return None
    def parse(self):
        """Decode the whole buffer, dispatching one handler per opcode."""
        s = self
        while s:
            op = s.read_code()
            if op == "c":
                color = s.read_color()
                if color is not None:
                    self.handle_color(color, filled=False)
            elif op == "C":
                color = s.read_color()
                if color is not None:
                    self.handle_color(color, filled=True)
            elif op == "S":
                # http://www.graphviz.org/doc/info/attrs.html#k:style
                style = s.read_text()
                if style.startswith("setlinewidth("):
                    lw = style.split("(")[1].split(")")[0]
                    lw = float(lw)
                    self.handle_linewidth(lw)
                elif style in ("solid", "dashed", "bold", "dotted"):
                    self.handle_linestyle(style)
            elif op == "F":
                size = s.read_float()
                name = s.read_text()
                self.handle_font(size, name)
            elif op == "T":
                x, y = s.read_point()
                j = s.read_int()
                w = s.read_float()
                t = s.read_text()
                self.handle_text(x, y, j, w, t)
            elif op == "t":
                f = s.read_int()
                self.handle_font_characteristics(f)
            elif op == "E":
                x0, y0 = s.read_point()
                w = s.read_float()
                h = s.read_float()
                self.handle_ellipse(x0, y0, w, h, filled=True)
            elif op == "e":
                x0, y0 = s.read_point()
                w = s.read_float()
                h = s.read_float()
                self.handle_ellipse(x0, y0, w, h, filled=False)
            elif op == "L":
                points = self.read_polygon()
                self.handle_line(points)
            elif op == "B":
                points = self.read_polygon()
                self.handle_bezier(points, filled=False)
            elif op == "b":
                points = self.read_polygon()
                self.handle_bezier(points, filled=True)
            elif op == "P":
                points = self.read_polygon()
                self.handle_polygon(points, filled=True)
            elif op == "p":
                points = self.read_polygon()
                self.handle_polygon(points, filled=False)
            elif op == "I":
                x0, y0 = s.read_point()
                w = s.read_float()
                h = s.read_float()
                path = s.read_text()
                self.handle_image(x0, y0, w, h, path)
            else:
                sys.stderr.write("error: unknown xdot opcode '%s'\n" % op)
                sys.exit(1)
        return self.shapes
    def transform(self, x, y):
        return self.parser.transform(x, y)
    def handle_color(self, color, filled=False):
        if filled:
            self.pen.fillcolor = color
        else:
            self.pen.color = color
    def handle_linewidth(self, linewidth):
        self.pen.linewidth = linewidth
    def handle_linestyle(self, style):
        if style == "bold":
            self.pen.linewidth = 4
        elif style == "solid":
            self.pen.dash = ()
        elif style == "dashed":
            self.pen.dash = (6, )       # 6pt on, 6pt off
        elif style == "dotted":
            self.pen.dash = (2, 4)      # 2pt on, 4pt off
    def handle_font(self, size, name):
        self.pen.fontsize = size
        self.pen.fontname = name
    def handle_font_characteristics(self, flags):
        # TODO
        if flags != 0:
            # BUG FIX: the original message had a stray `% op` with no
            # placeholder and `op` not in scope, which raised instead of
            # printing the warning
            sys.stderr.write("warning: font characteristics not supported yet\n")
    def handle_text(self, x, y, j, w, t):
        self.shapes.append(TextShape(self.pen, x, y, j, w, t))
    def handle_ellipse(self, x0, y0, w, h, filled=False):
        if filled:
            # xdot uses this to mean "draw a filled shape with an outline"
            self.shapes.append(EllipseShape(self.pen, x0, y0, w, h, filled=True))
        self.shapes.append(EllipseShape(self.pen, x0, y0, w, h))
    def handle_image(self, x0, y0, w, h, path):
        self.shapes.append(ImageShape(self.pen, x0, y0, w, h, path))
    def handle_line(self, points):
        self.shapes.append(LineShape(self.pen, points))
    def handle_bezier(self, points, filled=False):
        if filled:
            # xdot uses this to mean "draw a filled shape with an outline"
            self.shapes.append(BezierShape(self.pen, points, filled=True))
        self.shapes.append(BezierShape(self.pen, points))
    def handle_polygon(self, points, filled=False):
        if filled:
            # xdot uses this to mean "draw a filled shape with an outline"
            self.shapes.append(PolygonShape(self.pen, points, filled=True))
        self.shapes.append(PolygonShape(self.pen, points))
# Sentinel token types used by the Scanner/Lexer machinery below.
EOF = -1   # end of input reached
SKIP = -2  # whitespace/comment token to be discarded
class ParseError(Exception):
    """Syntax error carrying optional filename/line/column context."""
    def __init__(self, msg=None, filename=None, line=None, col=None):
        self.msg = msg
        self.filename = filename
        self.line = line
        self.col = col
    def __str__(self):
        # join only the known parts, e.g. "file.dot:3:14:unexpected token";
        # `is not None` (not `!= None`) is the correct identity test here
        return ':'.join([str(part) for part in (self.filename, self.line, self.col, self.msg) if part is not None])
class Scanner:
    """Stateless scanner: tokenizes a buffer one position at a time."""
    # should be overriden by derived classes
    tokens = []       # list of (type, regexp, test_for_literal) triples
    symbols = {}      # single-character token table
    literals = {}     # keyword spellings mapped to token types
    ignorecase = False
    def __init__(self):
        flags = re.DOTALL
        if self.ignorecase:
            flags |= re.IGNORECASE
        # one big alternation with a capture group per token rule; the
        # matched group index identifies which rule fired
        self.tokens_re = re.compile(
            '|'.join(['(' + regexp + ')' for type, regexp, test_lit in self.tokens]),
            flags
        )
    def next(self, buf, pos):
        """Return (token_type, text, new_pos) for the token at *pos*."""
        if pos >= len(buf):
            return EOF, '', pos
        mo = self.tokens_re.match(buf, pos)
        if mo:
            text = mo.group()
            type, regexp, test_lit = self.tokens[mo.lastindex - 1]
            pos = mo.end()
            if test_lit:
                # keywords are scanned as IDs, then reclassified here
                type = self.literals.get(text, type)
            return type, text, pos
        else:
            # fall back to single-character symbols; None marks "unexpected"
            c = buf[pos]
            return self.symbols.get(c, None), c, pos + 1
class Token:
    """A lexed token: its type code, raw text, and source position."""
    def __init__(self, type, text, line, col):
        # source position first, then the token payload
        self.line = line
        self.col = col
        self.type = type
        self.text = text
class Lexer:
    """Generic line/column-tracking lexer driven by a Scanner.

    Subclasses must set ``scanner`` and provide a ``filter(type, text)``
    method that may rewrite a scanned token before it is returned.
    """
    # should be overridden by derived classes
    scanner = None
    tabsize = 8
    newline_re = re.compile(r'\r\n?|\n')
    def __init__(self, buf = None, pos = 0, filename = None, fp = None):
        if fp is not None:
            try:
                fileno = fp.fileno()
                length = os.path.getsize(fp.name)
                import mmap
            except Exception:
                # fall back: read whole file into memory (e.g. StringIO,
                # pipes, or platforms without mmap).  Catching Exception
                # instead of a bare except keeps KeyboardInterrupt/SystemExit
                # propagating.
                buf = fp.read()
                pos = 0
            else:
                # map the whole file into memory
                if length:
                    # length must not be zero
                    buf = mmap.mmap(fileno, length, access = mmap.ACCESS_READ)
                    pos = os.lseek(fileno, 0, 1)
                else:
                    buf = ''
                    pos = 0
            if filename is None:
                try:
                    filename = fp.name
                except AttributeError:
                    filename = None
        self.buf = buf
        self.pos = pos
        self.line = 1
        self.col = 1
        self.filename = filename
    def next(self):
        """Return the next non-SKIP Token; raise ParseError on bad input."""
        while True:
            # save state
            pos = self.pos
            line = self.line
            col = self.col
            type, text, endpos = self.scanner.next(self.buf, pos)
            assert pos + len(text) == endpos
            self.consume(text)
            type, text = self.filter(type, text)
            self.pos = endpos
            if type == SKIP:
                continue
            elif type is None:
                msg = 'unexpected char '
                if text >= ' ' and text <= '~':
                    msg += "'%s'" % text
                else:
                    msg += "0x%X" % ord(text)
                raise ParseError(msg, self.filename, line, col)
            else:
                break
        return Token(type = type, text = text, line = line, col = col)
    def consume(self, text):
        """Advance the line/col counters over *text* (tab-aware)."""
        # update line number
        pos = 0
        for mo in self.newline_re.finditer(text, pos):
            self.line += 1
            self.col = 1
            pos = mo.end()
        # update column number, expanding tabs to the next tab stop
        while True:
            tabpos = text.find('\t', pos)
            if tabpos == -1:
                break
            self.col += tabpos - pos
            self.col = ((self.col - 1)//self.tabsize + 1)*self.tabsize + 1
            pos = tabpos + 1
        self.col += len(text) - pos
class Parser:
    """Minimal LL(1) parser shell: one token of lookahead over a Lexer."""
    def __init__(self, lexer):
        self.lexer = lexer
        self.lookahead = self.lexer.next()

    def match(self, type):
        """Raise ParseError unless the lookahead token has *type*."""
        if self.lookahead.type != type:
            raise ParseError(
                msg = 'unexpected token %r' % self.lookahead.text,
                filename = self.lexer.filename,
                line = self.lookahead.line,
                col = self.lookahead.col)

    def skip(self, type):
        """Discard tokens until the lookahead has *type*."""
        while self.lookahead.type != type:
            self.consume()

    def consume(self):
        """Return the current token and advance the lookahead by one."""
        token, self.lookahead = self.lookahead, self.lexer.next()
        return token
# Token type codes for the dot grammar (consumed by DotScanner/DotParser).
ID = 0
STR_ID = 1
HTML_ID = 2
EDGE_OP = 3
LSQUARE = 4
RSQUARE = 5
LCURLY = 6
RCURLY = 7
COMMA = 8
COLON = 9
SEMI = 10
EQUAL = 11
PLUS = 12
STRICT = 13
GRAPH = 14
DIGRAPH = 15
NODE = 16
EDGE = 17
SUBGRAPH = 18
class DotScanner(Scanner):
    """Token definitions for the dot language."""
    # token regular expression table
    tokens = [
        # whitespace and comments
        (SKIP,
            r'[ \t\f\r\n\v]+|'
            r'//[^\r\n]*|'
            r'/\*.*?\*/|'
            r'#[^\r\n]*',
        False),
        # Alphanumeric IDs
        (ID, r'[a-zA-Z_\x80-\xff][a-zA-Z0-9_\x80-\xff]*', True),
        # Numeric IDs
        (ID, r'-?(?:\.[0-9]+|[0-9]+(?:\.[0-9]*)?)', False),
        # String IDs
        (STR_ID, r'"[^"\\]*(?:\\.[^"\\]*)*"', False),
        # HTML IDs
        (HTML_ID, r'<[^<>]*(?:<[^<>]*>[^<>]*)*>', False),
        # Edge operators
        (EDGE_OP, r'-[>-]', False),
    ]
    # symbol table
    symbols = {
        '[': LSQUARE,
        ']': RSQUARE,
        '{': LCURLY,
        '}': RCURLY,
        ',': COMMA,
        ':': COLON,
        ';': SEMI,
        '=': EQUAL,
        '+': PLUS,
    }
    # literal table: keywords reclassified from the alphanumeric ID rule
    literals = {
        'strict': STRICT,
        'graph': GRAPH,
        'digraph': DIGRAPH,
        'node': NODE,
        'edge': EDGE,
        'subgraph': SUBGRAPH,
    }
    # dot keywords are case-insensitive
    ignorecase = True
class DotLexer(Lexer):
    scanner = DotScanner()

    def filter(self, type, text):
        """Normalize quoted and HTML IDs into plain ID tokens."""
        # TODO: handle charset
        if type == STR_ID:
            # strip the surrounding quotes, then undo the escape forms the
            # dot language defines for quoted strings (line continuations
            # must be removed before the escaped quotes)
            text = text[1:-1]
            for escape, replacement in (('\\\r\n', ''),
                                        ('\\\r', ''),
                                        ('\\\n', ''),
                                        ('\\"', '"')):
                text = text.replace(escape, replacement)
            # layout engines recognize other escape codes (many non-standard)
            # but we don't translate them here
            type = ID
        elif type == HTML_ID:
            # strip the surrounding angle brackets
            text = text[1:-1]
            type = ID
        return type, text
class DotParser(Parser):
    """Recursive-descent parser for the dot language structure.

    Attribute defaults declared via graph/node/edge statements accumulate
    in graph_attrs/node_attrs/edge_attrs; subclasses receive the parsed
    elements through the handle_graph/handle_node/handle_edge hooks.
    """
    def __init__(self, lexer):
        Parser.__init__(self, lexer)
        self.graph_attrs = {}
        self.node_attrs = {}
        self.edge_attrs = {}
    def parse(self):
        self.parse_graph()
        self.match(EOF)
    def parse_graph(self):
        # [strict] (graph|digraph) id? '{' stmt* '}'
        if self.lookahead.type == STRICT:
            self.consume()
        self.skip(LCURLY)
        self.consume()
        while self.lookahead.type != RCURLY:
            self.parse_stmt()
        self.consume()
    def parse_subgraph(self):
        # subgraph id? '{' stmt* '}' -- returns the subgraph id, if any
        id = None
        if self.lookahead.type == SUBGRAPH:
            self.consume()
            if self.lookahead.type == ID:
                id = self.lookahead.text
                self.consume()
        if self.lookahead.type == LCURLY:
            self.consume()
            while self.lookahead.type != RCURLY:
                self.parse_stmt()
            self.consume()
        return id
    def parse_stmt(self):
        if self.lookahead.type == GRAPH:
            # graph-level attribute defaults
            self.consume()
            attrs = self.parse_attrs()
            self.graph_attrs.update(attrs)
            self.handle_graph(attrs)
        elif self.lookahead.type == NODE:
            self.consume()
            self.node_attrs.update(self.parse_attrs())
        elif self.lookahead.type == EDGE:
            self.consume()
            self.edge_attrs.update(self.parse_attrs())
        elif self.lookahead.type in (SUBGRAPH, LCURLY):
            self.parse_subgraph()
        else:
            id = self.parse_node_id()
            if self.lookahead.type == EDGE_OP:
                # an edge chain: a -> b -> c [attrs]
                self.consume()
                node_ids = [id, self.parse_node_id()]
                while self.lookahead.type == EDGE_OP:
                    node_ids.append(self.parse_node_id())
                attrs = self.parse_attrs()
                for i in range(0, len(node_ids) - 1):
                    self.handle_edge(node_ids[i], node_ids[i + 1], attrs)
            elif self.lookahead.type == EQUAL:
                # a bare name = value assignment (value is discarded)
                self.consume()
                self.parse_id()
            else:
                # a node declaration
                attrs = self.parse_attrs()
                self.handle_node(id, attrs)
        if self.lookahead.type == SEMI:
            self.consume()
    def parse_attrs(self):
        # zero or more '[ name=value, ... ]' groups merged into one dict
        attrs = {}
        while self.lookahead.type == LSQUARE:
            self.consume()
            while self.lookahead.type != RSQUARE:
                name, value = self.parse_attr()
                attrs[name] = value
                if self.lookahead.type == COMMA:
                    self.consume()
            self.consume()
        return attrs
    def parse_attr(self):
        # 'name' alone means 'name=true'
        name = self.parse_id()
        if self.lookahead.type == EQUAL:
            self.consume()
            value = self.parse_id()
        else:
            value = 'true'
        return name, value
    def parse_node_id(self):
        node_id = self.parse_id()
        if self.lookahead.type == COLON:
            self.consume()
            port = self.parse_id()
            if self.lookahead.type == COLON:
                self.consume()
                compass_pt = self.parse_id()
            else:
                compass_pt = None
        else:
            port = None
            compass_pt = None
        # XXX: we don't really care about port and compass point values when parsing xdot
        return node_id
    def parse_id(self):
        self.match(ID)
        id = self.lookahead.text
        self.consume()
        return id
    def handle_graph(self, attrs):
        pass
    def handle_node(self, id, attrs):
        pass
    def handle_edge(self, src_id, dst_id, attrs):
        pass
class XDotParser(DotParser):
    """DotParser subclass that builds a drawable Graph from xdot output."""

    # Highest xdot format version this parser is known to understand.
    XDOTVERSION = '1.7'

    def __init__(self, xdotcode):
        lexer = DotLexer(buf = xdotcode)
        DotParser.__init__(self, lexer)
        self.nodes = []
        self.edges = []
        self.shapes = []        # graph-level (background) shapes
        self.node_by_name = {}  # node id -> Node, for resolving edge endpoints
        self.top_graph = True   # the first handle_graph call is the root graph

    def handle_graph(self, attrs):
        # The root graph carries the bounding box that fixes our transform;
        # subgraphs only contribute their draw attributes.
        if self.top_graph:
            # Check xdot version
            try:
                xdotversion = attrs['xdotversion']
            except KeyError:
                pass
            else:
                if float(xdotversion) > float(self.XDOTVERSION):
                    sys.stderr.write('warning: xdot version %s, but supported is %s\n' % (xdotversion, self.XDOTVERSION))
            # Parse bounding box
            try:
                bb = attrs['bb']
            except KeyError:
                return
            if bb:
                xmin, ymin, xmax, ymax = map(float, bb.split(","))
                # Negative yscale flips the y axis: graphviz uses a
                # bottom-left origin, cairo a top-left one.
                self.xoffset = -xmin
                self.yoffset = -ymax
                self.xscale = 1.0
                self.yscale = -1.0
                # FIXME: scale from points to pixels
                self.width = max(xmax - xmin, 1)
                self.height = max(ymax - ymin, 1)
                self.top_graph = False
        for attr in ("_draw_", "_ldraw_", "_hdraw_", "_tdraw_", "_hldraw_", "_tldraw_"):
            if attr in attrs:
                parser = XDotAttrParser(self, attrs[attr])
                self.shapes.extend(parser.parse())

    def handle_node(self, id, attrs):
        try:
            pos = attrs['pos']
        except KeyError:
            # Node without layout information: nothing to draw.
            return
        x, y = self.parse_node_pos(pos)
        # width/height attributes are in inches; convert to points (72/inch).
        w = float(attrs.get('width', 0))*72
        h = float(attrs.get('height', 0))*72
        shapes = []
        for attr in ("_draw_", "_ldraw_"):
            if attr in attrs:
                parser = XDotAttrParser(self, attrs[attr])
                shapes.extend(parser.parse())
        url = attrs.get('URL', None)
        node = Node(id, x, y, w, h, shapes, url)
        # Register even shapeless nodes so edges can resolve endpoints.
        self.node_by_name[id] = node
        if shapes:
            self.nodes.append(node)

    def handle_edge(self, src_id, dst_id, attrs):
        try:
            pos = attrs['pos']
        except KeyError:
            return
        points = self.parse_edge_pos(pos)
        shapes = []
        for attr in ("_draw_", "_ldraw_", "_hdraw_", "_tdraw_", "_hldraw_", "_tldraw_"):
            if attr in attrs:
                parser = XDotAttrParser(self, attrs[attr])
                shapes.extend(parser.parse())
        if shapes:
            src = self.node_by_name[src_id]
            dst = self.node_by_name[dst_id]
            self.edges.append(Edge(src, dst, points, shapes))

    def parse(self):
        """Run the parse and return the assembled Graph."""
        DotParser.parse(self)
        return Graph(self.width, self.height, self.shapes, self.nodes, self.edges)

    def parse_node_pos(self, pos):
        # pos is "x,y" in graphviz points; map into widget coordinates.
        x, y = pos.split(",")
        return self.transform(float(x), float(y))

    def parse_edge_pos(self, pos):
        # pos is a space-separated list of "x,y" spline control points;
        # entries with extra fields are arrow start/end markers.
        points = []
        for entry in pos.split(' '):
            fields = entry.split(',')
            try:
                x, y = fields
            except ValueError:
                # TODO: handle start/end points
                continue
            else:
                points.append(self.transform(float(x), float(y)))
        return points

    def transform(self, x, y):
        # XXX: this is not the right place for this code
        x = (x + self.xoffset)*self.xscale
        y = (y + self.yoffset)*self.yscale
        return x, y
class Animation(object):
    """Base class for widget animations driven by a gobject timeout."""

    step = 0.03  # tick interval, in seconds

    def __init__(self, dot_widget):
        self.dot_widget = dot_widget
        self.timeout_id = None

    def start(self):
        """Schedule tick() to fire every `step` seconds."""
        interval_ms = int(self.step * 1000)
        self.timeout_id = gobject.timeout_add(interval_ms, self.tick)

    def stop(self):
        """Cancel the timeout and hand the widget an inert animation."""
        self.dot_widget.animation = NoAnimation(self.dot_widget)
        if self.timeout_id is None:
            return
        gobject.source_remove(self.timeout_id)
        self.timeout_id = None

    def tick(self):
        # Default behaviour: a single tick ends the animation.
        self.stop()
class NoAnimation(Animation):
    """Inert animation: starting and stopping are both no-ops."""

    def start(self):
        """Do nothing."""

    def stop(self):
        """Do nothing."""
class LinearAnimation(Animation):
    """Animation whose progress t runs linearly from 0 to 1 over `duration`."""

    duration = 0.6  # total run time, in seconds

    def start(self):
        self.started = time.time()
        Animation.start(self)

    def tick(self):
        elapsed = time.time() - self.started
        t = elapsed / self.duration
        # Clamp so animate() always receives a value in [0, 1].
        self.animate(max(0, min(t, 1)))
        # Returning False removes the gobject timeout source.
        return t < 1

    def animate(self, t):
        """Override to render progress t in [0, 1]."""
        pass
class MoveToAnimation(LinearAnimation):
    """Linearly pan the widget from its current position to a target point."""

    def __init__(self, dot_widget, target_x, target_y):
        Animation.__init__(self, dot_widget)
        self.source_x = dot_widget.x
        self.source_y = dot_widget.y
        self.target_x = target_x
        self.target_y = target_y

    def animate(self, t):
        # Linear interpolation: source at t=0, target at t=1.
        widget = self.dot_widget
        widget.x = self.target_x * t + self.source_x * (1 - t)
        widget.y = self.target_y * t + self.source_y * (1 - t)
        widget.queue_draw()
class ZoomToAnimation(MoveToAnimation):
    """Pan animation that transiently zooms out when the target is far
    away, so the user keeps visual context during the jump."""

    def __init__(self, dot_widget, target_x, target_y):
        MoveToAnimation.__init__(self, dot_widget, target_x, target_y)
        self.source_zoom = dot_widget.zoom_ratio
        # Start and end zoom are the same; only the transient extra_zoom
        # term changes the ratio mid-flight.
        self.target_zoom = self.source_zoom
        self.extra_zoom = 0
        middle_zoom = 0.5 * (self.source_zoom + self.target_zoom)
        distance = math.hypot(self.source_x - self.target_x,
                              self.source_y - self.target_y)
        rect = self.dot_widget.get_allocation()
        # Size of the visible area in graph coordinates, with a 10% margin.
        visible = min(rect.width, rect.height) / self.dot_widget.zoom_ratio
        visible *= 0.9
        if distance > 0:
            desired_middle_zoom = visible / distance
            # Clamped to <= 0: the mid-animation bump only ever zooms out.
            self.extra_zoom = min(0, 4 * (desired_middle_zoom - middle_zoom))

    def animate(self, t):
        a, b, c = self.source_zoom, self.extra_zoom, self.target_zoom
        # Quadratic blend: a at t=0, c at t=1, with b*t*(1-t) adding the
        # transient zoom-out in between (peaks at t=0.5).
        self.dot_widget.zoom_ratio = c*t + b*t*(1-t) + a*(1-t)
        self.dot_widget.zoom_to_fit_on_resize = False
        MoveToAnimation.animate(self, t)
class DragAction(object):
    """Base class for mouse-drag interactions on the dot widget.

    Tracks press/motion/release coordinates and calls the start/drag/
    stop/abort/draw hooks, which subclasses override.
    """

    def __init__(self, dot_widget):
        self.dot_widget = dot_widget

    def on_button_press(self, event):
        self.startmousex = self.prevmousex = event.x
        self.startmousey = self.prevmousey = event.y
        self.start()

    def on_motion_notify(self, event):
        # Hint events carry no coordinates; query the pointer explicitly.
        if event.is_hint:
            x, y, state = event.window.get_pointer()
        else:
            x, y, state = event.x, event.y, event.state
        self.drag(self.prevmousex - x, self.prevmousey - y)
        self.prevmousex, self.prevmousey = x, y

    def on_button_release(self, event):
        self.stopmousex = event.x
        self.stopmousey = event.y
        self.stop()

    def draw(self, cr):
        """Draw an overlay for the action (e.g. a rubber band)."""
        pass

    def start(self):
        """Called when the drag begins."""
        pass

    def drag(self, deltax, deltay):
        """Called with the mouse delta for each motion event."""
        pass

    def stop(self):
        """Called when the drag finishes normally."""
        pass

    def abort(self):
        """Called when the drag is cancelled."""
        pass
class NullAction(DragAction):
    """Idle action: follows the pointer to update cursor and highlights."""

    def on_motion_notify(self, event):
        if event.is_hint:
            x, y, state = event.window.get_pointer()
        else:
            x, y, state = event.x, event.y, event.state
        widget = self.dot_widget
        # Prefer a URL element under the pointer, then a jump target.
        item = widget.get_url(x, y)
        if item is None:
            item = widget.get_jump(x, y)
        if item is None:
            widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.ARROW))
            widget.set_highlight(None)
        else:
            widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.HAND2))
            widget.set_highlight(item.highlight)
class PanAction(DragAction):
    """Drag action that pans the view, showing the 'move' (FLEUR) cursor."""

    def start(self):
        self.dot_widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.FLEUR))

    def drag(self, deltax, deltay):
        widget = self.dot_widget
        # Convert screen-space deltas into graph coordinates.
        widget.x += deltax / widget.zoom_ratio
        widget.y += deltay / widget.zoom_ratio
        widget.queue_draw()

    def stop(self):
        self.dot_widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.ARROW))

    # Aborting a pan needs only the same cursor restore as stopping.
    abort = stop
class ZoomAction(DragAction):
    """Drag action that zooms exponentially with total mouse travel."""

    def drag(self, deltax, deltay):
        widget = self.dot_widget
        widget.zoom_ratio *= 1.005 ** (deltax + deltay)
        # Manual zoom overrides any pending fit-on-resize behaviour.
        widget.zoom_to_fit_on_resize = False
        widget.queue_draw()

    def stop(self):
        self.dot_widget.queue_draw()
class ZoomAreaAction(DragAction):
    """Drag action that rubber-bands a rectangle and zooms to it on release."""

    def drag(self, deltax, deltay):
        self.dot_widget.queue_draw()

    def draw(self, cr):
        x0, y0 = self.startmousex, self.startmousey
        w = self.prevmousex - x0
        h = self.prevmousey - y0
        cr.save()
        # Translucent fill for the selection interior.
        cr.set_source_rgba(.5, .5, 1.0, 0.25)
        cr.rectangle(x0, y0, w, h)
        cr.fill()
        # Opaque one-pixel border, offset by half a pixel for crisp lines.
        cr.set_source_rgba(.5, .5, 1.0, 1.0)
        cr.set_line_width(1)
        cr.rectangle(x0 - .5, y0 - .5, w + 1, h + 1)
        cr.stroke()
        cr.restore()

    def stop(self):
        widget = self.dot_widget
        x1, y1 = widget.window2graph(self.startmousex, self.startmousey)
        x2, y2 = widget.window2graph(self.stopmousex, self.stopmousey)
        widget.zoom_to_area(x1, y1, x2, y2)

    def abort(self):
        self.dot_widget.queue_draw()
class DotWidget(gtk.DrawingArea):
    """PyGTK widget that draws dot graphs."""

    __gsignals__ = {
        'expose-event': 'override',
        'clicked' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING, gtk.gdk.Event))
    }

    # Default graphviz layout program used to turn dot source into xdot.
    filter = 'dot'

    def __init__(self):
        gtk.DrawingArea.__init__(self)
        self.graph = Graph()
        self.openfilename = None
        self.set_flags(gtk.CAN_FOCUS)
        self.add_events(gtk.gdk.BUTTON_PRESS_MASK | gtk.gdk.BUTTON_RELEASE_MASK)
        self.connect("button-press-event", self.on_area_button_press)
        self.connect("button-release-event", self.on_area_button_release)
        self.add_events(gtk.gdk.POINTER_MOTION_MASK | gtk.gdk.POINTER_MOTION_HINT_MASK | gtk.gdk.BUTTON_RELEASE_MASK)
        self.connect("motion-notify-event", self.on_area_motion_notify)
        self.connect("scroll-event", self.on_area_scroll_event)
        self.connect("size-allocate", self.on_area_size_allocate)
        self.connect('key-press-event', self.on_key_press_event)
        self.last_mtime = None
        # Poll the currently open file for modifications once per second.
        gobject.timeout_add(1000, self.update)
        self.x, self.y = 0.0, 0.0
        self.zoom_ratio = 1.0
        self.zoom_to_fit_on_resize = False
        self.animation = NoAnimation(self)
        self.drag_action = NullAction(self)
        self.presstime = None
        self.highlight = None

    def set_filter(self, filter):
        """Select the graphviz layout program ('dot', 'neato', ...) or None."""
        self.filter = filter

    def run_filter(self, dotcode):
        """Run dot source through the layout filter.

        Returns the xdot output, or None after showing an error dialog
        when the filter exits non-zero.
        """
        if not self.filter:
            return dotcode
        p = subprocess.Popen(
            [self.filter, '-Txdot'],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            shell=False,
            universal_newlines=True
        )
        xdotcode, error = p.communicate(dotcode)
        sys.stderr.write(error)
        if p.returncode != 0:
            dialog = gtk.MessageDialog(type=gtk.MESSAGE_ERROR,
                                       message_format=error,
                                       buttons=gtk.BUTTONS_OK)
            dialog.set_title('Dot Viewer')
            dialog.run()
            dialog.destroy()
            return None
        return xdotcode

    def set_dotcode(self, dotcode, filename=None):
        """Filter and display dot source; returns True on success."""
        self.openfilename = None
        if isinstance(dotcode, unicode):
            dotcode = dotcode.encode('utf8')
        xdotcode = self.run_filter(dotcode)
        if xdotcode is None:
            return False
        try:
            self.set_xdotcode(xdotcode)
        except ParseError as ex:
            dialog = gtk.MessageDialog(type=gtk.MESSAGE_ERROR,
                                       message_format=str(ex),
                                       buttons=gtk.BUTTONS_OK)
            dialog.set_title('Dot Viewer')
            dialog.run()
            dialog.destroy()
            return False
        else:
            if filename is None or filename == "<stdin>":
                self.last_mtime = None
                self.openfilename = None
            else:
                # Remember the mtime so update() can detect later edits.
                self.last_mtime = os.stat(filename).st_mtime
                self.openfilename = filename
            return True

    def set_xdotcode(self, xdotcode):
        """Parse xdot output and replace the displayed graph."""
        parser = XDotParser(xdotcode)
        self.graph = parser.parse()
        self.zoom_image(self.zoom_ratio, center=True)

    def reload(self):
        """Re-read and re-display the currently open file, if any."""
        if self.openfilename is not None:
            try:
                fp = file(self.openfilename, 'rt')
                self.set_dotcode(fp.read(), self.openfilename)
                fp.close()
            except IOError:
                # Best effort: the file may be mid-rewrite; retry next poll.
                pass

    def update(self):
        """Timeout callback: reload the open file when its mtime changes."""
        if self.openfilename is not None:
            current_mtime = os.stat(self.openfilename).st_mtime
            if current_mtime != self.last_mtime:
                self.last_mtime = current_mtime
                self.reload()
        # Keep the gobject timeout alive.
        return True

    def do_expose_event(self, event):
        cr = self.window.cairo_create()
        # set a clip region for the expose event
        cr.rectangle(
            event.area.x, event.area.y,
            event.area.width, event.area.height
        )
        cr.clip()
        cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
        cr.paint()
        cr.save()
        rect = self.get_allocation()
        cr.translate(0.5*rect.width, 0.5*rect.height)
        cr.scale(self.zoom_ratio, self.zoom_ratio)
        cr.translate(-self.x, -self.y)
        self.graph.draw(cr, highlight_items=self.highlight)
        cr.restore()
        # Let the active drag action draw its overlay (e.g. zoom rectangle).
        self.drag_action.draw(cr)
        return False

    def get_current_pos(self):
        return self.x, self.y

    def set_current_pos(self, x, y):
        self.x = x
        self.y = y
        self.queue_draw()

    def set_highlight(self, items):
        # Only redraw when the highlight set actually changes.
        if self.highlight != items:
            self.highlight = items
            self.queue_draw()

    def zoom_image(self, zoom_ratio, center=False, pos=None):
        # Constrain zoom ratio to a sane range to prevent numeric instability.
        zoom_ratio = min(zoom_ratio, 1E4)
        zoom_ratio = max(zoom_ratio, 1E-6)
        if center:
            self.x = self.graph.width/2
            self.y = self.graph.height/2
        elif pos is not None:
            # Keep the graph point under `pos` stationary while zooming.
            rect = self.get_allocation()
            x, y = pos
            x -= 0.5*rect.width
            y -= 0.5*rect.height
            self.x += x / self.zoom_ratio - x / zoom_ratio
            self.y += y / self.zoom_ratio - y / zoom_ratio
        self.zoom_ratio = zoom_ratio
        self.zoom_to_fit_on_resize = False
        self.queue_draw()

    def zoom_to_area(self, x1, y1, x2, y2):
        rect = self.get_allocation()
        width = abs(x1 - x2)
        height = abs(y1 - y2)
        if width == 0 and height == 0:
            # Degenerate selection: treat as a single zoom-in step.
            self.zoom_ratio *= self.ZOOM_INCREMENT
        else:
            self.zoom_ratio = min(
                float(rect.width)/float(width),
                float(rect.height)/float(height)
            )
        self.zoom_to_fit_on_resize = False
        self.x = (x1 + x2) / 2
        self.y = (y1 + y2) / 2
        self.queue_draw()

    def zoom_to_fit(self):
        rect = self.get_allocation()
        rect.x += self.ZOOM_TO_FIT_MARGIN
        rect.y += self.ZOOM_TO_FIT_MARGIN
        rect.width -= 2 * self.ZOOM_TO_FIT_MARGIN
        rect.height -= 2 * self.ZOOM_TO_FIT_MARGIN
        zoom_ratio = min(
            float(rect.width)/float(self.graph.width),
            float(rect.height)/float(self.graph.height)
        )
        self.zoom_image(zoom_ratio, center=True)
        self.zoom_to_fit_on_resize = True

    ZOOM_INCREMENT = 1.25
    ZOOM_TO_FIT_MARGIN = 12

    def on_zoom_in(self, action):
        self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)

    def on_zoom_out(self, action):
        self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT)

    def on_zoom_fit(self, action):
        self.zoom_to_fit()

    def on_zoom_100(self, action):
        self.zoom_image(1.0)

    POS_INCREMENT = 100

    def on_key_press_event(self, widget, event):
        """Keyboard shortcuts: arrows pan, +/- zoom, R reload, F focus find,
        Q quit, P print, Escape aborts the current drag action."""
        if event.keyval == gtk.keysyms.Left:
            self.x -= self.POS_INCREMENT/self.zoom_ratio
            self.queue_draw()
            return True
        if event.keyval == gtk.keysyms.Right:
            self.x += self.POS_INCREMENT/self.zoom_ratio
            self.queue_draw()
            return True
        if event.keyval == gtk.keysyms.Up:
            self.y -= self.POS_INCREMENT/self.zoom_ratio
            self.queue_draw()
            return True
        if event.keyval == gtk.keysyms.Down:
            self.y += self.POS_INCREMENT/self.zoom_ratio
            self.queue_draw()
            return True
        if event.keyval in (gtk.keysyms.Page_Up,
                            gtk.keysyms.plus,
                            gtk.keysyms.equal,
                            gtk.keysyms.KP_Add):
            self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)
            self.queue_draw()
            return True
        if event.keyval in (gtk.keysyms.Page_Down,
                            gtk.keysyms.minus,
                            gtk.keysyms.KP_Subtract):
            self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT)
            self.queue_draw()
            return True
        if event.keyval == gtk.keysyms.Escape:
            self.drag_action.abort()
            self.drag_action = NullAction(self)
            return True
        if event.keyval == gtk.keysyms.r:
            self.reload()
            return True
        if event.keyval == gtk.keysyms.f:
            win = widget.get_toplevel()
            find_toolitem = win.uimanager.get_widget('/ToolBar/Find')
            textentry = find_toolitem.get_children()
            win.set_focus(textentry[0])
            return True
        if event.keyval == gtk.keysyms.q:
            gtk.main_quit()
            return True
        if event.keyval == gtk.keysyms.p:
            self.on_print()
            return True
        return False

    # Remembered across print dialogs so the user's choices persist.
    print_settings = None

    def on_print(self, action=None):
        print_op = gtk.PrintOperation()
        if self.print_settings is not None:
            print_op.set_print_settings(self.print_settings)
        print_op.connect("begin_print", self.begin_print)
        print_op.connect("draw_page", self.draw_page)
        res = print_op.run(gtk.PRINT_OPERATION_ACTION_PRINT_DIALOG, self.parent.parent)
        if res == gtk.PRINT_OPERATION_RESULT_APPLY:
            # BUG FIX: was `print_settings = ...`, a dead local assignment;
            # store on the instance so the settings are reused next time.
            self.print_settings = print_op.get_print_settings()

    def begin_print(self, operation, context):
        operation.set_n_pages(1)
        return True

    def draw_page(self, operation, context, page_nr):
        """Render the current view onto the print context's cairo surface."""
        cr = context.get_cairo_context()
        rect = self.get_allocation()
        cr.translate(0.5*rect.width, 0.5*rect.height)
        cr.scale(self.zoom_ratio, self.zoom_ratio)
        cr.translate(-self.x, -self.y)
        self.graph.draw(cr, highlight_items=self.highlight)

    def get_drag_action(self, event):
        """Map a button-press event to the DragAction subclass to run."""
        state = event.state
        if event.button in (1, 2): # left or middle button
            if state & gtk.gdk.CONTROL_MASK:
                return ZoomAction
            elif state & gtk.gdk.SHIFT_MASK:
                return ZoomAreaAction
            else:
                return PanAction
        return NullAction

    def on_area_button_press(self, area, event):
        self.animation.stop()
        self.drag_action.abort()
        action_type = self.get_drag_action(event)
        self.drag_action = action_type(self)
        self.drag_action.on_button_press(event)
        # Remember press time/position so is_click() can distinguish
        # clicks from drags on release.
        self.presstime = time.time()
        self.pressx = event.x
        self.pressy = event.y
        return False

    def is_click(self, event, click_fuzz=4, click_timeout=1.0):
        assert event.type == gtk.gdk.BUTTON_RELEASE
        if self.presstime is None:
            # got a button release without seeing the press?
            return False
        # XXX instead of doing this complicated logic, shouldn't we listen
        # for gtk's clicked event instead?
        deltax = self.pressx - event.x
        deltay = self.pressy - event.y
        return (time.time() < self.presstime + click_timeout
                and math.hypot(deltax, deltay) < click_fuzz)

    def on_click(self, element, event):
        """Override this method in subclass to process
        click events. Note that element can be None
        (click on empty space)."""
        return False

    def on_area_button_release(self, area, event):
        self.drag_action.on_button_release(event)
        self.drag_action = NullAction(self)
        # HJO: need to allow right-click (button 3) in cylc:
        x, y = int(event.x), int(event.y)
        if self.is_click(event):
            el = self.get_element(x, y)
            if self.on_click(el, event):
                return True
            if event.button == 1 or event.button == 3:
                url = self.get_url(x, y)
                if event.button == 1:
                    # Left click: follow a jump target if there is one.
                    jump = self.get_jump(x, y)
                    if jump is not None:
                        self.animate_to(jump.x, jump.y)
                elif url is not None:
                    # Right click on a URL element: notify listeners.
                    self.emit('clicked', unicode(url.url), event)
                # NOTE(review): nesting of this branch was reconstructed from
                # whitespace-mangled source — confirm against the original.
                return True
        if event.button == 1 or event.button == 2:
            return True
        return False

    def on_area_scroll_event(self, area, event):
        if event.direction == gtk.gdk.SCROLL_UP:
            self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT,
                            pos=(event.x, event.y))
            return True
        if event.direction == gtk.gdk.SCROLL_DOWN:
            self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT,
                            pos=(event.x, event.y))
            return True
        return False

    def on_area_motion_notify(self, area, event):
        self.drag_action.on_motion_notify(event)
        return True

    def on_area_size_allocate(self, area, allocation):
        if self.zoom_to_fit_on_resize:
            self.zoom_to_fit()

    def animate_to(self, x, y):
        self.animation = ZoomToAnimation(self, x, y)
        self.animation.start()

    def window2graph(self, x, y):
        """Convert window (pixel) coordinates into graph coordinates."""
        rect = self.get_allocation()
        x -= 0.5*rect.width
        y -= 0.5*rect.height
        x /= self.zoom_ratio
        y /= self.zoom_ratio
        x += self.x
        y += self.y
        return x, y

    def get_element(self, x, y):
        x, y = self.window2graph(x, y)
        return self.graph.get_element(x, y)

    def get_url(self, x, y):
        x, y = self.window2graph(x, y)
        return self.graph.get_url(x, y)

    def get_jump(self, x, y):
        x, y = self.window2graph(x, y)
        return self.graph.get_jump(x, y)
class FindMenuToolAction(gtk.Action):
    """Action whose toolbar proxy is a bare ToolItem, used to host the
    find text entry inside the UIManager-built toolbar."""

    __gtype_name__ = "FindMenuToolAction"

    def __init__(self, *args, **kw):
        gtk.Action.__init__(self, *args, **kw)
        # Plain ToolItem proxy instead of the default ToolButton.
        self.set_tool_item_type(gtk.ToolItem)
class DotWindow(gtk.Window):
    """Top-level window hosting a DotWidget plus a toolbar with open,
    reload, print and zoom controls and a find-by-name text entry."""

    # UIManager markup describing the toolbar layout.
    ui = '''
<ui>
<toolbar name="ToolBar">
<toolitem action="Open"/>
<toolitem action="Reload"/>
<toolitem action="Print"/>
<separator/>
<toolitem action="ZoomIn"/>
<toolitem action="ZoomOut"/>
<toolitem action="ZoomFit"/>
<toolitem action="Zoom100"/>
<separator/>
<toolitem name="Find" action="Find"/>
</toolbar>
</ui>
'''

    base_title = 'Dot Viewer'

    def __init__(self, widget=None):
        gtk.Window.__init__(self)
        self.graph = Graph()
        window = self
        window.set_title(self.base_title)
        window.set_default_size(512, 512)
        vbox = gtk.VBox()
        window.add(vbox)
        # Allow callers to inject a DotWidget subclass.
        self.widget = widget or DotWidget()
        # Create a UIManager instance
        uimanager = self.uimanager = gtk.UIManager()
        # Add the accelerator group to the toplevel window
        accelgroup = uimanager.get_accel_group()
        window.add_accel_group(accelgroup)
        # Create an ActionGroup
        actiongroup = gtk.ActionGroup('Actions')
        self.actiongroup = actiongroup
        # Create actions
        actiongroup.add_actions((
            ('Open', gtk.STOCK_OPEN, None, None, None, self.on_open),
            ('Reload', gtk.STOCK_REFRESH, None, None, None, self.on_reload),
            ('Print', gtk.STOCK_PRINT, None, None, "Prints the currently visible part of the graph", self.widget.on_print),
            ('ZoomIn', gtk.STOCK_ZOOM_IN, None, None, None, self.widget.on_zoom_in),
            ('ZoomOut', gtk.STOCK_ZOOM_OUT, None, None, None, self.widget.on_zoom_out),
            ('ZoomFit', gtk.STOCK_ZOOM_FIT, None, None, None, self.widget.on_zoom_fit),
            ('Zoom100', gtk.STOCK_ZOOM_100, None, None, None, self.widget.on_zoom_100),
        ))
        find_action = FindMenuToolAction("Find", None,
                                         "Find a node by name", None)
        actiongroup.add_action(find_action)
        # Add the actiongroup to the uimanager
        uimanager.insert_action_group(actiongroup, 0)
        # Add a UI descrption
        uimanager.add_ui_from_string(self.ui)
        # Create a Toolbar
        toolbar = uimanager.get_widget('/ToolBar')
        vbox.pack_start(toolbar, False)
        vbox.pack_start(self.widget)
        self.last_open_dir = "."
        self.set_focus(self.widget)
        # Add Find text search
        find_toolitem = uimanager.get_widget('/ToolBar/Find')
        self.textentry = gtk.Entry(max=20)
        self.textentry.set_icon_from_stock(0, gtk.STOCK_FIND)
        find_toolitem.add(self.textentry)
        self.textentry.set_activates_default(True)
        self.textentry.connect ("activate", self.textentry_activate, self.textentry);
        self.textentry.connect ("changed", self.textentry_changed, self.textentry);
        self.show_all()

    def update(self, filename):
        # Reload `filename` when its mtime changes; suitable as a gobject
        # timeout callback (returning True keeps it scheduled).
        if not hasattr(self, "last_mtime"):
            self.last_mtime = None
        current_mtime = os.stat(filename).st_mtime
        if current_mtime != self.last_mtime:
            self.last_mtime = current_mtime
            self.open_file(filename)
        return True

    def find_text(self, entry_text):
        # Return all graph nodes whose text matches the regexp entry_text.
        found_items = []
        dot_widget = self.widget
        regexp = re.compile(entry_text)
        for node in dot_widget.graph.nodes:
            if node.search_text(regexp):
                found_items.append(node)
        return found_items

    def textentry_changed(self, widget, entry):
        # Live-highlight matches as the user types in the find box.
        entry_text = entry.get_text()
        dot_widget = self.widget
        if not entry_text:
            dot_widget.set_highlight(None)
            return
        found_items = self.find_text(entry_text)
        dot_widget.set_highlight(found_items)

    def textentry_activate(self, widget, entry):
        # On Enter: highlight matches and, if exactly one, pan/zoom to it.
        entry_text = entry.get_text()
        dot_widget = self.widget
        if not entry_text:
            dot_widget.set_highlight(None)
            return;
        found_items = self.find_text(entry_text)
        dot_widget.set_highlight(found_items)
        if(len(found_items) == 1):
            dot_widget.animate_to(found_items[0].x, found_items[0].y)

    def set_filter(self, filter):
        self.widget.set_filter(filter)

    def set_dotcode(self, dotcode, filename=None):
        # Delegate to the widget; fit the view on success.
        if self.widget.set_dotcode(dotcode, filename):
            self.widget.zoom_to_fit()

    def set_xdotcode(self, xdotcode, filename=None):
        if self.widget.set_xdotcode(xdotcode):
            self.update_title(filename)
            self.widget.zoom_to_fit()

    def update_title(self, filename=None):
        if filename is None:
            self.set_title(self.base_title)
        else:
            self.set_title(os.path.basename(filename) + ' - ' + self.base_title)

    def open_file(self, filename):
        # Read a dot file and display it; show an error dialog on IOError.
        try:
            fp = file(filename, 'rt')
            self.set_dotcode(fp.read(), filename)
            fp.close()
        except IOError as ex:
            dlg = gtk.MessageDialog(type=gtk.MESSAGE_ERROR,
                                    message_format=str(ex),
                                    buttons=gtk.BUTTONS_OK)
            dlg.set_title(self.base_title)
            dlg.run()
            dlg.destroy()

    def on_open(self, action):
        chooser = gtk.FileChooserDialog(title="Open dot File",
                                        action=gtk.FILE_CHOOSER_ACTION_OPEN,
                                        buttons=(gtk.STOCK_CANCEL,
                                                 gtk.RESPONSE_CANCEL,
                                                 gtk.STOCK_OPEN,
                                                 gtk.RESPONSE_OK))
        chooser.set_default_response(gtk.RESPONSE_OK)
        chooser.set_current_folder(self.last_open_dir)
        filter = gtk.FileFilter()
        filter.set_name("Graphviz dot files")
        filter.add_pattern("*.dot")
        chooser.add_filter(filter)
        filter = gtk.FileFilter()
        filter.set_name("All files")
        filter.add_pattern("*")
        chooser.add_filter(filter)
        if chooser.run() == gtk.RESPONSE_OK:
            filename = chooser.get_filename()
            # Remember the folder for the next open dialog.
            self.last_open_dir = chooser.get_current_folder()
            chooser.destroy()
            self.open_file(filename)
        else:
            chooser.destroy()

    def on_reload(self, action):
        self.widget.reload()
class OptionParser(optparse.OptionParser):
    """optparse.OptionParser variant that emits its epilog verbatim."""

    def format_epilog(self, formatter):
        # Prevent stripping the newlines in epilog message
        # http://stackoverflow.com/questions/1857346/python-optparse-how-to-include-additional-info-in-usage-output
        return self.epilog
def main():
    """Parse the command line, build a DotWindow and run the GTK main loop.

    Dot source comes from the named file, from stdin when the argument is
    '-', or from a piped stdin when no argument is given.
    """
    parser = OptionParser(
        usage='\n\t%prog [file]',
        epilog='''
Shortcuts:
  Up, Down, Left, Right     scroll
  PageUp, +, =              zoom in
  PageDown, -               zoom out
  R                         reload dot file
  F                         find
  Q                         quit
  P                         print
  Escape                    halt animation
  Ctrl-drag                 zoom in/out
  Shift-drag                zooms an area
'''
    )
    parser.add_option(
        '-f', '--filter',
        type='choice', choices=('dot', 'neato', 'twopi', 'circo', 'fdp'),
        dest='filter', default='dot',
        help='graphviz filter: dot, neato, twopi, circo, or fdp [default: %default]')
    parser.add_option(
        '-n', '--no-filter',
        action='store_const', const=None, dest='filter',
        help='assume input is already filtered into xdot format (use e.g. dot -Txdot)')
    (options, args) = parser.parse_args(sys.argv[1:])
    if len(args) > 1:
        parser.error('incorrect number of arguments')

    win = DotWindow()
    win.connect('destroy', gtk.main_quit)
    win.set_filter(options.filter)
    if len(args) == 0:
        # No file argument: read dot source from a pipe if one is attached.
        if not sys.stdin.isatty():
            win.set_dotcode(sys.stdin.read())
    elif args[0] == '-':
        win.set_dotcode(sys.stdin.read())
    else:
        win.open_file(args[0])
        # Fix: install the change-polling timeout only when a real file was
        # opened.  Previously it also ran for the stdin ('-') case, so
        # win.update tried to reload a file literally named '-'.
        # NOTE(review): assumes DotWindow.update(filename) re-reads the
        # file — it is defined elsewhere in this module; confirm.
        gobject.timeout_add(1000, win.update, args[0])
    gtk.main()
# Apache-Style Software License for ColorBrewer software and ColorBrewer Color
# Schemes, Version 1.1
#
# Copyright (c) 2002 Cynthia Brewer, Mark Harrower, and The Pennsylvania State
# University. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions as source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. The end-user documentation included with the redistribution, if any,
# must include the following acknowledgment:
#
# This product includes color specifications and designs developed by
# Cynthia Brewer (http://colorbrewer.org/).
#
# Alternately, this acknowledgment may appear in the software itself, if and
# wherever such third-party acknowledgments normally appear.
#
# 3. The name "ColorBrewer" must not be used to endorse or promote products
# derived from this software without prior written permission. For written
# permission, please contact Cynthia Brewer at cbrewer@psu.edu.
#
# 4. Products derived from this software may not be called "ColorBrewer",
# nor may "ColorBrewer" appear in their name, without prior written
# permission of Cynthia Brewer.
#
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESSED OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CYNTHIA
# BREWER, MARK HARROWER, OR THE PENNSYLVANIA STATE UNIVERSITY BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ColorBrewer palettes (license above): palette name -> ordered list of
# (r, g, b) tuples in the 0-255 range.  Keys are '<scheme><size>', e.g.
# 'blues5' is the 5-colour Blues scheme.
brewer_colors = {
    'accent3': [(127, 201, 127), (190, 174, 212), (253, 192, 134)],
    'accent4': [(127, 201, 127), (190, 174, 212), (253, 192, 134), (255, 255, 153)],
    'accent5': [(127, 201, 127), (190, 174, 212), (253, 192, 134), (255, 255, 153), (56, 108, 176)],
    'accent6': [(127, 201, 127), (190, 174, 212), (253, 192, 134), (255, 255, 153), (56, 108, 176), (240, 2, 127)],
    'accent7': [(127, 201, 127), (190, 174, 212), (253, 192, 134), (255, 255, 153), (56, 108, 176), (240, 2, 127), (191, 91, 23)],
    'accent8': [(127, 201, 127), (190, 174, 212), (253, 192, 134), (255, 255, 153), (56, 108, 176), (240, 2, 127), (191, 91, 23), (102, 102, 102)],
    'blues3': [(222, 235, 247), (158, 202, 225), (49, 130, 189)],
    'blues4': [(239, 243, 255), (189, 215, 231), (107, 174, 214), (33, 113, 181)],
    'blues5': [(239, 243, 255), (189, 215, 231), (107, 174, 214), (49, 130, 189), (8, 81, 156)],
    'blues6': [(239, 243, 255), (198, 219, 239), (158, 202, 225), (107, 174, 214), (49, 130, 189), (8, 81, 156)],
    'blues7': [(239, 243, 255), (198, 219, 239), (158, 202, 225), (107, 174, 214), (66, 146, 198), (33, 113, 181), (8, 69, 148)],
    'blues8': [(247, 251, 255), (222, 235, 247), (198, 219, 239), (158, 202, 225), (107, 174, 214), (66, 146, 198), (33, 113, 181), (8, 69, 148)],
    'blues9': [(247, 251, 255), (222, 235, 247), (198, 219, 239), (158, 202, 225), (107, 174, 214), (66, 146, 198), (33, 113, 181), (8, 81, 156), (8, 48, 107)],
    'brbg10': [(84, 48, 5), (0, 60, 48), (140, 81, 10), (191, 129, 45), (223, 194, 125), (246, 232, 195), (199, 234, 229), (128, 205, 193), (53, 151, 143), (1, 102, 94)],
    'brbg11': [(84, 48, 5), (1, 102, 94), (0, 60, 48), (140, 81, 10), (191, 129, 45), (223, 194, 125), (246, 232, 195), (245, 245, 245), (199, 234, 229), (128, 205, 193), (53, 151, 143)],
    'brbg3': [(216, 179, 101), (245, 245, 245), (90, 180, 172)],
    'brbg4': [(166, 97, 26), (223, 194, 125), (128, 205, 193), (1, 133, 113)],
    'brbg5': [(166, 97, 26), (223, 194, 125), (245, 245, 245), (128, 205, 193), (1, 133, 113)],
    'brbg6': [(140, 81, 10), (216, 179, 101), (246, 232, 195), (199, 234, 229), (90, 180, 172), (1, 102, 94)],
    'brbg7': [(140, 81, 10), (216, 179, 101), (246, 232, 195), (245, 245, 245), (199, 234, 229), (90, 180, 172), (1, 102, 94)],
    'brbg8': [(140, 81, 10), (191, 129, 45), (223, 194, 125), (246, 232, 195), (199, 234, 229), (128, 205, 193), (53, 151, 143), (1, 102, 94)],
    'brbg9': [(140, 81, 10), (191, 129, 45), (223, 194, 125), (246, 232, 195), (245, 245, 245), (199, 234, 229), (128, 205, 193), (53, 151, 143), (1, 102, 94)],
    'bugn3': [(229, 245, 249), (153, 216, 201), (44, 162, 95)],
    'bugn4': [(237, 248, 251), (178, 226, 226), (102, 194, 164), (35, 139, 69)],
    'bugn5': [(237, 248, 251), (178, 226, 226), (102, 194, 164), (44, 162, 95), (0, 109, 44)],
    'bugn6': [(237, 248, 251), (204, 236, 230), (153, 216, 201), (102, 194, 164), (44, 162, 95), (0, 109, 44)],
    'bugn7': [(237, 248, 251), (204, 236, 230), (153, 216, 201), (102, 194, 164), (65, 174, 118), (35, 139, 69), (0, 88, 36)],
    'bugn8': [(247, 252, 253), (229, 245, 249), (204, 236, 230), (153, 216, 201), (102, 194, 164), (65, 174, 118), (35, 139, 69), (0, 88, 36)],
    'bugn9': [(247, 252, 253), (229, 245, 249), (204, 236, 230), (153, 216, 201), (102, 194, 164), (65, 174, 118), (35, 139, 69), (0, 109, 44), (0, 68, 27)],
    'bupu3': [(224, 236, 244), (158, 188, 218), (136, 86, 167)],
    'bupu4': [(237, 248, 251), (179, 205, 227), (140, 150, 198), (136, 65, 157)],
    'bupu5': [(237, 248, 251), (179, 205, 227), (140, 150, 198), (136, 86, 167), (129, 15, 124)],
    'bupu6': [(237, 248, 251), (191, 211, 230), (158, 188, 218), (140, 150, 198), (136, 86, 167), (129, 15, 124)],
    'bupu7': [(237, 248, 251), (191, 211, 230), (158, 188, 218), (140, 150, 198), (140, 107, 177), (136, 65, 157), (110, 1, 107)],
    'bupu8': [(247, 252, 253), (224, 236, 244), (191, 211, 230), (158, 188, 218), (140, 150, 198), (140, 107, 177), (136, 65, 157), (110, 1, 107)],
    'bupu9': [(247, 252, 253), (224, 236, 244), (191, 211, 230), (158, 188, 218), (140, 150, 198), (140, 107, 177), (136, 65, 157), (129, 15, 124), (77, 0, 75)],
    'dark23': [(27, 158, 119), (217, 95, 2), (117, 112, 179)],
    'dark24': [(27, 158, 119), (217, 95, 2), (117, 112, 179), (231, 41, 138)],
    'dark25': [(27, 158, 119), (217, 95, 2), (117, 112, 179), (231, 41, 138), (102, 166, 30)],
    'dark26': [(27, 158, 119), (217, 95, 2), (117, 112, 179), (231, 41, 138), (102, 166, 30), (230, 171, 2)],
    'dark27': [(27, 158, 119), (217, 95, 2), (117, 112, 179), (231, 41, 138), (102, 166, 30), (230, 171, 2), (166, 118, 29)],
    'dark28': [(27, 158, 119), (217, 95, 2), (117, 112, 179), (231, 41, 138), (102, 166, 30), (230, 171, 2), (166, 118, 29), (102, 102, 102)],
    'gnbu3': [(224, 243, 219), (168, 221, 181), (67, 162, 202)],
    'gnbu4': [(240, 249, 232), (186, 228, 188), (123, 204, 196), (43, 140, 190)],
    'gnbu5': [(240, 249, 232), (186, 228, 188), (123, 204, 196), (67, 162, 202), (8, 104, 172)],
    'gnbu6': [(240, 249, 232), (204, 235, 197), (168, 221, 181), (123, 204, 196), (67, 162, 202), (8, 104, 172)],
    'gnbu7': [(240, 249, 232), (204, 235, 197), (168, 221, 181), (123, 204, 196), (78, 179, 211), (43, 140, 190), (8, 88, 158)],
    'gnbu8': [(247, 252, 240), (224, 243, 219), (204, 235, 197), (168, 221, 181), (123, 204, 196), (78, 179, 211), (43, 140, 190), (8, 88, 158)],
    'gnbu9': [(247, 252, 240), (224, 243, 219), (204, 235, 197), (168, 221, 181), (123, 204, 196), (78, 179, 211), (43, 140, 190), (8, 104, 172), (8, 64, 129)],
    'greens3': [(229, 245, 224), (161, 217, 155), (49, 163, 84)],
    'greens4': [(237, 248, 233), (186, 228, 179), (116, 196, 118), (35, 139, 69)],
    'greens5': [(237, 248, 233), (186, 228, 179), (116, 196, 118), (49, 163, 84), (0, 109, 44)],
    'greens6': [(237, 248, 233), (199, 233, 192), (161, 217, 155), (116, 196, 118), (49, 163, 84), (0, 109, 44)],
    'greens7': [(237, 248, 233), (199, 233, 192), (161, 217, 155), (116, 196, 118), (65, 171, 93), (35, 139, 69), (0, 90, 50)],
    'greens8': [(247, 252, 245), (229, 245, 224), (199, 233, 192), (161, 217, 155), (116, 196, 118), (65, 171, 93), (35, 139, 69), (0, 90, 50)],
    'greens9': [(247, 252, 245), (229, 245, 224), (199, 233, 192), (161, 217, 155), (116, 196, 118), (65, 171, 93), (35, 139, 69), (0, 109, 44), (0, 68, 27)],
    'greys3': [(240, 240, 240), (189, 189, 189), (99, 99, 99)],
    'greys4': [(247, 247, 247), (204, 204, 204), (150, 150, 150), (82, 82, 82)],
    'greys5': [(247, 247, 247), (204, 204, 204), (150, 150, 150), (99, 99, 99), (37, 37, 37)],
    'greys6': [(247, 247, 247), (217, 217, 217), (189, 189, 189), (150, 150, 150), (99, 99, 99), (37, 37, 37)],
    'greys7': [(247, 247, 247), (217, 217, 217), (189, 189, 189), (150, 150, 150), (115, 115, 115), (82, 82, 82), (37, 37, 37)],
    'greys8': [(255, 255, 255), (240, 240, 240), (217, 217, 217), (189, 189, 189), (150, 150, 150), (115, 115, 115), (82, 82, 82), (37, 37, 37)],
    'greys9': [(255, 255, 255), (240, 240, 240), (217, 217, 217), (189, 189, 189), (150, 150, 150), (115, 115, 115), (82, 82, 82), (37, 37, 37), (0, 0, 0)],
    'oranges3': [(254, 230, 206), (253, 174, 107), (230, 85, 13)],
    'oranges4': [(254, 237, 222), (253, 190, 133), (253, 141, 60), (217, 71, 1)],
    'oranges5': [(254, 237, 222), (253, 190, 133), (253, 141, 60), (230, 85, 13), (166, 54, 3)],
    'oranges6': [(254, 237, 222), (253, 208, 162), (253, 174, 107), (253, 141, 60), (230, 85, 13), (166, 54, 3)],
    'oranges7': [(254, 237, 222), (253, 208, 162), (253, 174, 107), (253, 141, 60), (241, 105, 19), (217, 72, 1), (140, 45, 4)],
    'oranges8': [(255, 245, 235), (254, 230, 206), (253, 208, 162), (253, 174, 107), (253, 141, 60), (241, 105, 19), (217, 72, 1), (140, 45, 4)],
    'oranges9': [(255, 245, 235), (254, 230, 206), (253, 208, 162), (253, 174, 107), (253, 141, 60), (241, 105, 19), (217, 72, 1), (166, 54, 3), (127, 39, 4)],
    'orrd3': [(254, 232, 200), (253, 187, 132), (227, 74, 51)],
    'orrd4': [(254, 240, 217), (253, 204, 138), (252, 141, 89), (215, 48, 31)],
    'orrd5': [(254, 240, 217), (253, 204, 138), (252, 141, 89), (227, 74, 51), (179, 0, 0)],
    'orrd6': [(254, 240, 217), (253, 212, 158), (253, 187, 132), (252, 141, 89), (227, 74, 51), (179, 0, 0)],
    'orrd7': [(254, 240, 217), (253, 212, 158), (253, 187, 132), (252, 141, 89), (239, 101, 72), (215, 48, 31), (153, 0, 0)],
    'orrd8': [(255, 247, 236), (254, 232, 200), (253, 212, 158), (253, 187, 132), (252, 141, 89), (239, 101, 72), (215, 48, 31), (153, 0, 0)],
    'orrd9': [(255, 247, 236), (254, 232, 200), (253, 212, 158), (253, 187, 132), (252, 141, 89), (239, 101, 72), (215, 48, 31), (179, 0, 0), (127, 0, 0)],
    'paired10': [(166, 206, 227), (106, 61, 154), (31, 120, 180), (178, 223, 138), (51, 160, 44), (251, 154, 153), (227, 26, 28), (253, 191, 111), (255, 127, 0), (202, 178, 214)],
    'paired11': [(166, 206, 227), (106, 61, 154), (255, 255, 153), (31, 120, 180), (178, 223, 138), (51, 160, 44), (251, 154, 153), (227, 26, 28), (253, 191, 111), (255, 127, 0), (202, 178, 214)],
    'paired12': [(166, 206, 227), (106, 61, 154), (255, 255, 153), (177, 89, 40), (31, 120, 180), (178, 223, 138), (51, 160, 44), (251, 154, 153), (227, 26, 28), (253, 191, 111), (255, 127, 0), (202, 178, 214)],
    'paired3': [(166, 206, 227), (31, 120, 180), (178, 223, 138)],
    'paired4': [(166, 206, 227), (31, 120, 180), (178, 223, 138), (51, 160, 44)],
    'paired5': [(166, 206, 227), (31, 120, 180), (178, 223, 138), (51, 160, 44), (251, 154, 153)],
    'paired6': [(166, 206, 227), (31, 120, 180), (178, 223, 138), (51, 160, 44), (251, 154, 153), (227, 26, 28)],
    'paired7': [(166, 206, 227), (31, 120, 180), (178, 223, 138), (51, 160, 44), (251, 154, 153), (227, 26, 28), (253, 191, 111)],
    'paired8': [(166, 206, 227), (31, 120, 180), (178, 223, 138), (51, 160, 44), (251, 154, 153), (227, 26, 28), (253, 191, 111), (255, 127, 0)],
    'paired9': [(166, 206, 227), (31, 120, 180), (178, 223, 138), (51, 160, 44), (251, 154, 153), (227, 26, 28), (253, 191, 111), (255, 127, 0), (202, 178, 214)],
    'pastel13': [(251, 180, 174), (179, 205, 227), (204, 235, 197)],
    'pastel14': [(251, 180, 174), (179, 205, 227), (204, 235, 197), (222, 203, 228)],
    'pastel15': [(251, 180, 174), (179, 205, 227), (204, 235, 197), (222, 203, 228), (254, 217, 166)],
    'pastel16': [(251, 180, 174), (179, 205, 227), (204, 235, 197), (222, 203, 228), (254, 217, 166), (255, 255, 204)],
    'pastel17': [(251, 180, 174), (179, 205, 227), (204, 235, 197), (222, 203, 228), (254, 217, 166), (255, 255, 204), (229, 216, 189)],
    'pastel18': [(251, 180, 174), (179, 205, 227), (204, 235, 197), (222, 203, 228), (254, 217, 166), (255, 255, 204), (229, 216, 189), (253, 218, 236)],
    'pastel19': [(251, 180, 174), (179, 205, 227), (204, 235, 197), (222, 203, 228), (254, 217, 166), (255, 255, 204), (229, 216, 189), (253, 218, 236), (242, 242, 242)],
    'pastel23': [(179, 226, 205), (253, 205, 172), (203, 213, 232)],
    'pastel24': [(179, 226, 205), (253, 205, 172), (203, 213, 232), (244, 202, 228)],
    'pastel25': [(179, 226, 205), (253, 205, 172), (203, 213, 232), (244, 202, 228), (230, 245, 201)],
    'pastel26': [(179, 226, 205), (253, 205, 172), (203, 213, 232), (244, 202, 228), (230, 245, 201), (255, 242, 174)],
    'pastel27': [(179, 226, 205), (253, 205, 172), (203, 213, 232), (244, 202, 228), (230, 245, 201), (255, 242, 174), (241, 226, 204)],
    'pastel28': [(179, 226, 205), (253, 205, 172), (203, 213, 232), (244, 202, 228), (230, 245, 201), (255, 242, 174), (241, 226, 204), (204, 204, 204)],
    'piyg10': [(142, 1, 82), (39, 100, 25), (197, 27, 125), (222, 119, 174), (241, 182, 218), (253, 224, 239), (230, 245, 208), (184, 225, 134), (127, 188, 65), (77, 146, 33)],
    'piyg11': [(142, 1, 82), (77, 146, 33), (39, 100, 25), (197, 27, 125), (222, 119, 174), (241, 182, 218), (253, 224, 239), (247, 247, 247), (230, 245, 208), (184, 225, 134), (127, 188, 65)],
    'piyg3': [(233, 163, 201), (247, 247, 247), (161, 215, 106)],
    'piyg4': [(208, 28, 139), (241, 182, 218), (184, 225, 134), (77, 172, 38)],
    'piyg5': [(208, 28, 139), (241, 182, 218), (247, 247, 247), (184, 225, 134), (77, 172, 38)],
    'piyg6': [(197, 27, 125), (233, 163, 201), (253, 224, 239), (230, 245, 208), (161, 215, 106), (77, 146, 33)],
    'piyg7': [(197, 27, 125), (233, 163, 201), (253, 224, 239), (247, 247, 247), (230, 245, 208), (161, 215, 106), (77, 146, 33)],
    'piyg8': [(197, 27, 125), (222, 119, 174), (241, 182, 218), (253, 224, 239), (230, 245, 208), (184, 225, 134), (127, 188, 65), (77, 146, 33)],
    'piyg9': [(197, 27, 125), (222, 119, 174), (241, 182, 218), (253, 224, 239), (247, 247, 247), (230, 245, 208), (184, 225, 134), (127, 188, 65), (77, 146, 33)],
    'prgn10': [(64, 0, 75), (0, 68, 27), (118, 42, 131), (153, 112, 171), (194, 165, 207), (231, 212, 232), (217, 240, 211), (166, 219, 160), (90, 174, 97), (27, 120, 55)],
    'prgn11': [(64, 0, 75), (27, 120, 55), (0, 68, 27), (118, 42, 131), (153, 112, 171), (194, 165, 207), (231, 212, 232), (247, 247, 247), (217, 240, 211), (166, 219, 160), (90, 174, 97)],
    'prgn3': [(175, 141, 195), (247, 247, 247), (127, 191, 123)],
    'prgn4': [(123, 50, 148), (194, 165, 207), (166, 219, 160), (0, 136, 55)],
    'prgn5': [(123, 50, 148), (194, 165, 207), (247, 247, 247), (166, 219, 160), (0, 136, 55)],
    'prgn6': [(118, 42, 131), (175, 141, 195), (231, 212, 232), (217, 240, 211), (127, 191, 123), (27, 120, 55)],
    'prgn7': [(118, 42, 131), (175, 141, 195), (231, 212, 232), (247, 247, 247), (217, 240, 211), (127, 191, 123), (27, 120, 55)],
    'prgn8': [(118, 42, 131), (153, 112, 171), (194, 165, 207), (231, 212, 232), (217, 240, 211), (166, 219, 160), (90, 174, 97), (27, 120, 55)],
    'prgn9': [(118, 42, 131), (153, 112, 171), (194, 165, 207), (231, 212, 232), (247, 247, 247), (217, 240, 211), (166, 219, 160), (90, 174, 97), (27, 120, 55)],
    'pubu3': [(236, 231, 242), (166, 189, 219), (43, 140, 190)],
    'pubu4': [(241, 238, 246), (189, 201, 225), (116, 169, 207), (5, 112, 176)],
    'pubu5': [(241, 238, 246), (189, 201, 225), (116, 169, 207), (43, 140, 190), (4, 90, 141)],
    'pubu6': [(241, 238, 246), (208, 209, 230), (166, 189, 219), (116, 169, 207), (43, 140, 190), (4, 90, 141)],
    'pubu7': [(241, 238, 246), (208, 209, 230), (166, 189, 219), (116, 169, 207), (54, 144, 192), (5, 112, 176), (3, 78, 123)],
    'pubu8': [(255, 247, 251), (236, 231, 242), (208, 209, 230), (166, 189, 219), (116, 169, 207), (54, 144, 192), (5, 112, 176), (3, 78, 123)],
    'pubu9': [(255, 247, 251), (236, 231, 242), (208, 209, 230), (166, 189, 219), (116, 169, 207), (54, 144, 192), (5, 112, 176), (4, 90, 141), (2, 56, 88)],
    'pubugn3': [(236, 226, 240), (166, 189, 219), (28, 144, 153)],
    'pubugn4': [(246, 239, 247), (189, 201, 225), (103, 169, 207), (2, 129, 138)],
    'pubugn5': [(246, 239, 247), (189, 201, 225), (103, 169, 207), (28, 144, 153), (1, 108, 89)],
    'pubugn6': [(246, 239, 247), (208, 209, 230), (166, 189, 219), (103, 169, 207), (28, 144, 153), (1, 108, 89)],
    'pubugn7': [(246, 239, 247), (208, 209, 230), (166, 189, 219), (103, 169, 207), (54, 144, 192), (2, 129, 138), (1, 100, 80)],
    'pubugn8': [(255, 247, 251), (236, 226, 240), (208, 209, 230), (166, 189, 219), (103, 169, 207), (54, 144, 192), (2, 129, 138), (1, 100, 80)],
    'pubugn9': [(255, 247, 251), (236, 226, 240), (208, 209, 230), (166, 189, 219), (103, 169, 207), (54, 144, 192), (2, 129, 138), (1, 108, 89), (1, 70, 54)],
    'puor10': [(127, 59, 8), (45, 0, 75), (179, 88, 6), (224, 130, 20), (253, 184, 99), (254, 224, 182), (216, 218, 235), (178, 171, 210), (128, 115, 172), (84, 39, 136)],
    'puor11': [(127, 59, 8), (84, 39, 136), (45, 0, 75), (179, 88, 6), (224, 130, 20), (253, 184, 99), (254, 224, 182), (247, 247, 247), (216, 218, 235), (178, 171, 210), (128, 115, 172)],
    'puor3': [(241, 163, 64), (247, 247, 247), (153, 142, 195)],
    'puor4': [(230, 97, 1), (253, 184, 99), (178, 171, 210), (94, 60, 153)],
    'puor5': [(230, 97, 1), (253, 184, 99), (247, 247, 247), (178, 171, 210), (94, 60, 153)],
    'puor6': [(179, 88, 6), (241, 163, 64), (254, 224, 182), (216, 218, 235), (153, 142, 195), (84, 39, 136)],
    'puor7': [(179, 88, 6), (241, 163, 64), (254, 224, 182), (247, 247, 247), (216, 218, 235), (153, 142, 195), (84, 39, 136)],
    'puor8': [(179, 88, 6), (224, 130, 20), (253, 184, 99), (254, 224, 182), (216, 218, 235), (178, 171, 210), (128, 115, 172), (84, 39, 136)],
    'puor9': [(179, 88, 6), (224, 130, 20), (253, 184, 99), (254, 224, 182), (247, 247, 247), (216, 218, 235), (178, 171, 210), (128, 115, 172), (84, 39, 136)],
    'purd3': [(231, 225, 239), (201, 148, 199), (221, 28, 119)],
    'purd4': [(241, 238, 246), (215, 181, 216), (223, 101, 176), (206, 18, 86)],
    'purd5': [(241, 238, 246), (215, 181, 216), (223, 101, 176), (221, 28, 119), (152, 0, 67)],
    'purd6': [(241, 238, 246), (212, 185, 218), (201, 148, 199), (223, 101, 176), (221, 28, 119), (152, 0, 67)],
    'purd7': [(241, 238, 246), (212, 185, 218), (201, 148, 199), (223, 101, 176), (231, 41, 138), (206, 18, 86), (145, 0, 63)],
    'purd8': [(247, 244, 249), (231, 225, 239), (212, 185, 218), (201, 148, 199), (223, 101, 176), (231, 41, 138), (206, 18, 86), (145, 0, 63)],
    'purd9': [(247, 244, 249), (231, 225, 239), (212, 185, 218), (201, 148, 199), (223, 101, 176), (231, 41, 138), (206, 18, 86), (152, 0, 67), (103, 0, 31)],
    'purples3': [(239, 237, 245), (188, 189, 220), (117, 107, 177)],
    'purples4': [(242, 240, 247), (203, 201, 226), (158, 154, 200), (106, 81, 163)],
    'purples5': [(242, 240, 247), (203, 201, 226), (158, 154, 200), (117, 107, 177), (84, 39, 143)],
    'purples6': [(242, 240, 247), (218, 218, 235), (188, 189, 220), (158, 154, 200), (117, 107, 177), (84, 39, 143)],
    'purples7': [(242, 240, 247), (218, 218, 235), (188, 189, 220), (158, 154, 200), (128, 125, 186), (106, 81, 163), (74, 20, 134)],
    'purples8': [(252, 251, 253), (239, 237, 245), (218, 218, 235), (188, 189, 220), (158, 154, 200), (128, 125, 186), (106, 81, 163), (74, 20, 134)],
    'purples9': [(252, 251, 253), (239, 237, 245), (218, 218, 235), (188, 189, 220), (158, 154, 200), (128, 125, 186), (106, 81, 163), (84, 39, 143), (63, 0, 125)],
    'rdbu10': [(103, 0, 31), (5, 48, 97), (178, 24, 43), (214, 96, 77), (244, 165, 130), (253, 219, 199), (209, 229, 240), (146, 197, 222), (67, 147, 195), (33, 102, 172)],
    'rdbu11': [(103, 0, 31), (33, 102, 172), (5, 48, 97), (178, 24, 43), (214, 96, 77), (244, 165, 130), (253, 219, 199), (247, 247, 247), (209, 229, 240), (146, 197, 222), (67, 147, 195)],
    'rdbu3': [(239, 138, 98), (247, 247, 247), (103, 169, 207)],
    'rdbu4': [(202, 0, 32), (244, 165, 130), (146, 197, 222), (5, 113, 176)],
    'rdbu5': [(202, 0, 32), (244, 165, 130), (247, 247, 247), (146, 197, 222), (5, 113, 176)],
    'rdbu6': [(178, 24, 43), (239, 138, 98), (253, 219, 199), (209, 229, 240), (103, 169, 207), (33, 102, 172)],
    'rdbu7': [(178, 24, 43), (239, 138, 98), (253, 219, 199), (247, 247, 247), (209, 229, 240), (103, 169, 207), (33, 102, 172)],
    'rdbu8': [(178, 24, 43), (214, 96, 77), (244, 165, 130), (253, 219, 199), (209, 229, 240), (146, 197, 222), (67, 147, 195), (33, 102, 172)],
    'rdbu9': [(178, 24, 43), (214, 96, 77), (244, 165, 130), (253, 219, 199), (247, 247, 247), (209, 229, 240), (146, 197, 222), (67, 147, 195), (33, 102, 172)],
    'rdgy10': [(103, 0, 31), (26, 26, 26), (178, 24, 43), (214, 96, 77), (244, 165, 130), (253, 219, 199), (224, 224, 224), (186, 186, 186), (135, 135, 135), (77, 77, 77)],
    'rdgy11': [(103, 0, 31), (77, 77, 77), (26, 26, 26), (178, 24, 43), (214, 96, 77), (244, 165, 130), (253, 219, 199), (255, 255, 255), (224, 224, 224), (186, 186, 186), (135, 135, 135)],
    'rdgy3': [(239, 138, 98), (255, 255, 255), (153, 153, 153)],
    'rdgy4': [(202, 0, 32), (244, 165, 130), (186, 186, 186), (64, 64, 64)],
    'rdgy5': [(202, 0, 32), (244, 165, 130), (255, 255, 255), (186, 186, 186), (64, 64, 64)],
    'rdgy6': [(178, 24, 43), (239, 138, 98), (253, 219, 199), (224, 224, 224), (153, 153, 153), (77, 77, 77)],
    'rdgy7': [(178, 24, 43), (239, 138, 98), (253, 219, 199), (255, 255, 255), (224, 224, 224), (153, 153, 153), (77, 77, 77)],
    'rdgy8': [(178, 24, 43), (214, 96, 77), (244, 165, 130), (253, 219, 199), (224, 224, 224), (186, 186, 186), (135, 135, 135), (77, 77, 77)],
    'rdgy9': [(178, 24, 43), (214, 96, 77), (244, 165, 130), (253, 219, 199), (255, 255, 255), (224, 224, 224), (186, 186, 186), (135, 135, 135), (77, 77, 77)],
    'rdpu3': [(253, 224, 221), (250, 159, 181), (197, 27, 138)],
    'rdpu4': [(254, 235, 226), (251, 180, 185), (247, 104, 161), (174, 1, 126)],
    'rdpu5': [(254, 235, 226), (251, 180, 185), (247, 104, 161), (197, 27, 138), (122, 1, 119)],
    'rdpu6': [(254, 235, 226), (252, 197, 192), (250, 159, 181), (247, 104, 161), (197, 27, 138), (122, 1, 119)],
    'rdpu7': [(254, 235, 226), (252, 197, 192), (250, 159, 181), (247, 104, 161), (221, 52, 151), (174, 1, 126), (122, 1, 119)],
    'rdpu8': [(255, 247, 243), (253, 224, 221), (252, 197, 192), (250, 159, 181), (247, 104, 161), (221, 52, 151), (174, 1, 126), (122, 1, 119)],
    'rdpu9': [(255, 247, 243), (253, 224, 221), (252, 197, 192), (250, 159, 181), (247, 104, 161), (221, 52, 151), (174, 1, 126), (122, 1, 119), (73, 0, 106)],
    'rdylbu10': [(165, 0, 38), (49, 54, 149), (215, 48, 39), (244, 109, 67), (253, 174, 97), (254, 224, 144), (224, 243, 248), (171, 217, 233), (116, 173, 209), (69, 117, 180)],
    'rdylbu11': [(165, 0, 38), (69, 117, 180), (49, 54, 149), (215, 48, 39), (244, 109, 67), (253, 174, 97), (254, 224, 144), (255, 255, 191), (224, 243, 248), (171, 217, 233), (116, 173, 209)],
    'rdylbu3': [(252, 141, 89), (255, 255, 191), (145, 191, 219)],
    'rdylbu4': [(215, 25, 28), (253, 174, 97), (171, 217, 233), (44, 123, 182)],
    'rdylbu5': [(215, 25, 28), (253, 174, 97), (255, 255, 191), (171, 217, 233), (44, 123, 182)],
    'rdylbu6': [(215, 48, 39), (252, 141, 89), (254, 224, 144), (224, 243, 248), (145, 191, 219), (69, 117, 180)],
    'rdylbu7': [(215, 48, 39), (252, 141, 89), (254, 224, 144), (255, 255, 191), (224, 243, 248), (145, 191, 219), (69, 117, 180)],
    'rdylbu8': [(215, 48, 39), (244, 109, 67), (253, 174, 97), (254, 224, 144), (224, 243, 248), (171, 217, 233), (116, 173, 209), (69, 117, 180)],
    'rdylbu9': [(215, 48, 39), (244, 109, 67), (253, 174, 97), (254, 224, 144), (255, 255, 191), (224, 243, 248), (171, 217, 233), (116, 173, 209), (69, 117, 180)],
    'rdylgn10': [(165, 0, 38), (0, 104, 55), (215, 48, 39), (244, 109, 67), (253, 174, 97), (254, 224, 139), (217, 239, 139), (166, 217, 106), (102, 189, 99), (26, 152, 80)],
    'rdylgn11': [(165, 0, 38), (26, 152, 80), (0, 104, 55), (215, 48, 39), (244, 109, 67), (253, 174, 97), (254, 224, 139), (255, 255, 191), (217, 239, 139), (166, 217, 106), (102, 189, 99)],
    'rdylgn3': [(252, 141, 89), (255, 255, 191), (145, 207, 96)],
    'rdylgn4': [(215, 25, 28), (253, 174, 97), (166, 217, 106), (26, 150, 65)],
    'rdylgn5': [(215, 25, 28), (253, 174, 97), (255, 255, 191), (166, 217, 106), (26, 150, 65)],
    'rdylgn6': [(215, 48, 39), (252, 141, 89), (254, 224, 139), (217, 239, 139), (145, 207, 96), (26, 152, 80)],
    'rdylgn7': [(215, 48, 39), (252, 141, 89), (254, 224, 139), (255, 255, 191), (217, 239, 139), (145, 207, 96), (26, 152, 80)],
    'rdylgn8': [(215, 48, 39), (244, 109, 67), (253, 174, 97), (254, 224, 139), (217, 239, 139), (166, 217, 106), (102, 189, 99), (26, 152, 80)],
    'rdylgn9': [(215, 48, 39), (244, 109, 67), (253, 174, 97), (254, 224, 139), (255, 255, 191), (217, 239, 139), (166, 217, 106), (102, 189, 99), (26, 152, 80)],
    'reds3': [(254, 224, 210), (252, 146, 114), (222, 45, 38)],
    'reds4': [(254, 229, 217), (252, 174, 145), (251, 106, 74), (203, 24, 29)],
    'reds5': [(254, 229, 217), (252, 174, 145), (251, 106, 74), (222, 45, 38), (165, 15, 21)],
    'reds6': [(254, 229, 217), (252, 187, 161), (252, 146, 114), (251, 106, 74), (222, 45, 38), (165, 15, 21)],
    'reds7': [(254, 229, 217), (252, 187, 161), (252, 146, 114), (251, 106, 74), (239, 59, 44), (203, 24, 29), (153, 0, 13)],
    'reds8': [(255, 245, 240), (254, 224, 210), (252, 187, 161), (252, 146, 114), (251, 106, 74), (239, 59, 44), (203, 24, 29), (153, 0, 13)],
    'reds9': [(255, 245, 240), (254, 224, 210), (252, 187, 161), (252, 146, 114), (251, 106, 74), (239, 59, 44), (203, 24, 29), (165, 15, 21), (103, 0, 13)],
    'set13': [(228, 26, 28), (55, 126, 184), (77, 175, 74)],
    'set14': [(228, 26, 28), (55, 126, 184), (77, 175, 74), (152, 78, 163)],
    'set15': [(228, 26, 28), (55, 126, 184), (77, 175, 74), (152, 78, 163), (255, 127, 0)],
    'set16': [(228, 26, 28), (55, 126, 184), (77, 175, 74), (152, 78, 163), (255, 127, 0), (255, 255, 51)],
    'set17': [(228, 26, 28), (55, 126, 184), (77, 175, 74), (152, 78, 163), (255, 127, 0), (255, 255, 51), (166, 86, 40)],
    'set18': [(228, 26, 28), (55, 126, 184), (77, 175, 74), (152, 78, 163), (255, 127, 0), (255, 255, 51), (166, 86, 40), (247, 129, 191)],
    'set19': [(228, 26, 28), (55, 126, 184), (77, 175, 74), (152, 78, 163), (255, 127, 0), (255, 255, 51), (166, 86, 40), (247, 129, 191), (153, 153, 153)],
    'set23': [(102, 194, 165), (252, 141, 98), (141, 160, 203)],
    'set24': [(102, 194, 165), (252, 141, 98), (141, 160, 203), (231, 138, 195)],
    'set25': [(102, 194, 165), (252, 141, 98), (141, 160, 203), (231, 138, 195), (166, 216, 84)],
    'set26': [(102, 194, 165), (252, 141, 98), (141, 160, 203), (231, 138, 195), (166, 216, 84), (255, 217, 47)],
    'set27': [(102, 194, 165), (252, 141, 98), (141, 160, 203), (231, 138, 195), (166, 216, 84), (255, 217, 47), (229, 196, 148)],
    'set28': [(102, 194, 165), (252, 141, 98), (141, 160, 203), (231, 138, 195), (166, 216, 84), (255, 217, 47), (229, 196, 148), (179, 179, 179)],
    'set310': [(141, 211, 199), (188, 128, 189), (255, 255, 179), (190, 186, 218), (251, 128, 114), (128, 177, 211), (253, 180, 98), (179, 222, 105), (252, 205, 229), (217, 217, 217)],
    'set311': [(141, 211, 199), (188, 128, 189), (204, 235, 197), (255, 255, 179), (190, 186, 218), (251, 128, 114), (128, 177, 211), (253, 180, 98), (179, 222, 105), (252, 205, 229), (217, 217, 217)],
    'set312': [(141, 211, 199), (188, 128, 189), (204, 235, 197), (255, 237, 111), (255, 255, 179), (190, 186, 218), (251, 128, 114), (128, 177, 211), (253, 180, 98), (179, 222, 105), (252, 205, 229), (217, 217, 217)],
    'set33': [(141, 211, 199), (255, 255, 179), (190, 186, 218)],
    'set34': [(141, 211, 199), (255, 255, 179), (190, 186, 218), (251, 128, 114)],
    'set35': [(141, 211, 199), (255, 255, 179), (190, 186, 218), (251, 128, 114), (128, 177, 211)],
    'set36': [(141, 211, 199), (255, 255, 179), (190, 186, 218), (251, 128, 114), (128, 177, 211), (253, 180, 98)],
    'set37': [(141, 211, 199), (255, 255, 179), (190, 186, 218), (251, 128, 114), (128, 177, 211), (253, 180, 98), (179, 222, 105)],
    'set38': [(141, 211, 199), (255, 255, 179), (190, 186, 218), (251, 128, 114), (128, 177, 211), (253, 180, 98), (179, 222, 105), (252, 205, 229)],
    'set39': [(141, 211, 199), (255, 255, 179), (190, 186, 218), (251, 128, 114), (128, 177, 211), (253, 180, 98), (179, 222, 105), (252, 205, 229), (217, 217, 217)],
    'spectral10': [(158, 1, 66), (94, 79, 162), (213, 62, 79), (244, 109, 67), (253, 174, 97), (254, 224, 139), (230, 245, 152), (171, 221, 164), (102, 194, 165), (50, 136, 189)],
    'spectral11': [(158, 1, 66), (50, 136, 189), (94, 79, 162), (213, 62, 79), (244, 109, 67), (253, 174, 97), (254, 224, 139), (255, 255, 191), (230, 245, 152), (171, 221, 164), (102, 194, 165)],
    'spectral3': [(252, 141, 89), (255, 255, 191), (153, 213, 148)],
    'spectral4': [(215, 25, 28), (253, 174, 97), (171, 221, 164), (43, 131, 186)],
    'spectral5': [(215, 25, 28), (253, 174, 97), (255, 255, 191), (171, 221, 164), (43, 131, 186)],
    'spectral6': [(213, 62, 79), (252, 141, 89), (254, 224, 139), (230, 245, 152), (153, 213, 148), (50, 136, 189)],
    'spectral7': [(213, 62, 79), (252, 141, 89), (254, 224, 139), (255, 255, 191), (230, 245, 152), (153, 213, 148), (50, 136, 189)],
    'spectral8': [(213, 62, 79), (244, 109, 67), (253, 174, 97), (254, 224, 139), (230, 245, 152), (171, 221, 164), (102, 194, 165), (50, 136, 189)],
    'spectral9': [(213, 62, 79), (244, 109, 67), (253, 174, 97), (254, 224, 139), (255, 255, 191), (230, 245, 152), (171, 221, 164), (102, 194, 165), (50, 136, 189)],
    'ylgn3': [(247, 252, 185), (173, 221, 142), (49, 163, 84)],
    'ylgn4': [(255, 255, 204), (194, 230, 153), (120, 198, 121), (35, 132, 67)],
    'ylgn5': [(255, 255, 204), (194, 230, 153), (120, 198, 121), (49, 163, 84), (0, 104, 55)],
    'ylgn6': [(255, 255, 204), (217, 240, 163), (173, 221, 142), (120, 198, 121), (49, 163, 84), (0, 104, 55)],
    'ylgn7': [(255, 255, 204), (217, 240, 163), (173, 221, 142), (120, 198, 121), (65, 171, 93), (35, 132, 67), (0, 90, 50)],
    'ylgn8': [(255, 255, 229), (247, 252, 185), (217, 240, 163), (173, 221, 142), (120, 198, 121), (65, 171, 93), (35, 132, 67), (0, 90, 50)],
    'ylgn9': [(255, 255, 229), (247, 252, 185), (217, 240, 163), (173, 221, 142), (120, 198, 121), (65, 171, 93), (35, 132, 67), (0, 104, 55), (0, 69, 41)],
    'ylgnbu3': [(237, 248, 177), (127, 205, 187), (44, 127, 184)],
    'ylgnbu4': [(255, 255, 204), (161, 218, 180), (65, 182, 196), (34, 94, 168)],
    'ylgnbu5': [(255, 255, 204), (161, 218, 180), (65, 182, 196), (44, 127, 184), (37, 52, 148)],
    'ylgnbu6': [(255, 255, 204), (199, 233, 180), (127, 205, 187), (65, 182, 196), (44, 127, 184), (37, 52, 148)],
    'ylgnbu7': [(255, 255, 204), (199, 233, 180), (127, 205, 187), (65, 182, 196), (29, 145, 192), (34, 94, 168), (12, 44, 132)],
    'ylgnbu8': [(255, 255, 217), (237, 248, 177), (199, 233, 180), (127, 205, 187), (65, 182, 196), (29, 145, 192), (34, 94, 168), (12, 44, 132)],
    'ylgnbu9': [(255, 255, 217), (237, 248, 177), (199, 233, 180), (127, 205, 187), (65, 182, 196), (29, 145, 192), (34, 94, 168), (37, 52, 148), (8, 29, 88)],
    'ylorbr3': [(255, 247, 188), (254, 196, 79), (217, 95, 14)],
    'ylorbr4': [(255, 255, 212), (254, 217, 142), (254, 153, 41), (204, 76, 2)],
    'ylorbr5': [(255, 255, 212), (254, 217, 142), (254, 153, 41), (217, 95, 14), (153, 52, 4)],
    'ylorbr6': [(255, 255, 212), (254, 227, 145), (254, 196, 79), (254, 153, 41), (217, 95, 14), (153, 52, 4)],
    'ylorbr7': [(255, 255, 212), (254, 227, 145), (254, 196, 79), (254, 153, 41), (236, 112, 20), (204, 76, 2), (140, 45, 4)],
    'ylorbr8': [(255, 255, 229), (255, 247, 188), (254, 227, 145), (254, 196, 79), (254, 153, 41), (236, 112, 20), (204, 76, 2), (140, 45, 4)],
    'ylorbr9': [(255, 255, 229), (255, 247, 188), (254, 227, 145), (254, 196, 79), (254, 153, 41), (236, 112, 20), (204, 76, 2), (153, 52, 4), (102, 37, 6)],
    'ylorrd3': [(255, 237, 160), (254, 178, 76), (240, 59, 32)],
    'ylorrd4': [(255, 255, 178), (254, 204, 92), (253, 141, 60), (227, 26, 28)],
    'ylorrd5': [(255, 255, 178), (254, 204, 92), (253, 141, 60), (240, 59, 32), (189, 0, 38)],
    'ylorrd6': [(255, 255, 178), (254, 217, 118), (254, 178, 76), (253, 141, 60), (240, 59, 32), (189, 0, 38)],
    'ylorrd7': [(255, 255, 178), (254, 217, 118), (254, 178, 76), (253, 141, 60), (252, 78, 42), (227, 26, 28), (177, 0, 38)],
    'ylorrd8': [(255, 255, 204), (255, 237, 160), (254, 217, 118), (254, 178, 76), (253, 141, 60), (252, 78, 42), (227, 26, 28), (177, 0, 38)],
}
# Allow running this module directly as the xdot viewer application.
if __name__ == '__main__':
    main()
| jonnyhtw/cylc | lib/xdot.py | Python | gpl-3.0 | 97,799 |
"""
Code taken directly from http://preshing.com/20110822/penrose-tiling-in-obfuscated-python/
"""
_ =\
"""if!
1:"e,V=100
0,(0j-1)**-.2;
v,S=.5/ V.real,
[(0,0,4 *e,4*e*
V)];w=1 -v"def!
E(T,A, B,C):P
,Q,R=B*w+ A*v,B*w+C
*v,A*w+B*v;retur n[(1,Q,C,A),(1,P
,Q,B),(0,Q,P,A)]*T+[(0,C ,R,B),(1,R,C,A)]*(1-T)"f
or!i!in!_[:11]:S =sum([E (*x)for !x!in!S],[])"imp
ort!cair o!as!O; s=O.Ima geSurfac
e(1,e,e) ;c=O.Con text(s); M,L,G=c.
move_to ,c.line_to,c.s et_sour
ce_rgb a"def!z(f,a) :f(-a.
imag,a. real-e-e)"for!T,A,B,C!in[i !for!i!
in!S!if!i[""";exec(reduce(lambda x,i:x.replace(chr
(i),"\n "[34-i:]), range( 35),_+"""0]]:z(M,A
);z(L,B);z (L,C); c.close_pa
th()"G (.4,.3 ,1);c.
paint( );G(.7 ,.7,1)
;c.fil l()"fo r!i!in
!range (9):"! g=1-i/
8;d=i/ 4*g;G(d,d,d, 1-g*.8
)"!def !y(f,a):z(f,a+(1+2j)*( 1j**(i
/2.))*g)"!for!T,A,B,C!in!S:y(M,C);y(L,A);y(M
,A);y(L,B)"!c.st roke()"s.write_t
o_png('pen rose.png')
""" ))
| ccsplit/misc | penrose.py | Python | mit | 1,785 |
import numpy as np
import os

# Default to the bundled 50-d GloVe vectors shipped alongside this module.
DEFAULT_FILE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "datasets", "glove.6B.50d.txt")


def loadWordVectors(tokens, filepath=DEFAULT_FILE_PATH, dimensions=50):
    """Read pretrained GloVe vectors.

    tokens maps each word of interest to its row index in the returned
    matrix; words absent from the GloVe file keep all-zero rows.  Raises
    RuntimeError if a matching line does not have `dimensions` values.
    """
    vectors = np.zeros((len(tokens), dimensions))
    with open(filepath) as glove_file:
        for raw_line in glove_file:
            fields = raw_line.strip().split()
            # Skip blank lines entirely.
            if not fields:
                continue
            word = fields[0]
            # Only keep vectors for the words we were asked about.
            if word not in tokens:
                continue
            values = [float(v) for v in fields[1:]]
            if len(values) != dimensions:
                raise RuntimeError("wrong number of dimensions")
            vectors[tokens[word]] = np.asarray(values)
    return vectors
| IAAAIAAIA/studyGroup | steffen/assignment1/utils/glove.py | Python | mit | 799 |
__author__ = 'teemupitkanen1'
import numpy as np
# Read the semicolon-separated paleo data; drop the header row and the
# trailing empty string left by the file's final newline.
data=open("../data/paleo.csv","r").read().split("\n")
del data[0]
data.pop()
# Create array "accuracy". accuracy[i,j] = places after decimal points for data record i, field 10+j
accuracy = []
for i in range(len(data)):
    # Sentinel codes per field: -1 = empty, -2 = '#DIV/0!', otherwise the
    # number of digits after the decimal comma (0 if none).
    row=[0,0,0]
    record = data[i].split(";")
    for indx in range(0,3):
        if record[10+indx]=='':
            row[indx]=-1
        elif record[10+indx]=='#DIV/0!':
            row[indx]=-2
        else:
            # The raw data uses a comma as the decimal separator.
            temp = record[10+indx].split(",")
            if len(temp)==2:
                row[indx]=len(temp[1])
    # Set length of fields with arbitrary values to -3
    record[0]='0'
    for k in range(len(record)):
        if record[k]=='' or record[k]=='#DIV/0!':
            record[k]='0'
        else:
            record[k]=record[k].replace(",",".")
            record[k]=float(record[k])
    # Fields 10..12 appear to be the ratios 4/3, 4/5 and 6/3 rounded to the
    # detected number of decimals; where that does not hold, the raw field
    # value is stored instead.  NOTE(review): fields replaced by the string
    # '0' above are never converted to float (the conversion is in the else
    # branch), so the ==0 comparisons below are False for them -- confirm
    # this is the intended behaviour.
    if record[3]==0:
        if record[10]!=0:
            row[0]=record[10]
        if record[12]!=0:
            row[2]=record[12]
    else:
        if record[10] != round(record[4]/record[3], row[0]):
            row[0]=record[10]
        if record[12] != round(record[6]/record[3], row[2]):
            row[2]=record[12]
    if record[5]==0:
        if record[11]!=0:
            row[1]=record[11]
    elif record[11] != round(record[4]/record[5], row[1]):
        # NOTE(review): '=+' is just '=' with a unary plus -- probably a typo
        # for '=' (matching the other branches) or '+='; behaviour equals '='.
        row[1]=+record[11]
    row = [str(x) for x in row]
    accuracy.append(row)
    print(row)
np.savetxt("a",accuracy,'%s')
import os
import logging
import logging.handlers
import multiprocessing
class CentralLogger(multiprocessing.Process):
    """Logging process. Works with python
    subprocesses and multiprocessing.

    Receives (level, message) tuples over a queue and writes them to a
    rotating log file; a tuple with a None level shuts the process down.
    """
    def __init__(self, queue):
        """Reads messages from a queue
        and logs them to a file.
        :queue: the log queue
        """
        multiprocessing.Process.__init__(self)
        self._queue = queue
        self.LOGFILE_NAME = 'log/pydzen.log'
        self._log = logging.getLogger('pydzen')
        # NOTE(review): level WARN filters out the info() start/shutdown
        # markers below -- confirm whether that is intended.
        self._log.setLevel(logging.WARN)
        self._handler = logging.handlers.RotatingFileHandler(
            self.LOGFILE_NAME, maxBytes=10*1024*1024, backupCount=3
        )
        self._log.addHandler(self._handler)
        self._log.info('** Central Logger process started **')

    def run(self):
        """Consume (level, message) tuples until a None level arrives."""
        while True:
            log_level, message = self._queue.get()
            if log_level is None:
                self._log.info('** Shutting down Central Logger **')
                break
            self._log.log(log_level, message)
        # Bug fix: the previous implementation called self.terminate() here.
        # The multiprocessing docs state terminate() must only be called by
        # the process that created the Process object; simply returning from
        # run() ends the child process cleanly.
class Logger():
    """Queue-backed logging facade for CentralLogger.

    Drop-in replacement for logging.getLogger(): each method enqueues a
    (level, message) tuple for the CentralLogger process to write.

    Example:
        from log.centrallogger import Logger
        logger = Logger(config.LOG_QUEUE)
        try:
            ...
        except Exception as e:
            logger.exception(e)
    """
    def __init__(self, queue):
        self._queue = queue

    def log(self, log_level, msg):
        """Enqueue *msg* at an arbitrary log level."""
        self._queue.put((log_level, msg))

    def info(self, msg):
        self.log(logging.INFO, msg)

    def debug(self, msg):
        self.log(logging.DEBUG, msg)

    def warn(self, msg):
        self.log(logging.WARN, msg)

    def error(self, msg):
        self.log(logging.ERROR, msg)

    def exception(self, msg):
        # Exceptions are recorded at CRITICAL severity.
        self.log(logging.CRITICAL, msg)

    def quit(self):
        """Signal the CentralLogger process to shut down."""
        self._queue.put((None, 'Quit'))
| Dissonant-Tech/pydzen | log/centrallogger.py | Python | gpl-3.0 | 2,034 |
import os
import re
import sys
import copy
import pickle
from urllib.request import urlopen
from urllib.error import HTTPError
from urllib.parse import urlencode
# pylast is an optional dependency: last.fm support is only enabled when it
# imports successfully (see check_lastfm_password below).
try:
    import pylast
    has_pylast = True
except ImportError:
    has_pylast = False
import pafy
from . import g, c, paths, util
# True when running on Windows; used to pick player executable suffixes.
mswin = os.name == "nt"
class ConfigItem:
    """ A configuration item.
    Holds a single named setting: its default and current value, its type
    (inferred from the default), optional numeric bounds, an optional
    validation callback, and an optional list of allowed values.
    """
    def __init__(self, name, value, minval=None, maxval=None, check_fn=None,
                 require_known_player=False, allowed_values=None):
        """ If specified, the check_fn should return a dict.
        {valid: bool, message: success/fail message, value: value to set}
        """
        self.default = self.value = value
        self.name = name
        # The default's type governs how set() parses user input.
        self.type = type(value)
        self.maxval, self.minval = maxval, minval
        self.check_fn = check_fn
        self.require_known_player = require_known_player
        self.allowed_values = []
        if allowed_values:
            self.allowed_values = allowed_values
    @property
    def get(self):
        """ Return value. """
        return self.value
    @property
    def display(self):
        """ Return value in a format suitable for display. """
        retval = self.value
        # Append units / encoder name for the settings that need them.
        if self.name == "max_res":
            retval = str(retval) + "p"
        if self.name == "encoder":
            retval = str(retval) + " [%s]" % (str(g.encoders[retval]['name']))
        return retval
    def set(self, value):
        """ Set value with checks.
        Parses *value* (a user-supplied string) according to this item's
        type, validates it, stores it and saves the config on success.
        Returns a human-readable success or failure message either way.
        """
        # note: fail_msg should contain %s %s for self.name, value
        # success_msg should not
        # pylint: disable=R0912
        # too many branches
        success_msg = fail_msg = ""
        value = value.strip()
        value_orig = value
        # handle known player not set
        if self.allowed_values and value not in self.allowed_values:
            fail_msg = "%s must be one of * - not %s"
            allowed_values = copy.copy(self.allowed_values)
            if '' in allowed_values:
                allowed_values[allowed_values.index('')] = "<nothing>"
            fail_msg = fail_msg.replace("*", ", ".join(allowed_values))
        if self.require_known_player and \
                not util.is_known_player(Config.PLAYER.get):
            fail_msg = "%s requires mpv or mplayer, can't set to %s"
        # handle true / false values
        elif self.type == bool:
            if value.upper() in "0 OFF NO DISABLED FALSE".split():
                value = False
                success_msg = "%s set to False" % c.c("g", self.name)
            elif value.upper() in "1 ON YES ENABLED TRUE".split():
                value = True
                success_msg = "%s set to True" % c.c("g", self.name)
            else:
                fail_msg = "%s requires True/False, got %s"
        # handle int values
        elif self.type == int:
            if not value.isdigit():
                fail_msg = "%s requires a number, got %s"
            else:
                value = int(value)
                if self.maxval and self.minval:
                    if not self.minval <= value <= self.maxval:
                        m = " must be between %s and %s, got "
                        m = m % (self.minval, self.maxval)
                        fail_msg = "%s" + m + "%s"
                if not fail_msg:
                    dispval = value or "None"
                    success_msg = "%s set to %s" % (c.c("g", self.name),
                                                    dispval)
        # handle space separated list
        elif self.type == list:
            success_msg = "%s set to %s" % (c.c("g", self.name), value)
            value = value.split()
        # handle string values
        elif self.type == str:
            dispval = value or "None"
            success_msg = "%s set to %s" % (c.c("g", self.name),
                                            c.c("g", dispval))
        # handle failure
        if fail_msg:
            failed_val = value_orig.strip() or "<nothing>"
            colvals = c.y + self.name + c.w, c.y + failed_val + c.w
            return fail_msg % colvals
        elif self.check_fn:
            # Delegate final validation to the item's callback, which may
            # also substitute a cleaned-up value.
            checked = self.check_fn(value)
            value = checked.get("value") or value
            if checked['valid']:
                value = checked.get("value", value)
                self.value = value
                Config.save()
                return checked.get("message", success_msg)
            else:
                return checked.get('message', fail_msg)
        elif success_msg:
            self.value = value
            Config.save()
            return success_msg
def check_console_width(val):
    """ Show ruler to check console width. """
    ruler = "-" * val
    advice = "console_width set to %s, try a lower value if above line ove"\
             "rlaps" % val
    return dict(valid=True, message=ruler + "\n" + advice)
def check_api_key(key):
    """ Validate an API key by calling an API endpoint with no quota cost """
    url = "https://www.googleapis.com/youtube/v3/i18nLanguages"
    query = {"part": "snippet", "fields": "items/id", "key": key}
    try:
        urlopen(url + "?" + urlencode(query)).read()
        message = "The key, '" + key + "' will now be used for API requests."
        # Make pafy use the same api key.
        # (Bug fix: this previously read Config.API_KEY.get, which still holds
        # the *old* key at this point -- ConfigItem.set() only stores the new
        # value after this check function reports it valid.)
        pafy.set_api_key(key)
        return dict(valid=True, message=message)
    except HTTPError:
        message = "Invalid key or quota exceeded, '" + key + "'"
        return dict(valid=False, message=message)
def check_ddir(d):
    """ Check whether dir is a valid directory. """
    expanded = os.path.expanduser(d)
    if not os.path.isdir(expanded):
        return dict(valid=False,
                    message="Not a valid directory: " + c.r + d + c.w)
    return dict(valid=True,
                message="Downloads will be saved to " + c.y + d + c.w,
                value=expanded)
def check_win_pos(pos):
    """ Check window position input. """
    # Blank input means "leave unset" and is always valid.
    if not pos.strip():
        return dict(valid=True, message="Window position not set (default)")
    match = re.match(r"(TOP|BOTTOM).?(LEFT|RIGHT)", pos.lower(), re.I)
    if match is None:
        return dict(valid=False,
                    message="Try something like top-left or bottom-right (or default)")
    # Normalise to a "vertical-horizontal" string, e.g. "top-left".
    normalised = "%s-%s" % match.groups()
    return dict(valid=True,
                message="Window position set to %s" % normalised,
                value=normalised)
def check_win_size(size):
    """ Check window size input. """
    # Blank input means "leave unset" and is always valid.
    if not size.strip():
        return dict(valid=True, message="Window size not set (default)")
    lowered = size.lower()
    # Expect something of the form WIDTHxHEIGHT, e.g. 720x480.
    if re.match(r"\d{1,4}x\d{1,4}", lowered, re.I):
        return dict(valid=True, value=lowered)
    return dict(valid=False, message="Try something like 720x480")
def check_encoder(option):
    """ Check encoder value is acceptable. """
    encs = g.encoders
    # An encoder is selected by its index into the known encoder list.
    if option < len(encs):
        msg = "Encoder set to %s%s%s" % (c.y, encs[option]['name'], c.w)
        return dict(valid=True, message=msg)
    msg = ("%s%s%s is too high, type %sencoders%s to see valid values"
           % (c.y, option, c.w, c.g, c.w))
    return dict(valid=False, message=msg)
def check_player(player):
    """ Check player exefile exists and get mpv version.

    On Windows, a bare player name is retried with ".com" and ".exe"
    appended (for mpv, mpv.com is preferred over mpv.exe).
    """
    if util.has_exefile(player):
        # Bug fix: removed a stray debug print(player) that leaked the raw
        # player path to stdout on every successful check.
        util.assign_player(player)
        if "mpv" in player:
            version = "%s.%s.%s" % g.mpv_version
            fmt = c.g, c.w, c.g, c.w, version
            msg = "%splayer%s set to %smpv%s (version %s)" % fmt
            return dict(valid=True, message=msg, value=player)
        else:
            msg = "%splayer%s set to %s%s%s" % (c.g, c.w, c.g, player, c.w)
            return dict(valid=True, message=msg, value=player)
    else:
        if mswin and not (player.endswith(".exe") or player.endswith(".com")):
            # Using mpv.exe has issues; use mpv.com
            if "mpv" in player:
                retval = check_player(player + ".com")
                if retval["valid"]:
                    return retval
            return check_player(player + ".exe")
        else:
            msg = "Player application %s%s%s not found" % (c.r, player, c.w)
            return dict(valid=False, message=msg)
def check_lastfm_password(password):
    """ Hash the supplied last.fm password, if pylast is available. """
    if not has_pylast:
        return dict(valid=False, message="pylast not installed")
    # Store the md5 hash rather than the plaintext password.
    return dict(valid=True, value=pylast.md5(password))
class _Config:
    """ Holds various configuration values. """
    # Every supported setting with its default value and validation rules.
    # The order of this list fixes the order settings are displayed in.
    _configitems = [
        ConfigItem("order", "relevance",
                   allowed_values="relevance date views rating title".split()),
        ConfigItem("user_order", "", allowed_values =
                   [""] + "relevance date views rating".split()),
        ConfigItem("max_results", 19, maxval=50, minval=1),
        ConfigItem("console_width", 80, minval=70,
                   maxval=880, check_fn=check_console_width),
        ConfigItem("max_res", 2160, minval=192, maxval=2160),
        ConfigItem("player", "mplayer" + ".exe" * mswin,
                   check_fn=check_player),
        ConfigItem("playerargs", ""),
        ConfigItem("encoder", 0, minval=0, check_fn=check_encoder),
        ConfigItem("notifier", ""),
        ConfigItem("checkupdate", True),
        ConfigItem("show_player_keys", True, require_known_player=True),
        ConfigItem("fullscreen", False, require_known_player=True),
        ConfigItem("show_status", True),
        ConfigItem("columns", ""),
        ConfigItem("ddir", paths.get_default_ddir(), check_fn=check_ddir),
        ConfigItem("overwrite", True),
        ConfigItem("show_video", False),
        ConfigItem("search_music", True),
        ConfigItem("window_pos", "", check_fn=check_win_pos,
                   require_known_player=True),
        ConfigItem("window_size", "",
                   check_fn=check_win_size, require_known_player=True),
        ConfigItem("download_command", ''),
        ConfigItem("lastfm_username", ''),
        ConfigItem("lastfm_password", '', check_fn=check_lastfm_password),
        ConfigItem("lastfm_api_key", ''),
        ConfigItem("lastfm_api_secret", ''),
        ConfigItem("audio_format", "auto",
                   allowed_values="auto webm m4a".split()),
        ConfigItem("video_format", "auto",
                   allowed_values="auto webm mp4 3gp".split()),
        ConfigItem("api_key", "AIzaSyCIM4EzNqi1in22f4Z3Ru3iYvLaY8tc3bo",
                   check_fn=check_api_key),
        ConfigItem("autoplay", False),
        ConfigItem("set_title", True),
        ConfigItem("mpris", not mswin),
    ]
    def __getitem__(self, key):
        """ Look up a ConfigItem by its upper-cased name, e.g. Config["PLAYER"]. """
        # TODO: Possibly more efficient algorithm, w/ caching
        for i in self._configitems:
            if i.name.upper() == key:
                return i
        raise KeyError
    def __getattr__(self, name):
        # Allows attribute-style access, e.g. Config.PLAYER.
        try:
            return self[name]
        except KeyError:
            raise AttributeError
    def __iter__(self):
        # Iterate over the upper-cased names of all settings.
        return (i.name.upper() for i in self._configitems)
    def save(self):
        """ Save current config to file. """
        config = {setting: self[setting].value for setting in self}
        with open(g.CFFILE, "wb") as cf:
            pickle.dump(config, cf, protocol=2)
        util.dbg(c.p + "Saved config: " + g.CFFILE + c.w)
    def load(self):
        """ Override config if config file exists.
        NOTE(review): the saved config is unpickled; this is only safe as
        long as g.CFFILE lives in the user's own config directory.
        """
        if os.path.exists(g.CFFILE):
            with open(g.CFFILE, "rb") as cf:
                saved_config = pickle.load(cf)
            for k, v in saved_config.items():
                try:
                    self[k].value = v
                except KeyError: # Ignore unrecognised data in config
                    util.dbg("Unrecognised config item: %s", k)
        # Update config files from versions <= 0.01.41
        # (playerargs used to be stored as a list; migrate to a string and
        # drop arguments that are now supplied automatically)
        if isinstance(self.PLAYERARGS.get, list):
            self.WINDOW_POS.value = "top-right"
            redundant = ("-really-quiet --really-quiet -prefer-ipv4 -nolirc "
                         "-fs --fs".split())
            for r in redundant:
                util.dbg("removing redundant arg %s", r)
                util.list_update(r, self.PLAYERARGS.value, remove=True)
            self.PLAYERARGS.value = " ".join(self.PLAYERARGS.get)
            self.save()
Config = _Config()
del _Config # _Config is a singleton and should not have more instances
# Prevent module from being deleted
# http://stackoverflow.com/questions/5365562/why-is-the-value-of-name-changing-after-assignment-to-sys-modules-name
# Keep a reference to the real module object so it is not garbage-collected
# when its sys.modules entry is replaced below.
ref = sys.modules[__name__]
# Any module trying to import config will get the Config object instead
sys.modules[__name__] = Config
| np1/mps-youtube | mps_youtube/config.py | Python | gpl-3.0 | 13,150 |
import csv
from datetime import datetime

from matplotlib import pyplot as plt

# Read dates and daily high/low temperatures from the weather data file.
filename = 'sitka_weather_2017.csv'
with open(filename) as f:
    reader = csv.reader(f)
    next(reader)  # Skip the header row.
    dates, highs, lows = [], [], []
    for row in reader:
        dates.append(datetime.strptime(row[0], "%Y-%m-%d"))
        highs.append(int(row[1]))
        lows.append(int(row[3]))

# Plot the two temperature series, shading the region between them.
fig = plt.figure(dpi=128, figsize=(10, 6))
plt.plot(dates, highs, c='red', alpha=0.5)
plt.plot(dates, lows, c='blue', alpha=0.5)
plt.fill_between(dates, highs, lows, facecolor='blue', alpha=0.1)

# Format plot.
plt.title("Daily high and low temperatures - 2017", fontsize=24)
plt.xlabel('', fontsize=16)
fig.autofmt_xdate()
plt.ylabel("Temperature (F)", fontsize=16)
plt.tick_params(axis='both', which='major', labelsize=16)
plt.show()
| helanan/Panda_Prospecting | panda_prospecting/prospecting/insights/high_lows.py | Python | mit | 966 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from metrics import memory
from metrics import Metric
from telemetry.value import scalar
class SystemMemoryMetric(Metric):
  """SystemMemoryMetric gathers system memory statistics.
  This metric collects system memory stats per test. It reports the
  difference (delta) in system memory from the start of the test to its end.
  """
  def __init__(self, browser):
    super(SystemMemoryMetric, self).__init__()
    self._browser = browser
    # Snapshots taken by Start()/Stop(); each is a dict mapping process type
    # (e.g. 'Browser', 'Renderer') to a dict of memory metrics -- see
    # _SubtractMemoryStats below.
    self._memory_stats_start = None
    self._memory_stats_end = None
  def Start(self, page, tab):
    """Start the per-page preparation for this metric.
    Records the system memory stats at this point.
    """
    self._memory_stats_start = self._browser.memory_stats
  def Stop(self, page, tab):
    """Prepare the results for this page.
    The results are the differences between the current system memory stats
    and the values when Start() was called.
    """
    assert self._memory_stats_start, 'Must call Start() first'
    self._memory_stats_end = self._browser.memory_stats
  # |trace_name| and |exclude_metrics| args are not in base class Metric.
  # pylint: disable=W0221
  def AddResults(self, tab, results, trace_name=None, exclude_metrics=None):
    """Add results for this page to the results object.
    Reports the delta in memory stats between the start stats and the end stats
    (as *_delta metrics). It reports end memory stats in case no matching start
    memory stats exists.
    Args:
      trace_name: Trace name to identify the summary results for current page.
      exclude_metrics: List of memory metrics to exclude from results,
          e.g. VM, VMPeak, etc. See AddResultsForProcesses().
    """
    assert self._memory_stats_end, 'Must call Stop() first'
    memory_stats = _SubtractMemoryStats(self._memory_stats_end,
                                        self._memory_stats_start)
    # NOTE(review): raises KeyError if the end stats contain no 'Browser'
    # entry -- presumably the browser process is always present; confirm.
    if not memory_stats['Browser']:
      return
    exclude_metrics = exclude_metrics or {}
    memory.AddResultsForProcesses(results, memory_stats,
        metric_trace_name=trace_name, chart_trace_name='delta',
        exclude_metrics=exclude_metrics)
    if 'SystemCommitCharge' not in exclude_metrics:
      results.AddValue(scalar.ScalarValue(
          results.current_page,
          'commit_charge_delta.%s' % (trace_name or 'commit_charge'), 'kb',
          memory_stats['SystemCommitCharge'], important=False))
    if 'ProcessCount' not in exclude_metrics:
      results.AddValue(scalar.ScalarValue(
          results.current_page,
          'processes_delta.%s' % (trace_name or 'processes'), 'count',
          memory_stats['ProcessCount'], important=False))
def _SubtractMemoryStats(end_memory_stats, start_memory_stats):
"""Computes the difference in memory usage stats.
Each of the two stats arguments is a dict with the following format:
{'Browser': {metric: value, ...},
'Renderer': {metric: value, ...},
'Gpu': {metric: value, ...},
'ProcessCount': value,
etc
}
The metrics can be VM, WorkingSetSize, ProportionalSetSize, etc depending on
the platform/test.
NOTE: The only metrics that are not subtracted from original are the *Peak*
memory values.
Returns:
A dict of process type names (Browser, Renderer, etc.) to memory usage
metrics between the end collected stats and the start collected stats.
"""
memory_stats = {}
end_memory_stats = end_memory_stats or {}
start_memory_stats = start_memory_stats or {}
for process_type in end_memory_stats:
memory_stats[process_type] = {}
end_process_memory = end_memory_stats[process_type]
if not end_process_memory:
continue
# If a process has end stats without start stats then report the end stats.
# For example, a GPU process that started just after media playback.
if (process_type not in start_memory_stats or
not start_memory_stats[process_type]):
memory_stats[process_type] = end_process_memory
continue
if not isinstance(end_process_memory, dict):
start_value = start_memory_stats[process_type] or 0
memory_stats[process_type] = end_process_memory - start_value
else:
for metric in end_process_memory:
end_value = end_process_memory[metric]
start_value = start_memory_stats[process_type][metric] or 0
if 'Peak' in metric:
memory_stats[process_type][metric] = end_value
else:
memory_stats[process_type][metric] = end_value - start_value
return memory_stats
| 7kbird/chrome | tools/perf/metrics/system_memory.py | Python | bsd-3-clause | 4,705 |
# Import smorgasbord
import os
import sys
sys.path.insert(0, '../')
import gc
import pdb
import time
import csv
import shutil
import multiprocessing as mp
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import astropy
astropy.log.setLevel('ERROR')
import astropy.io.fits
import astropy.wcs
import aplpy
import ChrisFuncs
import CAAPR
plt.ioff()
# Function that uses sources table CSV file to create a dictionary of source values
def SourcesDictFromCSV(sources_table_path):
    """ Read the sources table CSV into a nested dictionary, keyed by source name. """
    table = np.genfromtxt(sources_table_path, names=True, delimiter=',', dtype=None, comments='#', encoding=None)
    sources_dict = {}
    # genfromtxt collapses a single-row CSV to a 0-d record, which cannot be
    # iterated over; unpack that special case directly
    if table.shape == ():
        key = table['name'].tolist()
        sources_dict[key] = {field: table[field].tolist() for field in table.dtype.names}
    else:
        for row in range(table.shape[0]):
            entry = {}
            for field in table.dtype.names:
                value = table[field][row]
                # The source name is forced to a plain string
                entry[field] = str(value) if field == 'name' else value
            sources_dict[table['name'][row]] = entry
    # Return dictionary
    return sources_dict
# Function that uses band table CSV file to create a dictionary of band values
def BandsDictFromCSV(bands_table_path):
    """ Read the bands table CSV into a nested dictionary, keyed by band name. """
    table = np.genfromtxt(bands_table_path, names=True, delimiter=',', dtype=None, comments='#', encoding=None)
    bands_dict = {}
    # genfromtxt collapses a single-row CSV to a 0-d record, which cannot be
    # iterated over; unpack that special case directly
    if table.shape == ():
        key = table['band_name'].tolist()
        bands_dict[key] = {field: table[field].tolist() for field in table.dtype.names}
    else:
        for row in range(table.shape[0]):
            bands_dict[table['band_name'][row]] = {field: table[field][row] for field in table.dtype.names}
    # Return dictionary
    return bands_dict
# Function that prepares output directory
def OutputDirPrepare(kwargs_dict):
# If needed, create output directory (warning user if there is an existing directory there, which hence may have its contents overwritten)
if os.path.exists( kwargs_dict['output_dir_path'] ):
print '[CAAPR] Warning: Output directory already exists; some files may be overridden'
else:
os.mkdir( kwargs_dict['output_dir_path'] )
# If aperture fitting requested, make corresponding output sub-directory
if kwargs_dict['fit_apertures'] and kwargs_dict['thumbnails'] and not os.path.exists( os.path.join( kwargs_dict['output_dir_path'], 'Aperture_Fitting_Thumbnails' ) ):
os.mkdir( os.path.join(kwargs_dict['output_dir_path'],'Aperture_Fitting_Thumbnails') )
# If actual photometry requested, make corresponding output sub-directory
if kwargs_dict['do_photom'] and kwargs_dict['thumbnails'] and not os.path.exists( os.path.join( kwargs_dict['output_dir_path'], 'Photometry_Thumbnails' ) ):
os.mkdir( os.path.join( kwargs_dict['output_dir_path'],'Photometry_Thumbnails' ) )
# Function that prepares temporary directory
def TempDirPrepare(kwargs_dict):
    """ Create a fresh temporary directory, plus AstroMagic/thumbnail sub-directories if needed. """
    temp_dir = kwargs_dict['temp_dir_path']
    # Wipe any pre-existing temporary directory (unless running in messy
    # mode, which keeps old products), then create it anew
    if os.path.exists(temp_dir):
        if kwargs_dict['messy'] == False:
            shutil.rmtree(temp_dir)
            os.mkdir(temp_dir)
    else:
        os.mkdir(temp_dir)
    # Sub-directory to hold AstroMagic star-subtraction products, if requested
    magic_dir = os.path.join(temp_dir, 'AstroMagic')
    if kwargs_dict['starsub'] == True and not os.path.exists(magic_dir):
        os.mkdir(magic_dir)
    # Sub-directory for thumbnail cutouts, if requested
    maps_dir = os.path.join(temp_dir, 'Processed_Maps')
    if kwargs_dict['thumbnails'] == True and not os.path.exists(maps_dir):
        os.mkdir(maps_dir)
# Function that prepares output file for apertures
def ApertureTablePrepare(kwargs_dict):
    """ Create the aperture output table (with its header row) if aperture fitting was requested. """
    # Nothing to do unless aperture fitting was requested
    if kwargs_dict['fit_apertures'] != True:
        return kwargs_dict
    # Fall back to a timestamped default path when the user did not supply one
    if kwargs_dict['aperture_table_path'] == None:
        kwargs_dict['aperture_table_path'] = os.path.join( kwargs_dict['output_dir_path'], 'CAAPR_Aperture_Table_'+kwargs_dict['timestamp']+'.csv' )
    # Append the standard column header to the (possibly new) output file
    with open(kwargs_dict['aperture_table_path'], 'a') as aperture_table_file:
        aperture_table_file.write('name,semimaj_arcsec,axial_ratio,pos_angle\n')
    # Return (potentially-updated) kwargs dict
    return kwargs_dict
# Function that prepares output file for photometry
def PhotomTablePrepare(kwargs_dict):
    """ Create the photometry output table (with its header row) if photometry was requested.
    The header lists one value and one error column per band, in the order
    the bands appear in the bands input table.
    """
    # Nothing to do unless photometry was requested
    if kwargs_dict['do_photom'] != True:
        return kwargs_dict
    # If no photometry table path is specified construct default; otherwise, use path supplied by user
    if kwargs_dict['photom_table_path'] == None:
        kwargs_dict['photom_table_path'] = os.path.join( kwargs_dict['output_dir_path'], 'CAAPR_Photom_Table_'+kwargs_dict['timestamp']+'.csv' )
    # Use band input table to establish order in which to put bands in header.
    # (Bug fix / consistency: pass encoding=None, matching SourcesDictFromCSV
    # and BandsDictFromCSV, so band names are read as str -- without it,
    # modern numpy returns bytes and the str concatenation below fails.)
    bands_table = np.genfromtxt(kwargs_dict['bands_table_path'], delimiter=',', names=True, dtype=None, encoding=None)
    bands_list = bands_table['band_name']
    # Create header, handling special case of a single band
    photom_table_header = 'name'
    if bands_list.shape == ():
        bands_list = [bands_list.tolist()]
    for band in bands_list:
        photom_table_header += ','+band+','+band+'_ERR'
    photom_table_header += '\n'
    # Create output file, and write output header to it
    with open(kwargs_dict['photom_table_path'], 'a') as photom_table_file:
        photom_table_file.write(photom_table_header)
    # Return (potentially-updated) kwargs dict
    return kwargs_dict
# Function that produces a cutout of a given source in a given band
def Cutout(source_dict, band_dict, kwargs_dict):
    """ Produce a reprojected cutout of the target source in the given band.
    Returns the band dict with band_dir re-pointed at the directory holding
    the new cutout (or unchanged, if no cutout was requested or no matching
    map could be found).
    """
    # Check whether cutout has been requested; if not, return band dict unchanged.
    # (Bug fix: this guard previously compared str(...) against the boolean
    # True, which can never be equal, so make_cutout=True -- i.e. a width-less
    # request -- was silently accepted instead of raising.)
    if str(band_dict['make_cutout'])=='True':
        raise Exception('If you want to produce a cutout, please set the \'make_cutout\' field of the band table to be your desired cutout width, in arcsec.')
    if not float(band_dict['make_cutout'])>0:
        return band_dict
    # Determine whether the user is specificing a directroy full of FITS files in this band (in which case use standardised filename format), or just a single FITS file
    if os.path.isdir(band_dict['band_dir']):
        in_fitspath = os.path.join( band_dict['band_dir'], source_dict['name']+'_'+band_dict['band_name'] )
    elif os.path.isfile(band_dict['band_dir']):
        in_fitspath = os.path.join( band_dict['band_dir'] )
    # Make sure appropriate cutout sub-directories exist in temp directory
    if not os.path.exists( os.path.join( kwargs_dict['temp_dir_path'], 'Cutouts' ) ):
        os.mkdir( os.path.join( kwargs_dict['temp_dir_path'], 'Cutouts' ) )
    if not os.path.exists( os.path.join( kwargs_dict['temp_dir_path'], 'Cutouts', source_dict['name'] ) ):
        os.mkdir( os.path.join( kwargs_dict['temp_dir_path'], 'Cutouts', source_dict['name'] ) )
    # Work out whether the file extension is .fits or .fits.gz
    if os.path.exists(in_fitspath+'.fits'):
        in_fitspath = in_fitspath+'.fits'
    elif os.path.exists(in_fitspath+'.fits.gz'):
        in_fitspath = in_fitspath+'.fits.gz'
    else:
        # No map found for this band; leave the band dict untouched
        return band_dict
    # If error maps are being used, construct this path also
    if band_dict['use_error_map']==True:
        in_fitspath_error = in_fitspath.replace('.fits','_Error.fits')
        if os.path.exists(in_fitspath_error):
            pass
        elif os.path.exists(in_fitspath_error+'.gz'):
            in_fitspath_error = in_fitspath_error+'.gz'
        else:
            raise Exception('No appropriately-named error file found in target directroy (please ensure that error filesnames are in \"[NAME]_[BAND]_Error.fits\" format.')
    # Construct output path (likewise for error map, if necessary)
    out_fitspath = os.path.join( kwargs_dict['temp_dir_path'], 'Cutouts', source_dict['name'], source_dict['name']+'_'+band_dict['band_name']+'.fits' )
    if band_dict['use_error_map']==True:
        out_fitspath_error = os.path.join( kwargs_dict['temp_dir_path'], 'Cutouts', source_dict['name'], source_dict['name']+'_'+band_dict['band_name']+'_Error.fits' )
    # Create cutout (half-width in arcsec; reprojected onto a grid centred on the source)
    ChrisFuncs.FitsCutout(in_fitspath, source_dict['ra'], source_dict['dec'], int(round(float(band_dict['make_cutout'])/2.0)), exten=0, outfile=out_fitspath, reproj=True)
    if band_dict['use_error_map']==True:
        ChrisFuncs.FitsCutout(in_fitspath_error, source_dict['ra'], source_dict['dec'], int(round(float(band_dict['make_cutout'])/2.0)), exten=0, outfile=out_fitspath_error, reproj=True)
    # Return the band dict with band_dir pointed at the directory of the newly-created cutout
    out_fitsdir = os.path.split(out_fitspath)[0]
    band_dict['band_dir'] = out_fitsdir
    return band_dict
# Function that determines if there is unncessary 'padding' around the coverage region of a map, that can be removed
def UnpaddingCutout(source_dict, band_dict, kwargs_dict):
    """ Trim large all-NaN borders from a band's map (when no explicit cutout was requested).
    If more than 10% of either axis is blank padding, a trimmed copy (plus a
    matching error map, if used) is written to the temp cutout directory and
    band_dir is re-pointed there; otherwise the band dict is returned
    unchanged.
    """
    # Only proceed with unpadding if the user hasn't requested a particular cutout; if they have, return band dict unchanged
    if band_dict['make_cutout']!=False:
        return band_dict
    # Make sure that band directory isn't stuck on cutout directory from previous source
    if 'band_dir_inviolate' in band_dict.keys():
        band_dict['band_dir'] = band_dict['band_dir_inviolate']
    # Determine whether the user is specificing a directroy full of FITS files in this band (in which case use standardised filename format), or just a single FITS file
    if os.path.isdir(band_dict['band_dir']):
        in_fitspath = os.path.join( band_dict['band_dir'], source_dict['name']+'_'+band_dict['band_name'] )
    elif os.path.isfile(band_dict['band_dir']):
        in_fitspath = os.path.join( band_dict['band_dir'] )
    else:
        # Bug fix: this branch previously dropped into pdb.set_trace() (a
        # leftover debugging breakpoint); fail loudly instead
        raise Exception('Band directory '+str(band_dict['band_dir'])+' is neither a file nor a directory.')
    # Make sure appropriate cutout sub-directories exist in temp directory
    if not os.path.exists( os.path.join( kwargs_dict['temp_dir_path'], 'Cutouts' ) ):
        os.mkdir( os.path.join( kwargs_dict['temp_dir_path'], 'Cutouts' ) )
    if not os.path.exists( os.path.join( kwargs_dict['temp_dir_path'], 'Cutouts', source_dict['name'] ) ):
        os.mkdir( os.path.join( kwargs_dict['temp_dir_path'], 'Cutouts', source_dict['name'] ) )
    # Work out whether the file extension is .fits or .fits.gz
    if os.path.exists(in_fitspath+'.fits'):
        in_fitspath = in_fitspath+'.fits'
    elif os.path.exists(in_fitspath+'.fits.gz'):
        in_fitspath = in_fitspath+'.fits.gz'
    else:
        # No map found for this band; leave the band dict untouched
        return band_dict
    # If error maps are being used, construct this path also
    if band_dict['use_error_map']==True:
        in_fitspath_error = in_fitspath.replace('.fits','_Error.fits')
        if os.path.exists(in_fitspath_error):
            pass
        elif os.path.exists(in_fitspath_error+'.gz'):
            in_fitspath_error = in_fitspath_error+'.gz'
        else:
            raise Exception('No appropriately-named error file found in target directroy (please ensure that error filesnames are in \"[NAME]_[BAND]_Error.fits\" format.')
    # Load in map and extract WCS
    in_fits, in_header = astropy.io.fits.getdata(in_fitspath, header=True)
    in_wcs = astropy.wcs.WCS(in_header)
    # Measure size of padding borders (the margins of all-NaN pixels)
    in_borders = np.where( np.isnan( in_fits )==False )
    x_min_border = np.min(in_borders[1])
    x_max_border = in_fits.shape[1] - np.max(in_borders[1]) - 1
    y_min_border = np.min(in_borders[0])
    y_max_border = in_fits.shape[0] - np.max(in_borders[0]) - 1
    # Decide if it's worth removing the padding; if not, return the band dict unchanged
    if ((x_min_border+x_max_border)<(0.1*in_fits.shape[1])) and ((y_min_border+y_max_border)<(0.1*in_fits.shape[0])):
        return band_dict
    # Slice smaller map out of original map (each slice is guarded, as a
    # zero-width border would otherwise produce an empty [:-0] slice)
    out_fits = in_fits.copy()
    if y_min_border>0:
        out_fits = out_fits[y_min_border:,:]
    if x_min_border>0:
        out_fits = out_fits[:,x_min_border:]
    if y_max_border>0:
        out_fits = out_fits[:-y_max_border,:]
    if x_max_border>0:
        out_fits = out_fits[:,:-x_max_border]
    # Update header WCS to reflect changes
    # NOTE(review): only crpix/cdelt/crval/ctype are carried over; any
    # rotation (CD/PC matrix) in the input header is discarded -- confirm
    # input maps are unrotated.
    out_wcs = astropy.wcs.WCS(naxis=2)
    out_wcs.wcs.crpix = [in_wcs.wcs.crpix[0]-x_min_border, in_wcs.wcs.crpix[1]-y_min_border]
    out_wcs.wcs.cdelt = in_wcs.wcs.cdelt
    out_wcs.wcs.crval = in_wcs.wcs.crval
    out_wcs.wcs.ctype = in_wcs.wcs.ctype
    out_header = out_wcs.to_header()
    # Save cutout to file
    out_fitspath = os.path.join( kwargs_dict['temp_dir_path'], 'Cutouts', source_dict['name'], source_dict['name']+'_'+band_dict['band_name']+'.fits' )
    out_cutout_hdu = astropy.io.fits.PrimaryHDU(data=out_fits, header=out_header)
    out_cutout_hdulist = astropy.io.fits.HDUList([out_cutout_hdu])
    out_cutout_hdulist.writeto(out_fitspath, overwrite=True)
    # Repeat process for error map, if necessary
    if band_dict['use_error_map']==True:
        # Load in error map
        in_fits_error = astropy.io.fits.getdata(in_fitspath_error)
        # Slice smaller map out of original error map.
        # (Bug fix: these slices were previously unguarded, so a zero-width
        # border -- e.g. y_max_border==0 -- produced an empty [:-0] slice,
        # silently discarding the whole error map; guard them exactly as for
        # the main map above.)
        out_fits_error = in_fits_error.copy()
        if y_min_border>0:
            out_fits_error = out_fits_error[y_min_border:,:]
        if x_min_border>0:
            out_fits_error = out_fits_error[:,x_min_border:]
        if y_max_border>0:
            out_fits_error = out_fits_error[:-y_max_border,:]
        if x_max_border>0:
            out_fits_error = out_fits_error[:,:-x_max_border]
        # Save cutout to file
        out_fitspath_error = os.path.join( kwargs_dict['temp_dir_path'], 'Cutouts', source_dict['name'], source_dict['name']+'_'+band_dict['band_name']+'_Error.fits' )
        out_cutout_hdu_error = astropy.io.fits.PrimaryHDU(data=out_fits_error, header=out_header)
        out_cutout_hdulist_error = astropy.io.fits.HDUList([out_cutout_hdu_error])
        out_cutout_hdulist_error.writeto(out_fitspath_error, overwrite=True)
    # Return the band dict with band_dir pointed at the directory of the newly-created cutout
    out_fitsdir = os.path.split(out_fitspath)[0]
    band_dict['band_dir'] = out_fitsdir
    return band_dict
# Define function that writes final aperture for given source to aperture output file
def RecordAperture(aperture_combined, source_dict, kwargs_dict):
    # aperture_combined holds [semimaj_arcsec, axial_ratio, pos_angle], to
    # match the header written by ApertureTablePrepare.
    if kwargs_dict['verbose']: print '['+source_dict['name']+'] Writing apertures to file.'
    # Consturct string of line to be written
    # (the repr of the list is stripped of brackets/quotes/spaces to give a
    # CSV row.  NOTE(review): Python 2 code; under Python 3 the encode()
    # below would leak a b'...' prefix into the output -- confirm before
    # porting.)
    aperture_string = str([ source_dict['name'].encode('utf-8'), aperture_combined[0], aperture_combined[1], aperture_combined[2] ])#'name','semimaj_arcsec,axial_ratio,pos_angle\n'
    aperture_string = aperture_string.replace('[','').replace(']','').replace(' ','').replace('\'','')+'\n'
    # Write line to file
    aperture_table_file = open( kwargs_dict['aperture_table_path'], 'a')
    aperture_table_file.write(aperture_string)
    aperture_table_file.close()
# Define function that writes the final photometry for a given source to the photometry output file
def RecordPhotom(photom_list, source_dict, bands_dict, kwargs_dict):
    """Append one CSV row of photometry (ap_sum, ap_error per band) for this source.

    Bands present in bands_dict but missing from photom_list are padded with NaN
    entries so every row has a value for every band column. Column order follows
    the band input table at kwargs_dict['bands_table_path'].
    """
    if kwargs_dict['verbose']: print '['+source_dict['name']+'] Writing photometry to file.'
    # Find any bands not listed in the photom results, and add them as null measurements
    for band in bands_dict.keys():
        band_photom = False
        for photom_entry in photom_list:
            if band == photom_entry['band_name']:
                band_photom = True
                break
        if band_photom==True:
            continue
        elif band_photom==False:
            photom_null = {'band_name':band,
                           'ap_sum':np.NaN,
                           'ap_error':np.NaN}
            photom_list.append(photom_null)
    # Use band input table to establish order in which to put bands in results file, handling case of only one band
    photom_string = source_dict['name']
    bands_table = np.genfromtxt(kwargs_dict['bands_table_path'], delimiter=',', names=True, dtype=None)
    bands_list = bands_table['band_name']
    # Construct string of line to be written (a 0-d array means the table held a single band)
    if bands_list.shape==():
        bands_list = [bands_list.tolist()]
    for band_name in bands_list:
        for photom_entry in photom_list:
            if photom_entry['band_name']==band_name:
                photom_string += ','+str(photom_entry['ap_sum'])+','+str(photom_entry['ap_error'])
    photom_string += '\n'
    # Write line to file (append mode)
    photom_table_file = open( kwargs_dict['photom_table_path'], 'a')
    photom_table_file.write(photom_string)
    photom_table_file.close()
# Define function that checks whether a decent amount of RAM is free before allowing things to progress
def MemCheck(pod, thresh_fraction=0.90, thresh_factor=20.0, swap_thresh_fraction=0.90, return_status=False):
    """Block until enough RAM and swap appear to be free, then return.

    Polls psutil in a loop, sleeping 10-15 s between polls; gives up waiting after
    20 polls and proceeds regardless. If psutil is unavailable (e.g. on Windows),
    returns immediately without checking.

    Parameters:
        pod: dict with at least 'verbose', 'id', and 'in_fitspath_size' keys.
        thresh_fraction: wait while more than this fraction of RAM is in use (or None to skip).
        thresh_factor: wait while free RAM is less than this multiple of the current
            file size pod['in_fitspath_size'] (or None to skip).
        swap_thresh_fraction: wait while more than this fraction of swap is in use.
        return_status: if True, return the final psutil.virtual_memory() tuple.

    Returns: the last virtual_memory() stats if return_status is True, else None.
    """
    # Check whether psutil is available (it is unix-only); if not, we cannot inspect
    # memory at all, so proceed immediately. (Bug fix: the original fell through after
    # this message and crashed with NameError on the psutil references below.)
    try:
        import psutil
    except ImportError:
        if pod['verbose']: print('['+pod['id']+'] Library psutil not available (is this a Windows system?); unable to check RAM, proceeding regardless.')
        return None
    # Poll memory state until enough is free, or we give up waiting
    wait_initial = True
    wait_count = 0
    while True:
        mem_wait = False
        # Assess what fraction of RAM is currently in use (total is index 0, available index 1)
        mem_stats = psutil.virtual_memory()
        mem_usage = 1.0-(float(mem_stats[1])/float(mem_stats[0])) #float(mem_stats[2]) / 100.0
        # Require wait if the amount of RAM free is less than some multiple of the size
        # of the current file. (Bug fix: check in_fitspath_size for None *before* doing
        # arithmetic on it; the original called float(None) and crashed.)
        mem_free = float(psutil.virtual_memory()[4])
        if thresh_factor is not None and pod['in_fitspath_size'] is not None:
            if (float(thresh_factor) * float(pod['in_fitspath_size'])) > mem_free:
                # NOTE(review): comparing bytes*factor against 0.75 looks dimensionally odd
                # (possibly meant a fraction of total RAM) — behaviour preserved; confirm.
                if (pod['in_fitspath_size'] * thresh_factor) > (0.75):
                    thresh_fraction = 0.25
                else:
                    mem_wait = True
        # Require wait if less than some fraction of RAM is free
        if thresh_fraction is not None:
            if mem_usage >= float(thresh_fraction):
                mem_wait = True
        # Also, require that some fraction of the swap is free
        swap_stats = psutil.swap_memory()
        swap_usage = float(swap_stats[3]) / 100.0
        if swap_usage > swap_thresh_fraction:
            mem_wait = True
        # If process has waited loads of times already, progress regardless
        if wait_count >= 20:
            mem_wait = False
        # If required, wait for a semi-random period of time (to decorrelate workers); otherwise, break
        if mem_wait:
            if wait_initial:
                if pod['verbose']: print('['+pod['id']+'] Waiting for necessary RAM to free up before continuing processing.')
                wait_initial = False
            wait_count += 1
            time.sleep(10.0+(5.0*np.random.rand()))
        else:
            break
    # Return memory status, if requested
    if return_status:
        return mem_stats
# Define function that makes a grid of aperture thumbnails for a given source's output
def ApertureThumbGrid(source_dict, bands_dict, kwargs_dict, aperture_list, aperture_combined):
    """Render a PNG grid of per-band thumbnails showing fitted and combined apertures.

    For each band with a processed map, produces a cutout (via ThumbCutout), plots it
    with APLpy, and overlays: the band's own fitted aperture (dotted), the combined
    aperture (solid), and the background annulus (dashed). The grid layout approximates
    the golden ratio. Output goes to <output_dir>/Aperture_Fitting_Thumbnails/.
    """
    # Check that thumbnails are requested; if so, set up warnings and proceeed
    if kwargs_dict['thumbnails']!=True:
        return
    import warnings
    warnings.filterwarnings('ignore')
    if kwargs_dict['verbose']: print '['+source_dict['name']+'] Producing image grid of aperture thumbnails.'
    # Define sub-function to find all possible factors for given value
    def Factors(value):
        factors = []
        for i in range(1, int(value**0.5)+1):
            if value % i == 0:
                factors.append([i, value/i])
        return factors
    # Find how many thumnails need tiling
    list_files = np.array( os.listdir( os.path.join(kwargs_dict['temp_dir_path'],'Processed_Maps') ) )
    thumb_files = sum( [ '.fits' in list_file for list_file in list_files ] )
    # Find grid dimensions that closely match the golden ratio for the number of images being plotted
    phi = (1.0+5.0**0.5)/2.0
    edge_short = np.floor(float(thumb_files)**0.5)
    edge_long = np.floor(phi*float(edge_short))
    while int(edge_short*edge_long)<thumb_files:
        if int(edge_short+1)<int(edge_long):
            edge_short += 1
        else:
            edge_long += 1
    # Shrink either edge if the grid has a whole spare row/column
    if (int(edge_short*edge_long)-int(edge_long))>=thumb_files:
        edge_short -= 1
    if (int(edge_short*edge_long)-int(edge_short))>=thumb_files:
        edge_long -= 1
    # Set up various variables
    counter = 0
    row_counter = 0
    column_counter = 0
    # Calculate figure coordinates (Border of 1, each subplot of width 10, separated by 1)
    x_grid = edge_long
    y_grid = edge_short
    x_subdim = 4.0
    y_subdim = 4.0
    x_margin = 0.4
    y_margin = 0.4
    x_dim = (x_subdim * x_grid) + (x_margin * (x_grid+1.0))
    y_dim = (y_subdim * y_grid) + (y_margin * (y_grid+1.0)) + (1.0 * y_margin)
    x_fig_subdim = x_subdim / x_dim;
    y_fig_subdim = y_subdim / y_dim
    x_fig_margin = x_margin / x_dim
    y_fig_margin = y_margin / y_dim
    # Use band input table to establish order in which to plot thumbnails
    bands_table = np.genfromtxt(kwargs_dict['bands_table_path'], delimiter=',', names=True, dtype=None)
    bands_list = bands_table['band_name']
    # Handle case where there's only one band (genfromtxt returns a 0-d array)
    if len(bands_list.shape)==0:
        bands_list = [bands_list.tolist()]
    # Prepare figure and add title
    fig = plt.figure(figsize=(x_dim, y_dim))
    x_title = 1.0 * x_margin #x_fig_margin + ( np.mod(0.0, x_grid) * x_fig_subdim ) + ( np.mod(0.0, x_grid) * x_fig_margin )
    y_title = y_dim - (0.65 * y_margin) #1.0 - ( y_fig_margin + y_fig_subdim + ( np.floor(float(counter)/x_grid) * y_fig_subdim ) + ( np.floor(float(counter)/x_grid) * y_fig_margin ) )
    plt.figtext(x_title/x_dim, y_title/y_dim, source_dict['name'], size=30, color='black', weight='bold', horizontalalignment='left', verticalalignment='top', figure=fig)
    # Find largest beam size and outer annulus size, and hence work out thumbnail size that will contain the largest beam-convolved aperture
    beam_arcsec_max = 0.0
    outer_annulus_max = 0.0
    pix_arcsec_max = 0.0
    for band_name in bands_dict:
        if not os.path.exists(os.path.join(kwargs_dict['temp_dir_path'],'Processed_Maps',source_dict['name']+'_'+band_name+'.fits')):
            continue
        band_pix_matrix = astropy.wcs.WCS(astropy.io.fits.getheader(os.path.join(kwargs_dict['temp_dir_path'],'Processed_Maps',source_dict['name']+'_'+band_name+'.fits'))).pixel_scale_matrix
        band_pix_arcsec = 3600.0 * np.sqrt( np.min(np.abs(band_pix_matrix))**2.0 + np.max(np.abs(band_pix_matrix))**2.0 )
        if band_pix_arcsec>pix_arcsec_max:
            pix_arcsec_max = band_pix_arcsec
        if bands_dict[band_name]['beam_arcsec']>beam_arcsec_max:
            beam_arcsec_max = bands_dict[band_name]['beam_arcsec']
        if bands_dict[band_name]['annulus_outer']>outer_annulus_max:
            outer_annulus_max = bands_dict[band_name]['annulus_outer']
    thumb_rad = np.ceil( 1.0 * pix_arcsec_max ) + np.ceil( 1.2 * 0.5 * np.sqrt( (2.0*outer_annulus_max*aperture_combined[0])**2.0 + (beam_arcsec_max)**2.0 ) )
    # Begin preliminary loop, to produce cutouts for thumbnails
    thumb_pool = mp.Pool()
    bands_list_present = []
    for band_name in bands_list:
        # Check whether cutout exists,
        img_input = os.path.join(kwargs_dict['temp_dir_path'],'Processed_Maps',source_dict['name']+'_'+band_name+'.fits')
        if not os.path.exists(img_input):
            continue
        else:
            bands_list_present.append(band_name)
            # Produce cutouts, and end loop
            if kwargs_dict['parallel']==True:
                thumb_pool.apply_async( ThumbCutout, args=(source_dict, bands_dict[band_name], kwargs_dict, img_input, thumb_rad,) )
            elif kwargs_dict['parallel']==False:
                ThumbCutout(source_dict, bands_dict[band_name], kwargs_dict, img_input, thumb_rad)
    thumb_pool.close()
    thumb_pool.join()
    # Begin main thumbnail plotting loop
    for band_name in bands_list_present:
        # Locate this band's entry in aperture_list; b indexes it below
        for w in range(0, thumb_files):
            if aperture_list[w]['band_name']==band_name:
                b = w
                break
            # NOTE(review): this 'continue' is redundant (loop advances anyway)
            continue
        # Check whether cutout exists,
        img_input = os.path.join(kwargs_dict['temp_dir_path'],'Processed_Maps',source_dict['name']+'_'+band_name+'.fits')
        if not os.path.exists(img_input):
            continue
        img_output = os.path.join(kwargs_dict['temp_dir_path'],'Processed_Maps',source_dict['name']+'_'+band_name+'_Thumbnail.fits')
        # Calculate subplot coordinates
        x_min = x_fig_margin + ( np.mod(float(counter), x_grid) * x_fig_subdim ) + ( np.mod(float(counter), x_grid) * x_fig_margin )
        y_min = 1.0 - ( 1.0 * y_fig_margin ) - ( y_fig_margin + y_fig_subdim + ( np.floor(float(counter)/x_grid) * y_fig_subdim ) + ( np.floor(float(counter)/x_grid) * y_fig_margin ) )
        dx = x_fig_subdim
        dy = y_fig_subdim
        # Create and format image (subfigures are stored via vars() under dynamic names)
        vars()['subfig'+str(b)] = aplpy.FITSFigure(img_output, figure=fig, subplot=[x_min, y_min, dx, dy])
        vars()['subfig'+str(b)].show_colorscale(cmap=bands_dict[band_name]['colour_map'], stretch='arcsinh', pmin=7.5, pmax=99.5)
        vars()['subfig'+str(b)].set_nan_color('black')
        vars()['subfig'+str(b)].axis_labels.hide()
        vars()['subfig'+str(b)].tick_labels.hide()
        vars()['subfig'+str(b)].ticks.hide()
        line_width = 4.0
        # Extract band-specific aperture dimensions, convolving with the beam in quadrature
        band_ap_angle = aperture_list[b]['opt_angle']
        band_ap_axial_ratio = aperture_list[b]['opt_axial_ratio']
        band_ap_semimaj = (aperture_list[b]['opt_semimaj_arcsec'])/3600.0
        band_ap_semimin = band_ap_semimaj / band_ap_axial_ratio
        band_beam_width = bands_dict[band_name]['beam_arcsec'] / 3600.0
        band_ap_semimaj = ( band_ap_semimaj**2.0 + (0.5*band_beam_width)**2.0 )**0.5
        band_ap_semimin = ( band_ap_semimin**2.0 + (0.5*band_beam_width)**2.0 )**0.5
        # NOTE(review): dividing band_ap_semimaj by itself always gives 1.0, and the
        # result is never used — presumably meant band_ap_semimaj / band_ap_semimin; confirm.
        band_ap_axial_ratio = band_ap_semimaj / band_ap_semimaj
        # Plot band-specific aperture (if one was provided)
        if isinstance(source_dict['aperture_bands_exclude'], basestring):
            aperture_bands_exclude = source_dict['aperture_bands_exclude'].split(';')
        else:
            aperture_bands_exclude = []
        if bands_dict[band_name]['consider_aperture']==True and band_name not in aperture_bands_exclude:
            vars()['subfig'+str(b)].show_ellipses(source_dict['ra'], source_dict['dec'], 2.0*band_ap_semimaj, 2.0*band_ap_semimin, angle=band_ap_angle, edgecolor='#00FF40', facecolor='none', linewidth=line_width/2.0, linestyle='dotted')
        # Extract combined aperture dimensions
        comb_ap_angle = aperture_combined[2]
        comb_ap_axial_ratio = aperture_combined[1]
        comb_ap_semimaj = aperture_combined[0]/3600.0
        comb_ap_semimin = comb_ap_semimaj / comb_ap_axial_ratio
        comb_ap_semimaj = 0.5 * ( (2.0*comb_ap_semimaj)**2.0 + band_beam_width**2.0 )**0.5
        comb_ap_semimin = 0.5 * ( (2.0*comb_ap_semimin)**2.0 + band_beam_width**2.0 )**0.5
        # Plot combined aperture
        vars()['subfig'+str(b)].show_ellipses(source_dict['ra'], source_dict['dec'], 2.0*comb_ap_semimaj, 2.0*comb_ap_semimin, angle=comb_ap_angle, edgecolor='#00FF40', facecolor='none', linewidth=line_width)
        # Plot combined background annulus
        band_ann_inner_semimaj = comb_ap_semimaj * 2.0 * bands_dict[band_name]['annulus_inner']
        band_ann_inner_semimin = comb_ap_semimin * 2.0 * bands_dict[band_name]['annulus_inner']
        band_ann_outer_semimaj = comb_ap_semimaj * 2.0 * bands_dict[band_name]['annulus_outer']
        band_ann_outer_semimin = comb_ap_semimin * 2.0 * bands_dict[band_name]['annulus_outer']
        vars()['subfig'+str(b)].show_ellipses(source_dict['ra'], source_dict['dec'], band_ann_inner_semimaj, band_ann_inner_semimin, angle=comb_ap_angle, edgecolor='#00FF40', facecolor='none', linewidth=line_width/3.0, linestyle='dashed')
        vars()['subfig'+str(b)].show_ellipses(source_dict['ra'], source_dict['dec'], band_ann_outer_semimaj,band_ann_outer_semimin, angle=comb_ap_angle, edgecolor='#00FF40', facecolor='none', linewidth=line_width/3.0, linestyle='dashed')
        # Plot label
        vars()['subfig'+str(b)].add_label(0.035, 0.92, bands_dict[band_name]['band_name'], relative=True, size=20, color='white', horizontalalignment='left')
        # Progress counters (row/column counters are maintained but unused below)
        counter += 1
        column_counter += 1
        if np.mod(float(counter)+1, x_grid)==0:
            row_counter += 1
            column_counter = 0
    # Save figure, and remove temporary files
    fig.savefig( os.path.join(kwargs_dict['output_dir_path'],'Aperture_Fitting_Thumbnails',source_dict['name']+'_Thumbnail_Grid.png'), facecolor='white', dpi=100.0)
    if kwargs_dict['messy'] == False:
        [os.remove(os.path.join(kwargs_dict['temp_dir_path'],'Processed_Maps',processed_map)) for processed_map in os.listdir(os.path.join(kwargs_dict['temp_dir_path'],'Processed_Maps')) if '.fits' in processed_map]
    fig.clear()
    plt.close('all')
    gc.collect()
# Define function that makes a grid of photometry thumbnails for a given source's output
def PhotomThumbGrid(source_dict, bands_dict, kwargs_dict):
    """Render a PNG grid of per-band thumbnails showing the photometry aperture.

    Like ApertureThumbGrid, but the aperture is read back from the aperture table
    on disk (kwargs_dict['aperture_table_path']) rather than passed in, and only
    the combined aperture plus its annulus are overlaid. Output goes to
    <output_dir>/Photometry_Thumbnails/.
    """
    # Check that thumbnails are requested; if so, set up warnings and proceeed
    if kwargs_dict['thumbnails']!=True:
        return
    import warnings
    warnings.filterwarnings('ignore')
    if kwargs_dict['verbose']: print '['+source_dict['name']+'] Producing image grid of photometry thumbnails.'
    # Define sub-function to find all possible factors for given value
    def Factors(value):
        factors = []
        for i in range(1, int(value**0.5)+1):
            if value % i == 0:
                factors.append([i, value/i])
        return factors
    # Read in aperture file
    aperture_table = np.genfromtxt(kwargs_dict['aperture_table_path'], delimiter=',', names=True, dtype=None)
    aperture_index = np.where( aperture_table['name']==source_dict['name'] )
    if aperture_index[0].shape[0]>1:
        raise Exception('Aperture value caontains more than one entry for current galaxy')
    else:
        aperture_index = aperture_index[0][0]
    # Extract aperture corresponding to current source, dealing with special case where aperture file contains only one source
    if aperture_table['semimaj_arcsec'].shape==() and np.where( aperture_table['name']==source_dict['name'] )[0][0]==0:
        opt_semimaj_arcsec = aperture_table['semimaj_arcsec'].tolist()
        opt_axial_ratio = aperture_table['axial_ratio'].tolist()
        opt_angle = aperture_table['pos_angle'].tolist()
    else:
        opt_semimaj_arcsec = aperture_table['semimaj_arcsec'][aperture_index]
        opt_axial_ratio = aperture_table['axial_ratio'][aperture_index]
        opt_angle = aperture_table['pos_angle'][aperture_index]
    # Find how many thumnails need tiling
    list_files = np.array( os.listdir( os.path.join(kwargs_dict['temp_dir_path'],'Processed_Maps') ) )
    thumb_files = sum( [ '.fits' in list_file for list_file in list_files ] )
    # Find grid dimensions that closely match the golden ratio for the number of images being plotted
    # NOTE(review): uses round + a single 'if' here, whereas ApertureThumbGrid uses
    # floor + 'while' — the two grid-sizing routines can disagree; confirm intended.
    phi = (1.0+5.0**0.5)/2.0
    edge_short = np.floor(float(thumb_files)**0.5)
    edge_long = np.round(phi*float(edge_short))
    if int(edge_short*edge_long)<thumb_files:
        if int(edge_short+1)<int(edge_long):
            edge_short += 1
        else:
            edge_long += 1
    # Shrink either edge if the grid has a whole spare row/column
    if (int(edge_short*edge_long)-int(edge_long))>=thumb_files:
        edge_short -= 1
    if (int(edge_short*edge_long)-int(edge_short))>=thumb_files:
        edge_long -= 1
    # Set up various variables
    counter = 0
    row_counter = 0
    column_counter = 0
    # Calculate figure coordinates (Border of 1, each subplot of width 10, separated by 1)
    x_grid = edge_long
    y_grid = edge_short
    x_subdim = 4.0
    y_subdim = 4.0
    x_margin = 0.4
    y_margin = 0.4
    x_dim = (x_subdim * x_grid) + (x_margin * (x_grid+1.0))
    y_dim = (y_subdim * y_grid) + (y_margin * (y_grid+1.0)) + (1.0 * y_margin)
    x_fig_subdim = x_subdim / x_dim;
    y_fig_subdim = y_subdim / y_dim
    x_fig_margin = x_margin / x_dim
    y_fig_margin = y_margin / y_dim
    # Use band input table to establish order in which to plot thumbnails
    bands_table = np.genfromtxt(kwargs_dict['bands_table_path'], delimiter=',', names=True, dtype=None)
    bands_list = bands_table['band_name']
    # Handle case where there's only one band (genfromtxt returns a 0-d array)
    if len(bands_list.shape)==0:
        bands_list = [bands_list.tolist()]
    # Prepare figure and add title
    fig = plt.figure(figsize=(x_dim, y_dim))
    x_title = 1.0 * x_margin #x_fig_margin + ( np.mod(0.0, x_grid) * x_fig_subdim ) + ( np.mod(0.0, x_grid) * x_fig_margin )
    y_title = y_dim - (0.65 * y_margin) #1.0 - ( y_fig_margin + y_fig_subdim + ( np.floor(float(counter)/x_grid) * y_fig_subdim ) + ( np.floor(float(counter)/x_grid) * y_fig_margin ) )
    plt.figtext(x_title/x_dim, y_title/y_dim, source_dict['name'], size=30, color='black', weight='bold', horizontalalignment='left', verticalalignment='top', figure=fig)
    # Find largest beam size, outer annulus size, and pixel size - and hence work out thumbnail size that will contain the largest beam-convolved aperture
    beam_arcsec_max = 0.0
    outer_annulus_max = 0.0
    pix_arcsec_max = 0.0
    for band_name in bands_dict:
        if not os.path.exists(os.path.join(kwargs_dict['temp_dir_path'],'Processed_Maps',source_dict['name']+'_'+band_name+'.fits')):
            continue
        band_pix_matrix = astropy.wcs.WCS(astropy.io.fits.getheader(os.path.join(kwargs_dict['temp_dir_path'],'Processed_Maps',source_dict['name']+'_'+band_name+'.fits'))).pixel_scale_matrix
        band_pix_arcsec = 3600.0 * np.sqrt( np.min(np.abs(band_pix_matrix))**2.0 + np.max(np.abs(band_pix_matrix))**2.0 )
        if band_pix_arcsec>pix_arcsec_max:
            pix_arcsec_max = band_pix_arcsec
        if bands_dict[band_name]['beam_arcsec']>beam_arcsec_max:
            beam_arcsec_max = bands_dict[band_name]['beam_arcsec']
        if bands_dict[band_name]['annulus_outer']>outer_annulus_max:
            outer_annulus_max = bands_dict[band_name]['annulus_outer']
    thumb_rad = np.ceil( 1.0 * pix_arcsec_max ) + np.ceil( 1.2 * 0.5 * np.sqrt( (2.0*outer_annulus_max*opt_semimaj_arcsec)**2.0 + (beam_arcsec_max)**2.0 ) )
    # Begin preliminary loop, to produce cutouts for thumbnails
    thumb_pool = mp.Pool()
    bands_list_present = []
    for band_name in bands_list:
        # Check whether cutout exists,
        img_input = os.path.join(kwargs_dict['temp_dir_path'],'Processed_Maps',source_dict['name']+'_'+band_name+'.fits')
        if not os.path.exists(img_input):
            continue
        else:
            bands_list_present.append(band_name)
            # Produce cutouts, and end loop
            if kwargs_dict['parallel']==True:
                thumb_pool.apply_async( ThumbCutout, args=(source_dict, bands_dict[band_name], kwargs_dict, img_input, thumb_rad,) )
            elif kwargs_dict['parallel']==False:
                ThumbCutout(source_dict, bands_dict[band_name], kwargs_dict, img_input, thumb_rad)
    thumb_pool.close()
    thumb_pool.join()
    # Begin main thumbnail plotting loop (w numbers the subfigures)
    w = 0
    for band_name in bands_list_present:
        w += 1
        # Check whether cutout exists,
        img_input = os.path.join(kwargs_dict['temp_dir_path'],'Processed_Maps',source_dict['name']+'_'+band_name+'.fits')
        if not os.path.exists(img_input):
            continue
        img_output = os.path.join(kwargs_dict['temp_dir_path'],'Processed_Maps',source_dict['name']+'_'+band_name+'_Thumbnail.fits')
        # Calculate subplot coordinates
        x_min = x_fig_margin + ( np.mod(float(counter), x_grid) * x_fig_subdim ) + ( np.mod(float(counter), x_grid) * x_fig_margin )
        y_min = 1.0 - ( 1.0 * y_fig_margin ) - ( y_fig_margin + y_fig_subdim + ( np.floor(float(counter)/x_grid) * y_fig_subdim ) + ( np.floor(float(counter)/x_grid) * y_fig_margin ) )
        dx = x_fig_subdim
        dy = y_fig_subdim
        # Create and format image (subfigures are stored via vars() under dynamic names)
        vars()['subfig'+str(w)] = aplpy.FITSFigure(img_output, figure=fig, subplot=[x_min, y_min, dx, dy])
        vars()['subfig'+str(w)].show_colorscale(cmap=bands_dict[band_name]['colour_map'], stretch='arcsinh', pmin=7.5, pmax=99.5)
        vars()['subfig'+str(w)].set_nan_color('black')
        vars()['subfig'+str(w)].axis_labels.hide()
        vars()['subfig'+str(w)].tick_labels.hide()
        vars()['subfig'+str(w)].ticks.hide()
        # Extract combined aperture dimensions, convolving with the beam in quadrature
        band_beam_width = bands_dict[band_name]['beam_arcsec'] / 3600.0
        comb_ap_angle = opt_angle
        comb_ap_axial_ratio = opt_axial_ratio
        comb_ap_semimaj = opt_semimaj_arcsec / 3600.0
        comb_ap_semimin = comb_ap_semimaj / comb_ap_axial_ratio
        comb_ap_semimaj = 0.5 * ( (2.0*comb_ap_semimaj)**2.0 + band_beam_width**2.0 )**0.5
        comb_ap_semimin = 0.5 * ( (2.0*comb_ap_semimin)**2.0 + band_beam_width**2.0 )**0.5
        # Plot combined aperture
        line_width = 4.0
        vars()['subfig'+str(w)].show_ellipses(source_dict['ra'], source_dict['dec'], 2.0*comb_ap_semimaj, 2.0*comb_ap_semimin, angle=comb_ap_angle, edgecolor='#00FF40', facecolor='none', linewidth=line_width)
        # Plot combined background annulus
        band_ann_inner_semimaj = comb_ap_semimaj * 2.0 * bands_dict[band_name]['annulus_inner']
        band_ann_inner_semimin = comb_ap_semimin * 2.0 * bands_dict[band_name]['annulus_inner']
        band_ann_outer_semimaj = comb_ap_semimaj * 2.0 * bands_dict[band_name]['annulus_outer']
        band_ann_outer_semimin = comb_ap_semimin * 2.0 * bands_dict[band_name]['annulus_outer']
        vars()['subfig'+str(w)].show_ellipses(source_dict['ra'], source_dict['dec'], band_ann_inner_semimaj, band_ann_inner_semimin, angle=comb_ap_angle, edgecolor='#00FF40', facecolor='none', linewidth=line_width/3.0, linestyle='dashed')
        vars()['subfig'+str(w)].show_ellipses(source_dict['ra'], source_dict['dec'], band_ann_outer_semimaj,band_ann_outer_semimin, angle=comb_ap_angle, edgecolor='#00FF40', facecolor='none', linewidth=line_width/3.0, linestyle='dashed')
        # Plot label
        vars()['subfig'+str(w)].add_label(0.035, 0.92, bands_dict[band_name]['band_name'], relative=True, size=20, color='white', horizontalalignment='left')
        # Progress counters (row/column counters are maintained but unused below)
        counter += 1
        column_counter += 1
        if np.mod(float(counter)+1, x_grid)==0:
            row_counter += 1
            column_counter = 0
    # Save figure, and remove temporary files
    fig.savefig( os.path.join(kwargs_dict['output_dir_path'],'Photometry_Thumbnails',source_dict['name']+'_Thumbnail_Grid.png'), facecolor='white', dpi=100.0)
    if kwargs_dict['messy'] == False:
        [ os.remove(os.path.join(kwargs_dict['temp_dir_path'],'Processed_Maps',processed_map)) for processed_map in os.listdir(os.path.join(kwargs_dict['temp_dir_path'],'Processed_Maps')) if '.fits' in processed_map ]
    fig.clear()
    plt.close('all')
    gc.collect()
# Define function for producing an appropriately-sized cutout in current band
def ThumbCutout(source_dict, band_dict, kwargs_dict, img_input, thumb_rad):
    """Write a thumbnail-sized FITS cutout of img_input, centred on the source.

    The cutout is written to <temp_dir>/Processed_Maps/<name>_<band>_Thumbnail.fits
    with its header equinox/epoch set (quells noisy APLpy verbosity later on).
    """
    # Thumbnail output lives alongside the processed maps in the temp directory
    thumb_path = os.path.join(kwargs_dict['temp_dir_path'], 'Processed_Maps',
                              source_dict['name']+'_'+band_dict['band_name']+'_Thumbnail.fits')
    # Run the cutout serially when the caller itself is already parallelised
    run_parallel = not kwargs_dict['parallel']
    hdulist = ChrisFuncs.FitsCutout(img_input, source_dict['ra'], source_dict['dec'], thumb_rad,
                                    exten=0, variable=True, reproj=True, parallel=run_parallel, fast=True)
    # Neaten the header before writing out
    header = hdulist[0].header
    header['EQUINOX'] = 2000.0
    header['EPOCH'] = 2000.0
    astropy.io.fits.writeto(thumb_path, hdulist[0].data, header=header, overwrite=True)
| Stargrazer82301/CAAPR | CAAPR/CAAPR_IO/CAAPR_IO.py | Python | mit | 42,977 |
"""
Django settings for my_ap project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2h!j72qgqczhk84pi!9^+945eo2cd*s1owm6#v0pko6mc4&=+j'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'anthologies'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'my_ap.urls'
WSGI_APPLICATION = 'my_ap.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'manas',
'USER':'root',
'PASSWORD':'mrtv123',
'HOST':'localhost',
'PORT': '',
}
}
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
| 912/M-new | my_ap/my_ap/settings.py | Python | gpl-2.0 | 2,221 |
# coding: utf-8
#
# Copyright © 2017 weirdgiraffe <giraffe@cyberzoo.xyz>
#
# Distributed under terms of the MIT license.
#
| weirdgiraffe/plugin.video.giraffe.seasonvar | resources/site-packages/mock_kodi/__init__.py | Python | mit | 125 |
import json
# Load the three polygon sets once at import time; each GeoJSON file holds one
# feature whose geometry is a GeometryCollection of polygons (one set per
# rainfall-depth tier: 78 mm, 100 mm, 130 mm).
with open('data/78mm.json', 'r') as _78mm:
    polygons78 = json.load(_78mm)["features"][0]["geometry"]["geometries"]
with open('data/100mm.json', 'r') as _100mm:
    polygons100 = json.load(_100mm)["features"][0]["geometry"]["geometries"]
with open('data/130mm.json', 'r') as _130mm:
    polygons130 = json.load(_130mm)["features"][0]["geometry"]["geometries"]
def dot(x1, y1, x2, y2):
    """Return x1*y1 + x2*y2 (sum of the two pairwise products).

    NOTE(review): this pairing differs from det's (x1,y1)/(x2,y2) vector
    convention; the function is only referenced from commented-out code.
    """
    products = (x1 * y1, x2 * y2)
    return sum(products)
def det(x1, y1, x2, y2):
    """Return the 2x2 determinant (cross product) of vectors (x1, y1) and (x2, y2)."""
    rising = x1 * y2
    falling = x2 * y1
    return rising - falling
def dett(x0, y0, x1, y1, x2, y2):
    """Orientation of point (x2, y2) relative to the directed segment (x0, y0)->(x1, y1).

    Returns -1 for clockwise, True for counter-clockwise, False for collinear
    (the mixed int/bool return matches the original implementation).
    """
    # Cross product of (p1 - p0) with (p2 - p0), inlined from det()
    cross = (x1 - x0) * (y2 - y0) - (x2 - x0) * (y1 - y0)
    if cross < 0:
        return -1
    return cross > 0
'''
inline DB ang(cPo p0,cPo p1){return acos(dot(p0,p1)/p0.len()/p1.len());}
def ang(x1, y1, x2, y2):
return
def arg(x1, y1, x2, y2):
DB a=ang(x,y);return~dett(x,y)?a:2*PI-a;}
return
'''
def intersect(lx1, ly1, lx2, ly2, rx1, ry1, rx2, ry2):
    """Return 1 if segment L=(lx1,ly1)-(lx2,ly2) meets segment R=(rx1,ry1)-(rx2,ry2), else 0.

    Each segment must have the other's endpoints on opposite sides (or touching,
    since the orientation products are compared with <= 0).
    """
    def _side(ax, ay, bx, by, cx, cy):
        # Orientation of c relative to a->b (same -1/True/False encoding as dett)
        cross = (bx - ax) * (cy - ay) - (cx - ax) * (by - ay)
        if cross < 0:
            return -1
        return cross > 0
    r_straddles_l = _side(lx1, ly1, lx2, ly2, rx1, ry1) * _side(lx1, ly1, lx2, ly2, rx2, ry2) <= 0
    l_straddles_r = _side(rx1, ry1, rx2, ry2, lx1, ly1) * _side(rx1, ry1, rx2, ry2, lx2, ly2) <= 0
    return 1 if (r_straddles_l and l_straddles_r) else 0
def within(p, x, y):
    """Ray-casting point-in-polygon test: 1 if (x, y) lies inside polygon p, else 0.

    p is a list of [x, y] vertices; edges are consecutive vertex pairs. Casts a
    segment from (x, y) to the fixed far-away point (-3232, -4344) and counts
    edge crossings; an odd count means inside. Edges touching the query point
    itself are skipped, as in the original.
    """
    crossings = 0
    for a, b in zip(p, p[1:]):
        if (x, y) == (a[0], a[1]):
            continue
        if (x, y) == (b[0], b[1]):
            continue
        crossings += intersect(x, y, -3232, -4344, a[0], a[1], b[0], b[1])
    return 1 if crossings % 2 == 1 else 0
def _check(p, d, x, y):
    """Return [d, index] for the first geometry in p whose outer ring contains (x, y), else []."""
    for index, geometry in enumerate(p):
        if within(geometry["coordinates"][0], x, y):
            return [d, index]
    return []
def check(x, y):
    """Return a weight for point (x, y) based on which depth-tier polygon set contains it.

    Tiers are tested from shallowest to deepest; a point inside the 78 mm set
    scores 0.2, the 100 mm set 0.5, the 130 mm set 0.8, and anywhere else 1.0.
    """
    tiers = (
        (polygons78, 78, 0.2),    # originally annotated 0.078
        (polygons100, 100, 0.5),  # originally annotated 0.1
        (polygons130, 130, 0.8),  # originally annotated 0.13
    )
    for polygons, depth, weight in tiers:
        if len(_check(polygons, depth, x, y)) > 0:
            return weight
    return 1.0
# init()
# #display()
# #x, y = 121.555764, 24.9833
#
# x, y = 121.565764, 24.9830
# res = check(x, y)
# print res
# if (len(res) > 0):
# if (res[0] == 78):
# print_polygon(polygons78[res[1]]["coordinates"][0], 'Red')
# if (res[0] == 100):
# print_polygon(polygons78[res[1]]["coordinates"][0], 'Orange')
# if (res[0] == 130):
# print_polygon(polygons78[res[1]]["coordinates"][0], 'Yellow')
# plt.plot(x, y, marker='*')
# ax.grid()
# ax.axis('equal')
# plt.show()
| HackCigriculture/cigriculture-ml | src/polygon.py | Python | gpl-3.0 | 2,316 |
import json
from PIL import Image
import collections
# Load the node layout: maps a stringified index to an {x, y} pixel position
with open('../config/nodes.json') as data_file:
    nodes = json.load(data_file)
# Pre-allocate one slot per node
ordered_nodes = [None] * len(nodes)
# Populate positions in index order
for i, pos in nodes.items():
    ordered_nodes[int(i)] = [pos['x'], pos['y']]
filename = "04_rgb_vertical_lines"
im = Image.open("../gif_generators/output/"+filename+".gif") #Can be many different formats.
target_size = 400, 400
resize = False
if target_size != im.size:
    resize = True
data = []
# To iterate through the entire gif: seek frame by frame until EOFError
try:
    frame_num = 0
    while True:
        im.seek(frame_num)
        frame_data = []
        # Convert the current frame to RGB so getpixel returns (r, g, b)
        img = im.convert('RGB')
        if resize == True:
            print "Resizing"
            img.thumbnail(target_size, Image.ANTIALIAS)
        # Sample the pixel colour at every node position for this frame
        for x, y in ordered_nodes:
            frame_data.append(img.getpixel((x, y)))
            #print r, g, b
        data.append(frame_data)
        # write to json
        print frame_num
        frame_num+=1
except EOFError:
    pass # end of sequence
#print data
#print r, g, b
# Dump all sampled frames to <filename>.json alongside an (empty) meta block
with open(filename+'.json', 'w') as outfile:
    json.dump({
        "meta": {},
        "data": data
    }, outfile)
print im.size #Get the width and hight of the image for iterating over
#print pix[,y] #Get the RGBA Value of the a pixel of an image
| Ibuprofen/gizehmoviepy | gif_parsers/read_rgb.py | Python | mit | 1,281 |
"""
weasyprint.tests.layout.block
-----------------------------
Tests for blocks layout.
"""
import pytest
from weasyprint.formatting_structure import boxes
from ..testing_utils import assert_no_logs, render_pages
@assert_no_logs
def test_block_widths():
    """Check used widths/margins of block boxes under auto, over-constrained,
    min/max-width and percentage cases (CSS 2.1 section 10.3.3)."""
    page, = render_pages('''
      <style>
        @page { margin: 0; size: 120px 2000px }
        body { margin: 0 }
        div { margin: 10px }
        p { padding: 2px; border-width: 1px; border-style: solid }
      </style>
      <div>
        <p></p>
        <p style="width: 50px"></p>
      </div>
      <div style="direction: rtl">
        <p style="width: 50px; direction: rtl"></p>
      </div>
      <div>
        <p style="margin: 0 10px 0 20px"></p>
        <p style="width: 50px; margin-left: 20px; margin-right: auto"></p>
        <p style="width: 50px; margin-left: auto; margin-right: 20px"></p>
        <p style="width: 50px; margin: auto"></p>
        <p style="margin-left: 20px; margin-right: auto"></p>
        <p style="margin-left: auto; margin-right: 20px"></p>
        <p style="margin: auto"></p>
        <p style="width: 200px; margin: auto"></p>
        <p style="min-width: 200px; margin: auto"></p>
        <p style="max-width: 50px; margin: auto"></p>
        <p style="min-width: 50px; margin: auto"></p>
        <p style="width: 70%"></p>
      </div>
    ''')
    html, = page.children
    assert html.element_tag == 'html'
    body, = html.children
    assert body.element_tag == 'body'
    assert body.width == 120
    divs = body.children
    # Collect all paragraphs, verifying the shared box properties along the way
    paragraphs = []
    for div in divs:
        assert isinstance(div, boxes.BlockBox)
        assert div.element_tag == 'div'
        assert div.width == 100
        for paragraph in div.children:
            assert isinstance(paragraph, boxes.BlockBox)
            assert paragraph.element_tag == 'p'
            assert paragraph.padding_left == 2
            assert paragraph.padding_right == 2
            assert paragraph.border_left_width == 1
            assert paragraph.border_right_width == 1
            paragraphs.append(paragraph)
    assert len(paragraphs) == 15
    # width is 'auto'
    assert paragraphs[0].width == 94
    assert paragraphs[0].margin_left == 0
    assert paragraphs[0].margin_right == 0
    # No 'auto', over-constrained equation with ltr, the initial
    # 'margin-right: 0' was ignored.
    assert paragraphs[1].width == 50
    assert paragraphs[1].margin_left == 0
    # No 'auto', over-constrained equation with rtl, the initial
    # 'margin-left: 0' was ignored.
    assert paragraphs[2].width == 50
    assert paragraphs[2].margin_right == 0
    # width is 'auto'
    assert paragraphs[3].width == 64
    assert paragraphs[3].margin_left == 20
    # margin-right is 'auto'
    assert paragraphs[4].width == 50
    assert paragraphs[4].margin_left == 20
    # margin-left is 'auto'
    assert paragraphs[5].width == 50
    assert paragraphs[5].margin_left == 24
    # Both margins are 'auto', remaining space is split in half
    assert paragraphs[6].width == 50
    assert paragraphs[6].margin_left == 22
    # width is 'auto', other 'auto' are set to 0
    assert paragraphs[7].width == 74
    assert paragraphs[7].margin_left == 20
    # width is 'auto', other 'auto' are set to 0
    assert paragraphs[8].width == 74
    assert paragraphs[8].margin_left == 0
    # width is 'auto', other 'auto' are set to 0
    assert paragraphs[9].width == 94
    assert paragraphs[9].margin_left == 0
    # sum of non-auto initially is too wide, set auto values to 0
    assert paragraphs[10].width == 200
    assert paragraphs[10].margin_left == 0
    # Constrained by min-width, same as above
    assert paragraphs[11].width == 200
    assert paragraphs[11].margin_left == 0
    # Constrained by max-width, same as paragraphs[6]
    assert paragraphs[12].width == 50
    assert paragraphs[12].margin_left == 22
    # NOT constrained by min-width
    assert paragraphs[13].width == 94
    assert paragraphs[13].margin_left == 0
    # 70%
    assert paragraphs[14].width == 70
    assert paragraphs[14].margin_left == 0
@assert_no_logs
def test_block_heights_p():
    """A block's auto height sums its in-flow children; absolutely
    positioned and floated children don't contribute, and explicit
    height / min-height / max-height override or clamp the result."""
    page, = render_pages('''
      <style>
        @page { margin: 0; size: 100px 20000px }
        html, body { margin: 0 }
        div { margin: 4px; border: 2px solid; padding: 4px }
        /* Use top margins so that margin collapsing doesn't change result */
        p { margin: 16px 0 0; border: 4px solid; padding: 8px; height: 50px }
      </style>
      <div>
        <p></p>
        <!-- Not in normal flow: don't contribute to the parent’s height -->
        <p style="position: absolute"></p>
        <p style="float: left"></p>
      </div>
      <div> <p></p> <p></p> <p></p> </div>
      <div style="height: 20px"> <p></p> </div>
      <div style="height: 120px"> <p></p> </div>
      <div style="max-height: 20px"> <p></p> </div>
      <div style="min-height: 120px"> <p></p> </div>
      <div style="min-height: 20px"> <p></p> </div>
      <div style="max-height: 120px"> <p></p> </div>
    ''')
    html, = page.children
    body, = html.children
    heights = [div.height for div in body.children]
    # Each <p> contributes 16 + 4*2 + 8*2 + 50 = 90px of margin height.
    assert heights == [90, 90 * 3, 20, 120, 20, 120, 90, 90]
@assert_no_logs
def test_block_heights_img():
    """With an explicit height on body, percentage (min-/max-)heights on
    child divs resolve against that 200px containing-block height."""
    page, = render_pages('''
      <style>
        body { height: 200px; font-size: 0 }
      </style>
      <div>
        <img src=pattern.png style="height: 40px">
      </div>
      <div style="height: 10%">
        <img src=pattern.png style="height: 40px">
      </div>
      <div style="max-height: 20px">
        <img src=pattern.png style="height: 40px">
      </div>
      <div style="max-height: 10%">
        <img src=pattern.png style="height: 40px">
      </div>
      <div style="min-height: 20px"></div>
      <div style="min-height: 10%"></div>
    ''')
    html, = page.children
    body, = html.children
    heights = [div.height for div in body.children]
    # 10% of 200px is 20px for height, max-height and min-height alike.
    assert heights == [40, 20, 20, 20, 20, 20]
@assert_no_logs
def test_block_heights_img_no_body_height():
    # Same but with no height on body: percentage *-height is ignored
    """Without an explicit body height, percentage heights behave as
    'auto' (ignored); only the absolute px constraints still apply."""
    page, = render_pages('''
      <style>
        body { font-size: 0 }
      </style>
      <div>
        <img src=pattern.png style="height: 40px">
      </div>
      <div style="height: 10%">
        <img src=pattern.png style="height: 40px">
      </div>
      <div style="max-height: 20px">
        <img src=pattern.png style="height: 40px">
      </div>
      <div style="max-height: 10%">
        <img src=pattern.png style="height: 40px">
      </div>
      <div style="min-height: 20px"></div>
      <div style="min-height: 10%"></div>
    ''')
    html, = page.children
    body, = html.children
    heights = [div.height for div in body.children]
    # Percentages fall back to content height (40) or to 0 for empty divs.
    assert heights == [40, 40, 20, 40, 20, 0]
@assert_no_logs
def test_block_percentage_heights_no_html_height():
    """A percentage height on body is treated as 'auto' when the height
    of its containing block (html) itself depends on its content."""
    page, = render_pages('''
      <style>
        html, body { margin: 0 }
        body { height: 50% }
      </style>
    ''')
    root, = page.children
    assert root.element_tag == 'html'
    body_box, = root.children
    assert body_box.element_tag == 'body'
    # html has no definite height, so body's 50% resolves to 'auto':
    # with no content at all that is a height of zero.
    assert body_box.height == 0
@assert_no_logs
def test_block_percentage_heights():
    """With a definite height on html, a percentage height on body
    resolves against it."""
    page, = render_pages('''
      <style>
        html, body { margin: 0 }
        html { height: 300px }
        body { height: 50% }
      </style>
    ''')
    root, = page.children
    assert root.element_tag == 'html'
    body_box, = root.children
    assert body_box.element_tag == 'body'
    # 50% of the definite 300px html height.
    assert body_box.height == 150
@assert_no_logs
@pytest.mark.parametrize('size', (
    ('width: 10%; height: 1000px',),
    ('max-width: 10%; max-height: 1000px; height: 2000px',),
    ('width: 5%; min-width: 10%; min-height: 1000px',),
    ('width: 10%; height: 1000px; min-width: auto; max-height: none',),
))
def test_box_sizing(size):
    # http://www.w3.org/TR/css3-ui/#box-sizing
    """Each parametrized size resolves to a 1000px used size; box-sizing
    decides whether padding/border are carved out of it or added to it."""
    page, = render_pages('''
      <style>
        @page { size: 100000px }
        body { width: 10000px; margin: 0 }
        div { %s; margin: 100px; padding: 10px; border: 1px solid }
      </style>
      <div></div>
      <div style="box-sizing: content-box"></div>
      <div style="box-sizing: padding-box"></div>
      <div style="box-sizing: border-box"></div>
    ''' % size)
    html, = page.children
    body, = html.children
    div_1, div_2, div_3, div_4 = body.children
    # content-box (the default): 1000px applies to the content box only.
    for div in div_1, div_2:
        assert div.style['box_sizing'] == 'content-box'
        assert div.width == 1000
        assert div.height == 1000
        assert div.padding_width() == 1020
        assert div.padding_height() == 1020
        assert div.border_width() == 1022
        assert div.border_height() == 1022
        assert div.margin_height() == 1222
        # margin_width() is the width of the containing block
    # padding-box
    assert div_3.style['box_sizing'] == 'padding-box'
    assert div_3.width == 980  # 1000 - 20
    assert div_3.height == 980
    assert div_3.padding_width() == 1000
    assert div_3.padding_height() == 1000
    assert div_3.border_width() == 1002
    assert div_3.border_height() == 1002
    assert div_3.margin_height() == 1202
    # border-box
    assert div_4.style['box_sizing'] == 'border-box'
    assert div_4.width == 978  # 1000 - 20 - 2
    assert div_4.height == 978
    assert div_4.padding_width() == 998
    assert div_4.padding_height() == 998
    assert div_4.border_width() == 1000
    assert div_4.border_height() == 1000
    assert div_4.margin_height() == 1200
@assert_no_logs
@pytest.mark.parametrize('size', (
    ('width: 0; height: 0'),
    ('max-width: 0; max-height: 0'),
    ('min-width: 0; min-height: 0; width: 0; height: 0'),
))
def test_box_sizing_zero(size):
    # http://www.w3.org/TR/css3-ui/#box-sizing
    """With a used size of zero, all box-sizing values are equivalent:
    the content box is 0 and padding/border are added on top."""
    page, = render_pages('''
      <style>
        @page { size: 100000px }
        body { width: 10000px; margin: 0 }
        div { %s; margin: 100px; padding: 10px; border: 1px solid }
      </style>
      <div></div>
      <div style="box-sizing: content-box"></div>
      <div style="box-sizing: padding-box"></div>
      <div style="box-sizing: border-box"></div>
    ''' % size)
    html, = page.children
    body, = html.children
    for div in body.children:
        assert div.width == 0
        assert div.height == 0
        assert div.padding_width() == 20
        assert div.padding_height() == 20
        assert div.border_width() == 22
        assert div.border_height() == 22
        assert div.margin_height() == 222
        # margin_width() is the width of the containing block
# Fixtures for the vertical-space tests below: each row is
# (margin of first box, margin of second box, expected gap in px).
COLLAPSING = (
    ('10px', '15px', 15),  # not 25
    # "The maximum of the absolute values of the negative adjoining margins is
    # deducted from the maximum of the positive adjoining margins"
    ('-10px', '15px', 5),
    ('10px', '-15px', -5),
    ('-10px', '-15px', -15),
    ('10px', 'auto', 10),  # 'auto' is 0
)
# Expected gaps when the two margins do NOT collapse (they simply add up).
NOT_COLLAPSING = (
    ('10px', '15px', 25),
    ('-10px', '15px', 5),
    ('10px', '-15px', -5),
    ('-10px', '-15px', -25),
    ('10px', 'auto', 10),  # 'auto' is 0
)
@pytest.mark.parametrize('margin_1, margin_2, result', COLLAPSING)
def test_vertical_space_1(margin_1, margin_2, result):
    # Siblings
    """Vertical margins of adjacent sibling blocks collapse."""
    page, = render_pages('''
      <style>
        p { font: 20px/1 serif } /* block height == 20px */
        #p1 { margin-bottom: %s }
        #p2 { margin-top: %s }
      </style>
      <p id=p1>Lorem ipsum
      <p id=p2>dolor sit amet
    ''' % (margin_1, margin_2))
    html, = page.children
    body, = html.children
    p1, p2 = body.children
    p1_bottom = p1.content_box_y() + p1.height
    p2_top = p2.content_box_y()
    assert p2_top - p1_bottom == result
@pytest.mark.parametrize('margin_1, margin_2, result', COLLAPSING)
def test_vertical_space_2(margin_1, margin_2, result):
    # Not siblings, first is nested
    """Margins still collapse when the first block is nested in a div."""
    page, = render_pages('''
      <style>
        p { font: 20px/1 serif } /* block height == 20px */
        #p1 { margin-bottom: %s }
        #p2 { margin-top: %s }
      </style>
      <div>
        <p id=p1>Lorem ipsum
      </div>
      <p id=p2>dolor sit amet
    ''' % (margin_1, margin_2))
    html, = page.children
    body, = html.children
    div, p2 = body.children
    p1, = div.children
    p1_bottom = p1.content_box_y() + p1.height
    p2_top = p2.content_box_y()
    assert p2_top - p1_bottom == result
@pytest.mark.parametrize('margin_1, margin_2, result', COLLAPSING)
def test_vertical_space_3(margin_1, margin_2, result):
    # Not siblings, second is nested
    """Margins still collapse when the second block is nested in a div."""
    page, = render_pages('''
      <style>
        p { font: 20px/1 serif } /* block height == 20px */
        #p1 { margin-bottom: %s }
        #p2 { margin-top: %s }
      </style>
      <p id=p1>Lorem ipsum
      <div>
        <p id=p2>dolor sit amet
      </div>
    ''' % (margin_1, margin_2))
    html, = page.children
    body, = html.children
    p1, div = body.children
    p2, = div.children
    p1_bottom = p1.content_box_y() + p1.height
    p2_top = p2.content_box_y()
    assert p2_top - p1_bottom == result
@pytest.mark.parametrize('margin_1, margin_2, result', COLLAPSING)
def test_vertical_space_4(margin_1, margin_2, result):
    # Not siblings, second is doubly nested
    """Margins still collapse through two levels of nesting."""
    page, = render_pages('''
      <style>
        p { font: 20px/1 serif } /* block height == 20px */
        #p1 { margin-bottom: %s }
        #p2 { margin-top: %s }
      </style>
      <p id=p1>Lorem ipsum
      <div>
        <div>
          <p id=p2>dolor sit amet
        </div>
      </div>
    ''' % (margin_1, margin_2))
    html, = page.children
    body, = html.children
    p1, div1 = body.children
    div2, = div1.children
    p2, = div2.children
    p1_bottom = p1.content_box_y() + p1.height
    p2_top = p2.content_box_y()
    assert p2_top - p1_bottom == result
@pytest.mark.parametrize('margin_1, margin_2, result', COLLAPSING)
def test_vertical_space_5(margin_1, margin_2, result):
    # Collapsing with children
    """A block's top margin collapses with its first child's margin."""
    page, = render_pages('''
      <style>
        p { font: 20px/1 serif } /* block height == 20px */
        #div1 { margin-top: %s }
        #div2 { margin-top: %s }
      </style>
      <p>Lorem ipsum
      <div id=div1>
        <div id=div2>
          <p id=p2>dolor sit amet
        </div>
      </div>
    ''' % (margin_1, margin_2))
    html, = page.children
    body, = html.children
    p1, div1 = body.children
    div2, = div1.children
    p2, = div2.children
    p1_bottom = p1.content_box_y() + p1.height
    p2_top = p2.content_box_y()
    # Parent and element edge are the same:
    assert div1.border_box_y() == p2.border_box_y()
    assert div2.border_box_y() == p2.border_box_y()
    assert p2_top - p1_bottom == result
@pytest.mark.parametrize('margin_1, margin_2, result', NOT_COLLAPSING)
def test_vertical_space_6(margin_1, margin_2, result):
    # Block formatting context: Not collapsing with children
    """overflow: hidden creates a block formatting context, so the parent
    margin does NOT collapse with its children's margins."""
    page, = render_pages('''
      <style>
        p { font: 20px/1 serif } /* block height == 20px */
        #div1 { margin-top: %s; overflow: hidden }
        #div2 { margin-top: %s }
      </style>
      <p>Lorem ipsum
      <div id=div1>
        <div id=div2>
          <p id=p2>dolor sit amet
        </div>
      </div>
    ''' % (margin_1, margin_2))
    html, = page.children
    body, = html.children
    p1, div1 = body.children
    div2, = div1.children
    p2, = div2.children
    p1_bottom = p1.content_box_y() + p1.height
    p2_top = p2.content_box_y()
    assert p2_top - p1_bottom == result
@pytest.mark.parametrize('margin_1, margin_2, result', COLLAPSING)
def test_vertical_space_7(margin_1, margin_2, result):
    # Collapsing through an empty div
    """All four margins around/on an empty div collapse into one gap."""
    page, = render_pages('''
      <style>
        p { font: 20px/1 serif } /* block height == 20px */
        #p1 { margin-bottom: %s }
        #p2 { margin-top: %s }
        div { margin-bottom: %s; margin-top: %s }
      </style>
      <p id=p1>Lorem ipsum
      <div></div>
      <p id=p2>dolor sit amet
    ''' % (2 * (margin_1, margin_2)))
    html, = page.children
    body, = html.children
    p1, div, p2 = body.children
    p1_bottom = p1.content_box_y() + p1.height
    p2_top = p2.content_box_y()
    assert p2_top - p1_bottom == result
@pytest.mark.parametrize('margin_1, margin_2, result', NOT_COLLAPSING)
def test_vertical_space_8(margin_1, margin_2, result):
    # The root element does not collapse
    """Margins of the root element (html) never collapse with body's."""
    page, = render_pages('''
      <style>
        html { margin-top: %s }
        body { margin-top: %s }
      </style>
      <p>Lorem ipsum
    ''' % (margin_1, margin_2))
    html, = page.children
    body, = html.children
    p1, = body.children
    p1_top = p1.content_box_y()
    # Vertical space from y=0
    assert p1_top == result
@pytest.mark.parametrize('margin_1, margin_2, result', COLLAPSING)
def test_vertical_space_9(margin_1, margin_2, result):
    # <body> DOES collapse
    """Unlike html, body's margin does collapse with its children's."""
    page, = render_pages('''
      <style>
        body { margin-top: %s }
        div { margin-top: %s }
      </style>
      <div>
        <p>Lorem ipsum
    ''' % (margin_1, margin_2))
    html, = page.children
    body, = html.children
    div, = body.children
    p1, = div.children
    p1_top = p1.content_box_y()
    # Vertical space from y=0
    assert p1_top == result
@assert_no_logs
def test_box_decoration_break_block_slice():
    # http://www.w3.org/TR/css3-background/#the-box-decoration-break
    """With the default 'slice' value, a block broken across pages keeps
    its top decoration on the first fragment and its bottom decoration on
    the last fragment only."""
    page_1, page_2 = render_pages('''
      <style>
        @page { size: 100px }
        p { padding: 2px; border: 3px solid; margin: 5px }
        img { display: block; height: 40px }
      </style>
      <p>
        <img src=pattern.png>
        <img src=pattern.png>
        <img src=pattern.png>
        <img src=pattern.png>''')
    html, = page_1.children
    body, = html.children
    paragraph, = body.children
    img_1, img_2 = paragraph.children
    # First fragment: top margin/border/padding present, bottom ones cut.
    assert paragraph.position_y == 0
    assert paragraph.margin_top == 5
    assert paragraph.border_top_width == 3
    assert paragraph.padding_top == 2
    assert paragraph.content_box_y() == 10
    assert img_1.position_y == 10
    assert img_2.position_y == 50
    assert paragraph.height == 90
    assert paragraph.margin_bottom == 0
    assert paragraph.border_bottom_width == 0
    assert paragraph.padding_bottom == 0
    assert paragraph.margin_height() == 100
    html, = page_2.children
    body, = html.children
    paragraph, = body.children
    img_1, img_2 = paragraph.children
    # Second fragment: top decorations cut, bottom ones restored.
    assert paragraph.position_y == 0
    assert paragraph.margin_top == 0
    assert paragraph.border_top_width == 0
    assert paragraph.padding_top == 0
    assert paragraph.content_box_y() == 0
    assert img_1.position_y == 0
    assert img_2.position_y == 40
    assert paragraph.height == 80
    assert paragraph.padding_bottom == 2
    assert paragraph.border_bottom_width == 3
    assert paragraph.margin_bottom == 5
    assert paragraph.margin_height() == 90
@assert_no_logs
def test_box_decoration_break_block_clone():
    # http://www.w3.org/TR/css3-background/#the-box-decoration-break
    """With 'clone', each fragment of a block broken across pages gets its
    own copy of the border and padding on both edges."""
    page_1, page_2 = render_pages('''
      <style>
        @page { size: 100px }
        p { padding: 2px; border: 3px solid; margin: 5px;
            box-decoration-break: clone }
        img { display: block; height: 40px }
      </style>
      <p>
        <img src=pattern.png>
        <img src=pattern.png>
        <img src=pattern.png>
        <img src=pattern.png>''')
    html, = page_1.children
    body, = html.children
    paragraph, = body.children
    img_1, img_2 = paragraph.children
    assert paragraph.position_y == 0
    assert paragraph.margin_top == 5
    assert paragraph.border_top_width == 3
    assert paragraph.padding_top == 2
    assert paragraph.content_box_y() == 10
    assert img_1.position_y == 10
    assert img_2.position_y == 50
    assert paragraph.height == 80
    # TODO: bottom margin should be 0
    # https://www.w3.org/TR/css-break-3/#valdef-box-decoration-break-clone
    # "Cloned margins are truncated on block-level boxes."
    # See https://github.com/Kozea/WeasyPrint/issues/115
    assert paragraph.margin_bottom == 5
    assert paragraph.border_bottom_width == 3
    assert paragraph.padding_bottom == 2
    assert paragraph.margin_height() == 100
    html, = page_2.children
    body, = html.children
    paragraph, = body.children
    img_1, img_2 = paragraph.children
    # Second fragment: border and padding cloned on top, margin is not.
    assert paragraph.position_y == 0
    assert paragraph.margin_top == 0
    assert paragraph.border_top_width == 3
    assert paragraph.padding_top == 2
    assert paragraph.content_box_y() == 5
    assert img_1.position_y == 5
    assert img_2.position_y == 45
    assert paragraph.height == 80
    assert paragraph.padding_bottom == 2
    assert paragraph.border_bottom_width == 3
    assert paragraph.margin_bottom == 5
    assert paragraph.margin_height() == 95
@assert_no_logs
def test_box_decoration_break_clone_bottom_padding():
    """With 'clone', the bottom padding is reserved on every fragment, so
    only two of the three 20px divs fit on the first 80px page."""
    page_1, page_2 = render_pages('''
      <style>
        @page { size: 80px; margin: 0 }
        div { height: 20px }
        article { padding: 12px; box-decoration-break: clone }
      </style>
      <article>
        <div>a</div>
        <div>b</div>
        <div>c</div>
      </article>''')
    html, = page_1.children
    body, = html.children
    article, = body.children
    assert article.height == 80 - 2 * 12
    div_1, div_2 = article.children
    assert div_1.position_y == 12
    assert div_2.position_y == 12 + 20
    html, = page_2.children
    body, = html.children
    article, = body.children
    assert article.height == 20
    div, = article.children
    # Cloned top padding pushes the remaining div down on page 2 as well.
    assert div.position_y == 12
@pytest.mark.xfail
@assert_no_logs
def test_box_decoration_break_slice_bottom_padding():  # pragma: no cover
    # Last div fits in first, but not article's padding. As it is impossible to
    # break between a parent and its last child, put last child on next page.
    # TODO: at the end of block_container_layout, we should check that the box
    # with its bottom border/padding doesn't cross the bottom line. If it does,
    # we should re-render the box with a bottom_space including the bottom
    # border/padding.
    """Expected (currently failing) behavior: with 'slice', the bottom
    padding must still fit on the page carrying the last child."""
    page_1, page_2 = render_pages('''
      <style>
        @page { size: 80px; margin: 0 }
        div { height: 20px }
        article { padding: 12px; box-decoration-break: slice }
      </style>
      <article>
        <div>a</div>
        <div>b</div>
        <div>c</div>
      </article>''')
    html, = page_1.children
    body, = html.children
    article, = body.children
    assert article.height == 80 - 12
    div_1, div_2 = article.children
    assert div_1.position_y == 12
    assert div_2.position_y == 12 + 20
    html, = page_2.children
    body, = html.children
    article, = body.children
    assert article.height == 20
    div, = article.children
    # 'slice' drops the top padding on the continuation fragment.
    assert div.position_y == 0
@assert_no_logs
def test_overflow_auto():
    """overflow: auto makes the article a block formatting context, so
    its height includes the floated child (50px + 10px margins)."""
    page, = render_pages('''
      <article style="overflow: auto">
        <div style="float: left; height: 50px; margin: 10px">bla bla bla</div>
        toto toto''')
    root, = page.children
    body_box, = root.children
    article_box, = body_box.children
    assert article_box.height == 70  # float height + both vertical margins
@assert_no_logs
def test_box_margin_top_repagination():
    # Test regression: https://github.com/Kozea/WeasyPrint/issues/943
    """Re-rendering for target-counter must not lose the div's top margin
    on the first page, while still dropping it after the page break."""
    page_1, page_2 = render_pages('''
      <style>
        @page { size: 50px }
        :root { line-height: 1; font-size: 10px }
        a::before { content: target-counter(attr(href), page) }
        div { margin: 20px 0 0; background: yellow }
      </style>
      <p><a href="#title"></a></p>
      <div>1<br/>1<br/>2<br/>2</div>
      <h1 id="title">title</h1>
    ''')
    html, = page_1.children
    body, = html.children
    p, div = body.children
    assert div.margin_top == 20
    assert div.padding_box_y() == 10 + 20
    html, = page_2.children
    body, = html.children
    div, h1 = body.children
    # Margin is not repeated on the continuation fragment.
    assert div.margin_top == 0
    assert div.padding_box_y() == 0
@assert_no_logs
def test_continue_discard():
    """continue: discard drops the overflowing children instead of
    breaking to a new page, and closes the bottom border on page 1."""
    page_1, = render_pages('''
      <style>
        @page { size: 80px; margin: 0 }
        div { display: inline-block; width: 100%; height: 25px }
        article { continue: discard; border: 1px solid; line-height: 1 }
      </style>
      <article>
        <div>a</div>
        <div>b</div>
        <div>c</div>
        <div>d</div>
        <div>e</div>
        <div>f</div>
      </article>''')
    html, = page_1.children
    body, = html.children
    article, = body.children
    # Only the three divs that fit inside the 80px page are kept.
    assert article.height == 3 * 25
    div_1, div_2, div_3 = article.children
    assert div_1.position_y == 1
    assert div_2.position_y == 1 + 25
    assert div_3.position_y == 1 + 25 * 2
    assert article.border_bottom_width == 1
@assert_no_logs
def test_continue_discard_children():
    """continue: discard also truncates content nested in a descendant,
    and both the article and the section close their bottom borders."""
    page_1, = render_pages('''
      <style>
        @page { size: 80px; margin: 0 }
        div { display: inline-block; width: 100%; height: 25px }
        section { border: 1px solid }
        article { continue: discard; border: 1px solid; line-height: 1 }
      </style>
      <article>
        <section>
          <div>a</div>
          <div>b</div>
          <div>c</div>
          <div>d</div>
          <div>e</div>
          <div>f</div>
        </section>
      </article>''')
    html, = page_1.children
    body, = html.children
    article, = body.children
    # Article height = section border (2 * 1px) + three kept divs.
    assert article.height == 2 + 3 * 25
    section, = article.children
    assert section.height == 3 * 25
    div_1, div_2, div_3 = section.children
    assert div_1.position_y == 2
    assert div_2.position_y == 2 + 25
    assert div_3.position_y == 2 + 25 * 2
    assert article.border_bottom_width == 1
@assert_no_logs
def test_block_in_block_with_bottom_padding():
    # Test regression: https://github.com/Kozea/WeasyPrint/issues/1476
    """The div's bottom padding must be accounted for when breaking, so
    the text after the div starts below padding on the second page."""
    page_1, page_2 = render_pages('''
      <style>
        @font-face { src: url(weasyprint.otf); font-family: weasyprint }
        @page { size: 8em 3.5em }
        body { line-height: 1; orphans: 1; widows: 1; font-family: weasyprint }
        div { padding-bottom: 1em }
      </style>
      abc def
      <div>
        <p>
          ghi jkl
          mno pqr
        </p>
      </div>
      stu vwx''')
    html, = page_1.children
    body, = html.children
    anon_body, div = body.children
    line, = anon_body.children
    assert line.height == 16
    assert line.children[0].text == 'abc def'
    p, = div.children
    line, = p.children
    assert line.height == 16
    assert line.children[0].text == 'ghi jkl'
    html, = page_2.children
    body, = html.children
    div, anon_body = body.children
    p, = div.children
    line, = p.children
    assert line.height == 16
    assert line.children[0].text == 'mno pqr'
    line, = anon_body.children
    assert line.height == 16
    assert line.content_box_y() == 16 + 16  # p content + div padding
    assert line.children[0].text == 'stu vwx'
| Kozea/WeasyPrint | tests/layout/test_block.py | Python | bsd-3-clause | 27,099 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Second migration for vkontakte_users: adds profile fields
    (instagram, nickname, schools, universities) and moderation flags
    (is_banned, is_deleted) to ``User``, and alters several existing
    char fields (max_length / db_index tweaks).

    Verbose names are escaped Cyrillic: is_banned='Забанен',
    is_deleted='Удален', has_avatar='Есть аватар',
    is_deactivated='Деактивирован'.
    """

    dependencies = [
        ('vkontakte_users', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='user',
            name='instagram',
            field=models.CharField(default='', max_length=30),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='user',
            name='is_banned',
            field=models.BooleanField(default=False, verbose_name='\u0417\u0430\u0431\u0430\u043d\u0435\u043d'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='user',
            name='is_deleted',
            field=models.BooleanField(default=False, verbose_name='\u0423\u0434\u0430\u043b\u0435\u043d'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='user',
            name='nickname',
            field=models.CharField(default='', max_length=32),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='user',
            name='schools',
            field=models.TextField(default=''),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='user',
            name='universities',
            field=models.TextField(default=''),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='user',
            name='bdate',
            field=models.CharField(max_length=10),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='user',
            name='facebook',
            field=models.CharField(max_length=18),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='user',
            name='facebook_name',
            field=models.CharField(max_length=50),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='user',
            name='faculty_name',
            field=models.CharField(max_length=255),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='user',
            name='first_name',
            field=models.CharField(max_length=32),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='user',
            name='has_avatar',
            field=models.BooleanField(default=True, db_index=True, verbose_name='\u0415\u0441\u0442\u044c \u0430\u0432\u0430\u0442\u0430\u0440'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='user',
            name='home_phone',
            field=models.CharField(max_length=24),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='user',
            name='is_deactivated',
            field=models.BooleanField(default=False, db_index=True, verbose_name='\u0414\u0435\u0430\u043a\u0442\u0438\u0432\u0438\u0440\u043e\u0432\u0430\u043d'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='user',
            name='last_name',
            field=models.CharField(max_length=32),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='user',
            name='livejournal',
            field=models.CharField(max_length=31),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='user',
            name='mobile_phone',
            field=models.CharField(max_length=24),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='user',
            name='screen_name',
            field=models.CharField(max_length=32, db_index=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='user',
            name='skype',
            field=models.CharField(max_length=32),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='user',
            name='twitter',
            field=models.CharField(max_length=15),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='user',
            name='university_name',
            field=models.CharField(max_length=255),
            preserve_default=True,
        ),
    ]
| Andertaker/django-vkontakte-users | vkontakte_users/migrations/0002_auto_20160213_0238.py | Python | bsd-3-clause | 4,586 |
# Copyright 2007-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import time
from portage import os
from portage.versions import best, catsplit, vercmp
from portage.dep import Atom
from portage.localization import _
from portage._sets.base import PackageSet
from portage._sets import SetConfigError, get_boolean
import portage
__all__ = ["CategorySet", "DowngradeSet",
"EverythingSet", "OwnerSet", "VariableSet"]
class EverythingSet(PackageSet):
	"""Package set of one SLOT atom per installed package slot.

	Subclasses may override the ``_filter`` hook (atom -> bool) to
	restrict which atoms are included.
	"""
	_operations = ["merge"]
	description = "Package set which contains SLOT " + \
		"atoms to match all installed packages"
	# Optional hook: callable(atom) -> bool, or None for "include all".
	_filter = None
	def __init__(self, vdbapi, **kwargs):
		super(EverythingSet, self).__init__()
		self._db = vdbapi
	def load(self):
		myatoms = []
		db_keys = ["SLOT"]
		aux_get = self._db.aux_get
		cp_list = self._db.cp_list
		for cp in self._db.cp_all():
			for cpv in cp_list(cp):
				# NOTE: Create SLOT atoms even when there is only one
				# SLOT installed, in order to avoid the possibility
				# of unwanted upgrades as reported in bug #338959.
				slot, = aux_get(cpv, db_keys)
				atom = Atom("%s:%s" % (cp, slot))
				if self._filter:
					if self._filter(atom):
						myatoms.append(atom)
				else:
					myatoms.append(atom)
		self._setAtoms(myatoms)
	def singleBuilder(self, options, settings, trees):
		return EverythingSet(trees["vartree"].dbapi)
	singleBuilder = classmethod(singleBuilder)
class OwnerSet(PackageSet):
	"""Package set of the installed packages owning the given files,
	optionally minus the owners of ``exclude-files``."""
	_operations = ["merge", "unmerge"]
	description = "Package set which contains all packages " + \
		"that own one or more files."
	def __init__(self, vardb=None, exclude_files=None, files=None):
		super(OwnerSet, self).__init__()
		self._db = vardb
		self._exclude_files = exclude_files
		self._files = files
	def mapPathsToAtoms(self, paths, exclude_paths=None):
		"""
		All paths must have $EROOT stripped from the left side.
		"""
		rValue = set()
		vardb = self._db
		aux_get = vardb.aux_get
		aux_keys = ["SLOT"]
		if exclude_paths is None:
			for link, p in vardb._owners.iter_owners(paths):
				slot, = aux_get(link.mycpv, aux_keys)
				rValue.add("%s:%s" % (link.mycpv.cp, slot))
		else:
			# Query owners of both path sets in one pass, then drop
			# every atom that owns at least one excluded path.
			all_paths = set()
			all_paths.update(paths)
			all_paths.update(exclude_paths)
			exclude_atoms = set()
			for link, p in vardb._owners.iter_owners(all_paths):
				slot, = aux_get(link.mycpv, aux_keys)
				atom = "%s:%s" % (link.mycpv.cp, slot)
				rValue.add(atom)
				if p in exclude_paths:
					exclude_atoms.add(atom)
			rValue.difference_update(exclude_atoms)
		return rValue
	def load(self):
		self._setAtoms(self.mapPathsToAtoms(self._files,
			exclude_paths=self._exclude_files))
	def singleBuilder(cls, options, settings, trees):
		if not "files" in options:
			raise SetConfigError(_("no files given"))
		exclude_files = options.get("exclude-files")
		if exclude_files is not None:
			exclude_files = frozenset(portage.util.shlex_split(exclude_files))
		return cls(vardb=trees["vartree"].dbapi, exclude_files=exclude_files,
			files=frozenset(portage.util.shlex_split(options["files"])))
	singleBuilder = classmethod(singleBuilder)
class VariableSet(EverythingSet):
	"""Installed packages filtered by the values of a metadata variable
	(e.g. LICENSE, HOMEPAGE) of the best matching ebuild in the chosen
	metadata source tree."""
	_operations = ["merge", "unmerge"]
	description = "Package set which contains all packages " + \
		"that match specified values of a specified variable."
	def __init__(self, vardb, metadatadb=None, variable=None, includes=None, excludes=None):
		super(VariableSet, self).__init__(vardb)
		self._metadatadb = metadatadb
		self._variable = variable
		self._includes = includes
		self._excludes = excludes
	def _filter(self, atom):
		# Values are read from the best visible match in the metadata db,
		# not from the installed package itself.
		ebuild = best(self._metadatadb.match(atom))
		if not ebuild:
			return False
		values, = self._metadatadb.aux_get(ebuild, [self._variable])
		values = values.split()
		if self._includes and not self._includes.intersection(values):
			return False
		if self._excludes and self._excludes.intersection(values):
			return False
		return True
	def singleBuilder(cls, options, settings, trees):
		variable = options.get("variable")
		if variable is None:
			raise SetConfigError(_("missing required attribute: 'variable'"))
		includes = options.get("includes", "")
		excludes = options.get("excludes", "")
		if not (includes or excludes):
			raise SetConfigError(_("no includes or excludes given"))
		metadatadb = options.get("metadata-source", "vartree")
		if not metadatadb in trees:
			raise SetConfigError(_("invalid value '%s' for option metadata-source") % metadatadb)
		return cls(trees["vartree"].dbapi,
			metadatadb=trees[metadatadb].dbapi,
			excludes=frozenset(excludes.split()),
			includes=frozenset(includes.split()),
			variable=variable)
	singleBuilder = classmethod(singleBuilder)
class DowngradeSet(PackageSet):
	"""Installed packages whose installed version is higher than the best
	visible ebuild for the same slot (i.e. candidates for downgrade)."""
	_operations = ["merge", "unmerge"]
	description = "Package set which contains all packages " + \
		"for which the highest visible ebuild version is lower than " + \
		"the currently installed version."
	def __init__(self, portdb=None, vardb=None):
		super(DowngradeSet, self).__init__()
		self._portdb = portdb
		self._vardb = vardb
	def load(self):
		atoms = []
		xmatch = self._portdb.xmatch
		xmatch_level = "bestmatch-visible"
		cp_list = self._vardb.cp_list
		aux_get = self._vardb.aux_get
		aux_keys = ["SLOT"]
		for cp in self._vardb.cp_all():
			for cpv in cp_list(cp):
				slot, = aux_get(cpv, aux_keys)
				slot_atom = "%s:%s" % (cp, slot)
				ebuild = xmatch(xmatch_level, slot_atom)
				if not ebuild:
					# No visible ebuild for this slot: nothing to compare.
					continue
				if vercmp(cpv.version, ebuild.version) > 0:
					atoms.append(slot_atom)
		self._setAtoms(atoms)
	def singleBuilder(cls, options, settings, trees):
		return cls(portdb=trees["porttree"].dbapi,
			vardb=trees["vartree"].dbapi)
	singleBuilder = classmethod(singleBuilder)
class UnavailableSet(EverythingSet):
	"""Installed packages with no matching visible ebuild in the
	configured metadata source (default: the ports tree)."""
	_operations = ["unmerge"]
	description = "Package set which contains all installed " + \
		"packages for which there are no visible ebuilds " + \
		"corresponding to the same $CATEGORY/$PN:$SLOT."
	def __init__(self, vardb, metadatadb=None):
		super(UnavailableSet, self).__init__(vardb)
		self._metadatadb = metadatadb
	def _filter(self, atom):
		# Keep the atom only when nothing in the metadata db matches it.
		return not self._metadatadb.match(atom)
	def singleBuilder(cls, options, settings, trees):
		metadatadb = options.get("metadata-source", "porttree")
		if not metadatadb in trees:
			raise SetConfigError(_("invalid value '%s' for option "
				"metadata-source") % (metadatadb,))
		return cls(trees["vartree"].dbapi,
			metadatadb=trees[metadatadb].dbapi)
	singleBuilder = classmethod(singleBuilder)
class UnavailableBinaries(EverythingSet):
	"""Installed packages whose exact version has no corresponding
	binary package in the configured metadata source (default bintree)."""
	_operations = ('merge', 'unmerge',)
	description = "Package set which contains all installed " + \
		"packages for which corresponding binary packages " + \
		"are not available."
	def __init__(self, vardb, metadatadb=None):
		super(UnavailableBinaries, self).__init__(vardb)
		self._metadatadb = metadatadb
	def _filter(self, atom):
		inst_pkg = self._db.match(atom)
		if not inst_pkg:
			return False
		inst_cpv = inst_pkg[0]
		# Compare by exact cpv: a binary for another version doesn't count.
		return not self._metadatadb.cpv_exists(inst_cpv)
	def singleBuilder(cls, options, settings, trees):
		metadatadb = options.get("metadata-source", "bintree")
		if not metadatadb in trees:
			raise SetConfigError(_("invalid value '%s' for option "
				"metadata-source") % (metadatadb,))
		return cls(trees["vartree"].dbapi,
			metadatadb=trees[metadatadb].dbapi)
	singleBuilder = classmethod(singleBuilder)
class CategorySet(PackageSet):
	"""Package set of all (optionally only visible) packages in one
	category of a repository dbapi."""
	_operations = ["merge", "unmerge"]
	def __init__(self, category, dbapi, only_visible=True):
		super(CategorySet, self).__init__()
		self._db = dbapi
		self._category = category
		self._check = only_visible
		if only_visible:
			s="visible"
		else:
			s="all"
		self.description = "Package set containing %s packages of category %s" % (s, self._category)
	def load(self):
		myatoms = []
		for cp in self._db.cp_all():
			if catsplit(cp)[0] == self._category:
				# With only_visible, require at least one visible match.
				if (not self._check) or len(self._db.match(cp)) > 0:
					myatoms.append(cp)
		self._setAtoms(myatoms)
	def _builderGetRepository(cls, options, repositories):
		# Shared helper for singleBuilder/multiBuilder option parsing.
		repository = options.get("repository", "porttree")
		if not repository in repositories:
			raise SetConfigError(_("invalid repository class '%s'") % repository)
		return repository
	_builderGetRepository = classmethod(_builderGetRepository)
	def _builderGetVisible(cls, options):
		return get_boolean(options, "only_visible", True)
	_builderGetVisible = classmethod(_builderGetVisible)
	def singleBuilder(cls, options, settings, trees):
		if not "category" in options:
			raise SetConfigError(_("no category given"))
		category = options["category"]
		if not category in settings.categories:
			raise SetConfigError(_("invalid category name '%s'") % category)
		repository = cls._builderGetRepository(options, trees.keys())
		visible = cls._builderGetVisible(options)
		return CategorySet(category, dbapi=trees[repository].dbapi, only_visible=visible)
	singleBuilder = classmethod(singleBuilder)
	def multiBuilder(cls, options, settings, trees):
		# Builds one named set per category; "name_pattern" must contain a
		# $category (or ${category}) placeholder to generate unique names.
		rValue = {}
		if "categories" in options:
			categories = options["categories"].split()
			invalid = set(categories).difference(settings.categories)
			if invalid:
				raise SetConfigError(_("invalid categories: %s") % ", ".join(list(invalid)))
		else:
			categories = settings.categories
		repository = cls._builderGetRepository(options, trees.keys())
		visible = cls._builderGetVisible(options)
		name_pattern = options.get("name_pattern", "$category/*")
		if not "$category" in name_pattern and not "${category}" in name_pattern:
			raise SetConfigError(_("name_pattern doesn't include $category placeholder"))
		for cat in categories:
			myset = CategorySet(cat, trees[repository].dbapi, only_visible=visible)
			myname = name_pattern.replace("$category", cat)
			myname = myname.replace("${category}", cat)
			rValue[myname] = myset
		return rValue
	multiBuilder = classmethod(multiBuilder)
class AgeSet(EverythingSet):
    """Set of installed packages filtered by installation age in days.

    ``mode="older"`` keeps packages installed at least ``age`` days ago;
    ``mode="newer"`` keeps packages installed within the last ``age`` days.
    """

    _operations = ["merge", "unmerge"]
    _aux_keys = ('BUILD_TIME',)

    def __init__(self, vardb, mode="older", age=7):
        super(AgeSet, self).__init__(vardb)
        self._mode = mode
        self._age = age

    def _filter(self, atom):
        cpv = self._db.match(atom)[0]
        try:
            date, = self._db.aux_get(cpv, self._aux_keys)
            date = int(date)
        except (KeyError, ValueError):
            # No usable BUILD_TIME: treat the package as infinitely old,
            # so it is kept only in "older" mode.
            return bool(self._mode == "older")
        age = (time.time() - date) / (3600 * 24)
        if ((self._mode == "older" and age <= self._age) \
                or (self._mode == "newer" and age >= self._age)):
            return False
        else:
            return True

    def singleBuilder(cls, options, settings, trees):
        mode = options.get("mode", "older")
        if str(mode).lower() not in ["newer", "older"]:
            raise SetConfigError(_("invalid 'mode' value %s (use either 'newer' or 'older')") % mode)
        try:
            age = int(options.get("age", "7"))
        # The exception instance was previously bound but never used.
        except ValueError:
            raise SetConfigError(_("value of option 'age' is not an integer"))

        return AgeSet(vardb=trees["vartree"].dbapi, mode=mode, age=age)
    singleBuilder = classmethod(singleBuilder)
class DateSet(EverythingSet):
    """Set of installed packages whose BUILD_TIME is strictly older or
    strictly newer than a fixed reference timestamp."""

    _operations = ["merge", "unmerge"]
    _aux_keys = ('BUILD_TIME',)

    def __init__(self, vardb, date, mode="older"):
        super(DateSet, self).__init__(vardb)
        self._mode = mode
        self._date = date

    def _filter(self, atom):
        cpv = self._db.match(atom)[0]
        try:
            date, = self._db.aux_get(cpv, self._aux_keys)
            date = int(date)
        except (KeyError, ValueError):
            # No usable BUILD_TIME recorded for this package.
            return bool(self._mode == "older")
        # Make sure inequality is _strict_ to exclude tested package
        if ((self._mode == "older" and date < self._date) \
                or (self._mode == "newer" and date > self._date)):
            return True
        else:
            return False

    def singleBuilder(cls, options, settings, trees):
        """Build a DateSet from set configuration options.

        Exactly one of 'package', 'filestamp', 'seconds' or 'date' must be
        given; it determines how the reference timestamp is obtained.
        """
        vardbapi = trees["vartree"].dbapi
        mode = options.get("mode", "older")
        if str(mode).lower() not in ["newer", "older"]:
            raise SetConfigError(_("invalid 'mode' value %s (use either 'newer' or 'older')") % mode)
        formats = []
        if options.get("package") is not None:
            formats.append("package")
        if options.get("filestamp") is not None:
            formats.append("filestamp")
        if options.get("seconds") is not None:
            formats.append("seconds")
        if options.get("date") is not None:
            formats.append("date")
        if not formats:
            raise SetConfigError(_("none of these options specified: 'package', 'filestamp', 'seconds', 'date'"))
        elif len(formats) > 1:
            raise SetConfigError(_("no more than one of these options is allowed: 'package', 'filestamp', 'seconds', 'date'"))
        format = formats[0]
        if (format == "package"):
            package = options.get("package")
            try:
                # NOTE(review): an unmatched package raises IndexError here,
                # which is not converted to SetConfigError — confirm intended.
                cpv = vardbapi.match(package)[0]
                date, = vardbapi.aux_get(cpv, ('BUILD_TIME',))
                date = int(date)
            except (KeyError, ValueError):
                raise SetConfigError(_("cannot determine installation date of package %s") % package)
        elif (format == "filestamp"):
            # Reference date is the mtime of an arbitrary file.
            filestamp = options.get("filestamp")
            try:
                date = int(os.stat(filestamp).st_mtime)
            except (OSError, ValueError):
                raise SetConfigError(_("cannot determine 'filestamp' of '%s'") % filestamp)
        elif (format == "seconds"):
            try:
                date = int(options.get("seconds"))
            except ValueError:
                raise SetConfigError(_("option 'seconds' must be an integer"))
        else:
            # 'date' option, parsed according to 'dateformat' (locale default).
            dateopt = options.get("date")
            try:
                dateformat = options.get("dateformat", "%x %X")
                date = int(time.mktime(time.strptime(dateopt, dateformat)))
            except ValueError:
                raise SetConfigError(_("'date=%s' does not match 'dateformat=%s'") % (dateopt, dateformat))
        return DateSet(vardb=vardbapi, date=date, mode=mode)
    singleBuilder = classmethod(singleBuilder)
class RebuiltBinaries(EverythingSet):
    """Installed packages for which the binary package database holds a
    build with a different BUILD_TIME (i.e. re-merge candidates)."""

    _operations = ('merge',)
    _aux_keys = ('BUILD_TIME',)

    def __init__(self, vardb, bindb=None):
        super(RebuiltBinaries, self).__init__(vardb, bindb=bindb)
        self._bindb = bindb

    def _filter(self, atom):
        cpv = self._db.match(atom)[0]
        inst_build_time, = self._db.aux_get(cpv, self._aux_keys)
        try:
            bin_build_time, = self._bindb.aux_get(cpv, self._aux_keys)
        except KeyError:
            # No corresponding binary package exists.
            return False
        if not bin_build_time:
            return False
        return bool(inst_build_time != bin_build_time)

    def singleBuilder(cls, options, settings, trees):
        return RebuiltBinaries(trees["vartree"].dbapi,
                               bindb=trees["bintree"].dbapi)
    singleBuilder = classmethod(singleBuilder)
| devurandom/portage | pym/portage/_sets/dbapi.py | Python | gpl-2.0 | 14,306 |
#!/usr/bin/python
# Copyright (C) 2008. Jurko Gospodnetic
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Tests for the Boost Jam builtin SORT rule.
import BoostBuild
###############################################################################
#
# testSORTCorrectness()
# ---------------------
#
###############################################################################
def testSORTCorrectness():
    """Testing that Boost Jam's SORT builtin rule actually sorts correctly."""
    t = BoostBuild.Tester(["-ftest.jam", "-d1"], pass_toolset=False,
                          use_test_config=False)
    # SORT on jam lists is a *string* sort, hence "27" before "3" etc.
    t.write("test.jam", """\
NOCARE all ;
source-data = 1 8 9 2 7 3 4 7 1 27 27 9 98 98 1 1 4 5 6 2 3 4 8 1 -2 -2 0 0 0 ;
target-data = -2 -2 0 0 0 1 1 1 1 1 2 2 27 27 3 3 4 4 4 5 6 7 7 8 8 9 9 98 98 ;
ECHO "starting up" ;
sorted-data = [ SORT $(source-data) ] ;
ECHO "done" ;
if $(sorted-data) != $(target-data)
{
    ECHO "Source       :" $(source-data) ;
    ECHO "Expected     :" $(target-data) ;
    ECHO "SORT returned:" $(sorted-data) ;
    EXIT "SORT error" : -2 ;
}
""")
    t.run_build_system()
    t.expect_output_lines("starting up")
    t.expect_output_lines("done")
    # The failure marker must NOT appear in the output.
    t.expect_output_lines("SORT error", False)
    t.cleanup()
###############################################################################
#
# testSORTDuration()
# ------------------
#
###############################################################################
def testSORTDuration():
    """
    Regression test making sure Boost Jam's SORT builtin rule does not get
    quadratic behaviour again in this use case.
    """
    t = BoostBuild.Tester(["-ftest.jam", "-d1"], pass_toolset=False,
                          use_test_config=False)
    # Generate a 20000-element list of alternating "aaa"/"bbb" items.
    # (Python 2 file-redirect print syntax.)
    f = open(t.workpath("test.jam"), "w")
    print >> f, "data = "
    for i in range(0, 20000):
        if i % 2:
            print >> f, '"aaa"'
        else:
            print >> f, '"bbb"'
    print >> f, """;
ECHO "starting up" ;
sorted = [ SORT $(data) ] ;
ECHO "done" ;
NOCARE all ;
"""
    f.close()
    # The run must complete within the expected duration (seconds) or the
    # tester flags it — this is what catches quadratic regressions.
    t.run_build_system(expected_duration=1)
    t.expect_output_lines("starting up")
    t.expect_output_lines("done")
    t.cleanup()
###############################################################################
#
# main()
# ------
#
###############################################################################
# Run both SORT tests when this script is executed.
testSORTCorrectness()
testSORTDuration()
| flingone/frameworks_base_cmds_remoted | libs/boost/tools/build/test/sort_rule.py | Python | apache-2.0 | 2,589 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from base import PostgreSQL
PostgreSQL.dump_sql = True
PostgreSQL.perform(
'INSERT INTO person VALUES (%s, %s)',
params = [
('dara', 'Dara Torres'),
('eden', 'Eden Tseng'),
]
)
PostgreSQL.perform("DELETE FROM person WHERE person_id = 'dara'")
PostgreSQL.perform("DELETE FROM person WHERE person_id = %s", ('eden', ))
#print PostgreSQL.perform(proc='add', param=(1, 2)).fetchall()
| uranusjr/mosql | archived/examples/postgresql/perform.py | Python | mit | 459 |
# -*- coding: utf-8 -*-
# Author: Florian Mayer <florian.mayer@bitsrc.org>
#pylint: disable=W0613
from __future__ import absolute_import
import pytest
from sunpy.net import vso
from sunpy.net.vso import attrs as va
from sunpy.net import attr
from sunpy.util.unit_conversion import energy, frequency
def pytest_funcarg__eit(request):
    # Legacy pytest "funcarg" fixture: an EIT instrument attribute.
    return va.Instrument('eit')


def pytest_funcarg__client(request):
    # Fixture: a fresh VSO client (may touch the network on use).
    return vso.VSOClient()


def pytest_funcarg__iclient(request):
    # Fixture: the interactive variant of the VSO client.
    return vso.InteractiveVSOClient()
def test_simpleattr_apply():
    # Applying a ValueAttr writes its key path/value into the target dict.
    a = attr.ValueAttr({('test', ): 1})
    dct = {}
    va.walker.apply(a, None, dct)
    assert dct['test'] == 1


def test_simpleattr_create(client):
    # Creating from a ValueAttr populates the VSO API query object.
    a = attr.ValueAttr({('instrument', ): 'eit'})
    assert va.walker.create(a, client.api)[0].instrument == 'eit'


def test_simpleattr_and_duplicate():
    # AND-ing two attrs of the same kind must raise TypeError.
    # (the local name `attr` deliberately mirrors upstream style and shadows
    # the imported `attr` module inside this test)
    attr = va.Instrument('foo')
    pytest.raises(TypeError, lambda: attr & va.Instrument('bar'))
    attr |= va.Source('foo')
    pytest.raises(TypeError, lambda: attr & va.Instrument('bar'))
    otherattr = va.Instrument('foo') | va.Source('foo')
    pytest.raises(TypeError, lambda: attr & otherattr)
    pytest.raises(TypeError, lambda: (attr | otherattr) & va.Instrument('bar'))
    tst = va.Instrument('foo') & va.Source('foo')
    pytest.raises(TypeError, lambda: tst & tst)


def test_simpleattr_or_eq():
    # OR with an equal attr collapses to the attr itself.
    attr = va.Instrument('eit')
    assert attr | attr == attr
    assert attr | va.Instrument('eit') == attr
def test_complexattr_apply():
    # Nested key paths land in the corresponding nested dict.
    tst = {('test', 'foo'): 'a', ('test', 'bar'): 'b'}
    a = attr.ValueAttr(tst)
    dct = {'test': {}}
    va.walker.apply(a, None, dct)
    assert dct['test'] == {'foo': 'a', 'bar': 'b'}


def test_complexattr_create(client):
    a = attr.ValueAttr({('time', 'start'): 'test'})
    assert va.walker.create(a, client.api)[0].time.start == 'test'


def test_complexattr_and_duplicate():
    # Combining two Time ranges with AND must raise TypeError.
    attr = va.Time((2011, 1, 1), (2011, 1, 1, 1))
    pytest.raises(
        TypeError,
        lambda: attr & va.Time((2011, 2, 1), (2011, 2, 1, 1))
    )
    attr |= va.Source('foo')
    pytest.raises(
        TypeError,
        lambda: attr & va.Time((2011, 2, 1), (2011, 2, 1, 1))
    )


def test_complexattr_or_eq():
    attr = va.Time((2011, 1, 1), (2011, 1, 1, 1))
    assert attr | attr == attr
    assert attr | va.Time((2011, 1, 1), (2011, 1, 1, 1)) == attr


def test_attror_and():
    # AND distributes over OR: (a|b) & c == (a&c) | (b&c).
    attr = va.Instrument('foo') | va.Instrument('bar')
    one = attr & va.Source('bar')
    other = (
        (va.Instrument('foo') & va.Source('bar')) |
        (va.Instrument('bar') & va.Source('bar'))
    )
    assert one == other
def test_wave_toangstrom():
    # 62 eV and 1.506e16 Hz both correspond to ~199 Angstrom, so every
    # energy/frequency unit conversion should land on the same wavelength.
    for name, factor in energy:
        w = va.Wave(62 / factor, 62 / factor, name)
        assert int(w.min) == 199

    w = va.Wave(62, 62, 'eV')
    assert int(w.min) == 199
    w = va.Wave(62e-3, 62e-3, 'keV')
    assert int(w.min) == 199

    for name, factor in frequency:
        w = va.Wave(1.506e16 / factor, 1.506e16 / factor, name)
        assert int(w.min) == 199

    w = va.Wave(1.506e16, 1.506e16, 'Hz')
    assert int(w.min) == 199
    w = va.Wave(1.506e7, 1.506e7, 'GHz')
    assert int(w.min) == 199
def test_time_xor():
    # XOR removes the overlapping sub-interval, splitting the range into
    # an AttrOr of the remaining pieces.
    one = va.Time((2010, 1, 1), (2010, 1, 2))
    a = one ^ va.Time((2010, 1, 1, 1), (2010, 1, 1, 2))

    assert a == attr.AttrOr(
        [va.Time((2010, 1, 1), (2010, 1, 1, 1)),
         va.Time((2010, 1, 1, 2), (2010, 1, 2))]
    )

    a ^= va.Time((2010, 1, 1, 4), (2010, 1, 1, 5))
    assert a == attr.AttrOr(
        [va.Time((2010, 1, 1), (2010, 1, 1, 1)),
         va.Time((2010, 1, 1, 2), (2010, 1, 1, 4)),
         va.Time((2010, 1, 1, 5), (2010, 1, 2))]
    )


def test_wave_xor():
    # Same splitting behaviour for wavelength ranges.
    one = va.Wave(0, 1000)
    a = one ^ va.Wave(200, 400)

    assert a == attr.AttrOr([va.Wave(0, 200), va.Wave(400, 1000)])

    a ^= va.Wave(600, 800)
    assert a == attr.AttrOr(
        [va.Wave(0, 200), va.Wave(400, 600), va.Wave(800, 1000)])
| mjm159/sunpy | sunpy/tests/net/test_vso.py | Python | bsd-2-clause | 3,984 |
#!/usr/bin/env python
from __future__ import unicode_literals
import os
import sys
scripts_dir = os.path.abspath(os.path.dirname(__file__))

# Source root directory
sys.path.insert(0, os.path.abspath(os.path.join(scripts_dir, '..', '..')))

# Script config directory
sys.path.insert(0, os.path.join(scripts_dir, 'conf'))

from reviewboard import django_version

# Pin the Django requirement before pkg_resources is imported, so the
# correct version is resolved for this process.
import __main__
__main__.__requires__ = [django_version]
import pkg_resources

from django.core.management import call_command

if __name__ == '__main__':
    os.putenv('FORCE_BUILD_MEDIA', '1')
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'reviewboard.settings')
    # Collect static media files; exit with collectstatic's return value.
    ret = call_command('collectstatic', interactive=False, verbosity=2)
    sys.exit(ret)
| 1tush/reviewboard | contrib/internal/build-media.py | Python | mit | 728 |
#!/usr/bin/env python
from urllib.parse import urlsplit, urlunsplit
from urllib.parse import urlencode
from wsgiref.util import request_uri
from .query import Query
class URI(object):
    """Mutable wrapper around a split URI.

    The ``query`` attribute is normalised to a ``Query`` instance via a
    property setter; all other components are stored as given. ``path`` may
    be either a string (as produced by ``urlsplit``) or a sequence of path
    segments — TODO confirm which form callers rely on; both are supported.
    """

    def __init__(self, uri='', scheme='', host='', path=None, query=None,
                 anchor=''):
        # Split first, then let explicit keyword arguments override.
        self.scheme, self.host, self.path, self.query, self.anchor = \
            urlsplit(uri, scheme='http')
        self.scheme = scheme or self.scheme
        self.host = host or self.host
        self.path = path or self.path or '/'
        self.query = query or self.query or Query()
        self.anchor = anchor or self.anchor

    @property
    def query(self):
        return self.__dict__['query']

    @query.setter
    def query(self, new):
        # Coerce strings and arbitrary mappings into a Query instance.
        if isinstance(new, str):
            new = Query.parse(new)
        elif not isinstance(new, Query):
            new = Query(new)
        self.__dict__['query'] = new

    def get_path(self):
        """Return the path as a '/'-joined string.

        BUG FIX: the previous implementation did ``'/'.join([''] + self.path)``
        unconditionally, which raised TypeError whenever ``path`` was the
        plain string that ``urlsplit`` stores in ``__init__``. Strings are
        now returned as-is; sequences of segments are joined as before.
        """
        if isinstance(self.path, str):
            return self.path
        return '/'.join([''] + list(self.path))

    def get_query(self):
        """Return the query component URL-encoded."""
        return urlencode(self.query)

    @classmethod
    def from_env(cls, env):
        """Alternate constructor: build a URI from a WSGI environ dict."""
        return cls(request_uri(env))

    def __str__(self):
        return urlunsplit((self.scheme, self.host, self.path,
                           self.get_query(), self.anchor))

    def __repr__(self):
        return 'URI(%r)' % (str(self),)
| orbnauticus/silk | silk/webreq/uri.py | Python | bsd-3-clause | 1,341 |
from django.db import models
class Episode(models.Model):
    # A film episode; ordered by its episode number.
    name = models.CharField(max_length=255)
    number = models.IntegerField()

    def __str__(self):
        return '{}: {}'.format(self.number, self.name)

    class Meta(object):
        ordering = ['number']
class Starship(models.Model):
    # A starship with a name and a length (defaulting to 0).
    name = models.CharField(max_length=255)
    length = models.FloatField(default=0)

    # Added for consistency: the sibling models (Episode, Character) all
    # define __str__; without it instances render as "Starship object (pk)".
    def __str__(self):
        return self.name
class Character(models.Model):
    name = models.CharField(max_length=255)
    # Symmetrical self-referential many-to-many relation.
    friends = models.ManyToManyField('self', blank=True)
    # Reverse accessor on Episode is `characters`.
    appears_in = models.ManyToManyField(
        Episode,
        blank=True,
        related_name='characters',
    )

    def __str__(self):
        return self.name

    class Meta(object):
        ordering = ['pk']
class Human(Character):
    # Multi-table inheritance from Character.
    starships = models.ManyToManyField(Starship)
    total_credits = models.PositiveSmallIntegerField(default=0)
class Droid(Character):
    # Multi-table inheritance from Character.
    primary_function = models.CharField(max_length=255)
| melinath/django-graph-api | test_app/models.py | Python | mit | 965 |
"""This module provides instrumentation for Celery. Has been tested on
Celery versions 2.2.X through 2.5.X.
Note that Celery has a habit of moving things around in code base or of
completely rewriting stuff across minor versions. See additional notes
about this below.
"""
import functools
from newrelic.api.application import application_instance
from newrelic.api.background_task import BackgroundTaskWrapper
from newrelic.api.pre_function import wrap_pre_function
def instrument_celery_app_task(module):
    # Triggered for both 'celery.app.task' and 'celery.task.base'.

    if hasattr(module, 'BaseTask'):

        # Need to add a wrapper for background task entry point.

        # In Celery 2.2 the 'BaseTask' class actually resided in the
        # module 'celery.task.base'. In Celery 2.3 the 'BaseTask' class
        # moved to 'celery.app.task' but an alias to it was retained in
        # the module 'celery.task.base'. We need to detect both module
        # imports, but we check the module name associated with
        # 'BaseTask' to ensure that we do not instrument the class via
        # the alias in Celery 2.3 and later.

        # In Celery 2.5+, although 'BaseTask' still exists execution of
        # the task doesn't pass through it. For Celery 2.5+ need to wrap
        # the tracer instead.

        def task_name(task, *args, **kwargs):
            # The task's registered name becomes the transaction name.
            return task.name

        if module.BaseTask.__module__ == module.__name__:
            module.BaseTask.__call__ = BackgroundTaskWrapper(
                module.BaseTask.__call__, name=task_name,
                group='Celery')
def instrument_celery_execute_trace(module):
    # Triggered for 'celery.execute_trace'.

    if hasattr(module, 'build_tracer'):

        # Need to add a wrapper for background task entry point.

        # In Celery 2.5+ we need to wrap the task when tracer is being
        # created. Note that in Celery 2.5 the 'build_tracer' function
        # actually resided in the module 'celery.execute.task'. In
        # Celery 3.0 the 'build_tracer' function moved to
        # 'celery.task.trace'.

        _build_tracer = module.build_tracer

        def build_tracer(name, task, *args, **kwargs):
            # Wrap the task object itself before delegating to the
            # original tracer factory.
            task = task or module.tasks[name]
            task = BackgroundTaskWrapper(task, name=name, group='Celery')
            return _build_tracer(name, task, *args, **kwargs)

        module.build_tracer = build_tracer
def instrument_celery_worker(module):
    # Triggered for 'celery.worker' and 'celery.concurrency.processes'.

    if hasattr(module, 'process_initializer'):

        # We try and force registration of default application after
        # fork of worker process rather than lazily on first request.

        # Originally the 'process_initializer' function was located in
        # 'celery.worker'. In Celery 2.5 the function 'process_initializer'
        # was moved to the module 'celery.concurrency.processes'.

        _process_initializer = module.process_initializer

        @functools.wraps(module.process_initializer)
        def process_initializer(*args, **kwargs):
            application_instance().activate()
            return _process_initializer(*args, **kwargs)

        module.process_initializer = process_initializer
def instrument_celery_loaders_base(module):
    """Hook worker start-up so the default application is activated
    eagerly instead of lazily on the first task."""

    def _activate_application(*args, **kwargs):
        application_instance().activate()

    wrap_pre_function(module, 'BaseLoader.init_worker',
                      _activate_application)
| galaxy-team/website | newrelic/hooks/application_celery.py | Python | agpl-3.0 | 3,425 |
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsField.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '16/08/2015'
__copyright__ = 'Copyright 2015, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis
import os
from qgis.core import QgsField, QgsVectorLayer, NULL
from utilities import (unitTestDataPath,
getQgisTestApp,
TestCase,
unittest
)
from unittest import expectedFailure
QGISAPP, CANVAS, IFACE, PARENT = getQgisTestApp()
class TestQgsFields(TestCase):

    def test_expections(self):
        # NOTE(review): method name has a typo ("expections" for
        # "exceptions"); kept as-is since test discovery only needs the
        # "test_" prefix and external tooling may reference this name.
        ml = QgsVectorLayer("Point?crs=epsg:4236&field=id:integer&field=value:double",
                            "test_data", "memory")
        assert ml.isValid()
        fields = ml.fields()

        # check no error
        fields.remove(1)
        # check exceptions raised for out-of-range indices
        with self.assertRaises(KeyError):
            fields.remove(-1)
        with self.assertRaises(KeyError):
            fields.remove(111)

        fields = ml.fields()
        # check no error
        fields.at(1)
        # check exceptions raised
        with self.assertRaises(KeyError):
            fields.at(-1)
        with self.assertRaises(KeyError):
            fields.at(111)

        # check no error
        fields.field(1)
        # check exceptions raised
        with self.assertRaises(KeyError):
            fields.field(-1)
        with self.assertRaises(KeyError):
            fields.field(111)

        # check no error (lookup by name)
        fields.field('value')
        # check exceptions raised
        with self.assertRaises(KeyError):
            fields.field('bad')

        # check no error
        fields.fieldOrigin(1)
        # check exceptions raised
        with self.assertRaises(KeyError):
            fields.fieldOrigin(-1)
        with self.assertRaises(KeyError):
            fields.fieldOrigin(111)

        # check no error
        fields.fieldOriginIndex(1)
        # check exceptions raised
        with self.assertRaises(KeyError):
            fields.fieldOriginIndex(-1)
        with self.assertRaises(KeyError):
            fields.fieldOriginIndex(111)


if __name__ == '__main__':
    unittest.main()
| carolinux/QGIS | tests/src/python/test_qgsfield.py | Python | gpl-2.0 | 2,527 |
#
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010-2014 Joel Andersson, Joris Gillis, Moritz Diehl,
# K.U. Leuven. All rights reserved.
# Copyright (C) 2011-2014 Greg Horn
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
#! Linear solvers
#! =================
#!
#! We demonstrate solving a dense system A.x=b by using different linear solvers.
#!
from casadi import *
from numpy import *
import time
n=100

#$ We generate $A \in \mathbf{R}^{n \times n}$, $x \in \mathbf{R}^{n}$ with $n=100$
A=DMatrix([[cos(i*j)-sin(i) for i in range(n)] for j in range(n)])
x=DMatrix([tan(i) for i in range(n)])

#! We generate the b vector:
b=mul(A,x)

#! We demonstrate the LinearSolver API with CSparse:
s = LinearSolver("csparse", A.sparsity())
s.init()

#! Give it the matrix A
s.setInput(A,"A")
#! Do the LU factorization
s.prepare()

#! Give it the matrix b
s.setInput(b,"B")

#! And we are off to find x...
s.solve()

x_ = s.getOutput("X")

#! By looking at the residuals between the x we knew in advance and the computed x, we see that the CSparse solver works
print "Sum of residuals = %.2e" % sumAll(fabs(x-x_))

#! Comparison of different linear solvers
#! ======================================
# Each solver is timed over 100 factorizations and 100 solves; the
# averaged times and the residual are reported. (Python 2 print syntax.)
for solver in ("lapacklu","lapackqr","csparse"):
    s = LinearSolver(solver, A.sparsity()) # We create a solver
    s.init()
    s.setInput(A,"A") # Give it the matrix A
    t0 = time.time()
    for i in range(100):
        s.prepare() # Do the LU factorization
    pt = (time.time()-t0)/100
    s.setInput(b,"B") # Give it the matrix b
    t0 = time.time()
    for i in range(100):
        s.solve()
    st = (time.time()-t0)/100
    x_ = s.getOutput("X")
    print ""
    print solver
    print "=" * 10
    print "Sum of residuals = %.2e" % sumAll(fabs(x-x_))
    print "Preparation time = %0.2f ms" % (pt*1000)
    print "Solve time       = %0.2f ms" % (st*1000)
    assert(sumAll(fabs(x-x_))<1e-9)
#! Note that these
| ghorn/debian-casadi | docs/api/examples/algebra/LinearSolver.py | Python | lgpl-3.0 | 2,711 |
import argparse
from datetime import datetime
def Parser():
    """Build the command-line interface and parse ``sys.argv``.

    Returns the parsed argparse namespace with ``input`` and ``output``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', action="store", type=str,
                        help="input miRBase GFF3 file")
    parser.add_argument('--output', action="store", type=str,
                        help="output GFF3 file with converted mature mir coordinates")
    return parser.parse_args()
def get_gff_header(gff_input_file):
    """Return all '#' comment lines of the input GFF3 plus a provenance line.

    Every line starting with '#' (wherever it appears in the file) is kept,
    followed by a "generated by" stamp carrying the current timestamp.
    """
    string_list = []
    # Use a context manager so the file handle is closed deterministically
    # (previously the handle was left open until garbage collection).
    with open(gff_input_file, "r") as gff_file:
        for line in gff_file:
            if line[0] == '#':
                string_list.append(line)
    string_list.append('# generated by mature_mir_gff_translation.py %s\n#\n' %
                       str(datetime.now()))
    return ''.join(string_list)
def load_gff_in_dict(gff_input_file):
    '''
    Reads the gff3 file and return a dictionary of dictionaries
    with keys equal to standard gff3 fields (9)
    Note that the key of the primary dictionary is the ID

    Fixes over the previous version:
    - the input file is closed via a context manager (it was leaked);
    - the Derives_from value is truncated at the first ';', matching how
      genome_to_mir_gff parses it. Previously, if Derives_from was not the
      last attribute the raw remainder of the line was used as the parent
      ID, causing a KeyError.
    '''
    gff_dict = {}
    with open(gff_input_file, "r") as gff_file:
        for line in gff_file:
            if line[0] == "#":
                continue
            gff_fields = line[:-1].split("\t")
            ID = gff_fields[8].split("ID=")[1].split(";")[0]
            gff_dict[ID] = {}
            gff_dict[ID]["seqid"] = gff_fields[0]
            gff_dict[ID]["source"] = gff_fields[1]
            gff_dict[ID]["type"] = gff_fields[2]
            gff_dict[ID]["start"] = gff_fields[3]
            gff_dict[ID]["end"] = gff_fields[4]
            gff_dict[ID]["score"] = gff_fields[5]
            gff_dict[ID]["strand"] = gff_fields[6]
            gff_dict[ID]["phase"] = gff_fields[7]
            gff_dict[ID]["attributes"] = gff_fields[8]
            if "erives_from" in gff_dict[ID]["attributes"]:
                # Mature miRNA: record its hairpin's Name so downstream code
                # can group matures under their primary transcript.
                parent_id = gff_dict[ID]["attributes"].split(
                    "erives_from=")[1].split(";")[0]
                parent_primary_transcript = gff_dict[parent_id][
                    "attributes"].split("Name=")[1].split(";")[0]
                gff_dict[ID]["attributes"] = "%s;Parent_mir_Name=%s" % (
                    gff_dict[ID]["attributes"], parent_primary_transcript)
    return gff_dict
def genome_to_mir_gff(gff_dict, output, header):
    '''
    Converts seqid field from chromosome to item Name
    Then converts coordinates relative to "miRNA_primary_transcript"
    Note that GFF files are 1-based coordinates
    '''
    # Pass 1: rewrite seqid to the item Name; shift mature-miRNA coordinates
    # so they are relative to their parent primary transcript (1-based).
    for key in gff_dict:
        name = gff_dict[key]["attributes"].split("Name=")[1].split(";")[0]
        gff_dict[key]["seqid"] = name
        if "erives_from=" in gff_dict[key]["attributes"]:
            parent_ID = gff_dict[key]["attributes"].split(
                "erives_from=")[1].split(";")[0]
            gff_dict[key]["start"] = str(int(gff_dict[key]["start"])-int(
                gff_dict[parent_ID]["start"])+1)
            gff_dict[key]["end"] = str(int(gff_dict[key]["end"])-int(
                gff_dict[parent_ID]["start"])+1)
    hairpins = {}
    matures = {}
    # treats miRNA_primary_transcript coordinates
    # in a second loop to avoid errors in conversion
    for key in gff_dict:
        if gff_dict[key]["type"] == "miRNA_primary_transcript":
            # Hairpin now spans 1..length in its own coordinate system.
            gff_dict[key]["end"] = str(int(gff_dict[key]["end"])-int(
                gff_dict[key]["start"]) + 1)
            gff_dict[key]["start"] = '1'
            # now, do a dict[ID]=Name but only for miRNA_primary_transcript
            hairpins[key] = gff_dict[key]["attributes"].split(
                "Name=")[1].split(
                ";")[0]
        else:
            matures[key] = gff_dict[key]["attributes"].split(
                "Name=")[1].split(
                ";")[0]
    with open(output, "w") as output:
        output.write(header)
        # Write each hairpin (sorted by Name), immediately followed by the
        # mature records whose attributes reference that hairpin's ID.
        for ID in sorted(hairpins, key=hairpins.get):
            output.write("\t".join([gff_dict[ID]["seqid"],
                         gff_dict[ID]["source"], gff_dict[ID]["type"],
                         gff_dict[ID]["start"], gff_dict[ID]["end"],
                         gff_dict[ID]["score"], gff_dict[ID]["strand"],
                         gff_dict[ID]["phase"], gff_dict[ID]["attributes"]]))
            output.write("\n")
            for id in sorted(matures, key=matures.get, reverse=True):
                # NOTE(review): membership is a substring test of the hairpin
                # ID inside the mature's attributes — confirm IDs can never
                # be prefixes of one another in the input.
                if ID in gff_dict[id]["attributes"]:
                    output.write("\t".join([gff_dict[id]["seqid"],
                                 gff_dict[id]["source"], gff_dict[id]["type"],
                                 gff_dict[id]["start"], gff_dict[id]["end"],
                                 gff_dict[id]["score"],
                                 gff_dict[id]["strand"],
                                 gff_dict[id]["phase"],
                                 gff_dict[id]["attributes"]]))
                    output.write("\n")
def main(infile, outfile):
    # Parse the genome-coordinate GFF3, then write the hairpin-relative file.
    gff_dict = load_gff_in_dict(infile)
    genome_to_mir_gff(gff_dict, outfile, get_gff_header(infile))


if __name__ == "__main__":
    args = Parser()
    main(args.input, args.output)
| drosofff/tools-artbio | tools/mircounts/mature_mir_gff_translation.py | Python | mit | 5,174 |
from django.db import migrations
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
def backfill_user_deleted_logs(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
    """Backfill USER_DELETED RealmAuditLog rows for users deleted before
    such audit entries were recorded (identified by the deleteduser* dummy
    email pattern)."""
    RealmAuditLog = apps.get_model("zerver", "RealmAuditLog")
    # Historical models carry no class constants, so the event_type value
    # is redeclared locally on the model class.
    RealmAuditLog.USER_DELETED = 106
    UserProfile = apps.get_model("zerver", "UserProfile")
    objects_to_create = []
    for user_profile in UserProfile.objects.filter(
        is_mirror_dummy=True, is_active=False, delivery_email__regex=r"^deleteduser\d+@.+"
    ):
        entry = RealmAuditLog(
            realm_id=user_profile.realm_id,
            modified_user=user_profile,
            acting_user=user_profile,
            event_type=RealmAuditLog.USER_DELETED,
            # For old dummy users, the date_joined is the time of the deletion.
            event_time=user_profile.date_joined,
            backfilled=True,
        )
        objects_to_create.append(entry)
    # Single bulk INSERT for all backfilled entries.
    RealmAuditLog.objects.bulk_create(objects_to_create)
def reverse_code(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
    """Undo the backfill: delete only the backfilled USER_DELETED entries."""
    audit_log_model = apps.get_model("zerver", "RealmAuditLog")
    audit_log_model.USER_DELETED = 106
    audit_log_model.objects.filter(
        event_type=audit_log_model.USER_DELETED, backfilled=True
    ).delete()
class Migration(migrations.Migration):

    dependencies = [
        ("zerver", "0373_fix_deleteduser_dummies"),
    ]

    operations = [
        migrations.RunPython(
            backfill_user_deleted_logs,
            reverse_code=reverse_code,
            # elidable: this data backfill may be dropped when squashing.
            elidable=True,
        )
    ]
| zulip/zulip | zerver/migrations/0374_backfill_user_delete_realmauditlog.py | Python | apache-2.0 | 1,626 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import click
from PIL import Image, ImageChops
from utils.misc import get_file_list, validate_color
# Tint every image found under PATH by multiplying it with COLOR, in place.
# (Comments only here: a docstring would become the click --help text.)
@click.command()
@click.argument('path', type=click.Path(exists=True))
@click.argument('color', callback=validate_color)
def tint(path, color):
    for f in get_file_list(path):
        img = Image.open(f)
        # Multiply blend against a solid colour layer of the same size,
        # then overwrite the original file.
        img = ImageChops.multiply(img, Image.new('RGBA', img.size, color))
        img.save(f)


if __name__ == '__main__':
    tint()
| vladimirgamalian/pictools | tint.py | Python | mit | 490 |
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import division
from future.utils import PY3, viewitems
from six import StringIO
from collections import defaultdict
import pandas as pd
import numpy as np
import warnings
from skbio.util import find_duplicates
import qiita_db as qdb
if PY3:
from string import ascii_letters as letters, digits
else:
from string import letters, digits
def prefix_sample_names_with_id(md_template, study_id):
    r"""Prefix the sample_names in md_template with the study id, in place.

    Parameters
    ----------
    md_template : DataFrame
        The metadata template to modify
    study_id : int
        The study to which the metadata belongs to

    Notes
    -----
    Sample names already prefixed with the study id are left untouched. A
    QiitaDBWarning is raised when only *some* of the samples needed
    prefixing. The resulting index is unnamed.
    """
    study_id_str = str(study_id)
    prefix = '%d.' % study_id
    # Compute the new names without touching the frame. The previous
    # implementation stored them in a temporary 'qiita_sample_name_with_id'
    # column, which would silently clobber (and then delete) a genuine
    # metadata column of that name.
    new_index = [idx if idx.split('.', 1)[0] == study_id_str
                 else prefix + idx
                 for idx in md_template.index]
    # get the rows that are gonna change
    changes = sum(old != new
                  for old, new in zip(md_template.index, new_index))
    if changes != 0 and changes != len(new_index):
        warnings.warn(
            "Some of the samples were already prefixed with the study id.",
            qdb.exceptions.QiitaDBWarning)
    md_template.index = pd.Index(new_index)
    # The original metadata template had the index column unnamed -> remove
    # the name of the index for consistency
    md_template.index.name = None
def load_template_to_dataframe(fn, index='sample_name'):
    """Load a sample/prep template or a QIIME mapping file into a data frame

    Parameters
    ----------
    fn : str or file-like object
        filename of the template to load, or an already open template file
    index : str, optional
        Defaults to 'sample_name'. The index to use in the loaded information

    Returns
    -------
    DataFrame
        Pandas dataframe with the loaded information

    Raises
    ------
    ValueError
        Empty file passed
    QiitaDBColumnError
        If the sample_name column is not present in the template.
    QiitaDBWarning
        When columns are dropped because they have no content for any sample.
    QiitaDBError
        When non UTF-8 characters are found in the file.
    QiitaDBDuplicateHeaderError
        If duplicate columns are present in the template

    Notes
    -----
    The index attribute of the DataFrame will be forced to be 'sample_name'
    and will be cast to a string. Additionally rows that start with a '\t'
    character will be ignored and columns that are empty will be removed. Empty
    sample names will be removed from the DataFrame.

    Column names are case-insensitive but will be lowercased on addition to
    the database

    Everything in the DataFrame will be read and managed as string
    """
    # Load in file lines
    holdfile = None
    with qdb.util.open_file(fn, mode='U') as f:
        errors = defaultdict(list)
        holdfile = f.readlines()
        # here we are checking for non UTF-8 chars
        for row, line in enumerate(holdfile):
            for col, block in enumerate(line.split('\t')):
                try:
                    # On Python 2 encoding a byte-str implicitly decodes it
                    # first, which is what surfaces invalid UTF-8 here.
                    tblock = block.encode('utf-8')
                except UnicodeDecodeError:
                    tblock = unicode(block, errors='replace')
                    tblock = tblock.replace(u'\ufffd', '🐾')
                    errors[tblock].append('(%d, %d)' % (row, col))
        if bool(errors):
            raise ValueError(
                "There are invalid (non UTF-8) characters in your information "
                "file. The offending fields and their location (row, column) "
                "are listed below, invalid characters are represented using "
                "🐾: %s" % '; '.join(
                    ['"%s" = %s' % (k, ', '.join(v))
                     for k, v in viewitems(errors)]))

    if not holdfile:
        raise ValueError('Empty file passed!')

    if index == "#SampleID":
        # We're going to parse a QIIME mapping file. We are going to first
        # parse it with the QIIME function so we can remove the comments
        # easily and make sure that QIIME will accept this as a mapping file
        data, headers, comments = _parse_mapping_file(holdfile)
        holdfile = ["%s\n" % '\t'.join(d) for d in data]
        holdfile.insert(0, "%s\n" % '\t'.join(headers))

        # The QIIME parser fixes the index and removes the #
        index = 'SampleID'

    # Strip all values in the cells in the input file
    for pos, line in enumerate(holdfile):
        cols = line.split('\t')
        if pos == 0 and index != 'SampleID':
            # get and clean the controlled columns
            ccols = {'sample_name'}
            ccols.update(qdb.metadata_template.constants.CONTROLLED_COLS)
            newcols = [
                c.lower().strip() if c.lower().strip() in ccols
                else c.strip()
                for c in cols]

            # while we are here, let's check for duplicate columns headers
            if len(set(newcols)) != len(newcols):
                raise qdb.exceptions.QiitaDBDuplicateHeaderError(
                    find_duplicates(newcols))
        else:
            # .strip will remove odd chars, newlines, tabs and multiple
            # spaces but we need to read a new line at the end of the
            # line(+'\n')
            newcols = [d.strip(" \r\n") for d in cols]

        holdfile[pos] = '\t'.join(newcols) + '\n'

    # index_col:
    #   is set as False, otherwise it is cast as a float and we want a string
    # keep_default:
    #   is set as False, to avoid inferring empty/NA values with the defaults
    #   that Pandas has.
    # comment:
    #   using the tab character as "comment" we remove rows that are
    #   constituted only by delimiters i. e. empty rows.
    template = pd.read_csv(
        StringIO(''.join(holdfile)),
        sep='\t',
        dtype=str,
        encoding='utf-8',
        infer_datetime_format=False,
        keep_default_na=False,
        index_col=False,
        comment='\t',
        converters={index: lambda x: str(x).strip()})
    # remove newlines and tabs from fields
    template.replace(to_replace='[\t\n\r\x0b\x0c]+', value='',
                     regex=True, inplace=True)

    initial_columns = set(template.columns)

    if index not in template.columns:
        raise qdb.exceptions.QiitaDBColumnError(
            "The '%s' column is missing from your template, this file cannot "
            "be parsed." % index)

    # remove rows that have no sample identifier but that may have other data
    # in the rest of the columns
    template.dropna(subset=[index], how='all', inplace=True)

    # set the sample name as the index
    template.set_index(index, inplace=True)

    # it is not uncommon to find templates that have empty columns so let's
    # find the columns that are all ''
    columns = np.where(np.all(template.applymap(lambda x: x == ''), axis=0))
    template.drop(template.columns[columns], axis=1, inplace=True)

    initial_columns.remove(index)
    dropped_cols = initial_columns - set(template.columns)
    if dropped_cols:
        warnings.warn(
            'The following column(s) were removed from the template because '
            'all their values are empty: %s'
            % ', '.join(dropped_cols), qdb.exceptions.QiitaDBWarning)

    # Pandas represents data with np.nan rather than Nones, change it to None
    # because psycopg2 knows that a None is a Null in SQL, while it doesn't
    # know what to do with NaN
    template = template.where((pd.notnull(template)), None)

    return template
def get_invalid_sample_names(sample_names):
    """Get a list of sample names that are not QIIME compliant

    Parameters
    ----------
    sample_names : iterable
        Iterable containing the sample names to check.

    Returns
    -------
    list
        List of str objects where each object is an invalid sample name.

    References
    ----------
    .. [1] QIIME File Types documentation:
       http://qiime.org/documentation/file_formats.html#mapping-file-overview.
    """
    # Per the QIIME mapping file documentation, only alphanumeric characters
    # and the period are allowed in sample identifiers.
    valid_chars = set(letters + digits + '.')
    # A name is invalid if it contains any character outside the valid set.
    return [name for name in sample_names if set(name) - valid_chars]
def validate_invalid_column_names(column_names):
    """Validate a list of column names that are not SQL compliant

    Parameters
    ----------
    column_names : iterable
        Iterable containing the column names to check.

    Raises
    ------
    QiitaDBColumnError
        If column_name is in get_pgsql_reserved_words or contains invalid
        chars or is within the forbidden_values

    References
    ----------
    .. [1] postgresql SQL-SYNTAX-IDENTIFIERS: https://goo.gl/EF0cUV.
    """
    column_names = set(column_names)

    # testing for specific column names that are not included in the other
    # tests.
    forbidden_values = {
        # https://github.com/biocore/qiita/issues/2026
        'sampleid',
        # https://github.com/biocore/qiita/issues/
        # Note that this are actually remove/ignored in the function that
        # calls this function: base_metadata_template._clean_validate_template.
        # However, leaving here to avoid any possible issues.
        'qiita_study_id',
        'qiita_prep_id'
    }
    forbidden = forbidden_values & column_names

    # pgsql reserved words
    pgsql_reserved = (
        qdb.metadata_template.util.get_pgsql_reserved_words() & column_names)

    # invalid letters in headers: must start with a letter and contain only
    # letters, digits and underscores afterwards
    valid_initial_char = letters
    valid_rest = set(letters + digits + '_')
    invalid = []
    for s in column_names:
        # An empty name is invalid too; without the explicit check an empty
        # string would raise IndexError on s[0].
        if not s or s[0] not in valid_initial_char:
            invalid.append(s)
        elif set(s) - valid_rest:
            invalid.append(s)

    error = []
    if pgsql_reserved:
        error.append(
            "The following column names in the template contain PgSQL "
            "reserved words: %s." % ", ".join(pgsql_reserved))
    if invalid:
        error.append(
            "The following column names in the template contain invalid "
            "chars: %s." % ", ".join(invalid))
    if forbidden:
        error.append(
            "The following column names in the template contain invalid "
            "values: %s." % ", ".join(forbidden))

    if error:
        raise qdb.exceptions.QiitaDBColumnError(
            "%s\nYou need to modify them." % '\n'.join(error))
def looks_like_qiime_mapping_file(fp):
    """Checks if the file looks like a QIIME mapping file

    Parameters
    ----------
    fp : str or file-like object
        filepath to check if it looks like a QIIME mapping file

    Returns
    -------
    bool
        True if fp looks like a QIIME mapping file, false otherwise.

    Notes
    -----
    This is not doing a validation of the QIIME mapping file. It simply checks
    the first line in the file and it returns true if the line starts with
    '#SampleID', since a sample/prep template will start with 'sample_name' or
    some other different column.
    """
    first_line = None
    with qdb.util.open_file(fp, mode='U') as f:
        first_line = f.readline()

    if not first_line:
        return False

    # A whitespace-only first line (e.g. "\n") is truthy but split() returns
    # an empty list; guard against it so we don't raise IndexError.
    fields = first_line.split()
    return bool(fields) and fields[0] == '#SampleID'
def _parse_mapping_file(lines, strip_quotes=True, suppress_stripping=False):
"""Parser for map file that relates samples to metadata.
Format: header line with fields
optionally other comment lines starting with #
tab-delimited fields
Parameters
----------
lines : iterable of str
The contents of the QIIME mapping file
strip_quotes : bool, optional
Defaults to true. If true, quotes are removed from the data
suppress_stripping : bool, optional
Defaults to false. If true, spaces are not stripped
Returns
-------
list of lists, list of str, list of str
The data in the mapping file, the headers and the comments
Raises
------
QiitaDBError
If there is any error parsing the mapping file
Notes
-----
This code has been ported from QIIME.
"""
if strip_quotes:
if suppress_stripping:
# remove quotes but not spaces
def strip_f(x):
return x.replace('"', '')
else:
# remove quotes and spaces
def strip_f(x):
return x.replace('"', '').strip()
else:
if suppress_stripping:
# don't remove quotes or spaces
def strip_f(x):
return x
else:
# remove spaces but not quotes
def strip_f(x):
return x.strip()
# Create lists to store the results
mapping_data = []
header = []
comments = []
# Begin iterating over lines
for line in lines:
line = strip_f(line)
if not line or (suppress_stripping and not line.strip()):
# skip blank lines when not stripping lines
continue
if line.startswith('#'):
line = line[1:]
if not header:
header = line.strip().split('\t')
else:
comments.append(line)
else:
# Will add empty string to empty fields
tmp_line = map(strip_f, line.split('\t'))
if len(tmp_line) < len(header):
tmp_line.extend([''] * (len(header) - len(tmp_line)))
mapping_data.append(tmp_line)
if not header:
raise qdb.exceptions.QiitaDBError(
"No header line was found in mapping file.")
if not mapping_data:
raise qdb.exceptions.QiitaDBError(
"No data found in mapping file.")
return mapping_data, header, comments
def get_pgsql_reserved_words():
    """Returns a list of the current reserved words in pgsql

    Returns
    -------
    set: str
        The reserved words
    """
    # catcode 'R' marks the fully reserved keywords in pg_get_keywords()
    sql = "SELECT word FROM pg_get_keywords() WHERE catcode = 'R';"
    with qdb.sql_connection.TRN:
        qdb.sql_connection.TRN.add(sql)
        return set(qdb.sql_connection.TRN.execute_fetchflatten())
| josenavas/QiiTa | qiita_db/metadata_template/util.py | Python | bsd-3-clause | 14,704 |
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
#============= standard library imports ========================
#============= local library imports ==========================
from setuptools import setup, find_packages

# Package metadata for ArArPy, a 40Ar/39Ar geochronology toolkit.
setup(
    name="ArArPy",
    version="0.1",
    packages=["ararpy"],
    author="Jake Ross",
    author_email="jirhiker@nmt.edu",
    description="40Ar/39Ar geochronology package",
    license="Apache 2.0",
    keywords="argon geology science geochronology",
    url="https://github.com/NMGRL/ararpy",  # project home page, if any
)
#============= EOF =============================================
| NMGRL/ararpy | setup.py | Python | apache-2.0 | 1,397 |
# coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
from testutil.dott import feature, sh, testtmp # noqa: F401
(
sh % "cat"
<< r"""
[extensions]
tweakdefaults=
rebase=
[experimental]
updatecheck=noconflict
"""
>> "$HGRCPATH"
)
sh % "setconfig 'ui.suggesthgprev=True'"
# Set up the repository.
sh % "hg init repo"
sh % "cd repo"
sh % "hg debugbuilddag -m '+4 *3 +1'"
sh % "hg log --graph -r '0::' -T '{rev}'" == r"""
o 5
│
o 4
│
│ o 3
│ │
│ o 2
├─╯
o 1
│
o 0"""
sh % "hg up 3" == "1 files updated, 0 files merged, 0 files removed, 0 files unresolved"
# Make an uncommitted change.
sh % "echo foo" > "foo"
sh % "hg add foo"
sh % "hg st" == "A foo"
# Can always update to current commit.
sh % "hg up ." == "0 files updated, 0 files merged, 0 files removed, 0 files unresolved"
# Abort with --check set, succeed with --merge
sh % "hg up 2 --check" == r"""
abort: uncommitted changes
[255]"""
sh % "hg up --merge 2" == "1 files updated, 0 files merged, 0 files removed, 0 files unresolved"
# Updates to other branches should fail without --merge.
sh % "hg up 4 --check" == r"""
abort: uncommitted changes
[255]"""
sh % "hg up --merge 4" == "1 files updated, 0 files merged, 0 files removed, 0 files unresolved"
# Certain flags shouldn't work together.
sh % "hg up --check --merge 3" == r"""
abort: can only specify one of -C/--clean, -c/--check, or -m/--merge
[255]"""
sh % "hg up --check --clean 3" == r"""
abort: can only specify one of -C/--clean, -c/--check, or -m/--merge
[255]"""
sh % "hg up --clean --merge 3" == r"""
abort: can only specify one of -C/--clean, -c/--check, or -m/--merge
[255]"""
# --clean should work as expected.
sh % "hg st" == "A foo"
sh % "hg up --clean 3" == "1 files updated, 0 files merged, 0 files removed, 0 files unresolved"
sh % "hg st" == "? foo"
sh % "enable amend"
sh % "hg update '.^'" == r"""
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
hint[update-prev]: use 'hg prev' to move to the parent changeset
hint[hint-ack]: use 'hg hint --ack update-prev' to silence these hints"""
| facebookexperimental/eden | eden/hg-server/tests/test-fb-hgext-tweakdefaults-update-t.py | Python | gpl-2.0 | 2,375 |
# Servo Control
import time
delay_period = 0.01
def set(property, value):
    """Write *value* to the rpi-pwm sysfs attribute named *property*.

    NOTE: this intentionally shadows the builtin ``set`` — kept for
    backward compatibility with existing callers. Errors are reported on
    stdout rather than raised, so a missing sysfs node is non-fatal.
    """
    path = "/sys/class/rpi-pwm/pwm0/" + property
    try:
        # 'with' guarantees the descriptor is closed even if write() fails
        # (the original open/write/close pattern could leak it).
        with open(path, 'w') as f:
            f.write(value)
    except (IOError, OSError):
        # Only I/O failures are expected here; a bare except would also
        # swallow KeyboardInterrupt/SystemExit.
        print("Error writing to: " + property + " value: " + value)
def set_servo(angle):
    # Command the servo to *angle* (degrees) via the PWM sysfs "servo" node.
    set("servo", str(angle))
def test_servo():
    """Sweep the servo up from 0 to 179 degrees, then back down to 1."""
    for angle in range(180):
        set_servo(angle)
        time.sleep(delay_period)
    # Downward sweep: 180, 179, ..., 1 (mirrors the original 180 - angle).
    for angle in range(180, 0, -1):
        set_servo(angle)
        time.sleep(delay_period)
# Configure the PWM channel for servo operation: immediate (non-delayed)
# updates, servo mode, a 180 degree range, and finally enable the output.
set("delayed", "0")
set("mode", "servo")
set("servo_max", "180")
set("active", "1")
| erikulven/flapmagick | servo.py | Python | bsd-2-clause | 632 |
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.contenttypes.models import ContentType
from django.http import HttpResponseRedirect, Http404, HttpResponse
from django.shortcuts import render, get_object_or_404
from .forms import CommentForm
from .models import Comment
@login_required #(login_url='/login/') #LOGIN_URL = '/login/'
def comment_delete(request, id):
    """Delete a comment owned by the logged-in user.

    GET renders a confirmation page; POST deletes the comment and
    redirects back to the object it was attached to. A user may only
    delete their own comments (403 otherwise); unknown ids give 404.
    """
    try:
        obj = Comment.objects.get(id=id)
    except (Comment.DoesNotExist, ValueError):
        # Unknown id, or an id that cannot be coerced to the pk type.
        # (Narrowed from a bare except, which also hid real errors.)
        raise Http404
    if obj.user != request.user:
        # Comments may only be removed by their author.
        response = HttpResponse("You do not have permission to do this.")
        response.status_code = 403
        return response
    if request.method == "POST":
        # Resolve the redirect target before deleting, while the generic
        # relation is still resolvable.
        parent_obj_url = obj.content_object.get_absolute_url()
        obj.delete()
        messages.success(request, "This has been deleted.")
        return HttpResponseRedirect(parent_obj_url)
    context = {
        "object": obj,
    }
    return render(request, "confirm_delete.html", context)
def comment_thread(request, id):
    """Display a comment thread and handle replies posted to it.

    Child comments resolve to their parent so the whole thread is shown.
    A valid POST from an authenticated user creates the reply and
    redirects to the commented object's page.
    """
    try:
        obj = Comment.objects.get(id=id)
    except (Comment.DoesNotExist, ValueError):
        # Narrowed from a bare except: unknown or malformed id -> 404.
        raise Http404

    if not obj.is_parent:
        # Replies share the thread page of their parent comment.
        obj = obj.parent

    initial_data = {
        "content_type": obj.content_type,
        "object_id": obj.object_id,
    }
    form = CommentForm(request.POST or None, initial=initial_data)
    if form.is_valid() and request.user.is_authenticated():
        c_type = form.cleaned_data.get("content_type")
        content_type = ContentType.objects.get(model=c_type)
        obj_id = form.cleaned_data.get('object_id')
        content_data = form.cleaned_data.get("content")
        parent_obj = None
        try:
            parent_id = int(request.POST.get("parent_id"))
        except (TypeError, ValueError):
            # Missing (None) or non-numeric parent_id -> top-level comment.
            parent_id = None

        if parent_id:
            parent_qs = Comment.objects.filter(id=parent_id)
            if parent_qs.exists() and parent_qs.count() == 1:
                parent_obj = parent_qs.first()

        new_comment, created = Comment.objects.get_or_create(
            user=request.user,
            content_type=content_type,
            object_id=obj_id,
            content=content_data,
            parent=parent_obj,
        )
        return HttpResponseRedirect(new_comment.content_object.get_absolute_url())

    context = {
        "comment": obj,
        "form": form,
    }
    return render(request, "comment_thread.html", context)
return render(request, "comment_thread.html", context) | rohitkyadav/blog-api | src/comments/views.py | Python | mit | 2,962 |
from tinymce.widgets import TinyMCE
from Nahuatilli import settings
class CustomTinyMCEWidget(TinyMCE):
    """TinyMCE widget that registers the project's extra editor setup.

    Installs the JavaScript callback ``additional_setup`` (expected to be
    defined in ``js/extend_tinymce.js``) as TinyMCE's ``setup`` hook before
    delegating to the stock :class:`TinyMCE` widget.
    """

    def __init__(self, content_language=None, attrs=None, mce_attrs=None):
        # ``mce_attrs`` previously defaulted to a mutable ``{}``, which is
        # shared across all calls (classic Python pitfall); use None as the
        # sentinel and build a fresh dict per instance instead.
        if mce_attrs is None:
            mce_attrs = {}
        # TinyMCE calls the function even when it is passed as a string; an
        # anonymous function passed as a string would not be rendered as
        # javascript, so a named callback is registered here.
        settings.TINYMCE_DEFAULT_CONFIG['setup'] = 'additional_setup'
        super(CustomTinyMCEWidget, self).__init__(content_language, attrs, mce_attrs)
'''
Created by auto_sdk on 2015.06.23
'''
from aliyun.api.base import RestApi
class Rkvstore20150101FlushInstanceRequest(RestApi):
    """Request object for the ``FlushInstance`` action of the Aliyun
    r-kvstore API, version 2015-01-01."""

    def __init__(self, domain='r-kvstore.aliyuncs.com', port=80):
        RestApi.__init__(self, domain, port)
        # Identifier of the KVStore instance to flush; must be assigned by
        # the caller before the request is issued.
        self.InstanceId = None

    def getapiname(self):
        # The API name encodes the endpoint, the action and its version.
        return 'r-kvstore.aliyuncs.com.FlushInstance.2015-01-01'
| francisar/rds_manager | aliyun/api/rest/Rkvstore20150101FlushInstanceRequest.py | Python | mit | 338 |
from ethereum import tester
from datetime import datetime, date
import math
import pytest
slow = pytest.mark.slow
from utilRelay import dblSha256Flip, disablePyethLogging
disablePyethLogging()
class TestBtcSpecialTx(object):
    # Serpent contract under test and the gas budgeted for deployment.
    CONTRACT = 'example-btc-eth/btcSpecialTx.se'
    CONTRACT_GAS = 55000

    # 1 ether in wei.
    ETHER = 10 ** 18

    def setup_class(cls):
        # Deploy the contract once into a fresh EVM test state and snapshot
        # it so every test method can start from the same point.
        tester.gas_limit = int(2e6)
        cls.s = tester.state()
        cls.c = cls.s.abi_contract(cls.CONTRACT, endowment=2000*cls.ETHER)
        cls.snapshot = cls.s.snapshot()
        cls.seed = tester.seed

    def setup_method(self, method):
        # Roll the EVM state and RNG seed back before each test.
        self.s.revert(self.snapshot)
        tester.seed = self.seed

    def testGetBytesLE(self):
        # getBytesLE returns [byte length, little-endian integer] for the
        # requested bit width starting at the given byte offset.
        assert self.c.getBytesLE('23'.decode('hex'), 0, 8) == [1, 0x23]
        assert self.c.getBytesLE('45'.decode('hex'), 0, 8) == [1, 0x45]
        assert self.c.getBytesLE('2345'.decode('hex'), 0, 16) == [2, 0x4523]
        # assert self.c.getBytesLE('012345'.decode('hex'), 0, 24) == [3, 0x452301] bits 24 is not supported
        assert self.c.getBytesLE('012345'.decode('hex'), 1, 16) == [2, 0x4523]
        assert self.c.getBytesLE('01234567'.decode('hex'), 1, 16) == [2, 0x4523]
        assert self.c.getBytesLE('01234567'.decode('hex'), 0, 32) == [4, 0x67452301]
        assert self.c.getBytesLE('01234567'.decode('hex'), 2, 8) == [1, 0x45]
        assert self.c.getBytesLE('01234567'.decode('hex'), 2, 16) == [2, 0x6745]
        assert self.c.getBytesLE('0123456789abcdef'.decode('hex'), 0, 64) == [8, 0xefcdab8967452301]
        assert self.c.getBytesLE('0123456789abcdef'.decode('hex'), 4, 32) == [4, 0xefcdab89]

    def test_testnetTx(self):
        # testnet tx a51a71f8094f9b4e266fcccd55068e809277ec79bfa44b7bdb8f1355e9bb8460
        # tx[9] of block 350559
        txStr = '010000000158115acce0e68bc58ecb89e6452380bd68da56dc0a163d9806c04b24dfefe269000000008a47304402207a0bf036d5c78d6910d608c47c9e59cbf5708df51fd22362051b8f1ecd9691d1022055ee6ace9f12f02720ce91f62916570dbd93b2aa1e91be7da8e5230f62606db7014104858527cb6bf730cbd1bcf636bc7e77bbaf0784b9428ec5cca2d8378a0adc75f5ca893d14d9db2034cbb7e637aacf28088a68db311ff6f1ebe6d00a62fed9951effffffff0210980200000000001976a914a0dc485fc3ade71be5e1b68397abded386c0adb788ac10270000000000001976a914d3193ccb3564d5425e4875fe763e26e2fce1fd3b88ac00000000'
        res = self.c.getFirst2Outputs(txStr.decode('hex'))
        # First output value in satoshis.
        assert res[0] == 170000

        # Byte offset of the first output's script; the pay-to-pubkey-hash
        # address starts 3 bytes (6 hex chars) into the script.
        out1stScriptIndex = res[1]
        assert out1stScriptIndex == 194
        btcAddrIndex = out1stScriptIndex*2 + 6
        assert txStr[btcAddrIndex:btcAddrIndex+40] == 'a0dc485fc3ade71be5e1b68397abded386c0adb7'

        # Same layout for the second output (carries the ethereum address).
        out2ndScriptIndex = res[2]
        assert out2ndScriptIndex == 228
        ethAddrIndex = out2ndScriptIndex*2 + 6
        assert txStr[ethAddrIndex:ethAddrIndex+40] == 'd3193ccb3564d5425e4875fe763e26e2fce1fd3b'
| ethereum/btcrelay | example-btc-eth/test/test_btcSpecialTx.py | Python | mit | 2,850 |
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.tests.api import base
from neutron.tests.tempest import config
from neutron.tests.tempest import test
from tempest_lib.common.utils import data_utils
CONF = config.CONF
class SharedNetworksTest(base.BaseAdminNetworkTest):
    """API tests for the ``shared`` attribute of Neutron networks."""

    @classmethod
    def resource_setup(cls):
        super(SharedNetworksTest, cls).resource_setup()
        # One admin-owned shared network reused across the test cases.
        cls.shared_network = cls.create_shared_network()

    @test.idempotent_id('6661d219-b96d-4597-ad10-55766ce4abf7')
    def test_create_update_shared_network(self):
        shared_network = self.create_shared_network()
        net_id = shared_network['id']
        self.assertEqual('ACTIVE', shared_network['status'])
        self.assertIsNotNone(shared_network['id'])
        self.assertTrue(self.shared_network['shared'])
        new_name = "New_shared_network"
        # Rename, un-share and disable the network in a single update.
        body = self.admin_client.update_network(net_id, name=new_name,
                                                admin_state_up=False,
                                                shared=False)
        updated_net = body['network']
        self.assertEqual(new_name, updated_net['name'])
        self.assertFalse(updated_net['shared'])
        self.assertFalse(updated_net['admin_state_up'])

    @test.idempotent_id('9c31fabb-0181-464f-9ace-95144fe9ca77')
    def test_create_port_shared_network_as_non_admin_tenant(self):
        # create a port as non admin
        body = self.client.create_port(network_id=self.shared_network['id'])
        port = body['port']
        self.addCleanup(self.admin_client.delete_port, port['id'])
        # verify the tenant id of admin network and non admin port
        self.assertNotEqual(self.shared_network['tenant_id'],
                            port['tenant_id'])

    @test.idempotent_id('3e39c4a6-9caf-4710-88f1-d20073c6dd76')
    def test_create_bulk_shared_network(self):
        # Creates 2 networks in one request
        net_nm = [data_utils.rand_name('network'),
                  data_utils.rand_name('network')]
        body = self.admin_client.create_bulk_network(net_nm, shared=True)
        created_networks = body['networks']
        for net in created_networks:
            self.addCleanup(self.admin_client.delete_network, net['id'])
            self.assertIsNotNone(net['id'])
            self.assertTrue(net['shared'])

    def _list_shared_networks(self, user):
        # Helper: assert the fixture network appears in *user*'s
        # shared-network listing.
        body = user.list_networks(shared=True)
        networks_list = [net['id'] for net in body['networks']]
        self.assertIn(self.shared_network['id'], networks_list)
        self.assertTrue(self.shared_network['shared'])

    @test.idempotent_id('a064a9fd-e02f-474a-8159-f828cd636a28')
    def test_list_shared_networks(self):
        # List the shared networks and confirm that
        # shared network extension attribute is returned for those networks
        # that are created as shared
        self._list_shared_networks(self.admin_client)
        self._list_shared_networks(self.client)

    def _show_shared_network(self, user):
        # Helper: assert the fixture network is visible to *user* with the
        # shared flag set.
        body = user.show_network(self.shared_network['id'])
        show_shared_net = body['network']
        self.assertEqual(self.shared_network['name'], show_shared_net['name'])
        self.assertEqual(self.shared_network['id'], show_shared_net['id'])
        self.assertTrue(show_shared_net['shared'])

    @test.idempotent_id('e03c92a2-638d-4bfa-b50a-b1f66f087e58')
    def test_show_shared_networks_attribute(self):
        # Show a shared network and confirm that
        # shared network extension attribute is returned.
        self._show_shared_network(self.admin_client)
        self._show_shared_network(self.client)
| pnavarro/neutron | neutron/tests/api/admin/test_shared_network_extension.py | Python | apache-2.0 | 4,322 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Tests corresponding to the DataflowRunner implementation of MetricsResult,
the DataflowMetrics class.
"""
from __future__ import absolute_import
import types
import unittest
from builtins import object
import mock
from apache_beam.metrics.cells import DistributionData
from apache_beam.metrics.cells import DistributionResult
from apache_beam.metrics.execution import MetricKey
from apache_beam.metrics.execution import MetricResult
from apache_beam.metrics.metricbase import MetricName
from apache_beam.runners.dataflow import dataflow_metrics
class DictToObject(object):
    """Translate from a dict(list()) structure to an object structure"""

    def __init__(self, data):
        # Every top-level key becomes an attribute; nested dicts become
        # nested DictToObject instances, recursively.
        for key, val in data.items():
            setattr(self, key, self._wrap(val))

    def _wrap(self, value):
        if isinstance(value, (tuple, list, set, frozenset)):
            # Rebuild the container with the same concrete type, wrapping
            # each element in turn.
            return type(value)(self._wrap(item) for item in value)
        if isinstance(value, dict):
            return DictToObject(value)
        return value
class TestDataflowMetrics(unittest.TestCase):
  """Tests for DataflowMetrics, Dataflow's implementation of MetricResults.

  The JobMetrics responses normally returned by the Dataflow service are
  simulated with dict fixtures (wrapped into attribute-style objects by
  DictToObject), so no service connection is required.
  """

  # Response containing only user counters, each reported both tentatively
  # and committed (the committed one lacks the "tentative" context key).
  ONLY_COUNTERS_LIST = {"metrics": [
      {"name": {"context":
                {"additionalProperties": [
                    {"key": "namespace",
                     "value": "__main__.WordExtractingDoFn"},
                    {"key": "step",
                     "value": "s2"},
                    {"key": "tentative",
                     "value": "true"}]
                },
                "name": "words",
                "origin": "user"
               },
       "scalar": {"integer_value": 26185},
       "distribution": None,
       "updateTime": "2017-03-22T18:47:06.402Z"
      },
      {"name": {"context":
                {"additionalProperties": [
                    {"key": "namespace",
                     "value": "__main__.WordExtractingDoFn"},
                    {"key": "step",
                     "value": "s2"}]
                },
                "name": "words",
                "origin": "user"
               },
       "scalar": {"integer_value": 26181},
       "distribution": None,
       "updateTime": "2017-03-22T18:47:06.402Z"
      },
      {"name": {"context":
                {"additionalProperties": [
                    {"key": "namespace",
                     "value": "__main__.WordExtractingDoFn"},
                    {"key": "step",
                     "value": "s2"},
                    {"key": "tentative",
                     "value": "true"}]
                },
                "name": "empty_lines",
                "origin": "user"
               },
       "scalar": {"integer_value": 1080},
       "distribution": None,
       "updateTime": "2017-03-22T18:47:06.402Z"
      },
      {"name": {"context":
                {"additionalProperties": [
                    {"key": "namespace",
                     "value": "__main__.WordExtractingDoFn"},
                    {"key": "step",
                     "value": "s2"}]
                },
                "name": "empty_lines",
                "origin": "user"
               },
       "scalar": {"integer_value": 1080},
       "distribution": None,
       "updateTime": "2017-03-22T18:47:06.402Z"
      },
  ]}

  # Response mixing a user counter with a user distribution metric.
  STRUCTURED_COUNTER_LIST = {"metrics": [
      {"name": {"context":
                {"additionalProperties": [
                    {"key": "namespace",
                     "value": "__main__.WordExtractingDoFn"},
                    {"key": "step",
                     "value": "s2"},
                    {"key": "tentative",
                     "value": "true"}]
                },
                "name": "word_lengths",
                "origin": "user"
               },
       "scalar": {"integer_value": 109475},
       "distribution": None,
       "updateTime": "2017-03-22T18:47:06.402Z"
      },
      {"name": {"context":
                {"additionalProperties": [
                    {"key": "namespace",
                     "value": "__main__.WordExtractingDoFn"},
                    {"key": "step",
                     "value": "s2"}]
                },
                "name": "word_lengths",
                "origin": "user"
               },
       "scalar": {"integer_value": 109475},
       "distribution": None,
       "updateTime": "2017-03-22T18:47:06.402Z"
      },
      {"name": {"context":
                {"additionalProperties": [
                    {"key": "namespace",
                     "value": "__main__.WordExtractingDoFn"},
                    {"key": "step",
                     "value": "s2"},
                    {"key": "tentative",
                     "value": "true"}]
                },
                "name": "word_length_dist",
                "origin": "user"
               },
       "scalar": None,
       "distribution": {
           "object_value": {
               "properties": [
                   {"key": "min", "value":
                    {"integer_value": 2}},
                   {"key": "max", "value":
                    {"integer_value": 16}},
                   {"key": "count", "value":
                    {"integer_value": 2}},
                   {"key": "mean", "value":
                    {"integer_value": 9}},
                   {"key": "sum", "value":
                    {"integer_value": 18}},
               ]
           }
       },
       "updateTime": "2017-03-22T18:47:06.402Z"
      },
      {"name": {"context":
                {"additionalProperties": [
                    {"key": "namespace",
                     "value": "__main__.WordExtractingDoFn"},
                    {"key": "step",
                     "value": "s2"}]
                },
                "name": "word_length_dist",
                "origin": "user"
               },
       "scalar": None,
       "distribution": {
           "object_value": {
               "properties": [
                   {"key": "min", "value":
                    {"integer_value": 2}},
                   {"key": "max", "value":
                    {"integer_value": 16}},
                   {"key": "count", "value":
                    {"integer_value": 2}},
                   {"key": "mean", "value":
                    {"integer_value": 9}},
                   {"key": "sum", "value":
                    {"integer_value": 18}},
               ]
           }
       },
       "updateTime": "2017-03-22T18:47:06.402Z"
      },
  ]}

  def setup_mock_client_result(self, counter_list=None):
    """Build a mock Dataflow client/job pair that serves *counter_list*."""
    mock_client = mock.Mock()
    mock_query_result = DictToObject(counter_list)
    mock_client.get_job_metrics.return_value = mock_query_result
    mock_job_result = mock.Mock()
    mock_job_result.job_id.return_value = 1
    # Pretend the job is still running so queries hit the (mock) service.
    mock_job_result.is_in_terminal_state.return_value = False
    return mock_client, mock_job_result

  def test_cache_functions(self):
    mock_client, mock_job_result = self.setup_mock_client_result(
        self.STRUCTURED_COUNTER_LIST)
    dm = dataflow_metrics.DataflowMetrics(mock_client, mock_job_result)

    # At first creation, we should always query dataflow.
    # (assertIsNone gives a clearer failure message than assertTrue(x is None).)
    self.assertIsNone(dm._cached_metrics)

    # Right after querying, we still query again.
    dm.query()
    self.assertIsNone(dm._cached_metrics)

    # The job has ended. The query should not run again after this.
    mock_job_result.is_in_terminal_state.return_value = True
    dm.query()
    self.assertTrue(dm._cached_metrics)

  def test_query_structured_metrics(self):
    mock_client, mock_job_result = self.setup_mock_client_result(
        self.STRUCTURED_COUNTER_LIST)
    dm = dataflow_metrics.DataflowMetrics(mock_client, mock_job_result)
    # Bypass the step-name translation, which needs a real job graph.
    dm._translate_step_name = types.MethodType(lambda self, x: 'split', dm)
    query_result = dm.query()
    expected_counters = [
        MetricResult(
            MetricKey('split',
                      MetricName('__main__.WordExtractingDoFn',
                                 'word_lengths')),
            109475, 109475),
        ]
    self.assertEqual(query_result['counters'], expected_counters)

    expected_distributions = [
        MetricResult(
            MetricKey('split',
                      MetricName('__main__.WordExtractingDoFn',
                                 'word_length_dist')),
            DistributionResult(DistributionData(
                18, 2, 2, 16)),
            DistributionResult(DistributionData(
                18, 2, 2, 16))),
        ]
    self.assertEqual(query_result['distributions'], expected_distributions)

  def test_query_counters(self):
    mock_client, mock_job_result = self.setup_mock_client_result(
        self.ONLY_COUNTERS_LIST)
    dm = dataflow_metrics.DataflowMetrics(mock_client, mock_job_result)
    dm._translate_step_name = types.MethodType(lambda self, x: 'split', dm)
    query_result = dm.query()
    # Committed value first, then the attempted (tentative) value.
    expected_counters = [
        MetricResult(
            MetricKey('split',
                      MetricName('__main__.WordExtractingDoFn', 'empty_lines')),
            1080, 1080),
        MetricResult(
            MetricKey('split',
                      MetricName('__main__.WordExtractingDoFn', 'words')),
            26181, 26185),
        ]
    # Result ordering is unspecified, so compare sorted by metric name.
    self.assertEqual(sorted(query_result['counters'],
                            key=lambda x: x.key.metric.name),
                     sorted(expected_counters,
                            key=lambda x: x.key.metric.name))
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
  unittest.main()
| rangadi/incubator-beam | sdks/python/apache_beam/runners/dataflow/dataflow_metrics_test.py | Python | apache-2.0 | 10,106 |
# encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from types import GeneratorType, ModuleType
from django.core.urlresolvers import reverse
from django.test import TestCase
from haystack.utils import app_loading
class AppLoadingTests(TestCase):
    """Tests for haystack's app-loading helpers in ``haystack.utils.app_loading``."""

    def test_load_apps(self):
        apps = app_loading.haystack_load_apps()
        self.assertIsInstance(apps, (list, GeneratorType))
        self.assertIn('hierarchal_app_django', apps)
        # Apps that define no models module must be filtered out.
        self.assertNotIn('test_app_without_models', apps,
                         msg='haystack_load_apps should exclude apps without defined models')

    def test_get_app_modules(self):
        app_modules = app_loading.haystack_get_app_modules()
        self.assertIsInstance(app_modules, (list, GeneratorType))
        # Each entry should be an importable app module object.
        for i in app_modules:
            self.assertIsInstance(i, ModuleType)

    def test_get_models_all(self):
        # Bare app label: returns all of the app's models.
        models = app_loading.haystack_get_models('core')
        self.assertIsInstance(models, (list, GeneratorType))

    def test_get_models_specific(self):
        from test_haystack.core.models import MockModel
        # "app.Model" form: returns exactly that model.
        models = app_loading.haystack_get_models('core.MockModel')
        self.assertIsInstance(models, (list, GeneratorType))
        self.assertListEqual(models, [MockModel])

    def test_hierarchal_app_get_models(self):
        models = app_loading.haystack_get_models('hierarchal_app_django')
        self.assertIsInstance(models, (list, GeneratorType))
        # Compare by _meta labels; ordering is not guaranteed.
        self.assertSetEqual(set(str(i._meta) for i in models),
                            set(('hierarchal_app_django.hierarchalappsecondmodel',
                                 'hierarchal_app_django.hierarchalappmodel')))

    def test_hierarchal_app_specific_model(self):
        models = app_loading.haystack_get_models('hierarchal_app_django.HierarchalAppModel')
        self.assertIsInstance(models, (list, GeneratorType))
        self.assertSetEqual(set(str(i._meta) for i in models),
                            set(('hierarchal_app_django.hierarchalappmodel', )))
class AppWithoutModelsTests(TestCase):
    """Smoke test that a model-less app still works when installed."""

    # Confirm that everything works if an app is enabled
    def test_simple_view(self):
        url = reverse('app-without-models:simple-view')
        resp = self.client.get(url)
        self.assertEqual(resp.content.decode('utf-8'), 'OK')
| fisle/django-haystack | test_haystack/test_app_loading.py | Python | bsd-3-clause | 2,358 |
#!/usr/bin/env python3
import sys
import os
import functools
from xml.sax.saxutils import escape
import xml.etree.ElementTree as ET
# on msys, use crlf output
nl = None
if sys.platform == 'msys':
    nl = "\r\n"

# Require at least one .ui file argument; print usage otherwise.
if len(sys.argv) <= 1:
    print('Usage: python3 {} filename.ui [filename2.ui ...]'.format(sys.argv[0]))
    sys.exit(0)
def sort_prop_key(a):
# declare all non-<property> to be equal, and also greater than to all <property>
# because sorted() is stable this means we only rearrange properties relative to each other
# and put them at the front, the rest are kept in-order as-is
if a.tag != "property":
return "z"
return "a" + a.get('name')
def sort_grid_key(a):
    """Sort key for QGridLayout children.

    Non-<item> children (e.g. properties) all sort first ("a"); <item>
    children sort afterwards, ordered by (row, column) via zero-padded
    fixed-width keys.
    """
    if a.tag == "item":
        return "z{:08}{:08}".format(int(a.get('row')), int(a.get('column')))
    return "a"
def canonicalise_ui(elem):
    """Recursively normalise a .ui element tree in place.

    Children of a QGridLayout <layout> element are reordered so that
    non-item children come first, followed by <item>s sorted by
    (row, column).  Alphabetical <property> sorting (sort_prop_key) is
    deliberately left disabled -- it is unclear whether Qt Creator uses
    a fixed property order.
    """
    if elem.tag == "layout" and elem.get('class') == 'QGridLayout':
        elem[:] = sorted(elem, key=sort_grid_key)
    for child in elem:
        canonicalise_ui(child)
def write_ui_xml(f, elem, indent):
    """Serialise `elem` (and children) to `f` in Qt Designer's .ui style.

    `indent` is the number of leading spaces (one per nesting level).
    Returns True when the element was written "iconset style" -- i.e. its
    non-empty tail text was emitted directly after the closing tag instead
    of a newline -- so the parent knows to skip indenting its closing tag.
    Returns False otherwise.
    """
    f.write(' ' * indent)
    f.write('<{}'.format(elem.tag))
    for k, v in elem.items():
        # NOTE(review): attribute values are written unescaped; assumes Qt
        # Designer never puts <, & or " inside attributes -- TODO confirm.
        f.write(' {}="{}"'.format(k, v))
    if elem.text or len(elem) > 0:
        f.write('>')
        if elem.text and len(elem.text.strip()) > 0:
            # Escape &, <, > and also " (as &quot;).  The original chained a
            # no-op .replace('"', '"'); the entities mapping does it properly.
            f.write(escape(elem.text, {'"': '&quot;'}))
        if len(elem) > 0:
            f.write('\n')
            for child in elem:
                iconset_tail = write_ui_xml(f, child, indent + 1)
            # If the last child emitted its tail "iconset style", the closing
            # tag follows it directly with no indentation of its own.
            if not iconset_tail:
                f.write(' ' * indent)
        f.write('</{}>'.format(elem.tag))
        # hack for weird iconset formatting: non-empty tail text is written
        # immediately after the closing tag instead of a newline.
        if elem.tail is None or len(elem.tail.strip()) == 0:
            f.write('\n')
            return False
        f.write(elem.tail.strip())
        return True
    # Empty element: self-close.  (Was a pointless '/>\n'.format(elem.tag).)
    f.write('/>\n')
    return False
# Re-format every .ui file named on the command line, in place.
for filename in sys.argv[1:]:
    print("Formatting {}...".format(filename))
    uifile = os.path.abspath(filename)
    ui = ET.parse(uifile)
    canonicalise_ui(ui.getroot())
    with open(uifile, mode='w', newline=nl, encoding='utf-8') as f:
        # write_ui_xml emits only elements, so write the declaration ourselves.
        f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
        write_ui_xml(f, ui.getroot(), 0)
| Zorro666/renderdoc | qrenderdoc/sort_qt_ui.py | Python | mit | 2,505 |
# Required imports to run this file
import matplotlib.pyplot as plt
import numpy as np
# weighted matrix
def weighted_matrix(point: np.mat, training_data_x: np.mat, bandwidth: float) -> np.mat:
    """
    Build the diagonal Gaussian-kernel weight matrix for one query point.

    Sample j gets weight exp(-||point - x_j||^2 / (2 * bandwidth^2));
    every off-diagonal entry stays zero.

    point            -- the x where we want to make predictions
    training_data_x  -- matrix of training samples
    bandwidth        -- kernel bandwidth (tau); not fixed, may be tuned

    >>> weighted_matrix(np.array([1., 1.]),np.mat([[16.99, 10.34], [21.01,23.68],
    ... [24.59,25.69]]), 0.6)
    matrix([[1.43807972e-207, 0.00000000e+000, 0.00000000e+000],
            [0.00000000e+000, 0.00000000e+000, 0.00000000e+000],
            [0.00000000e+000, 0.00000000e+000, 0.00000000e+000]])
    """
    n_samples = np.shape(training_data_x)[0]
    # Start from the identity so off-diagonal entries are zero.
    kernel = np.mat(np.eye(n_samples))
    for idx in range(n_samples):
        offset = point - training_data_x[idx]
        kernel[idx, idx] = np.exp(offset * offset.T / (-2.0 * bandwidth**2))
    return kernel
def local_weight(
    point: np.mat, training_data_x: np.mat, training_data_y: np.mat, bandwidth: float
) -> np.mat:
    """
    Solve the weighted least-squares normal equations for one query point.

    Returns the local coefficient vector (X^T W X)^-1 X^T W y, where W is
    the kernel weight matrix centred on `point`.

    >>> local_weight(np.array([1., 1.]),np.mat([[16.99, 10.34], [21.01,23.68],
    ... [24.59,25.69]]),np.mat([[1.01, 1.66, 3.5]]), 0.6)
    matrix([[0.00873174],
            [0.08272556]])
    """
    kernel = weighted_matrix(point, training_data_x, bandwidth)
    weighted_gram = training_data_x.T * (kernel * training_data_x)
    return weighted_gram.I * (training_data_x.T * kernel * training_data_y.T)
def local_weight_regression(
    training_data_x: np.mat, training_data_y: np.mat, bandwidth: float
) -> np.mat:
    """
    Predict the target value at every training sample using locally
    weighted regression, returning one prediction per sample.

    >>> local_weight_regression(np.mat([[16.99, 10.34], [21.01,23.68],
    ... [24.59,25.69]]),np.mat([[1.01, 1.66, 3.5]]), 0.6)
    array([1.07173261, 1.65970737, 3.50160179])
    """
    n_samples = np.shape(training_data_x)[0]
    predictions = np.zeros(n_samples)
    for idx, sample in enumerate(training_data_x):
        coefficients = local_weight(
            sample, training_data_x, training_data_y, bandwidth
        )
        predictions[idx] = sample * coefficients
    return predictions
def load_data(dataset_name: str, cola_name: str, colb_name: str) -> np.mat:
    """
    Load a seaborn example dataset and split it into regression inputs.

    Returns (training_data_x, mcol_b, col_a, col_b); training_data_x has a
    leading column of ones acting as the intercept term.

    >>> pass # this function has no doctest
    """
    import seaborn as sns

    data = sns.load_dataset(dataset_name)
    col_a = np.array(data[cola_name])  # e.g. total_bill
    col_b = np.array(data[colb_name])  # e.g. tip
    mcol_a = np.mat(col_a)
    mcol_b = np.mat(col_b)
    n_samples = np.shape(mcol_b)[1]
    intercept = np.ones((1, n_samples), dtype=int)
    # Horizontally stack the intercept column onto the feature column.
    training_data_x = np.hstack((intercept.T, mcol_a.T))
    return training_data_x, mcol_b, col_a, col_b
def get_preds(training_data_x: np.mat, mcol_b: np.mat, tau: float) -> np.ndarray:
    """
    Convenience wrapper: run locally weighted regression with bandwidth `tau`.

    >>> get_preds(np.mat([[16.99, 10.34], [21.01,23.68],
    ... [24.59,25.69]]),np.mat([[1.01, 1.66, 3.5]]), 0.6)
    array([1.07173261, 1.65970737, 3.50160179])
    """
    return local_weight_regression(training_data_x, mcol_b, tau)
def plot_preds(
    training_data_x: np.mat,
    predictions: np.ndarray,
    col_x: np.ndarray,
    col_y: np.ndarray,
    cola_name: str,
    colb_name: str,
) -> plt.plot:
    """
    Scatter the raw data points and overlay the LWR prediction curve.

    >>> pass #this function has no doctest
    """
    sorted_x = training_data_x.copy()
    sorted_x.sort(axis=0)
    plt.scatter(col_x, col_y, color="blue")
    # Plot predictions in the same x-order as the sorted feature column.
    order = training_data_x[:, 1].argsort(0)
    plt.plot(
        sorted_x[:, 1],
        predictions[order],
        color="yellow",
        linewidth=5,
    )
    plt.title("Local Weighted Regression")
    plt.xlabel(cola_name)
    plt.ylabel(colb_name)
    plt.show()
if __name__ == "__main__":
    # Demo: fit tip vs. total bill from seaborn's "tips" dataset (tau = 0.5).
    training_data_x, mcol_b, col_a, col_b = load_data("tips", "total_bill", "tip")
    predictions = get_preds(training_data_x, mcol_b, 0.5)
    plot_preds(training_data_x, predictions, col_a, col_b, "total_bill", "tip")
| TheAlgorithms/Python | machine_learning/local_weighted_learning/local_weighted_learning.py | Python | mit | 4,446 |
"""add max_nb_crowfly_by_mode, this parameter is used by distributed scenario
Revision ID: 483639f1f00
Revises: 4cd2ff722a7c
Create Date: 2018-08-21 11:48:08.941764
"""
# revision identifiers, used by Alembic.
revision = '483639f1f00'
down_revision = '4cd2ff722a7c'
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Add the nullable max_nb_crowfly_by_mode column.  Using the json module
    # as the PickleType pickler stores the value as JSON text instead of a
    # binary pickle.
    import json
    op.add_column('instance', sa.Column('max_nb_crowfly_by_mode', sa.PickleType(pickler=json), nullable=True))
def downgrade():
    # Revert: drop the column added in upgrade().
    op.drop_column('instance', 'max_nb_crowfly_by_mode')
| kinnou02/navitia | source/tyr/migrations/versions/483639f1f00_add_max_nb_crowfly_by_mode.py | Python | agpl-3.0 | 537 |
"""Configuration for imitation.scripts.train_adversarial."""
import os
import sacred
from imitation.policies import base
from imitation.scripts.config.common import DEFAULT_INIT_RL_KWARGS
from imitation.util import util
train_adversarial_ex = sacred.Experiment("train_adversarial", interactive=True)
@train_adversarial_ex.config
def train_defaults():
    # Sacred config scope: every local assignment below becomes a config
    # entry, so names and assignment forms must not be changed casually.
    env_name = "CartPole-v1"  # environment to train on
    total_timesteps = 1e5  # Num of environment transitions to sample
    algorithm = "gail"  # Either "airl" or "gail"
    n_expert_demos = None  # Num demos used. None uses every demo possible
    n_episodes_eval = 50  # Num of episodes for final mean ground truth return

    # Number of environments in VecEnv, must evenly divide gen_batch_size
    num_vec = 8
    # Use SubprocVecEnv rather than DummyVecEnv (generally faster if num_vec>1)
    parallel = True
    max_episode_steps = None  # Set to positive int to limit episode horizons

    # Kwargs for initializing GAIL and AIRL
    algorithm_kwargs = dict(
        shared=dict(
            expert_batch_size=1024,  # Number of expert samples per discriminator update
            # Number of discriminator updates after each round of generator updates
            n_disc_updates_per_round=4,
        ),
        airl={},
        gail={},
    )
    # Kwargs for initializing {GAIL,AIRL}DiscrimNet
    discrim_net_kwargs = dict(shared={}, airl={}, gail={})
    # Modifies the __init__ arguments for the imitation policy
    init_rl_kwargs = dict(
        policy_class=base.FeedForward32Policy,
        **DEFAULT_INIT_RL_KWARGS,
    )

    gen_batch_size = 2048  # Batch size for generator updates
    log_root = os.path.join("output", "train_adversarial")  # output directory
    checkpoint_interval = 0  # Num epochs between checkpoints (<0 disables)
    init_tensorboard = False  # If True, then write Tensorboard logs
    rollout_hint = None  # Used to generate default rollout_path
    data_dir = "data/"  # Default data directory
@train_adversarial_ex.config
def aliases_default_gen_batch_size(algorithm_kwargs, gen_batch_size):
    # Setting generator buffer capacity and discriminator batch size to
    # the same number is equivalent to not using a replay buffer at all.
    # "Disabling" the replay buffer seems to improve convergence speed, but may
    # come at a cost of stability.
    algorithm_kwargs["shared"]["gen_replay_buffer_capacity"] = gen_batch_size


@train_adversarial_ex.config
def calc_n_steps(num_vec, gen_batch_size):
    # Each of the num_vec environments contributes n_steps transitions,
    # so n_steps * num_vec == gen_batch_size.
    init_rl_kwargs = dict(n_steps=gen_batch_size // num_vec)


@train_adversarial_ex.config
def paths(env_name, log_root, rollout_hint, data_dir):
    # Per-run log directory, made unique with a timestamp.
    log_dir = os.path.join(
        log_root, env_name.replace("/", "_"), util.make_unique_timestamp()
    )
    # Recommended that user sets rollout_path manually.
    # By default we guess the named config associated with `env_name`
    # and attempt to load rollouts from `data/expert_models/`.
    if rollout_hint is None:
        rollout_hint = env_name.split("-")[0].lower()
    rollout_path = os.path.join(
        data_dir, "expert_models", f"{rollout_hint}_0", "rollouts", "final.pkl"
    )
# Training algorithm named configs

@train_adversarial_ex.named_config
def gail():
    """Quick alias for algorithm=gail"""
    algorithm = "gail"


@train_adversarial_ex.named_config
def airl():
    """Quick alias for algorithm=airl"""
    algorithm = "airl"


# Shared settings

# AIRL discriminator entropy weight shared by the MuJoCo configs below.
MUJOCO_SHARED_LOCALS = dict(discrim_net_kwargs=dict(airl=dict(entropy_weight=0.1)))
# Extra settings shared by the Ant-based configs (longer training, bigger batches).
ANT_SHARED_LOCALS = dict(
    total_timesteps=3e7,
    max_episode_steps=500,  # To match `inverse_rl` settings.
    algorithm_kwargs=dict(shared=dict(expert_batch_size=8192)),
    gen_batch_size=16384,
)
# Classic RL Gym environment named configs.
# Each sets env_name plus the rollout_hint used to locate expert demos.

@train_adversarial_ex.named_config
def acrobot():
    env_name = "Acrobot-v1"
    rollout_hint = "acrobot"


@train_adversarial_ex.named_config
def cartpole():
    env_name = "CartPole-v1"
    rollout_hint = "cartpole"
    discrim_net_kwargs = {"gail": {"normalize_images": False}}


@train_adversarial_ex.named_config
def seals_cartpole():
    env_name = "seals/CartPole-v0"
    # seals and vanilla CartPole have the same expert trajectories.
    rollout_hint = "cartpole"
    discrim_net_kwargs = {"gail": {"normalize_images": False}}


@train_adversarial_ex.named_config
def mountain_car():
    env_name = "MountainCar-v0"
    rollout_hint = "mountain_car"


@train_adversarial_ex.named_config
def seals_mountain_car():
    env_name = "seals/MountainCar-v0"
    rollout_hint = "mountain_car"  # TODO(shwang): Use seals/MountainCar-v0 rollouts.


@train_adversarial_ex.named_config
def pendulum():
    env_name = "Pendulum-v0"
    rollout_hint = "pendulum"
# Standard MuJoCo Gym environment named configs.
# locals().update(**...) injects the shared settings dict into the Sacred
# config scope as if each key had been assigned locally.

@train_adversarial_ex.named_config
def ant():
    locals().update(**MUJOCO_SHARED_LOCALS)
    locals().update(**ANT_SHARED_LOCALS)
    env_name = "Ant-v2"
    rollout_hint = "ant"


@train_adversarial_ex.named_config
def half_cheetah():
    locals().update(**MUJOCO_SHARED_LOCALS)
    env_name = "HalfCheetah-v2"
    rollout_hint = "half_cheetah"
    total_timesteps = 2e6


@train_adversarial_ex.named_config
def hopper():
    locals().update(**MUJOCO_SHARED_LOCALS)
    # TODO(adam): upgrade to Hopper-v3?
    env_name = "Hopper-v2"
    rollout_hint = "hopper"


@train_adversarial_ex.named_config
def humanoid():
    locals().update(**MUJOCO_SHARED_LOCALS)
    env_name = "Humanoid-v2"
    rollout_hint = "humanoid"
    total_timesteps = 4e6


@train_adversarial_ex.named_config
def reacher():
    env_name = "Reacher-v2"
    rollout_hint = "reacher"


@train_adversarial_ex.named_config
def swimmer():
    locals().update(**MUJOCO_SHARED_LOCALS)
    env_name = "Swimmer-v2"
    rollout_hint = "swimmer"
    total_timesteps = 2e6


@train_adversarial_ex.named_config
def walker():
    locals().update(**MUJOCO_SHARED_LOCALS)
    env_name = "Walker2d-v2"
    rollout_hint = "walker"
# Custom Gym environment named configs

@train_adversarial_ex.named_config
def two_d_maze():
    locals().update(**MUJOCO_SHARED_LOCALS)
    env_name = "imitation/TwoDMaze-v0"
    rollout_hint = "two_d_maze"


@train_adversarial_ex.named_config
def custom_ant():
    locals().update(**MUJOCO_SHARED_LOCALS)
    # Watch out -- ANT_SHARED_LOCALS could erroneously erase nested dict keys from
    # MUJOCO_SHARED_LOCALS because `locals().update()` doesn't merge dicts
    # "Sacred-style".
    locals().update(**ANT_SHARED_LOCALS)
    env_name = "imitation/CustomAnt-v0"
    rollout_hint = "custom_ant"


@train_adversarial_ex.named_config
def disabled_ant():
    locals().update(**MUJOCO_SHARED_LOCALS)
    locals().update(**ANT_SHARED_LOCALS)
    env_name = "imitation/DisabledAnt-v0"
    rollout_hint = "disabled_ant"
# Debug configs

@train_adversarial_ex.named_config
def fast():
    """Minimize the amount of computation.

    Useful for test cases.
    """
    # Need a minimum of 10 total_timesteps for adversarial training code to pass
    # "any update happened" assertion inside training loop.
    total_timesteps = 10
    n_expert_demos = 1
    n_episodes_eval = 1
    algorithm_kwargs = dict(
        shared=dict(
            expert_batch_size=1,
            n_disc_updates_per_round=4,
        )
    )
    gen_batch_size = 2
    parallel = False  # easier to debug with everything in one process
    max_episode_steps = 5
    # SB3 RL seems to need batch size of 2, otherwise it runs into numeric
    # issues when computing multinomial distribution during predict()
    num_vec = 2
    init_rl_kwargs = dict(batch_size=2)
| humancompatibleai/imitation | src/imitation/scripts/config/train_adversarial.py | Python | mit | 7,627 |
import numpy as np
import reaclib
# Indices of each species in the abundance array Y.
ihe4 = 0
ic12 = 1
nnuc = 2

# Mass number (number of nucleons) of each species.
A = np.zeros((nnuc), dtype=np.int32)
A[ihe4] = 4
A[ic12] = 12
def c12_gaa_he4(tf):
    """Rate for c12 --> he4 + he4 + he4.

    Sum of three ReacLib-style fit terms, each of the form
    exp(a0 + a1/T9 + a2/T9^(1/3) + a3*T9^(1/3) + a4*T9 + a5*T9^(5/3)
        + a6*ln T9), evaluated from the temperature factors in `tf`.
    """
    fit_sets = [
        # fy05rv
        (34.9561, -85.4472, -23.57, 20.4886, -12.9882, -20.0, 0.83333),
        # fy05nv
        (45.7734, -84.4227, -37.06, 29.3493, -115.507, -10.0, 1.66667),
        # fy05rv
        (22.394, -88.5493, -13.49, 21.4259, -1.34769, 0.0879816, -10.1653),
    ]
    factors = (1.0, tf.T9i, tf.T913i, tf.T913, tf.T9, tf.T953, tf.lnT9)
    return sum(
        np.exp(sum(coeff * factor for coeff, factor in zip(coeffs, factors)))
        for coeffs in fit_sets
    )
def he4_aag_c12(tf):
    """Rate for he4 + he4 + he4 --> c12 (triple-alpha).

    Sum of three ReacLib-style fit terms; a zero coefficient marks a
    factor absent from the original fit (adding 0.0 is exact).
    """
    fit_sets = [
        # fy05n
        (-0.971052, 0.0, -37.06, 29.3493, -115.507, -10.0, -1.33333),
        # fy05r
        (-24.3505, -4.12656, -13.49, 21.4259, -1.34769, 0.0879816, -13.1653),
        # fy05r
        (-11.7884, -1.02446, -23.57, 20.4886, -12.9882, -20.0, -2.16667),
    ]
    factors = (1.0, tf.T9i, tf.T913i, tf.T913, tf.T9, tf.T953, tf.lnT9)
    return sum(
        np.exp(sum(coeff * factor for coeff, factor in zip(coeffs, factors)))
        for coeffs in fit_sets
    )
def rhs(t, Y, rho, T):
    """Right-hand side dY/dt for the triple-alpha reaction network.

    t   -- time (unused; kept for the ODE-integrator signature)
    Y   -- molar abundances [Y(he4), Y(c12)]
    rho -- density
    T   -- temperature
    """
    tf = reaclib.Tfactors(T)
    lambda_c12_gaa_he4 = c12_gaa_he4(tf)
    lambda_he4_aag_c12 = he4_aag_c12(tf)

    # Forward: 3 he4 -> c12 (1/6 = symmetry factor for 3 identical nuclei);
    # reverse: c12 -> 3 he4.
    forward = 0.166666666667*rho**2*Y[ihe4]**3*lambda_he4_aag_c12
    reverse = Y[ic12]*lambda_c12_gaa_he4

    dYdt = np.zeros((nnuc), dtype=np.float64)
    # Three alpha particles are destroyed/created per c12 created/destroyed.
    dYdt[ihe4] = -3*forward + 3*reverse
    dYdt[ic12] = forward - reverse
    return dYdt
| zingale/pyreaclib | test/standard/triple-alpha/triple-alpha_rhs.py | Python | bsd-3-clause | 1,755 |
__version__ = "2.4.2"
| explosion/srsly | srsly/about.py | Python | mit | 22 |
# Time: O(n)
# Space: O(10) = O(1)
# You are playing the following Bulls and Cows game with your friend:
# You write a 4-digit secret number and ask your friend to guess it,
# each time your friend guesses a number, you give a hint, the hint
# tells your friend how many digits are in the correct positions
# (called "bulls") and how many digits are in the wrong positions
# (called "cows"), your friend will use those hints to find out the
# secret number.
#
# For example:
#
# Secret number: 1807
# Friend's guess: 7810
# Hint: 1 bull and 3 cows. (The bull is 8, the cows are 0, 1 and 7.)
# According to Wikipedia: "Bulls and Cows (also known as Cows and Bulls
# or Pigs and Bulls or Bulls and Cleots) is an old code-breaking mind or
# paper and pencil game for two or more players, predating the similar
# commercially marketed board game Mastermind. The numerical version of
# the game is usually played with 4 digits, but can also be played with
# 3 or any other number of digits."
#
# Write a function to return a hint according to the secret number and
# friend's guess, use A to indicate the bulls and B to indicate the cows,
# in the above example, your function should return 1A3B.
#
# You may assume that the secret number and your friend's guess only contain
# digits, and their lengths are always equal.
#
import operator
# One pass solution.
from collections import defaultdict, Counter
from itertools import izip, imap
class Solution(object):
    def getHint(self, secret, guess):
        """
        :type secret: str
        :type guess: str
        :rtype: str

        One pass.  Exact positional matches are bulls.  For each mismatch,
        try to pair the guess digit with a previously-unmatched secret digit
        (and vice versa); unmatched digits are banked in the lookup tables
        for later pairing.  Each successful pairing is a cow.
        """
        A, B = 0, 0
        s_lookup, g_lookup = defaultdict(int), defaultdict(int)
        # zip instead of the Python-2-only itertools.izip: identical
        # iteration behavior on both Python 2 and Python 3.
        for s, g in zip(secret, guess):
            if s == g:
                A += 1
            else:
                # Pair this guess digit with a banked secret digit, if any.
                if s_lookup[g]:
                    s_lookup[g] -= 1
                    B += 1
                else:
                    g_lookup[g] += 1
                # Pair this secret digit with a banked guess digit, if any.
                if g_lookup[s]:
                    g_lookup[s] -= 1
                    B += 1
                else:
                    s_lookup[s] += 1
        return "%dA%dB" % (A, B)
# Two pass solution.
class Solution2(object):
    def getHint(self, secret, guess):
        """
        :type secret: str
        :type guess: str
        :rtype: str

        Two passes.  Bulls are positional equalities; the Counter
        intersection counts digits common to both strings (bulls + cows),
        so subtracting the bulls leaves the cows.
        """
        # map instead of the Python-2-only itertools.imap: same sum on
        # both Python 2 and Python 3.
        A = sum(map(operator.eq, secret, guess))
        B = sum((Counter(secret) & Counter(guess)).values()) - A
        return "%dA%dB" % (A, B)
| kamyu104/LeetCode | Python/bulls-and-cows.py | Python | mit | 2,461 |
from serverdensity.wrapper.jsonobject import JsonObject
class ServiceStatus(JsonObject):
    """Wrapper around the service-monitor status endpoints."""

    PATHS = {
        'overall': '/service-monitor/meta/{}',
        'location': '/service-monitor/last/{}'
    }

    def _validation(self, data):
        """Status endpoints are read-only, so there is nothing to validate."""
        pass

    def overall(self, _id, **kwargs):
        # Single status document for the service with the given id.
        url = self.PATHS['overall'].format(_id)
        return self.__class__(self.api.get(url=url, **kwargs))

    def location(self, _id, **kwargs):
        # Per-location results: the API returns a list of documents.
        url = self.PATHS['location'].format(_id)
        response = self.api.get(url=url, **kwargs)
        return [self.__class__(entry) for entry in response]
| serverdensity/sd-python-wrapper | serverdensity/wrapper/service_status.py | Python | mit | 636 |
# Copyright 2008 Divmod, Inc. See LICENSE file for details
"""
Tests for L{xmantissa.people}.
"""
from __future__ import division
import warnings
from string import lowercase
from twisted.python.reflect import qual
from twisted.python.filepath import FilePath
from twisted.trial import unittest
from formless import nameToLabel
from nevow.tags import div, slot
from nevow.flat import flatten
from nevow.athena import expose, LiveElement
from nevow.page import renderer, Element
from nevow.testutil import FakeRequest
from nevow.taglibrary import tabbedPane
from nevow import context
from epsilon import extime
from epsilon.extime import Time
from epsilon.structlike import record
from epsilon.hotfix import require
require('twisted', 'trial_assertwarns')
from axiom.store import Store, AtomicFile
from axiom.dependency import installOn
from axiom.item import Item
from axiom.attributes import text, AND
from axiom.errors import DeletionDisallowed
from axiom import tags
from axiom.userbase import LoginSystem
from axiom.plugins.axiom_plugins import Create
from axiom.plugins.mantissacmd import Mantissa
from xmantissa.test.rendertools import renderLiveFragment, TagTestingMixin
from xmantissa.scrolltable import UnsortableColumn, ScrollingElement
from xmantissa.offering import installOffering
from xmantissa import people
from xmantissa.people import (
Organizer, Person, EmailAddress, AddPersonFragment, ImportPeopleWidget,
Mugshot, PersonDetailFragment, PhoneNumber, PhoneNumberContactType,
ReadOnlyPhoneNumberView, PersonScrollingFragment, OrganizerFragment,
EditPersonView, BaseContactType, EmailContactType, _normalizeWhitespace,
PostalAddress, PostalContactType, VIPPersonContactType, _PersonVIPStatus,
getPersonURL, _stringifyKeys, makeThumbnail, _descriptiveIdentifier,
ReadOnlyContactInfoView, PersonSummaryView, MugshotUploadForm,
ORGANIZER_VIEW_STATES, MugshotResource, Notes, NotesContactType,
ContactGroup, AllPeopleFilter, VIPPeopleFilter, TaggedPeopleFilter,
MugshotURLColumn, _objectToName, ContactInfoOrganizerPlugin,
PersonPluginView, _ElementWrapper, _organizerPluginName,
SimpleReadOnlyView)
from xmantissa.webapp import PrivateApplication
from xmantissa.liveform import (
TEXT_INPUT, InputError, Parameter, LiveForm, ListChangeParameter,
ListChanges, CreateObject, EditObject, FormParameter, ChoiceParameter,
TEXTAREA_INPUT)
from xmantissa.ixmantissa import (
IOrganizerPlugin, IContactType, IWebTranslator, IPeopleFilter, IColumn)
from xmantissa.signup import UserInfo
from xmantissa.test.peopleutil import (
PeopleFilterTestMixin, StubContactType, StubOrganizerPlugin,
StubOrganizer, StubPerson, StubTranslator)
from xmantissa.plugins.baseoff import baseOffering
# The number of non-plugin IContactType implementations provided by Mantissa.
builtinContactTypeCount = 5

# The number of non-plugin IPeopleFilter implementations provided by Mantissa.
builtinPeopleFilterCount = 2
class AllPeopleFilterTests(PeopleFilterTestMixin, unittest.TestCase):
    """
    Tests for L{AllPeopleFilter}.
    """
    # peopleFilterClass/peopleFilterName drive the shared assertions in
    # PeopleFilterTestMixin.
    peopleFilterClass = AllPeopleFilter
    peopleFilterName = 'All'

    def test_queryComparison(self):
        """
        L{AllPeopleFilter}'s query comparison should include all people.
        """
        # A None comparison imposes no restriction, i.e. matches everyone.
        self.assertIdentical(
            self.peopleFilterClass().getPeopleQueryComparison(Store()),
            None)
class VIPPeopleFilterTests(PeopleFilterTestMixin, unittest.TestCase):
    """
    Tests for L{VIPPeopleFilter}.
    """
    peopleFilterClass = VIPPeopleFilter
    peopleFilterName = 'VIP'

    def test_queryComparison(self):
        """
        L{VIPPeopleFilter}'s query comparison should include only VIP people.
        """
        # assertComparisonEquals is provided by PeopleFilterTestMixin.
        self.assertComparisonEquals(Person.vip == True)
class TaggedPeopleFilterTests(unittest.TestCase):
    """
    Tests for L{TaggedPeopleFilter}.
    """
    # this TestCase doesn't inherit from PeopleFilterTestMixin because of the
    # constructor argument and more complicated query.
    def test_implementsInterface(self):
        """
        L{TaggedPeopleFilter} should provide L{IPeopleFilter}.
        """
        self.assertTrue(
            IPeopleFilter.providedBy(TaggedPeopleFilter(u'tag')))

    def test_filterName(self):
        """
        Our L{TaggedPeopleFilter}'s I{filterName} should be the tag passed to
        its constructor.
        """
        self.assertEqual(
            TaggedPeopleFilter(u'test_filterName').filterName,
            u'test_filterName')

    def test_queryComparison(self):
        """
        L{TaggedPeopleFilter}'s query comparison should include only people
        who have had a certain tag applied to them.
        """
        # Join Tag to Person via storeID and restrict to the given tag name.
        actualComparison = TaggedPeopleFilter(
            u'test_queryOrdering').getPeopleQueryComparison(Store())
        expectedComparison = AND(
            tags.Tag.object == Person.storeID,
            tags.Tag.name == u'test_queryOrdering')
        # none of the Axiom query objects have meaningful equality
        # comparisons, but their string representations are just as good to
        # compare.
        self.assertEqual(
            str(actualComparison), str(expectedComparison))
def emptyMantissaSiteStore():
    """
    Create and return a site store with the base mantissa offering installed
    on it.
    """
    siteStore = Store()
    installOffering(siteStore, baseOffering, None)
    return siteStore
def emptyMantissaUserStore():
    """
    Create a site store with the base mantissa offering installed on it and
    return an empty store whose parent is that site store.
    """
    userStore = Store()
    userStore.parent = emptyMantissaSiteStore()
    return userStore
class PeopleUtilitiesTestCase(unittest.TestCase):
    """
    Tests for module-level utility functions in L{xmantissa.people}.
    """
    def test_stringifyKeys(self):
        """
        Verify that L{_stringifyKeys} returns a dictionary which is the same
        as the input except for having C{str} keys.
        """
        input = {u'a': u'b', u'b': u'c'}
        output = _stringifyKeys(input)
        self.assertEqual(len(output), 2)
        keys = output.keys()
        self.failUnless(isinstance(keys[0], str))
        self.failUnless(isinstance(keys[1], str))
        self.assertEqual(sorted(keys), ['a', 'b'])
        # Values must be untouched: still unicode, still the same objects.
        self.failUnless(isinstance(output['a'], unicode))
        self.failUnless(isinstance(output['b'], unicode))
        self.assertEqual(output['a'], u'b')
        self.assertEqual(output['b'], u'c')

    def _makeThumbnailPairs(self, inputSizes, outputSize):
        """
        Generate a collection of L{makeThumbnail} input/output image pairs in
        various formats, for the given input sizes.
        """
        # The whole generator (and therefore any test that drains it) is
        # skipped when PIL is not importable.
        try:
            from PIL import Image
        except ImportError:
            raise unittest.SkipTest('PIL is not available')
        formatsToModes = {
            'JPEG': ['L', 'RGB'],
            'PNG': ['1', 'L', 'P', 'RGB', 'RGBA'],
        }
        modesToWhite = {
            '1': 1,
            'L': 0xFF,
            'P': 0xFF,
            'RGB': (0xFF, 0xFF, 0xFF),
            'RGBA': (0xFF, 0xFF, 0xFF, 0xFF),
        }
        for format in formatsToModes:
            for mode in formatsToModes[format]:
                for inputSize in inputSizes:
                    # 'cause' labels assertion failures with the image variant.
                    cause = ('Image.new(%r, %r) via %s'
                             % (mode, inputSize, format))
                    (inFile, outFile) = (self.mktemp(), self.mktemp())
                    # Input image...
                    image = Image.new(mode, inputSize)
                    # Plot pixels along the diagonal to provoke aliasing.
                    for i in xrange(min(inputSize)):
                        image.putpixel((i, i), modesToWhite[mode])
                    image.save(file(inFile, 'w'), format)
                    self.assertEqual(Image.open(inFile).mode, mode, cause)
                    untouchedInput = file(inFile).read()
                    # Output image...
                    makeThumbnail(file(inFile), file(outFile, 'w'),
                                  outputSize, format)
                    # makeThumbnail must not modify its input file.
                    self.assertEqual(file(inFile).read(), untouchedInput, cause)
                    yield (Image.open(inFile), Image.open(outFile), cause)

    def test_makeThumbnail(self):
        """
        L{makeThumbnail} should scale images, preserving their aspect ratio, and
        expanding their color space if necessary.
        """
        # Only sizes with at least one dimension over the 60px thumbnail
        # bound, so every pair actually requires scaling.
        sizes = [(x, y) for x in [30, 60, 120]
                        for y in [30, 60, 120]
                        if 60 < max(x, y)]
        for (input, output, cause) in self._makeThumbnailPairs(sizes, 60):
            (x1, y1) = input.size
            (x2, y2) = output.size
            self.assertEquals(max(x2, y2), 60, cause)
            # Aspect ratio preserved (true division via __future__ import).
            self.assertEquals(x2/y2, x1/y1, cause)
            # 1-bit and palette modes are expanded to L and RGB respectively.
            expectedMode = {'1': 'L', 'P': 'RGB'}.get(input.mode, input.mode)
            self.assertEquals(output.mode, expectedMode, cause)
            self.assertEquals(output.format, input.format, cause)

    def test_makeThumbnailNoResize(self):
        """
        L{makeThumbnail} should leave images under thumbnail size unchanged.
        """
        sizes = [(x, y) for x in [30, 60]
                        for y in [30, 60]]
        for (input, output, cause) in self._makeThumbnailPairs(sizes, 60):
            self.assertEquals(output.size, input.size, cause)
            self.assertEquals(output.mode, input.mode, cause)
            self.assertEquals(output.format, input.format, cause)

    def test_objectToName(self):
        """
        L{_objectToName} should be able to figure out a helpful name more
        readable than the class name of an object.
        """
        class MyNeatClass:
            pass
        self.assertEqual(_objectToName(MyNeatClass()), u'My Neat Class')

    def test_objectToNameObject(self):
        """
        Similar to L{test_objectToName}, but for classes derived from
        C{object}.
        """
        class MyNeatClass(object):
            pass
        self.assertEqual(_objectToName(MyNeatClass()), u'My Neat Class')

    def test_descriptiveIdentifier(self):
        """
        Verify that L{_descriptiveIdentifier} returns the result of the
        C{descriptiveIdentifier} method if its passed an object that defines
        one.
        """
        identifier = u'lol identifier'
        class MyContactType:
            def descriptiveIdentifier(self):
                return identifier
        self.assertEqual(
            _descriptiveIdentifier(MyContactType()), identifier)

    def test_noDescriptiveIdentifier(self):
        """
        Verify that L{_descriptiveIdentifier} returns a sensible identifier
        based on the class name of the object it is passed, and issues a
        warning, if the object doesn't implement C{descriptiveIdentifier}.
        """
        class MyContactType:
            pass
        self.assertEqual(
            _descriptiveIdentifier(MyContactType()),
            _objectToName(MyContactType()))
        # assertWarns matches the exact warning text, category and origin.
        self.assertWarns(
            PendingDeprecationWarning,
            "IContactType now has the 'descriptiveIdentifier'"
            " method, xmantissa.test.test_people.MyContactType"
            " did not implement it",
            people.__file__,
            lambda: _descriptiveIdentifier(MyContactType()))

    def test_organizerPluginName(self):
        """
        L{_organizerPluginName} should return the value of the plugin's
        I{name} attribute if it is set.
        """
        _name = u'organizer plugin name!'
        class OrganizerPlugin:
            name = _name
        self.assertEqual(_organizerPluginName(OrganizerPlugin()), _name)

    def test_noOrganizerPluginName(self):
        """
        L{_organizerPluginName} should figure out a reasonable default, and
        issue a warning if the given plugin doesn't define a I{name}
        attribute.
        """
        class NoOrganizerPluginName:
            pass
        self.assertEqual(
            _organizerPluginName(NoOrganizerPluginName()),
            _objectToName(NoOrganizerPluginName()))
        self.assertWarns(
            PendingDeprecationWarning,
            "IOrganizerPlugin now has the 'name' attribute and"
            " xmantissa.test.test_people.NoOrganizerPluginName"
            " does not define it",
            people.__file__,
            lambda: _organizerPluginName(NoOrganizerPluginName()))
class MugshotUploadFormTestCase(unittest.TestCase):
    """
    Tests for L{MugshotUploadForm}.
    """
    def setUp(self):
        """
        Construct a L{Person}, suitable for passing to L{MugshotUploadForm}'s
        constructor.
        """
        user = emptyMantissaUserStore()
        # can't use mock objects because we need ITemplateNameResolver to
        # render MugshotUploadForm
        self.organizer = Organizer(store=user)
        installOn(self.organizer, user)
        self.person = Person(store=user, organizer=self.organizer)

    def test_callback(self):
        """
        Verify that L{MugshotUploadForm} calls the supplied callback after a
        successful POST.
        """
        cbGotMugshotArgs = []
        def cbGotMugshot(contentType, file):
            cbGotMugshotArgs.append((contentType, file))
        form = MugshotUploadForm(self.person, cbGotMugshot)
        theContentType = 'image/tiff'
        theFile = object()
        # Minimal stand-in for the uploaded form field object.
        class FakeUploadField:
            type = theContentType
            file = theFile
        request = FakeRequest()
        request.method = 'POST'
        request.fields = {'uploaddata': FakeUploadField}
        ctx = context.PageContext(
            tag=form, parent=context.RequestContext(
                tag=request))
        form.renderHTTP(ctx)
        # The callback should see a unicode content type and the file object.
        self.assertEqual(
            cbGotMugshotArgs,
            [(u'image/tiff', theFile)])

    def test_smallerMugshotURL(self):
        """
        L{MugshotUploadForm.render_smallerMugshotURL} should return the
        correct URL.
        """
        form = MugshotUploadForm(self.person, None)
        self.assertEqual(
            form.render_smallerMugshotURL(None, None),
            self.organizer.linkToPerson(self.person) + '/mugshot/smaller')
class MugshotTestCase(unittest.TestCase):
    """
    Tests for L{Mugshot}.
    """
    def _doFromFileTest(self, store, person):
        """
        Verify that the L{Mugshot} returned from L{Mugshot.fromFile} has the
        correct attribute values.
        """
        newBody = store.newFilePath('newBody')
        newSmallerBody = store.newFilePath('newSmallerBody')
        newFormat = u'TIFF'
        # Stub out Mugshot.makeThumbnail so no real image processing happens;
        # it just hands back one of the two prepared paths.
        def _makeThumbnail(cls, inputFile, person, format, smaller):
            if smaller:
                return newSmallerBody
            return newBody
        originalMakeThumbnail = Mugshot.makeThumbnail
        try:
            Mugshot.makeThumbnail = classmethod(_makeThumbnail)
            mugshot = Mugshot.fromFile(
                person, file(self.mktemp(), 'w'), newFormat)
        finally:
            # Always restore the real classmethod, even on failure.
            Mugshot.makeThumbnail = originalMakeThumbnail
        # and no others should have been created
        self.assertEqual(store.count(Mugshot), 1)
        # the item should have been updated with the paths returned from our
        # fake Mugshot.makeThumbnail()
        self.assertEqual(mugshot.body, newBody)
        self.assertEqual(mugshot.smallerBody, newSmallerBody)
        # the 'person' attribute should be unchanged
        self.assertIdentical(mugshot.person, person)
        # the format attribute should be updated
        self.assertEqual(mugshot.type, u'image/' + newFormat)
        return mugshot

    def test_fromFileExistingMugshot(self):
        """
        Verify that L{Mugshot.fromFile} will update the attributes on an
        existing L{Mugshot} item for the given person, if one exists.
        """
        store = Store(filesdir=self.mktemp())
        person = Person(store=store)
        mugshot = Mugshot(
            store=store,
            type=u'JPEG',
            body=store.newFilePath('body'),
            smallerBody=store.newFilePath('smallerBody'),
            person=person)
        # fromFile must return (and update) the pre-existing item.
        self.assertIdentical(
            self._doFromFileTest(store, person),
            mugshot)

    def test_fromFileNoMugshot(self):
        """
        Verify that L{Mugshot.fromFile} creates a new L{Mugshot} for the given
        person, if one does not exist.
        """
        store = Store(filesdir=self.mktemp())
        person = Person(store=store)
        self._doFromFileTest(store, person)

    def _doMakeThumbnailTest(self, smaller):
        """
        Verify that L{Mugshot.makeThumbnail} passes the correct arguments to
        L{makeThumbnail}, when passed the given value for the C{smaller}
        argument.
        """
        makeThumbnailCalls = []
        def _makeThumbnail(
            inputFile, outputFile, thumbnailSize, outputFormat='jpeg'):
            makeThumbnailCalls.append((
                inputFile, outputFile, thumbnailSize, outputFormat))
        store = Store(filesdir=self.mktemp())
        person = Person(store=store)
        inputFile = file(self.mktemp(), 'w')
        inputFormat = 'JPEG'
        # Patch the module-level makeThumbnail and restore it afterwards.
        originalMakeThumbnail = people.makeThumbnail
        try:
            people.makeThumbnail = _makeThumbnail
            thumbnailPath = Mugshot.makeThumbnail(
                inputFile, person, inputFormat, smaller)
        finally:
            people.makeThumbnail = originalMakeThumbnail
        self.assertEqual(len(makeThumbnailCalls), 1)
        (gotInputFile, outputFile, thumbnailSize, outputFormat) = (
            makeThumbnailCalls[0])
        self.assertEqual(gotInputFile, inputFile)
        # The requested thumbnail size depends on the 'smaller' flag.
        if smaller:
            self.assertEqual(thumbnailSize, Mugshot.smallerSize)
        else:
            self.assertEqual(thumbnailSize, Mugshot.size)
        self.assertEqual(outputFormat, inputFormat)
        self.assertTrue(isinstance(outputFile, AtomicFile))
        # it should return the right path
        self.assertEqual(outputFile.finalpath, thumbnailPath)

    def test_makeThumbnail(self):
        """
        Verify that L{Mugshot.makeThumbnail} passes the correct arguments to
        L{makeThumbnail}.
        """
        self._doMakeThumbnailTest(smaller=False)

    def test_makeThumbnailSmaller(self):
        """
        Like L{test_makeThumbnail}, but for when the method is asked to make a
        smaller-sized thumbnail.
        """
        self._doMakeThumbnailTest(smaller=True)

    def test_placeholderForPerson(self):
        """
        L{Mugshot.placeholderForPerson} should return a correctly-initialized
        L{Mugshot} for the given person.
        """
        store = Store(self.mktemp())
        organizer = Organizer(store=store)
        installOn(organizer, store)
        person = organizer.createPerson(u'Alice')
        mugshot = Mugshot.placeholderForPerson(person)
        self.assertTrue(isinstance(mugshot, Mugshot))
        # The placeholder is not stored; it only points at static images.
        self.assertIdentical(mugshot.store, None)
        self.assertIdentical(mugshot.person, person)
        self.assertEqual(mugshot.type, u'image/png')
        imageDir = FilePath(people.__file__).parent().child(
            'static').child('images')
        self.assertEqual(
            mugshot.body, imageDir.child('mugshot-placeholder.png'))
        self.assertEqual(
            mugshot.smallerBody,
            imageDir.child('mugshot-placeholder-smaller.png'))
class WhitespaceNormalizationTests(unittest.TestCase):
    """
    Tests for L{_normalizeWhitespace}.
    """
    def test_empty(self):
        """
        An empty string should normalize to an empty string.
        """
        self.assertEqual(_normalizeWhitespace(u''), u'')
    def test_spaces(self):
        """
        A string made up entirely of whitespace should normalize to an empty
        string.
        """
        self.assertEqual(_normalizeWhitespace(u' \t\v'), u'')
    def test_leadingSpace(self):
        """
        Leading whitespace should be stripped from the result.
        """
        self.assertEqual(_normalizeWhitespace(u' x'), u'x')
    def test_trailingSpace(self):
        """
        Trailing whitespace should be stripped from the result.
        """
        self.assertEqual(_normalizeWhitespace(u'x '), u'x')
    def test_multipleSpace(self):
        """
        Runs of contiguous whitespace characters should collapse to a single
        space character.
        """
        self.assertEqual(_normalizeWhitespace(u'x  x'), u'x x')
class BaseContactTests(unittest.TestCase):
    """
    Tests for the utility base-class L{BaseContactType}.
    """
    def test_uniqueIdentifier(self):
        """
        L{BaseContactType.uniqueIdentifier} should return a unicode string
        giving the fully-qualified Python name of the class of the instance
        it is called on.
        """
        class Dummy(BaseContactType):
            pass
        result = Dummy().uniqueIdentifier()
        self.assertTrue(isinstance(result, unicode))
        self.assertEqual(result, '%s.%s' % (__name__, Dummy.__name__))
    def test_getEditFormForPerson(self):
        """
        L{BaseContactType.getEditFormForPerson} should return C{None}.
        """
        class Stub(BaseContactType):
            def getParameters(self, person):
                return [object()]
        form = Stub().getEditFormForPerson(Person())
        self.assertIdentical(form, None)
    def test_getContactGroup(self):
        """
        L{BaseContactType.getContactGroup} should return C{None}.
        """
        group = BaseContactType().getContactGroup(object())
        self.assertIdentical(group, None)
class EmailAddressTests(unittest.TestCase):
    """
    Tests for L{EmailAddress}.
    """
    def test_deletedWithPerson(self):
        """
        Deleting a L{Person} should also delete the L{EmailAddress} items
        associated with it.
        """
        store = Store()
        owner = Person(store=store)
        EmailAddress(
            store=store, person=owner, address=u'testuser@example.com')
        owner.deleteFromStore()
        self.assertEqual(store.query(EmailAddress).count(), 0)
class PostalAddressTests(unittest.TestCase):
    """
    Tests for L{PostalAddress}.
    """
    def test_deletedWithPerson(self):
        """
        Deleting a L{Person} should also delete the L{PostalAddress} items
        associated with it.
        """
        store = Store()
        owner = Person(store=store)
        PostalAddress(
            store=store, person=owner, address=u'123 Street Rd')
        owner.deleteFromStore()
        self.assertEqual(store.query(PostalAddress).count(), 0)
class ContactTestsMixin(object):
    """
    Define tests common to different L{IContactType} implementations.
    Mix this in to a L{unittest.TestCase} and bind C{self.contactType} to the
    L{IContactType} provider in C{setUp}.
    """
    def test_providesContactType(self):
        """
        C{self.contactType} should provide L{IContactType}.
        """
        self.assertTrue(IContactType.providedBy(self.contactType))
        # I would really like to use verifyObject here.  However, the
        # **parameters in IContactType.editContactItem causes it to fail for
        # reasonably conformant implementations.
        # self.assertTrue(verifyObject(IContactType, self.contactType))
    def test_organizerIncludesIt(self):
        """
        L{Organizer.getContactTypes} should include an instance of our contact
        type in its return value.
        """
        organizer = Organizer(store=self.store)
        matches = [
            candidate for candidate in organizer.getContactTypes()
            if isinstance(candidate, self.contactType.__class__)]
        self.assertTrue(matches)
class EmailContactTests(unittest.TestCase, ContactTestsMixin):
    """
    Tests for the email address parameters defined by L{EmailContactType}.
    """
    def setUp(self):
        """
        Create a store and an L{EmailContactType} for each test.
        ContactTestsMixin reads C{self.store}.
        """
        self.store = Store()
        self.contactType = EmailContactType(self.store)
    def test_descriptiveIdentifier(self):
        """
        L{EmailContactType.descriptiveIdentifier} should be "Email Address".
        """
        self.assertEqual(
            self.contactType.descriptiveIdentifier(), u'Email Address')
    def test_allowsMultipleContactItems(self):
        """
        L{EmailContactType.allowMultipleContactItems} should be C{True}.
        """
        self.assertTrue(self.contactType.allowMultipleContactItems)
    def test_createContactItem(self):
        """
        L{EmailContactType.createContactItem} should create an L{EmailAddress}
        instance with the supplied values.
        """
        person = Person(store=self.store)
        contactItem = self.contactType.createContactItem(
            person, email=u'user@example.com')
        # The created item must be the only EmailAddress in the store.
        emails = list(self.store.query(EmailAddress))
        self.assertEqual(emails, [contactItem])
        self.assertEqual(contactItem.address, u'user@example.com')
        self.assertIdentical(contactItem.person, person)
    def test_createContactItemWithEmptyString(self):
        """
        L{EmailContactType.createContactItem} shouldn't create an
        L{EmailAddress} instance if it is given an empty string for the
        address.
        """
        person = Person(store=self.store)
        contactItem = self.contactType.createContactItem(
            person, email=u'')
        emails = list(self.store.query(EmailAddress))
        self.assertIdentical(contactItem, None)
        self.assertEqual(len(emails), 0)
    def test_createContactItemRejectsDuplicate(self):
        """
        L{EmailContactType.createContactItem} should raise an exception if it
        is given an email address already associated with an existing
        L{EmailAddress} item.
        """
        email = u'user@example.com'
        person = Person(store=self.store)
        # This pre-existing item is what makes the second creation a
        # duplicate; the local name just keeps the reference readable.
        emailAddress = EmailAddress(
            store=self.store, person=person, address=email)
        self.assertRaises(
            ValueError,
            self.contactType.createContactItem,
            person, email=email)
    def test_editContactItem(self):
        """
        L{EmailContactType.editContactItem} should update the address field of
        the L{EmailAddress} it is passed.
        """
        person = Person(store=self.store)
        emailAddress = EmailAddress(
            store=self.store, person=person, address=u'wrong')
        self.contactType.editContactItem(
            emailAddress, email=u'user@example.com')
        self.assertEqual(emailAddress.address, u'user@example.com')
    def test_editContactItemAcceptsSame(self):
        """
        L{EmailContactType.editContactItem} should update the address field of
        the L{EmailAddress} it is passed, even if it is passed the same value
        which is already set on the item.
        """
        address = u'user@example.com'
        person = Person(store=self.store)
        emailAddress = EmailAddress(
            store=self.store, person=person, address=address)
        # A no-op edit must not be misdetected as a duplicate address.
        self.contactType.editContactItem(
            emailAddress, email=address)
        self.assertEqual(emailAddress.address, address)
    def test_editContactItemRejectsDuplicate(self):
        """
        L{EmailContactType.editContactItem} should raise an exception if it is
        given an email address already associated with a different
        L{EmailAddress} item.
        """
        person = Person(store=self.store)
        existing = EmailAddress(
            store=self.store, person=person, address=u'user@example.com')
        editing = EmailAddress(
            store=self.store, person=person, address=u'user@example.net')
        self.assertRaises(
            ValueError,
            self.contactType.editContactItem,
            editing, email=existing.address)
        # It should be possible to set an EmailAddress's address attribute to
        # its current value, though.
        address = editing.address
        self.contactType.editContactItem(editing, email=address)
        self.assertEqual(editing.address, address)
    def test_getParameters(self):
        """
        L{EmailContactType.getParameters} should return a C{list} of
        L{LiveForm} parameters for an email address.
        """
        # Passing None means "no existing contact item", so the default is
        # empty.
        (email,) = self.contactType.getParameters(None)
        self.assertEqual(email.name, 'email')
        self.assertEqual(email.default, '')
    def test_getParametersWithDefaults(self):
        """
        L{EmailContactType.getParameters} should return a C{list} of
        L{LiveForm} parameters with default values supplied from the
        L{EmailAddress} item it is passed.
        """
        person = Person(store=self.store)
        (email,) = self.contactType.getParameters(
            EmailAddress(store=self.store, person=person,
                         address=u'user@example.com'))
        self.assertEqual(email.name, 'email')
        self.assertEqual(email.default, u'user@example.com')
    def test_coerce(self):
        """
        L{EmailContactType.coerce} should return a dictionary mapping
        C{'email'} to the email address passed to it.
        """
        self.assertEqual(
            self.contactType.coerce(email=u'user@example.com'),
            {'email': u'user@example.com'})
    def test_getReadOnlyView(self):
        """
        L{EmailContactType.getReadOnlyView} should return a
        L{SimpleReadOnlyView} wrapped around the given contact item.
        """
        contact = EmailAddress(address=u'', person=Person())
        view = self.contactType.getReadOnlyView(contact)
        self.assertTrue(isinstance(view, SimpleReadOnlyView))
        self.assertIdentical(view.attribute, EmailAddress.address)
        self.assertIdentical(view.contactItem, contact)
class VIPPersonContactTypeTestCase(unittest.TestCase):
    """
    Tests for L{VIPPersonContactType}.
    """
    def setUp(self):
        """
        Create a L{Person} and a L{VIPPersonContactType}.
        """
        # The person starts out as a non-VIP so tests can observe the flag
        # being flipped in both directions.
        self.person = Person(vip=False)
        self.contactType = VIPPersonContactType()
    def test_providesContactType(self):
        """
        L{VIPPersonContactType} should provide L{IContactType}.
        """
        self.assertTrue(IContactType.providedBy(self.contactType))
    def test_createContactItem(self):
        """
        L{VIPPersonContactType.createContactItem} should set the C{vip}
        attribute of the given person to the specified value, and return a
        L{_PersonVIPStatus} wrapping the person.
        """
        contactItem = self.contactType.createContactItem(
            self.person, True)
        self.assertTrue(isinstance(contactItem, _PersonVIPStatus))
        self.assertIdentical(contactItem.person, self.person)
        self.assertTrue(self.person.vip)
        # Creating again with False must flip the flag back off.
        contactItem = self.contactType.createContactItem(
            self.person, False)
        self.assertTrue(isinstance(contactItem, _PersonVIPStatus))
        self.assertIdentical(contactItem.person, self.person)
        self.assertFalse(self.person.vip)
    def test_editContactItem(self):
        """
        L{VIPPersonContactType.editContactItem} should set the C{vip}
        attribute of the wrapped person to the specified value.
        """
        self.contactType.editContactItem(
            _PersonVIPStatus(self.person), True)
        self.assertTrue(self.person.vip)
        self.contactType.editContactItem(
            _PersonVIPStatus(self.person), False)
        self.assertFalse(self.person.vip)
    def test_getParametersNoPerson(self):
        """
        L{VIPPersonContactType.getParameters} should return a parameter with a
        default of C{False} when it's passed C{None}.
        """
        params = self.contactType.getParameters(None)
        self.assertEqual(len(params), 1)
        param = params[0]
        self.assertFalse(param.default)
    def test_getParametersPerson(self):
        """
        L{VIPPersonContactType.getParameters} should return a parameter with
        the correct default when it's passed a L{_PersonVIPStatus} wrapping a
        person.
        """
        params = self.contactType.getParameters(
            _PersonVIPStatus(self.person))
        self.assertEqual(len(params), 1)
        param = params[0]
        self.assertFalse(param.default)
        # The parameter default must track the person's current vip state.
        self.person.vip = True
        params = self.contactType.getParameters(
            _PersonVIPStatus(self.person))
        self.assertEqual(len(params), 1)
        param = params[0]
        self.assertTrue(param.default)
    def test_getReadOnlyView(self):
        """
        L{VIPPersonContactType.getReadOnlyView} should return something which
        flattens to the empty string.
        """
        view = self.contactType.getReadOnlyView(
            _PersonVIPStatus(self.person))
        self.assertEqual(flatten(view), '')
class PostalContactTests(unittest.TestCase, ContactTestsMixin):
    """
    Tests for snail-mail address contact information represented by
    L{PostalContactType}.
    """
    def setUp(self):
        """
        Create a L{Store}, L{PostalContactType}, and L{Person} for use by
        tests.
        """
        self.store = Store()
        self.person = Person(store=self.store)
        self.contactType = PostalContactType()
    def test_descriptiveIdentifier(self):
        """
        L{PostalContactType.descriptiveIdentifier} should be "Postal Address".
        """
        self.assertEqual(
            self.contactType.descriptiveIdentifier(), u'Postal Address')
    def test_allowsMultipleContactItems(self):
        """
        L{PostalContactType.allowMultipleContactItems} should be C{True}.
        """
        self.assertTrue(self.contactType.allowMultipleContactItems)
    def test_createContactItem(self):
        """
        L{PostalContactType.createContactItem} should create a L{PostalAddress}
        instance with the supplied values.
        """
        contactItem = self.contactType.createContactItem(
            self.person, address=u'123 Street Rd')
        # The created item must be the only PostalAddress in the store.
        addresses = list(self.store.query(PostalAddress))
        self.assertEqual(addresses, [contactItem])
        self.assertEqual(contactItem.address, u'123 Street Rd')
        self.assertIdentical(contactItem.person, self.person)
    def test_createContactItemWithEmptyString(self):
        """
        L{PostalContactType.createContactItem} shouldn't create a
        L{PostalAddress} instance if it is given an empty string for the
        address.
        """
        contactItem = self.contactType.createContactItem(
            self.person, address=u'')
        addresses = list(self.store.query(PostalAddress))
        self.assertIdentical(contactItem, None)
        self.assertEqual(len(addresses), 0)
    def test_editContactItem(self):
        """
        L{PostalContactType.editContactItem} should update the address field of
        the L{PostalAddress} it is passed.
        """
        postalAddress = PostalAddress(
            store=self.store, person=self.person, address=u'wrong')
        self.contactType.editContactItem(
            postalAddress, address=u'123 Street Rd')
        self.assertEqual(postalAddress.address, u'123 Street Rd')
    def test_getParameters(self):
        """
        L{PostalContactType.getParameters} should return a C{list} of
        L{LiveForm} parameters for a mailing address.
        """
        # Passing None means "no existing contact item", so the default is
        # empty.
        (address,) = self.contactType.getParameters(None)
        self.assertEqual(address.name, 'address')
        self.assertEqual(address.default, '')
    def test_getParametersWithDefaults(self):
        """
        L{PostalContactType.getParameters} should return a C{list} of
        L{LiveForm} parameters with default values supplied from the
        L{PostalAddress} item it is passed.
        """
        (address,) = self.contactType.getParameters(
            PostalAddress(store=self.store, person=self.person,
                          address=u'123 Street Rd'))
        self.assertEqual(address.name, 'address')
        self.assertEqual(address.default, u'123 Street Rd')
    def test_getContactItems(self):
        """
        L{PostalContactType.getContactItems} should return a C{list} of all
        the L{PostalAddress} instances associated with the specified person.
        """
        firstAddress = PostalAddress(
            store=self.store, person=self.person, address=u'123 Street Rd')
        secondAddress = PostalAddress(
            store=self.store, person=self.person, address=u'456 Street Rd')
        # An address belonging to a different person must be excluded from
        # the result.
        anotherPerson = Person(store=self.store)
        anotherAddress = PostalAddress(
            store=self.store, person=anotherPerson, address=u'789 Street Rd')
        self.assertEqual(
            list(self.contactType.getContactItems(self.person)),
            [firstAddress, secondAddress])
    def test_coerce(self):
        """
        L{PostalContactType.coerce} should return a dictionary mapping
        C{'address'} to the postal address passed to it.
        """
        self.assertEqual(
            self.contactType.coerce(address=u'123 Street Rd'),
            {'address': u'123 Street Rd'})
    def test_getReadOnlyView(self):
        """
        L{PostalContactType.getReadOnlyView} should return a
        L{SimpleReadOnlyView} wrapped around the given contact item.
        """
        contact = PostalAddress(address=u'', person=Person())
        view = self.contactType.getReadOnlyView(contact)
        self.assertTrue(isinstance(view, SimpleReadOnlyView))
        self.assertIdentical(view.contactItem, contact)
        self.assertIdentical(view.attribute, PostalAddress.address)
class PhoneNumberContactTypeTestCase(unittest.TestCase, ContactTestsMixin):
    """
    Tests for L{PhoneNumberContactType}.
    """
    def setUp(self):
        """
        Create a store, L{PhoneNumberContactType} and L{Person}.
        """
        self.store = Store()
        self.person = Person(store=self.store)
        self.contactType = PhoneNumberContactType()
    def test_descriptiveIdentifier(self):
        """
        L{PhoneNumberContactType.descriptiveIdentifier} should be "Phone
        Number".
        """
        self.assertEqual(
            self.contactType.descriptiveIdentifier(), u'Phone Number')
    def test_allowsMultipleContactItems(self):
        """
        L{PhoneNumberContactType.allowMultipleContactItems} should be C{True}.
        """
        self.assertTrue(self.contactType.allowMultipleContactItems)
    def test_createContactItem(self):
        """
        L{PhoneNumberContactType.createContactItem} should create a
        L{PhoneNumber} item with the supplied value.
        """
        contactItem = self.contactType.createContactItem(
            self.person,
            label=PhoneNumber.LABELS.HOME,
            number=u'123456')
        # The created item must be the only PhoneNumber in the store.
        numbers = list(self.store.query(PhoneNumber))
        self.assertEqual(numbers, [contactItem])
        self.assertEqual(
            contactItem.label, PhoneNumber.LABELS.HOME)
        self.assertEqual(contactItem.number, u'123456')
        self.assertIdentical(contactItem.person, self.person)
    def test_createContactItemWithEmptyString(self):
        """
        L{PhoneNumberContactType.createContactItem} shouldn't create an item
        if it's passed an empty number.
        """
        self.assertIdentical(
            self.contactType.createContactItem(
                self.person,
                label=PhoneNumber.LABELS.HOME,
                number=u''),
            None)
        self.assertEqual(self.store.query(PhoneNumber).count(), 0)
    def test_editContactItem(self):
        """
        L{PhoneNumberContactType.editContactItem} should update the I{number}
        and I{label} attributes of the given item.
        """
        contactItem = PhoneNumber(
            store=self.store,
            person=self.person,
            label=PhoneNumber.LABELS.HOME,
            number=u'123456')
        self.contactType.editContactItem(
            contactItem,
            label=PhoneNumber.LABELS.WORK,
            number=u'654321')
        self.assertEqual(
            contactItem.label, PhoneNumber.LABELS.WORK)
        self.assertEqual(contactItem.number, u'654321')
    def test_getParameters(self):
        """
        L{PhoneNumberContactType.getParameters} should return a list
        containing two parameters.
        """
        parameters = self.contactType.getParameters(None)
        self.assertEqual(len(parameters), 2)
        (labelParam, numberParam) = parameters
        # The label is a choice among all of the defined phone-number
        # labels; the number is a plain text input.
        self.assertTrue(isinstance(labelParam, ChoiceParameter))
        self.assertEqual(labelParam.name, 'label')
        self.assertEqual(
            [c.value for c in labelParam.choices],
            PhoneNumber.LABELS.ALL_LABELS)
        self.assertTrue(isinstance(numberParam, Parameter))
        self.assertEqual(numberParam.name, 'number')
        self.assertEqual(numberParam.default, '')
        self.assertEqual(numberParam.type, TEXT_INPUT)
    def test_getParametersWithDefault(self):
        """
        L{PhoneNumberContactType.getParameters} should correctly default the
        returned parameter if its passed a contact item.
        """
        contactItem = PhoneNumber(
            store=self.store,
            person=self.person,
            label=PhoneNumber.LABELS.HOME,
            number=u'123456')
        parameters = self.contactType.getParameters(contactItem)
        self.assertEqual(len(parameters), 2)
        (labelParam, numberParam) = parameters
        # Exactly the item's current label should be pre-selected in the
        # choice parameter.
        selectedOptions = []
        for choice in labelParam.choices:
            if choice.selected:
                selectedOptions.append(choice.value)
        self.assertEqual(selectedOptions, [contactItem.label])
        self.assertEqual(numberParam.default, contactItem.number)
    def test_getContactItems(self):
        """
        L{PhoneNumberContactType.getContactItems} should return only
        L{PhoneNumber} items associated with the given person.
        """
        otherPerson = Person(store=self.store)
        # This number belongs to someone else and must not appear in the
        # result.
        PhoneNumber(
            store=self.store, person=otherPerson, number=u'123455')
        expectedNumbers = [
            PhoneNumber(
                store=self.store, person=self.person, number=u'123456'),
            PhoneNumber(
                store=self.store, person=self.person, number=u'123457')]
        self.assertEqual(
            list(self.contactType.getContactItems(self.person)),
            expectedNumbers)
    def test_getReadOnlyView(self):
        """
        L{PhoneNumberContactType.getReadOnlyView} should return a
        correctly-initialized L{ReadOnlyPhoneNumberView}.
        """
        contactItem = PhoneNumber(
            store=self.store, person=self.person, number=u'123456')
        view = self.contactType.getReadOnlyView(contactItem)
        self.assertTrue(isinstance(view, ReadOnlyPhoneNumberView))
        self.assertIdentical(view.phoneNumber, contactItem)
class NotesContactTypeTestCase(unittest.TestCase, ContactTestsMixin):
    """
    Tests for L{NotesContactType}.
    """
    def setUp(self):
        """
        Create a store, L{NotesContactType} and L{Person}.
        """
        self.store = Store()
        self.person = Person(store=self.store)
        self.contactType = NotesContactType()
    def test_descriptiveIdentifier(self):
        """
        L{NotesContactType.descriptiveIdentifier} should be "Notes".
        """
        self.assertEqual(
            self.contactType.descriptiveIdentifier(), u'Notes')
    def test_allowsMultipleContactItems(self):
        """
        L{NotesContactType.allowMultipleContactItems} should be C{False}.
        """
        # Unlike emails/addresses/phone numbers, a person has at most one
        # Notes item.
        self.assertFalse(self.contactType.allowMultipleContactItems)
    def test_createContactItem(self):
        """
        L{NotesContactType.createContactItem} should create a
        L{Notes} item with the supplied value.
        """
        contactItem = self.contactType.createContactItem(
            self.person, notes=u'some notes')
        notes = list(self.store.query(Notes))
        self.assertEqual(notes, [contactItem])
        self.assertEqual(contactItem.notes, u'some notes')
        self.assertIdentical(contactItem.person, self.person)
    def test_createContactItemWithEmptyString(self):
        """
        L{NotesContactType.createContactItem} shouldn't create an item
        if it's passed an empty string.
        """
        self.assertIdentical(
            self.contactType.createContactItem(
                self.person, notes=u''),
            None)
        self.assertEqual(self.store.query(Notes).count(), 0)
    def test_editContactItem(self):
        """
        L{NotesContactType.editContactItem} should update the I{notes}
        attribute of the given item.
        """
        contactItem = Notes(
            store=self.store,
            person=self.person,
            notes=u'some notes')
        self.contactType.editContactItem(
            contactItem,
            notes=u'revised notes')
        self.assertEqual(contactItem.notes, u'revised notes')
    def test_getParameters(self):
        """
        L{NotesContactType.getParameters} should return a list
        containing a single parameter.
        """
        parameters = self.contactType.getParameters(None)
        self.assertEqual(len(parameters), 1)
        param = parameters[0]
        self.assertTrue(isinstance(param, Parameter))
        self.assertEqual(param.name, 'notes')
        self.assertEqual(param.default, '')
        # Notes are free-form multi-line text, hence a textarea.
        self.assertEqual(param.type, TEXTAREA_INPUT)
        self.assertEqual(param.label, u'Notes')
    def test_getParametersWithDefault(self):
        """
        L{NotesContactType.getParameters} should correctly default the
        returned parameter if it's passed a contact item.
        """
        contactItem = Notes(
            store=self.store,
            person=self.person,
            notes=u'some notes')
        parameters = self.contactType.getParameters(contactItem)
        self.assertEqual(len(parameters), 1)
        self.assertEqual(parameters[0].default, contactItem.notes)
    def test_getContactItems(self):
        """
        L{NotesContactType.getContactItems} should return only
        the L{Notes} item associated with the given person.
        """
        # Notes belonging to a different person must not appear in the
        # result.
        Notes(store=self.store,
              person=Person(store=self.store),
              notes=u'notes')
        expectedNotes = [
            Notes(store=self.store,
                  person=self.person,
                  notes=u'some notes')]
        self.assertEqual(
            list(self.contactType.getContactItems(self.person)),
            expectedNotes)
    def test_getContactItemsCreates(self):
        """
        L{NotesContactType.getContactItems} should create a L{Notes} item for
        the given person, if one does not exist.
        """
        # sanity check
        self.assertEqual(self.store.query(Notes).count(), 0)
        contactItems = self.contactType.getContactItems(self.person)
        self.assertEqual(len(contactItems), 1)
        self.assertEqual(contactItems, list(self.store.query(Notes)))
        # The implicitly-created Notes item starts out empty.
        self.assertEqual(contactItems[0].notes, u'')
        self.assertIdentical(contactItems[0].person, self.person)
    def test_getReadOnlyView(self):
        """
        L{NotesContactType.getReadOnlyView} should return a
        correctly-initialized L{SimpleReadOnlyView}.
        """
        contactItem = Notes(
            store=self.store, person=self.person, notes=u'notes')
        view = self.contactType.getReadOnlyView(contactItem)
        self.assertTrue(isinstance(view, SimpleReadOnlyView))
        self.assertIdentical(view.attribute, Notes.notes)
        self.assertIdentical(view.contactItem, contactItem)
class ReadOnlyPhoneNumberViewTestCase(unittest.TestCase, TagTestingMixin):
    """
    Tests for L{ReadOnlyPhoneNumberView}.
    """
    def test_number(self):
        """
        The I{number} renderer of L{ReadOnlyPhoneNumberView} should return the
        value of the wrapped L{PhoneNumber}'s C{number} attribute.
        """
        phoneNumber = PhoneNumber(person=Person(), number=u'123456')
        view = ReadOnlyPhoneNumberView(phoneNumber)
        numberRenderer = renderer.get(view, 'number')
        tag = numberRenderer(None, div)
        self.assertTag(tag, 'div', {}, [phoneNumber.number])
    def test_label(self):
        """
        The I{label} renderer of L{ReadOnlyPhoneNumberView} should return the
        value of the wrapped L{PhoneNumber}'s C{label} attribute.
        """
        phoneNumber = PhoneNumber(
            person=Person(),
            label=PhoneNumber.LABELS.WORK,
            number=u'123456')
        view = ReadOnlyPhoneNumberView(phoneNumber)
        labelRenderer = renderer.get(view, 'label')
        tag = labelRenderer(None, div)
        self.assertTag(tag, 'div', {}, [phoneNumber.label])
class PeopleModelTestCase(unittest.TestCase):
"""
Tests for the model parts of the person organizer code.
"""
def setUp(self):
"""
Create a bunch of people with names beginning with various letters.
"""
self.store = Store()
self.organizer = Organizer(store=self.store)
installOn(self.organizer, self.store)
letters = lowercase.decode('ascii')
for firstPrefix, lastPrefix in zip(letters, reversed(letters)):
name = u'Alice ' + lastPrefix + u'Jones'
person = Person(
store=self.store,
organizer=self.organizer,
created=Time(),
name=name)
def test_getPeopleFilters(self):
"""
L{Organizer.getPeopleFilters} should return an iterable of all of the
L{IPeopleFilter} plugins available in the store.
"""
firstPeopleFilters = [object(), object()]
firstContactPowerup = StubOrganizerPlugin(
store=self.store, peopleFilters=firstPeopleFilters)
self.store.powerUp(
firstContactPowerup, IOrganizerPlugin, priority=1)
secondPeopleFilters = [object()]
secondContactPowerup = StubOrganizerPlugin(
store=self.store, peopleFilters=secondPeopleFilters)
self.store.powerUp(
secondContactPowerup, IOrganizerPlugin, priority=0)
self.assertEqual(
list(self.organizer.getPeopleFilters())[
builtinPeopleFilterCount:],
firstPeopleFilters + secondPeopleFilters)
def test_getPeopleFiltersTags(self):
"""
L{Organizer.getPeopleFilters} should include one L{TaggedPeopleFilter}
for each tag which has been applied to a person.
"""
personTags = list(u'xac')
catalog = tags.Catalog(store=self.store)
for personTag in personTags:
catalog.tag(Person(store=self.store), personTag)
peopleFilters = list(self.organizer.getPeopleFilters())[
builtinPeopleFilterCount:]
self.assertEqual(len(peopleFilters), len(personTags))
for (peopleFilter, personTag) in zip(peopleFilters, sorted(personTags)):
self.assertTrue(isinstance(peopleFilter, TaggedPeopleFilter))
self.assertEqual(peopleFilter.filterName, personTag)
def test_createPerson(self):
"""
L{Organizer.createPerson} should instantiate and return a L{Person} item
with the specified nickname, a reference to the creating L{Organizer},
and a creation timestamp set to the current time.
"""
nickname = u'test person'
beforeCreation = extime.Time()
person = self.organizer.createPerson(nickname)
afterCreation = extime.Time()
self.assertEqual(person.name, nickname)
self.assertIdentical(person.organizer, self.organizer)
self.assertTrue(beforeCreation <= person.created <= afterCreation)
self.assertFalse(person.vip)
def test_createPersonDuplicateNickname(self):
"""
L{Organizer.createPerson} raises an exception when passed a nickname
which is already associated with a L{Person} in the database.
"""
nickname = u'test person'
self.organizer.createPerson(nickname)
self.assertRaises(
ValueError,
self.organizer.createPerson, nickname)
def test_caseInsensitiveName(self):
"""
L{Person.name} should not be case-sensitive.
"""
name = u'alice'
store = Store()
person = Person(store=store, name=name.upper())
self.assertEqual(
list(store.query(Person, Person.name == name.lower())),
[person])
def test_editPersonChangesName(self):
"""
L{Organizer.editPerson} should change the I{name} of the given
L{Person}.
"""
person = self.organizer.createPerson(u'alice')
self.organizer.editPerson(person, u'bob', [])
self.assertEqual(person.name, u'bob')
    def test_editPersonEditsContactInfo(self):
        """
        L{Organizer.editPerson} should call I{editContactItem} on each element
        of the edits sequence it is passed.
        """
        person = self.organizer.createPerson(u'alice')
        contactType = StubContactType((), None, None)
        contactItem = object()
        contactInfo = {u'foo': u'bar'}
        # Each edit is a (contactType, ListChanges) pair; ListChanges
        # apparently groups creations, edits, and deletions -- here only a
        # single edit is supplied.
        self.organizer.editPerson(
            person,
            u'alice',
            [(contactType, ListChanges(
                [], [EditObject(contactItem, contactInfo)], []))])
        self.assertEqual(
            contactType.editedContacts,
            [(contactItem, contactInfo)])
    def test_editPersonEditsUnrepeatableContactInfo(self):
        """
        Like L{test_editPersonEditsContactInfo}, but for the case where the
        contact type doesn't support multiple contact items.
        """
        person = self.organizer.createPerson(u'alice')
        contactItem = object()
        contactType = StubContactType(
            (), None, contactItems=[contactItem],
            allowMultipleContactItems=False)
        contactInfo = {u'foo': u'bar'}
        # With allowMultipleContactItems=False the contact info dict is
        # passed directly rather than wrapped in a ListChanges.
        self.organizer.editPerson(
            person, u'alice', [(contactType, contactInfo)])
        self.assertEqual(
            contactType.editedContacts,
            [(contactItem, contactInfo)])
    def test_editPersonCreatesContactInfo(self):
        """
        L{Organizer.editPerson} should call I{createContactItem} on each
        element in the create sequence it is passed.
        """
        person = self.organizer.createPerson(u'alice')
        contactType = StubContactType((), None, None, createContactItems=True)
        contactInfo = {u'foo': u'bar'}
        createdObjects = []
        # The CreateObject setter records what the stub's createContactItem
        # returned, so both sides of the exchange can be asserted on below.
        def setter(createdObject):
            createdObjects.append(createdObject)
        self.organizer.editPerson(
            person,
            u'alice',
            [(contactType, ListChanges(
                [CreateObject(contactInfo, setter)], [], []))])
        self.assertEqual(
            contactType.createdContacts, [(person, contactInfo)])
        self.assertEqual(createdObjects, [(person, contactInfo)])
    def test_editPersonContactCreationNotification(self):
        """
        Contact items created through L{Organizer.editPerson} should be sent
        to L{IOrganizerPlugin.contactItemCreated} for all L{IOrganizerPlugin}
        powerups on the store.
        """
        contactType = StubContactType((), None, None, createContactItems=True)
        contactInfo = {u'foo': u'bar'}
        # The stub plugin records the notifications it receives.
        observer = StubOrganizerPlugin(store=self.store)
        self.store.powerUp(observer, IOrganizerPlugin)
        person = self.organizer.createPerson(u'alice')
        self.organizer.editPerson(
            person, person.name,
            [(contactType,
              ListChanges([CreateObject(contactInfo,
                                        lambda obj: None)],
                          [], []))])
        self.assertEqual(
            observer.createdContactItems, [(person, contactInfo)])
    def test_editPersonContactEditNotification(self):
        """
        Contact items edit through L{Organizer.editPerson} should be sent to
        L{IOrganizerPlugin.contactItemEdited} for all L{IOrganizerPlugin}
        powerups on the store.
        """
        contactType = StubContactType((), None, None)
        contactItem = object()
        # The stub plugin records the notifications it receives.
        observer = StubOrganizerPlugin(store=self.store)
        self.store.powerUp(observer, IOrganizerPlugin)
        person = self.organizer.createPerson(u'alice')
        self.organizer.editPerson(
            person, person.name,
            [(contactType,
              ListChanges([], [EditObject(contactItem, {})], []))])
        self.assertEqual(
            observer.editedContactItems, [contactItem])
def test_editPersonDeletesContactInfo(self):
"""
L{Organizer.editPerson} should call L{deleteFromStore} on each element
in the delete sequence it is passed.
"""
class DeletableObject(object):
deleted = False
def deleteFromStore(self):
self.deleted = True
person = self.organizer.createPerson(u'alice')
contactType = StubContactType((), None, None)
contactItem = DeletableObject()
self.organizer.editPerson(
person,
u'alice',
[(contactType, ListChanges([], [], [contactItem]))])
self.assertTrue(contactItem.deleted)
def test_editPersonDuplicateNickname(self):
"""
L{Organizer.editPerson} raises an exception when passed a nickname
which is already associated with a different L{Person} in the database.
"""
alice = self.organizer.createPerson(u'alice')
bob = self.organizer.createPerson(u'bob')
self.assertRaises(ValueError,
self.organizer.editPerson, bob, alice.name, [])
def test_editPersonSameName(self):
"""
L{Organizer.editPerson} allows the new nickname it is passed to be the
same as the existing name for the given L{Person}.
"""
alice = self.organizer.createPerson(u'alice')
self.organizer.editPerson(alice, alice.name, [])
self.assertEqual(alice.name, u'alice')
    def test_editPersonNotifiesPlugins(self):
        """
        L{Organizer.editPerson} should call C{personNameChanged} on all
        L{IOrganizerPlugin} powerups on the store.
        """
        nickname = u'test person'
        newname = u'alice'
        observer = StubOrganizerPlugin(store=self.store)
        self.store.powerUp(observer, IOrganizerPlugin)
        person = self.organizer.createPerson(nickname)
        self.organizer.editPerson(person, newname, [])
        # The stub records (new name, old name) pairs.
        self.assertEqual(
            observer.renamedPeople,
            [(newname, nickname)])
    def test_createVeryImportantPerson(self):
        """
        L{Organizer.createPerson} should set L{Person.vip} to match the value
        it is passed for the C{vip} parameter, and issue a deprecation
        warning.
        """
        # assertWarns invokes the thunk and checks that exactly this
        # DeprecationWarning is emitted from the people module.
        self.assertWarns(
            DeprecationWarning,
            "Usage of Organizer.createPerson's 'vip' parameter is deprecated",
            people.__file__,
            lambda: self.organizer.createPerson(u'alice', True))
        alice = self.store.findUnique(Person, Person.name == u'alice')
        self.assertTrue(alice.vip)
def test_createPersonNoVIP(self):
"""
L{Organizer.createPerson} shouldn't issue a warning if no C{vip}
argument is passed.
"""
originalWarnExplicit = warnings.warn_explicit
def warnExplicit(*args):
self.fail('Organizer.createPerson warned us: %r' % (args[0],))
try:
warnings.warn_explicit = warnExplicit
person = self.organizer.createPerson(u'alice')
finally:
warnings.warn_explicit = originalWarnExplicit
    def test_noMugshot(self):
        """
        L{Person.getMugshot} should call L{Mugshot.placeholderForPerson} when
        called on a L{Person} without a stored L{Mugshot}.
        """
        # NOTE(review): this local shadows the 'people' module name used by
        # other tests in this class; harmless here since the module is not
        # referenced in this method.
        people = []
        thePlaceholder = object()
        def placeholderForPerson(person):
            people.append(person)
            return thePlaceholder
        person = Person(store=self.store)
        originalPlaceholderForPerson = Mugshot.placeholderForPerson
        try:
            # Patch the class attribute so the placeholder lookup can be
            # observed; restored unconditionally below.
            Mugshot.placeholderForPerson = staticmethod(
                placeholderForPerson)
            getMugshotResult = person.getMugshot()
        finally:
            Mugshot.placeholderForPerson = originalPlaceholderForPerson
        self.assertIdentical(getMugshotResult, thePlaceholder)
        self.assertEqual(people, [person])
def test_getMugshot(self):
"""
L{Person.getMugshot} should return the L{Mugshot} item which refers to
the person on which it is called when one exists.
"""
store = Store(filesdir=self.mktemp())
person = Person(store=store)
image = Mugshot(
store=store, type=u'image/png',
body=store.filesdir.child('a'),
smallerBody=store.filesdir.child('b'),
person=person)
self.assertIdentical(person.getMugshot(), image)
def test_deletePerson(self):
"""
L{Organizer.deletePerson} should delete the specified person from the
store.
"""
person = Person(store=self.store)
self.organizer.deletePerson(person)
self.assertEqual(self.store.query(Person, Person.storeID == person.storeID).count(), 0)
    def test_getOrganizerPlugins(self):
        """
        L{Organizer.getOrganizerPlugins} should return an iterator of the
        installed L{IOrganizerPlugin} powerups.
        """
        observer = StubOrganizerPlugin(store=self.store)
        self.store.powerUp(observer, IOrganizerPlugin)
        plugins = list(self.organizer.getOrganizerPlugins())
        # The explicitly installed powerup comes first; the final element
        # is always a ContactInfoOrganizerPlugin.
        self.assertEqual(plugins[:-1], [observer])
        self.assertTrue(
            isinstance(plugins[-1], ContactInfoOrganizerPlugin))
def test_createContactItemNotifiesPlugins(self):
"""
L{Organizer.createContactItem} should call L{contactItemCreated} on
all L{IOrganizerPlugin} powerups on the store.
"""
nickname = u'test person'
observer = StubOrganizerPlugin(store=self.store)
self.store.powerUp(observer, IOrganizerPlugin)
person = self.organizer.createPerson(nickname)
contactType = StubContactType((), None, None)
parameters = {'key': u'value'}
contactItem = self.organizer.createContactItem(
contactType, person, parameters)
self.assertEqual(len(observer.createdContactItems), 1)
[(observedPerson, observedParameters)] = observer.createdContactItems
self.assertIdentical(person, observedPerson)
self.assertEqual(parameters, observedParameters)
def test_notificationSkippedForUncreatedContactItems(self):
"""
L{Organizer.createContactItem} should not call L{contactItemCreated}
on any L{IOrganizerPlugin} powerups on the store if
L{IContactType.createContactItem} returns C{None} to indicate that it
is not creating a contact item.
"""
nickname = u'test person'
observer = StubOrganizerPlugin(store=self.store)
self.store.powerUp(observer, IOrganizerPlugin)
person = self.organizer.createPerson(nickname)
contactType = StubContactType((), None, None, False)
parameters = {'key': u'value'}
contactItem = self.organizer.createContactItem(
contactType, person, parameters)
self.assertEqual(observer.createdContactItems, [])
def test_editContactItemNotifiesPlugins(self):
"""
L{Organizer.editContactItem} should call L{contactItemEdited} on all
L{IOrganizerPlugin} powerups in the store.
"""
observer = StubOrganizerPlugin(store=self.store)
self.store.powerUp(observer, IOrganizerPlugin)
contactType = StubContactType((), None, None)
contactItem = object()
self.organizer.editContactItem(contactType, contactItem, {})
self.assertEqual(observer.editedContactItems, [contactItem])
def test_createPersonNotifiesPlugins(self):
"""
L{Organizer.createPerson} should call L{personCreated} on all
L{IOrganizerPlugin} powerups on the store.
"""
nickname = u'test person'
observer = StubOrganizerPlugin(store=self.store)
self.store.powerUp(observer, IOrganizerPlugin)
person = self.organizer.createPerson(nickname)
self.assertEqual(observer.createdPeople, [person])
    def test_organizerPluginWithoutPersonCreated(self):
        """
        L{IOrganizerPlugin} powerups which don't have the C{personCreated}
        method should not cause problems with L{Organizer.createPerson} (The
        method was added after the interface was initially defined so there may
        be implementations which have not yet been updated).
        """
        store = Store()
        class OldOrganizerPlugin(object):
            """
            An L{IOrganizerPlugin} which does not implement C{getContactTypes}.
            """
        # Save the plain function (im_func) so the original method can be
        # restored on the class after the test.
        getOrganizerPlugins = Organizer.getOrganizerPlugins.im_func
        plugins = [OldOrganizerPlugin(), StubOrganizerPlugin(createdPeople=[])]
        Organizer.getOrganizerPlugins = lambda self: plugins
        try:
            organizer = Organizer(store=store)
            person = organizer.createPerson(u'nickname')
        finally:
            Organizer.getOrganizerPlugins = getOrganizerPlugins
        # The stub saw both the automatically created store-owner person
        # and the explicitly created one.
        self.assertEqual(plugins[1].createdPeople,
                         [organizer.storeOwnerPerson, person])
    def test_getContactTypes(self):
        """
        L{Organizer.getContactTypes} should return an iterable of all the
        L{IContactType} plugins available on the store.
        """
        firstContactTypes = [object(), object()]
        firstContactPowerup = StubOrganizerPlugin(
            store=self.store, contactTypes=firstContactTypes)
        self.store.powerUp(
            firstContactPowerup, IOrganizerPlugin, priority=1)
        secondContactTypes = [object()]
        secondContactPowerup = StubOrganizerPlugin(
            store=self.store, contactTypes=secondContactTypes)
        self.store.powerUp(
            secondContactPowerup, IOrganizerPlugin, priority=0)
        # Skip the built-in contact types; plugin-supplied types follow in
        # powerup-priority order (higher priority first).
        self.assertEqual(
            list(self.organizer.getContactTypes())[builtinContactTypeCount:],
            firstContactTypes + secondContactTypes)
    def test_getContactTypesOldMethod(self):
        """
        L{Organizer.getContactTypes} should emit a warning if it encounters an
        implementation which defines the C{getEditorialForm} method.
        """
        contactType = StubContactType([], None, [])
        # Defining the deprecated method is what should trigger the warning.
        contactType.getEditorialForm = lambda _: None
        powerup = StubOrganizerPlugin(
            store=self.store, contactTypes=[contactType])
        self.store.powerUp(powerup, IOrganizerPlugin)
        self.assertWarns(
            DeprecationWarning,
            "The IContactType %s defines the 'getEditorialForm'"
            " method, which is deprecated. 'getEditFormForPerson'"
            " does something vaguely similar." % (StubContactType,),
            people.__file__,
            lambda: list(self.organizer.getContactTypes()))
    def test_getContactTypesNewMethod(self):
        """
        L{Organizer.getContactTypes} should emit a warning if it encounters an
        implementation which doesn't define the C{getEditFormForPerson}
        method.
        """
        contactType = StubContactType([], None, [])
        # Remove the new-style method to simulate an older implementation.
        contactType.getEditFormForPerson = None
        powerup = StubOrganizerPlugin(
            store=self.store, contactTypes=[contactType])
        self.store.powerUp(powerup, IOrganizerPlugin)
        self.assertWarns(
            PendingDeprecationWarning,
            "IContactType now has the 'getEditFormForPerson'"
            " method, but %s did not implement it." % (
                StubContactType,),
            people.__file__,
            lambda: list(self.organizer.getContactTypes()))
    def test_groupReadOnlyViews(self):
        """
        L{Organizer.groupReadOnlyViews} should correctly group the read-only
        views of all available contact items.
        """
        groupOneContactItems = [object(), object(), object()]
        groupOneContactTypes = [
            StubContactType([], None, groupOneContactItems[:1],
                            contactGroup=ContactGroup('One')),
            StubContactType([], None, groupOneContactItems[1:],
                            contactGroup=ContactGroup('One'))]
        groupTwoContactItems = [object()]
        groupTwoContactTypes = [
            StubContactType([], None, groupTwoContactItems,
                            contactGroup=ContactGroup('Two'))]
        plugin = StubOrganizerPlugin(
            store=self.store,
            contactTypes=groupTwoContactTypes + groupOneContactTypes)
        self.store.powerUp(plugin, IOrganizerPlugin)
        person = Person(store=self.store)
        grouped = self.organizer.groupReadOnlyViews(person)
        # Each stub contact type should have been asked for this person's
        # contact items exactly once.
        for contactType in groupOneContactTypes + groupTwoContactTypes:
            self.assertEqual(contactType.queriedPeople, [person])
        self.assertEqual(sorted(grouped.keys()), [None, 'One', 'Two'])
        self.assertEqual(
            [view.item for view in grouped['One']], groupOneContactItems)
        self.assertEqual(
            [view.item for view in grouped['Two']], groupTwoContactItems)
        # builtin (groupless) contact type stuff.
        builtinContactTypes = list(self.organizer.getContactTypes())[
            :builtinContactTypeCount]
        self.assertEqual(
            len(grouped[None]),
            sum(len(list(contactType.getContactItems(person)))
                for contactType in builtinContactTypes))
    def test_organizerPluginWithoutContactTypes(self):
        """
        L{IOrganizerPlugin} powerups which don't have the C{getContactTypes}
        method should not cause problems with L{Organizer.getContactTypes} (The
        method was added after the interface was initially defined so there may
        be implementations which have not yet been updated).
        """
        class OldOrganizerPlugin(object):
            """
            An L{IOrganizerPlugin} which does not implement C{getContactTypes}.
            """
        # Patch the class method, keeping the plain function (im_func) so
        # it can be restored afterwards.
        getOrganizerPlugins = Organizer.getOrganizerPlugins.im_func
        Organizer.getOrganizerPlugins = lambda self: [OldOrganizerPlugin()]
        try:
            organizer = Organizer()
            contactTypes = list(organizer.getContactTypes())
        finally:
            Organizer.getOrganizerPlugins = getOrganizerPlugins
        # Only the built-in contact types should have been returned.
        self.assertEqual(contactTypes[builtinContactTypeCount:], [])
    def test_getContactCreationParameters(self):
        """
        L{Organizer.getContactCreationParameters} should return a list
        containing a L{ListChangeParameter} for each contact type available
        in the system which allows multiple contact items.
        """
        contactTypes = [StubContactType(
            (), None, None,
            allowMultipleContactItems=True,
            theDescriptiveIdentifier=u'Very Descriptive')]
        contactPowerup = StubOrganizerPlugin(
            store=self.store, contactTypes=contactTypes)
        self.store.powerUp(contactPowerup, IOrganizerPlugin)
        parameters = list(self.organizer.getContactCreationParameters())
        # One parameter per built-in type, plus one for the stub type.
        self.assertEqual(len(parameters), builtinContactTypeCount + 1)
        self.assertTrue(
            isinstance(parameters[builtinContactTypeCount], ListChangeParameter))
        self.assertEqual(
            parameters[builtinContactTypeCount].modelObjectDescription,
            u'Very Descriptive')
        self.assertEqual(
            parameters[builtinContactTypeCount].name,
            qual(StubContactType))
    def test_getContactCreationParametersUnrepeatable(self):
        """
        L{Organizer.getContactCreationParameters} should return a list
        containing a L{FormParameter} for each contact type which doesn't
        support multiple contact items.
        """
        contactTypeParameters = [Parameter('foo', TEXT_INPUT, lambda x: None)]
        contactTypes = [StubContactType(
            contactTypeParameters, None, None,
            allowMultipleContactItems=False)]
        contactPowerup = StubOrganizerPlugin(
            store=self.store, contactTypes=contactTypes)
        self.store.powerUp(contactPowerup, IOrganizerPlugin)
        parameters = list(self.organizer.getContactCreationParameters())
        liveFormParameter = parameters[builtinContactTypeCount]
        self.assertTrue(isinstance(liveFormParameter, FormParameter))
        self.assertEqual(liveFormParameter.name, qual(StubContactType))
        # The nested form should expose the contact type's own parameters.
        liveForm = liveFormParameter.form
        self.assertTrue(isinstance(liveForm, LiveForm))
        self.assertEqual(liveForm.parameters, contactTypeParameters)
    def test_getContactEditorialParameters(self):
        """
        L{Organizer.getContactEditorialParameters} should return a list
        containing a L{ListChangeParameter} for each contact type available in
        the system which supports multiple contact items.
        """
        contactTypes = [StubContactType(
            (), None, [], theDescriptiveIdentifier=u'So Descriptive')]
        contactPowerup = StubOrganizerPlugin(
            store=self.store, contactTypes=contactTypes)
        self.store.powerUp(contactPowerup, IOrganizerPlugin)
        person = self.organizer.createPerson(u'nickname')
        parameters = list(self.organizer.getContactEditorialParameters(person))
        # Each entry is a (contact type, parameter) pair.
        self.assertIdentical(
            parameters[builtinContactTypeCount][0], contactTypes[0])
        self.failUnless(
            isinstance(
                parameters[builtinContactTypeCount][1],
                ListChangeParameter))
        self.assertEqual(
            parameters[builtinContactTypeCount][1].modelObjectDescription,
            u'So Descriptive')
    def test_getContactEditorialParametersNone(self):
        """
        The L{ListChangeParameter} returned by
        L{Organizer.getContactEditorialParameters} for a particular
        L{IContactType} should not have a model object or defaults dict if the
        L{IContactType} indicates that the contact item is immutable (by
        returning C{None} from its C{getParameters} implementation).
        """
        class PickyContactType(StubContactType):
            # Look the per-item parameters up in the dict passed to
            # __init__, so individual items can opt out by mapping to None.
            def getParameters(self, contactItem):
                return self.parameters[contactItem]
        mutableContactItem = object()
        immutableContactItem = object()
        makeParam = lambda default=None: Parameter(
            'foo', TEXT_INPUT, lambda x: None, default=default)
        contactType = PickyContactType(
            {mutableContactItem: [makeParam('the default')],
             None: [makeParam(None)],
             immutableContactItem: None},
            None,
            [mutableContactItem, immutableContactItem])
        contactTypes = [contactType]
        contactPowerup = StubOrganizerPlugin(
            store=self.store, contactTypes=contactTypes)
        self.store.powerUp(contactPowerup, IOrganizerPlugin)
        person = self.organizer.createPerson(u'nickname')
        parameters = list(
            self.organizer.getContactEditorialParameters(person))
        (gotContactType, parameter) = parameters[builtinContactTypeCount]
        # Only the mutable item appears; the immutable one is excluded.
        self.assertEqual(parameter.modelObjects, [mutableContactItem])
        self.assertEqual(parameter.defaults, [{'foo': 'the default'}])
    def test_getContactEditorialParametersUnrepeatable(self):
        """
        L{Organizer.getContactEditorialParameters} should return a list
        containing a L{FormParameter} for each contact type available in the
        system which doesn't support multiple contact items.
        """
        contactTypeParameters = [Parameter('foo', TEXT_INPUT, lambda x: x)]
        contactTypes = [StubContactType(
            contactTypeParameters, None, [None],
            allowMultipleContactItems=False)]
        contactPowerup = StubOrganizerPlugin(
            store=self.store, contactTypes=contactTypes)
        self.store.powerUp(contactPowerup, IOrganizerPlugin)
        person = self.organizer.createPerson(u'nickname')
        parameters = list(self.organizer.getContactEditorialParameters(person))
        (contactType, liveFormParameter) = parameters[builtinContactTypeCount]
        self.assertIdentical(contactType, contactTypes[0])
        self.assertTrue(isinstance(liveFormParameter, FormParameter))
        self.assertEqual(liveFormParameter.name, qual(StubContactType))
        # The nested form carries the contact type's edit parameters.
        liveForm = liveFormParameter.form
        self.assertTrue(isinstance(liveForm, LiveForm))
        self.assertEqual(liveForm.parameters, contactTypeParameters)
    def test_getContactEditorialParametersDefaults(self):
        """
        L{Organizer.getContactEditorialParameters} should return some
        parameters with correctly initialized lists of defaults and model
        objects.
        """
        person = self.organizer.createPerson(u'nickname')
        contactItems = [PostalAddress(store=self.store, person=person, address=u'1'),
                        PostalAddress(store=self.store, person=person, address=u'2')]
        editParameters = list(self.organizer.getContactEditorialParameters(person))
        # Index 2 presumably selects the postal-address contact type's
        # parameter among the built-in types — TODO confirm the ordering.
        (editType, editParameter) = editParameters[2]
        self.assertEqual(
            editParameter.defaults, [{u'address': u'1'}, {u'address': u'2'}])
        self.assertEqual(
            editParameter.modelObjects, contactItems)
def test_navigation(self):
"""
L{Organizer.getTabs} should return a single tab, 'People', that points
to itself.
"""
tabs = self.organizer.getTabs()
self.assertEqual(len(tabs), 1)
tab = tabs[0]
self.assertEqual(tab.name, "People")
self.assertEqual(tab.storeID, self.organizer.storeID)
self.assertEqual(tab.children, ())
self.assertEqual(tab.authoritative, True)
self.assertEqual(tab.linkURL, None)
def test_getPeopleTags(self):
"""
L{Organizer.getPeopleTags} should return a set containing each tag
which has been applied to a L{Person}.
"""
alice = self.organizer.createPerson(u'Alice')
frank = self.organizer.createPerson(u'Frank')
catalog = tags.Catalog(store=self.store)
catalog.tag(alice, u'person')
catalog.tag(frank, u'person')
catalog.tag(alice, u'girl')
catalog.tag(frank, u'boy')
# tag the organizer for laughs
catalog.tag(self.organizer, u'organizer')
self.assertEqual(
self.organizer.getPeopleTags(),
set(('person', 'girl', 'boy')))
class POBox(Item):
    """
    A minimal L{Item} subclass with a single C{text} attribute.
    """
    # The box's number, stored as text.
    number = text()
def _keyword(contactType):
return contactType.uniqueIdentifier().encode('ascii')
# Sample contact details used by createAddPersonContactInfo below.
CONTACT_EMAIL = u'jlp@starship.enterprise'
CONTACT_ADDRESS = u'123 Street Rd'
def createAddPersonContactInfo(store):
    """
    Create a structure suitable to be passed to AddPersonFragment.addPerson.
    Since the structure keeps changing slightly, this lets some tests be
    independent of those details and so avoids requiring them to change every
    time the structure does.

    @param store: The store used to construct the L{EmailContactType}.
    @return: A dict mapping contact type keywords to L{ListChanges}
        instances, each creating one contact item.
    """
    return {
        _keyword(EmailContactType(store)): ListChanges(
            [CreateObject({u'email': CONTACT_EMAIL}, lambda x: None)],
            [], []),
        _keyword(PostalContactType()): ListChanges(
            [CreateObject({u'address': CONTACT_ADDRESS}, lambda x: None)],
            [], [])}
class PeopleTests(unittest.TestCase):
    """
    Tests for basic L{Person} / L{Organizer} behavior and for
    L{AddPersonFragment}.
    """
    def setUp(self):
        """
        Create an in-memory store and organizer.
        """
        self.user = emptyMantissaUserStore()
        self.organizer = Organizer(store=self.user)
        installOn(self.organizer, self.user)
    def testPersonCreation(self):
        """
        L{Organizer.personByName} creates a new L{Person} on first use,
        records its creation time, and keeps people from distinct organizers
        (and with distinct names) separate.
        """
        # The creation timestamp must fall between these two samples.
        beforeCreation = extime.Time()
        p = self.organizer.personByName(u'testuser')
        afterCreation = extime.Time()
        self.assertEquals(p.name, u'testuser')
        self.failUnless(
            beforeCreation <= p.created <= afterCreation,
            "not (%r <= %r <= %r)" % (beforeCreation, p.created, afterCreation))
        # Make sure people from that organizer don't collide with
        # people from a different organizer
        another = Organizer(store=self.user)
        q = another.personByName(u'testuser')
        self.failIfIdentical(p, q)
        self.assertEquals(q.name, u'testuser')
        # And make sure people within a single Organizer don't trample
        # on each other.
        notQ = another.personByName(u'nottestuser')
        self.failIfIdentical(q, notQ)
        self.assertEquals(q.name, u'testuser')
        self.assertEquals(notQ.name, u'nottestuser')
    def test_getEmailAddresses(self):
        """
        Verify that getEmailAddresses yields the associated email address
        strings for a person.
        """
        p = Person(store=self.user)
        EmailAddress(store=self.user, person=p, address=u'a@b.c')
        EmailAddress(store=self.user, person=p, address=u'c@d.e')
        # Ordering is undefined, so let's use a set.
        self.assertEquals(set(p.getEmailAddresses()),
                          set([u'a@b.c', u'c@d.e']))
    def test_getEmailAddress(self):
        """
        Verify that getEmailAddress yields the only associated email address
        for a person if it is the only one.
        """
        p = Person(store=self.user)
        EmailAddress(store=self.user, person=p, address=u'a@b.c')
        self.assertEquals(p.getEmailAddress(), u'a@b.c')
    def testPersonRetrieval(self):
        """
        L{Organizer.personByName} returns the same L{Person} instance for
        repeated lookups of the same name.
        """
        name = u'testuser'
        firstPerson = self.organizer.personByName(name)
        self.assertIdentical(firstPerson, self.organizer.personByName(name))
    def test_docFactory(self):
        """
        L{AddPersonFragment.docFactory.load} should not return C{None}.
        """
        self.assertNotIdentical(
            AddPersonFragment(self.organizer).docFactory.load(),
            None)
    def test_addPerson(self):
        """
        L{AddPersonFragment.addPerson} should add the person.
        """
        name = u'Billy Spade'
        addPerson = AddPersonFragment(self.organizer)
        addPerson.addPerson(name)
        self.assertEqual(
            self.user.query(Person, Person.name == name).count(), 1)
    def test_addPersonParameters(self):
        """
        L{AddPersonFragment.render_addPersonForm} should return a L{LiveForm}
        with several fixed parameters.
        """
        addPersonFrag = AddPersonFragment(self.organizer)
        # Whatever is in _baseParameters should end up in the resulting form's
        # parameters.  Explicitly define _baseParameters here so that changes
        # to the actual value don't affect this test.  The actual value is
        # effectively a declaration, so the only thing one could test about it
        # is that it is equal to itself, anyway.
        addPersonFrag._baseParameters = baseParameters = [
            Parameter('foo', TEXT_INPUT, unicode, 'Foo')]
        addPersonForm = addPersonFrag.render_addPersonForm(None, None)
        self.assertEqual(addPersonForm.parameters, baseParameters)
    def test_addPersonValueError(self):
        """
        L{AddPersonFragment.addPerson} raises L{InputError} if
        L{Organizer.createPerson} raises a L{ValueError}.
        """
        addPersonFragment = AddPersonFragment(self.organizer)
        def stubCreatePerson(*a, **kw):
            raise ValueError("Stub nickname rejection")
        # object.__setattr__ bypasses the Item attribute machinery so the
        # method can be replaced on this instance only.
        object.__setattr__(self.organizer, 'createPerson', stubCreatePerson)
        exception = self.assertRaises(
            InputError, addPersonFragment.addPerson, u'nickname')
        self.assertEqual(exception.args, ("Stub nickname rejection",))
        self.assertTrue(isinstance(exception.args[0], unicode))
    def test_linkToPerson(self):
        """
        L{Organizer.linkToPerson} generates an URL that is the same as linking
        to the private person item.
        """
        privapp = self.user.findUnique(PrivateApplication)
        p = Person(store=self.user)
        self.assertEqual(self.organizer.linkToPerson(p),
                         privapp.linkTo(p.storeID))
    def test_urlForViewState(self):
        """
        L{Organizer.urlForViewState} should generate a valid, correctly quoted
        url.
        """
        organizerURL = IWebTranslator(self.user).linkTo(
            self.organizer.storeID)
        person = self.organizer.createPerson(u'A Person')
        # The space in the person's name must be percent-encoded.
        self.assertEqual(
            str(self.organizer.urlForViewState(
                person, ORGANIZER_VIEW_STATES.EDIT)),
            organizerURL + '?initial-person=A%20Person&initial-state=edit')
class PersonDetailFragmentTests(unittest.TestCase):
    """
    Tests for L{xmantissa.people.PersonDetailFragment}.
    """
    def test_mugshotUploadForm(self):
        """
        L{PersonDetailFragment}'s I{mugshotUploadForm} child should return a
        L{MugshotUploadForm}.
        """
        person = StubPerson([])
        person.organizer = StubOrganizer()
        fragment = PersonDetailFragment(person)
        # Resolve the child via URL traversal, as a browser request would.
        (resource, segments) = fragment.locateChild(
            None, ('mugshotUploadForm',))
        self.assertTrue(isinstance(resource, MugshotUploadForm))
        self.assertIdentical(resource.person, person)
    def test_getPersonURL(self):
        """
        Test that L{getPersonURL} returns the URL for the Person.
        """
        person = StubPerson([])
        person.organizer = StubOrganizer()
        self.assertEqual(getPersonURL(person), "/person/Alice")
    def test_mugshotChild(self):
        """
        L{PersonDetailFragment}'s I{mugshot} child should return a
        L{MugshotResource} wrapping the result of calling
        L{Person.getMugshot}.
        """
        theMugshot = object()
        class StubMugshotPerson(StubPerson):
            organizer = StubOrganizer()
            def getMugshot(self):
                return theMugshot
        fragment = PersonDetailFragment(StubMugshotPerson([]))
        (res, segments) = fragment.locateChild(None, ('mugshot',))
        self.assertTrue(isinstance(res, MugshotResource))
        self.assertIdentical(res.mugshot, theMugshot)
        # All segments were consumed by the traversal.
        self.assertEqual(segments, ())
class PersonScrollingFragmentTests(unittest.TestCase):
    """
    Tests for L{PersonScrollingFragment}.
    """
    def setUp(self):
        """
        Make an L{Organizer}.
        """
        self.store = Store()
        self.organizer = Organizer(store=self.store)
        installOn(self.organizer, self.store)
    def test_scrollingAttributes(self):
        """
        L{PersonScrollingFragment} should have the attributes its base class
        wants to use.
        """
        baseConstraint = object()
        fragment = PersonScrollingFragment(
            self.organizer, baseConstraint, Person.name,
            StubTranslator(None, None))
        self.assertIdentical(fragment.baseConstraint, baseConstraint)
        self.assertIdentical(
            fragment.currentSortColumn.sortAttribute(), Person.name)
        self.assertIdentical(fragment.itemType, Person)
        # Exactly three columns: name (sortable), vip (unsortable) and the
        # mugshot URL column bound to this organizer.
        self.assertEqual(len(fragment.columns), 3)
        self.assertEqual(fragment.columns['name'], Person.name)
        self.assertTrue(isinstance(fragment.columns['vip'], UnsortableColumn))
        self.assertEqual(fragment.columns['vip'].attribute, Person.vip)
        self.assertTrue(
            isinstance(fragment.columns['mugshotURL'], MugshotURLColumn))
        self.assertIdentical(
            fragment.columns['mugshotURL'].organizer, self.organizer)
    def test_initialArguments(self):
        """
        L{PersonScrollingFragment.getInitialArguments} should include the
        store owner person's name in its result.
        """
        storeOwnerPersonName = u'Store Owner'
        self.organizer.storeOwnerPerson.name = storeOwnerPersonName
        fragment = PersonScrollingFragment(
            self.organizer, object(), Person.name, StubTranslator(None, None))
        # The owner name is appended after the base class's arguments.
        self.assertEqual(
            fragment.getInitialArguments(),
            (ScrollingElement.getInitialArguments(fragment)
             + [storeOwnerPersonName]))
    def test_filterByFilter(self):
        """
        L{PersonScrollingFragment.filterByFilter} should change the scrolltable's
        base constraint to the query comparison of the named filter.
        """
        queryComparison = object()
        class MockPeopleFilter:
            # The filter is handed the fragment's store and returns the
            # comparison the scrolltable should adopt.
            def getPeopleQueryComparison(_self, store):
                self.assertIdentical(store, self.store)
                return queryComparison
        fragment = PersonScrollingFragment(
            self.organizer,
            object(),
            Person.name,
            StubTranslator(None, None))
        fragment.filters = {
            u'test_filterByFilter': MockPeopleFilter()}
        # Invoke the method through its exposed (remote-callable) name.
        filterByFilter = expose.get(fragment, 'filterByFilter')
        filterByFilter(u'test_filterByFilter')
        self.assertIdentical(
            fragment.baseConstraint, queryComparison)
class OrganizerFragmentTests(unittest.TestCase):
    """
    Tests for L{OrganizerFragment}.
    @ivar contactTypes: A list of L{StubContactType} instances which will be
        returned by the C{getContactTypes} method of the stub organizer used by
        these tests.
    @ivar organizer: The L{StubOrganizer} which is used by these tests.
    @ivar fragment: An L{OrganizerFragment} to test.
    @ivar deletedPeople: A list of the arguments which have been passed to the
        C{deletePerson} method of L{organizer}.
    """
    def setUp(self):
        """
        Create an L{OrganizerFragment} wrapped around a double for
        L{Organizer}.
        """
        deletedPeople = []
        contactTypes = []
        self.store = Store()
        self.contactTypes = contactTypes
        self.organizer = StubOrganizer(
            self.store, contactTypes, deletedPeople)
        self.fragment = OrganizerFragment(self.organizer)
        self.deletedPeople = deletedPeople
    def test_head(self):
        """
        L{OrganizerFragment.head} should return C{None}.
        """
        self.assertIdentical(self.fragment.head(), None)
    def test_peopleTable(self):
        """
        L{OrganizerFragment}'s I{peopleTable} renderer should return a
        L{PersonScrollingFragment}.
        """
        peopleTableRenderer = renderer.get(self.fragment, 'peopleTable')
        scroller = peopleTableRenderer(None, None)
        self.assertTrue(isinstance(scroller, PersonScrollingFragment))
    def test_peopleFilters(self):
        """
        L{OrganizerFragment}'s I{peopleFilters} renderer should return an
        instance of its tag's I{filter} pattern for each filter, except the
        first, which should use the I{selected-filter} pattern.
        """
        filterNames = list('acyx')
        peopleFilters = [record('filterName')(name) for name in filterNames]
        self.organizer.peopleFilters = peopleFilters
        peopleFiltersRenderer = renderer.get(self.fragment, 'peopleFilters')
        # A tag carrying both patterns the renderer is expected to use.
        tag = div[
            div(usedpattern='filter', pattern='filter')[slot('name')],
            div(usedpattern='selected-filter',
                pattern='selected-filter')[slot('name')]]
        patterns = list(peopleFiltersRenderer(None, tag))
        self.assertEqual(len(patterns), len(peopleFilters))
        # The first filter gets the 'selected-filter' pattern ...
        selectedPattern = patterns.pop(0)
        selectedFilterName = filterNames.pop(0)
        self.assertEqual(
            selectedPattern.slotData, {'name': selectedFilterName})
        self.assertEqual(
            selectedPattern.attributes['usedpattern'], 'selected-filter')
        # ... and all subsequent ones use the plain 'filter' pattern.
        for (pattern, filterName) in zip(patterns, filterNames):
            self.assertEqual(pattern.slotData, {'name': filterName})
            self.assertEqual(pattern.attributes['usedpattern'], 'filter')
    def test_getAddPerson(self):
        """
        L{OrganizerFragment.getAddPerson} should return an
        L{AddPersonFragment}.
        """
        addPersonFragment = expose.get(self.fragment, 'getAddPerson')()
        self.assertTrue(isinstance(addPersonFragment, AddPersonFragment))
        self.assertIdentical(addPersonFragment.organizer, self.organizer)
        self.assertIdentical(addPersonFragment.fragmentParent, self.fragment)
    def test_getImportPeople(self):
        """
        L{OrganizerFragment.getImportPeople} should return an
        L{ImportPeopleWidget}.
        """
        widget = expose.get(self.fragment, 'getImportPeople')()
        self.assertTrue(isinstance(widget, ImportPeopleWidget))
        self.assertIdentical(widget.organizer, self.organizer)
        self.assertIdentical(widget.fragmentParent, self.fragment)
    def test_getEditPerson(self):
        """
        L{OrganizerFragment.getEditPerson} should return an
        L{EditPersonView}.
        """
        name = u'testuser'
        person = Person()
        self.organizer.people[name] = person
        editPersonFragment = expose.get(
            self.fragment, 'getEditPerson')(name)
        self.assertTrue(isinstance(editPersonFragment, EditPersonView))
        self.assertIdentical(editPersonFragment.person, person)
        self.assertIdentical(editPersonFragment.fragmentParent, self.fragment)
    def test_deletePerson(self):
        """
        L{OrganizerFragment.deletePerson} should call
        L{Organizer.deletePerson}.
        """
        name = u'testuser'
        person = Person()
        self.organizer.people[name] = person
        expose.get(self.fragment, 'deletePerson', None)(name)
        # The stub organizer records the people it was asked to delete.
        self.assertEqual(self.fragment.organizer.deletedPeople, [person])
    def test_getPersonPluginWidget(self):
        """
        L{OrganizerFragment.getPersonPluginWidget} should return a
        L{PersonPluginView} for the named person.
        """
        name = u'testuser'
        person = Person()
        self.organizer.people[name] = person
        self.organizer.organizerPlugins = plugins = [object()]
        widget = expose.get(
            self.fragment, 'getPersonPluginWidget')(name)
        self.assertTrue(isinstance(widget, PersonPluginView))
        self.assertEqual(widget.plugins, plugins)
        self.assertIdentical(widget.person, person)
        self.assertIdentical(widget.fragmentParent, self.fragment)
    def test_initialArgumentsNoInitialPerson(self):
        """
        When L{Organizer.initialPerson} is C{None},
        L{Organizer.getInitialArguments} should be a one-element tuple
        containing the name of the store owner person.
        """
        storeOwnerPersonName = u'Alice'
        self.organizer.storeOwnerPerson = Person(
            name=storeOwnerPersonName)
        self.assertEqual(
            self.fragment.getInitialArguments(),
            (storeOwnerPersonName,))
    def test_initialArgumentsInitialPerson(self):
        """
        When L{Organizer.initialPerson} is not C{None},
        L{Organizer.getInitialArguments} should be a three-element tuple
        containing the name of the store owner person, the name of the initial
        person, and the initial view state.
        """
        storeOwnerPersonName = u'Alice'
        initialPersonName = u'Bob'
        initialState = ORGANIZER_VIEW_STATES.EDIT
        self.organizer.storeOwnerPerson = Person(
            name=storeOwnerPersonName)
        initialPerson = Person(name=initialPersonName)
        fragment = OrganizerFragment(
            self.organizer, initialPerson, initialState)
        self.assertEqual(
            fragment.getInitialArguments(),
            (storeOwnerPersonName, initialPersonName, initialState))
class OrganizerFragmentBeforeRenderTestCase(unittest.TestCase):
    """
    Tests for L{OrganizerFragment.beforeRender}. These tests require more
    expensive setup than is provided by L{OrganizerFragmentTests}.
    """
    def setUp(self):
        """
        Make a substore with a L{PrivateApplication} and an L{Organizer}.
        """
        self.siteStore = Store(filesdir=self.mktemp())
        # Site setup (install the site, create a user account) and user-store
        # setup each run inside their own transaction.
        def siteStoreTxn():
            Mantissa().installSite(self.siteStore, u"example.com", u"", False)
            userAccount = Create().addAccount(
                self.siteStore,
                u'testuser',
                u'example.com',
                u'password')
            self.userStore = userAccount.avatars.open()
        self.siteStore.transact(siteStoreTxn)
        def userStoreTxn():
            self.organizer = Organizer(store=self.userStore)
            installOn(self.organizer, self.userStore)
            self.fragment = OrganizerFragment(self.organizer)
        self.userStore.transact(userStoreTxn)
    def _makeContextWithRequestArgs(self, args):
        """
        Make a context which contains a request with args C{args}.
        """
        request = FakeRequest()
        request.args = args
        return context.PageContext(
            tag=None, parent=context.RequestContext(
                tag=request))
    def test_validPersonAndValidState(self):
        """
        L{OrganizerFragment.beforeRender} should correctly initialize the
        L{OrganizerFragment} if a valid person name and valid initial view
        state are present in the query args.
        """
        person = self.organizer.createPerson(u'Andr\xe9')
        # Query arguments arrive UTF-8 encoded, as from a real request.
        self.fragment.beforeRender(
            self._makeContextWithRequestArgs(
                {'initial-person': [person.name.encode('utf-8')],
                 'initial-state': [ORGANIZER_VIEW_STATES.EDIT.encode('utf-8')]}))
        self.assertIdentical(self.fragment.initialPerson, person)
        self.assertEqual(self.fragment.initialState, ORGANIZER_VIEW_STATES.EDIT)
    def test_invalidPersonAndValidState(self):
        """
        L{OrganizerFragment.beforeRender} shouldn't modify the
        L{OrganizerFragment} if an invalid person name and valid view state
        are present in the query args.
        """
        # No person named 'Alice' exists, so both attributes stay None.
        self.fragment.beforeRender(
            self._makeContextWithRequestArgs(
                {'initial-person': ['Alice'],
                 'initial-state': [ORGANIZER_VIEW_STATES.EDIT.encode('utf-8')]}))
        self.assertIdentical(self.fragment.initialPerson, None)
        self.assertIdentical(self.fragment.initialState, None)
    def test_validPersonAndInvalidState(self):
        """
        Similar to L{test_invalidPersonAndValidState}, but for a valid person
        name and invalid initial view state.
        """
        person = self.organizer.createPerson(u'Alice')
        # A missing state and an unrecognized state are both invalid.
        for args in [{'initial-person': ['Alice']},
                     {'initial-person': ['Alice'],
                      'initial-state': [u'\xe9dit'.encode('utf-8')]}]:
            self.fragment.beforeRender(self._makeContextWithRequestArgs(args))
            self.assertIdentical(self.fragment.initialPerson, None)
            self.assertIdentical(self.fragment.initialState, None)
class AddPersonFragmentTests(unittest.TestCase):
    """
    Tests for L{AddPersonFragment}.
    """
    def test_jsClass(self):
        """
        L{AddPersonFragment} should have a customized C{jsClass} in order to
        expose methods on its L{LiveForm}.
        """
        self.assertEqual(
            AddPersonFragment.jsClass, u'Mantissa.People.AddPerson')
    def test_renders(self):
        """
        An L{AddPersonFragment} should be renderable.
        """
        userStore = emptyMantissaUserStore()
        installOn(PrivateApplication(store=userStore), userStore)
        markup = renderLiveFragment(
            AddPersonFragment(Organizer(store=userStore)))
        self.assertTrue(isinstance(markup, str))
    def test_addPersonFormRenderer(self):
        """
        L{AddPersonFragment.render_addPersonForm} should return a L{LiveForm}
        with a customized I{jsClass} attribute.
        """
        addPersonFragment = AddPersonFragment(Organizer(store=Store()))
        addPersonForm = addPersonFragment.render_addPersonForm(None, None)
        self.assertTrue(isinstance(addPersonForm, LiveForm))
        self.assertEqual(
            addPersonForm.jsClass, u'Mantissa.People.AddPersonForm')
class ImportPeopleWidgetTests(unittest.TestCase):
    """
    Tests for L{ImportPeopleWidget}.
    """
    def test_parseAddresses(self):
        """
        L{_parseAddresses} should extract valid-looking names and addresses.
        """
        # Local helper so each fixture below is a one-liner.
        def _assert(input, expected):
            self.assertEqual(ImportPeopleWidget._parseAddresses(input),
                             expected)
        # Empty
        for s in [u'', u' ', u'<>', u',', u'<>, <>']:
            _assert(s, [])
        # Name defaulting to local-part
        _assert(u'alice@example.com', [(u'alice', u'alice@example.com')])
        _assert(u' alice@example.com, ', [(u'alice', u'alice@example.com')])
        # Separators and display names
        for sep in u', ', u'\n', u', foo <>, ':
            _assert(sep.join([u'alice@example.com', u'bob@example.com']),
                    [(u'alice', u'alice@example.com'),
                     (u'bob', u'bob@example.com')])
            _assert(sep.join([u'<Alice.Allison@example.com>',
                              u'Alice Allison <alice@example.com>',
                              u'"Bob Boberton" <bob@example.com>']),
                    [(u'Alice.Allison', u'Alice.Allison@example.com'),
                     (u'Alice Allison', u'alice@example.com'),
                     (u'Bob Boberton', u'bob@example.com')])
    def test_importAddresses(self):
        """
        L{ImportPeopleWidget.importAddresses} should create entries for the
        given addresses (ignoring names/addresses that exist already).
        """
        store = Store()
        organizer = Organizer(store=store)
        owner = organizer.storeOwnerPerson
        importFragment = ImportPeopleWidget(organizer)
        # Importing nothing should leave only the store-owner person.
        self.assertEqual(list(store.query(Person)), [owner])
        importFragment.importAddresses([])
        self.assertEqual(list(store.query(Person)), [owner])
        addresses = [(u'Alice', u'alice@example.com'),
                     (u'Bob', u'bob@example.com')]
        # Import twice to check idempotency, and make sure both the name and
        # address are checked.
        for input in [addresses, addresses, [(u'Alice', u'chaff'),
                                             (u'chaff', u'bob@example.com')]]:
            importFragment.importAddresses(input)
            self.assertEqual(set((p.name, p.getEmailAddress())
                                 for p in store.query(Person)
                                 if p is not owner),
                             set(addresses))
class ReadOnlyContactInfoViewTestCase(unittest.TestCase):
    """
    Tests for L{ReadOnlyContactInfoView}.
    """
    def test_personSummary(self):
        """
        The I{personSummary} renderer should return a L{PersonSummaryView}
        for the wrapped person.
        """
        person = Person()
        personSummary = renderer.get(
            ReadOnlyContactInfoView(person),
            'personSummary',
            None)
        fragment = personSummary(None, None)
        self.assertTrue(isinstance(fragment, PersonSummaryView))
        self.assertIdentical(fragment.person, person)
    def test_contactInfo(self):
        """
        The I{contactInfo} renderer should return the suitably-transformed
        result of calling L{Organizer.groupReadOnlyViews}.
        """
        person = StubPerson([])
        readOnlyViews = [div(), div(), div()]
        # One named group plus a None (groupless) group of views.
        person.organizer = StubOrganizer(
            groupedReadOnlyViews={
                'One': readOnlyViews[:1],
                None: readOnlyViews[1:]})
        contactInfo = renderer.get(
            ReadOnlyContactInfoView(person),
            'contactInfo',
            None)
        # Tag mimics the template: a I{contact-group} pattern with I{name}
        # and I{views} slots.
        tag = div[
            div(pattern='contact-group')[
                slot('name'), slot('views')]]
        result = list(contactInfo(None, tag))
        self.assertEqual(
            person.organizer.groupedReadOnlyViewPeople, [person])
        self.assertEqual(len(result), 2)
        # The groupless (None-keyed) views appear first, unwrapped.
        grouplessReadOnlyViews = result[0]
        self.assertEqual(len(grouplessReadOnlyViews), 2)
        self.assertEqual(grouplessReadOnlyViews, readOnlyViews[1:])
        # The named group is rendered through the contact-group pattern.
        contactGroupPattern = result[1]
        self.assertEqual(
            contactGroupPattern.slotData['name'], 'One')
        self.assertEqual(
            contactGroupPattern.slotData['views'], readOnlyViews[:1])
class PersonSummaryViewTestCase(unittest.TestCase):
    """
    Tests for L{PersonSummaryView}.
    """
    def test_mugshotURL(self):
        """
        The I{mugshotURL} renderer should return the correct URL if the person
        has a mugshot.
        """
        store = Store(self.mktemp())
        organizer = Organizer(store=store)
        installOn(organizer, store)
        person = Person(store=store, organizer=organizer)
        # Give the person an actual Mugshot item; the renderer should link
        # to its "smaller" variant.
        Mugshot(
            store=store,
            person=person,
            body=store.newFilePath(u'body'),
            smallerBody=store.newFilePath(u'smallerBody'),
            type=u'image/jpeg')
        mugshotURL = renderer.get(
            PersonSummaryView(person), 'mugshotURL', None)
        self.assertEqual(
            mugshotURL(None, None),
            organizer.linkToPerson(person) + '/mugshot/smaller')
    def test_mugshotURLNoMugshot(self):
        """
        The I{mugshotURL} renderer should return the correct URL if the person
        has no mugshot.
        """
        store = Store()
        organizer = Organizer(store=store)
        installOn(organizer, store)
        person = Person(store=store, organizer=organizer)
        # Same URL either way -- presumably the mugshot resource serves a
        # placeholder when no Mugshot item exists (TODO confirm).
        mugshotURL = renderer.get(
            PersonSummaryView(person),
            'mugshotURL',
            None)
        self.assertEqual(
            mugshotURL(None, None),
            organizer.linkToPerson(person) + '/mugshot/smaller')
    def test_personName(self):
        """
        The I{personName} renderer should return the display name of the
        wrapped person.
        """
        name = u'A Person Name'
        personName = renderer.get(
            PersonSummaryView(Person(store=Store(), name=name)),
            'personName',
            None)
        self.assertEqual(personName(None, None), name)
    def test_vipStatus(self):
        """
        The I{vipStatus} renderer should return its tag if the wrapped person
        is a VIP.
        """
        vipStatus = renderer.get(
            PersonSummaryView(Person(store=Store(), vip=True)),
            'vipStatus',
            None)
        tag = object()
        self.assertIdentical(vipStatus(None, tag), tag)
    def test_vipStatusNoVip(self):
        """
        The I{vipStatus} renderer should return the empty string if the
        wrapped person is not a VIP.
        """
        vipStatus = renderer.get(
            PersonSummaryView(Person(store=Store(), vip=False)),
            'vipStatus',
            None)
        self.assertEqual(vipStatus(None, None), '')
class EditPersonViewTests(unittest.TestCase):
    """
    Tests for L{EditPersonView}.
    """
    def setUp(self):
        """
        Create an L{EditPersonView} wrapped around a stub person and stub organizer.
        """
        self.contactType = StubContactType((), None, None)
        self.contactParameter = ListChangeParameter(
            u'blah', [], [], modelObjects=[])
        self.person = StubPerson(None)
        # Organizer stub supplies one contact type with one editorial
        # parameter for the person under test.
        self.person.organizer = self.organizer = StubOrganizer(
            contactTypes=[self.contactType],
            contactEditorialParameters={self.person: [
                (self.contactType, self.contactParameter)]})
        self.view = EditPersonView(self.person)
    def test_editContactItems(self):
        """
        L{EditPersonView.editContactItems} should take a dictionary mapping
        parameter names to values and update its person's contact information
        in a transaction.
        """
        transactions = []
        transaction = record('function args kwargs')
        # Capture transact() calls instead of running them, so the deferral
        # of the edit can be observed.
        class StubStore(object):
            def transact(self, f, *a, **kw):
                transactions.append(transaction(f, a, kw))
        self.person.store = StubStore()
        contactType = StubContactType((), None, None)
        self.view.contactTypes = {'contactTypeName': contactType}
        # NOTE(review): MODEL_OBJECT is never used below -- possibly
        # vestigial.
        MODEL_OBJECT = object()
        # Submit the form
        submission = object()
        self.view.editContactItems(u'nick', contactTypeName=submission)
        # A transaction should happen, and nothing should change until it's
        # run.
        self.assertEqual(len(transactions), 1)
        self.assertEqual(self.person.name, StubPerson.name)
        self.assertEqual(contactType.editedContacts, [])
        # Okay run it.
        transactions[0].function(
            *transactions[0].args, **transactions[0].kwargs)
        self.assertEqual(
            self.person.organizer.editedPeople,
            [(self.person, u'nick', [(contactType, submission)])])
    def test_editorialContactForms(self):
        """
        L{EditPersonView.editorialContactForms} should return an instance of
        L{EditorialContactForms} for the wrapped L{Person} as a child of the
        tag it is passed.
        """
        editorialContactForms = renderer.get(
            self.view, 'editorialContactForms')
        tag = div()
        forms = editorialContactForms(None, tag)
        self.assertEqual(forms.tagName, 'div')
        self.assertEqual(forms.attributes, {})
        self.assertEqual(len(forms.children), 1)
        form = forms.children[0]
        self.assertTrue(isinstance(form, LiveForm))
        self.assertEqual(form.callable, self.view.editContactItems)
        # parameters[0] is the nickname parameter; the rest come from the
        # contact types.
        self.assertEqual(form.parameters[1:], [self.contactParameter])
        self.assertIdentical(form.fragmentParent, self.view)
        self.assertEqual(
            self.view.contactTypes[form.parameters[1].name],
            self.contactType)
    def test_mugshotFormURL(self):
        """
        The I{mugshotFormURL} renderer of L{EditPersonView} should return the
        correct URL.
        """
        mugshotFormURLRenderer = renderer.get(
            self.view, 'mugshotFormURL')
        self.assertEqual(
            mugshotFormURLRenderer(None, None),
            '/person/Alice/mugshotUploadForm')
    def test_renderable(self):
        """
        L{EditPersonView} should be renderable in the typical manner.
        """
        # XXX I have no hope of asserting anything meaningful about the return
        # value of renderLiveFragment. However, even calling it at all pointed
        # out that: there was no docFactory; the fragmentName didn't reference
        # an extant template; the LiveForm had no fragment parent (for which I
        # also updated test_editorialContactForms to do a direct
        # assertion). -exarkun
        user = emptyMantissaUserStore()
        installOn(PrivateApplication(store=user), user)
        organizer = Organizer(store=user)
        installOn(organizer, user)
        person = organizer.createPerson(u'Alice')
        markup = renderLiveFragment(EditPersonView(person))
        self.assertIn(self.view.jsClass, markup)
    def test_makeEditorialLiveForms(self):
        """
        L{EditPersonView.makeEditorialLiveForms} should make a single liveform
        with the correct parameters if no contact types specify custom edit
        forms.
        """
        liveForms = self.view.makeEditorialLiveForms()
        self.assertEqual(len(liveForms), 1)
        liveForm = liveForms[0]
        self.assertEqual(len(liveForm.parameters), 2)
        # First parameter edits the nickname, defaulting to the current name.
        nameParam = liveForm.parameters[0]
        self.assertEqual(nameParam.name, 'nickname')
        self.assertEqual(nameParam.default, self.person.name)
        self.assertEqual(nameParam.type, TEXT_INPUT)
        contactParam = liveForm.parameters[1]
        self.assertIdentical(contactParam, self.contactParameter)
    def test_makeEditorialLiveFormsCustom(self):
        """
        Contact types with custom forms should have their forms included in
        the result of L{EditPersonView.makeEditorialLiveForms}.
        """
        theEditorialForm = LiveForm(lambda: None, ())
        self.contactType.editorialForm = theEditorialForm
        liveForms = self.view.makeEditorialLiveForms()
        self.assertEqual(len(liveForms), 2)
        liveForm = liveForms[1]
        self.assertIdentical(liveForm, theEditorialForm)
        self.assertEqual(self.contactType.editedContacts, [self.person])
    def test_makeEditorialLiveFormsNoMethod(self):
        """
        L{EditPersonView.makeEditorialLiveForms} should work with contact
        types which don't define a C{getEditFormForPerson}.
        """
        self.contactType.getEditFormForPerson = None
        (form,) = self.view.makeEditorialLiveForms()
        self.assertIdentical(
            form.parameters[1], self.contactParameter)
class StoreOwnerPersonTestCase(unittest.TestCase):
    """
    Tests for L{Organizer._makeStoreOwnerPerson} and related functionality.
    """
    def test_noStore(self):
        """
        L{Organizer.storeOwnerPerson} should be C{None} if the L{Organizer}
        doesn't live in a store.
        """
        self.assertIdentical(Organizer().storeOwnerPerson, None)
    def test_emptyStore(self):
        """
        Test that when an L{Organizer} is inserted into an empty store,
        L{Organizer.storeOwnerPerson} is set to a L{Person} with an empty
        string for a name.
        """
        store = Store()
        organizer = Organizer(store=store)
        self.failUnless(organizer.storeOwnerPerson)
        self.assertIdentical(organizer.storeOwnerPerson.organizer, organizer)
        self.assertEqual(organizer.storeOwnerPerson.name, u'')
    def test_differentStoreOwner(self):
        """
        Test that when an L{Organizer} is passed a C{storeOwnerPerson}
        explicitly, it does not create any additional L{Person} items.
        """
        store = Store()
        person = Person(store=store)
        organizer = Organizer(store=store, storeOwnerPerson=person)
        # findUnique raises if more than one Person exists, so this also
        # proves no extra Person was created.
        self.assertIdentical(store.findUnique(Person), person)
        self.assertIdentical(organizer.storeOwnerPerson, person)
    def test_storeOwnerDeletion(self):
        """
        Verify that we fail if we attempt to delete
        L{Organizer.storeOwnerPerson}.
        """
        store = Store()
        organizer = Organizer(store=store)
        self.assertRaises(
            DeletionDisallowed, organizer.storeOwnerPerson.deleteFromStore)
    def test_personNameFromUserInfo(self):
        """
        The L{Person} created to be the store owner by L{Organizer} should have
        its I{name} attribute set to a string computed from the L{UserInfo}
        item.
        """
        name = u'Joe Rogers'
        store = Store()
        UserInfo(store=store, realName=name)
        organizer = Organizer(store=store)
        self.assertEqual(organizer.storeOwnerPerson.name, name)
    def test_personEmailFromUserInfo(self):
        """
        The L{Person} created to be the store owner by L{Organizer} should have
        an L{EmailAddress} item created with an address computed from the
        available 'email' login methods.
        (In the course of doing so, make sure that it creates them correctly
        and notifies the organizer plugins of the L{EmailAddress} item's
        existence.)
        """
        siteStore = Store()
        ls = LoginSystem(store=siteStore)
        # It should NOT consider the login method created implicitly as a
        # result of the signup process. Too bad that actually defaults to the
        # 'email' protocol!
        acct = ls.addAccount(u'jim.bean',
                             u'service.example.com',
                             u'nevermind',
                             internal=True)
        userStore = acct.avatars.open()
        # An explicit, external 'email' login method: this is the one the
        # Organizer should derive the EmailAddress from.
        acct.addLoginMethod(localpart=u'jim',
                            domain=u'bean.example.com',
                            protocol=u'email',
                            verified=False,
                            internal=False)
        stub = StubOrganizerPlugin(store=userStore)
        # This is _slightly_ unrealistic for real-world usage, because
        # generally L{IOrganizerPlugin} providers will also just happen to
        # depend on the organizer (and therefore won't get notified of this
        # first item). However, nothing says they *need* to depend on it, and
        # if they don't, the contact items should be created the proper,
        # suggested way.
        userStore.powerUp(stub, IOrganizerPlugin)
        organizer = Organizer(store=userStore)
        person = organizer.storeOwnerPerson
        self.assertEqual(list(person.getEmailAddresses()),
                         [u'jim@bean.example.com'])
        self.assertEqual(stub.createdPeople, [organizer.storeOwnerPerson])
        self.assertEqual(stub.createdContactItems,
                         [userStore.findUnique(EmailAddress)])
class MugshotURLColumnTestCase(unittest.TestCase):
    """
    Tests for L{MugshotURLColumn}.
    """
    def test_interface(self):
        """
        L{MugshotURLColumn} should provide L{IColumn}.
        """
        # Adapting with a default of None: a non-None result means the
        # interface is provided.
        self.assertNotIdentical(
            IColumn(MugshotURLColumn(None, None), None),
            None)
    def test_extractValue(self):
        """
        L{MugshotURLColumn.extractValue} should return the correct URL.
        """
        organizer = StubOrganizer()
        person = Person(name=u'test_extractValue')
        self.assertEqual(
            MugshotURLColumn(organizer, None).extractValue(None, person),
            organizer.linkToPerson(person) + u'/mugshot/smaller')
    def test_sortAttribute(self):
        """
        L{MugshotURLColumn.sortAttribute} should return C{None}.
        """
        self.assertIdentical(
            MugshotURLColumn(None, None).sortAttribute(), None)
    def test_getType(self):
        """
        L{MugshotURLColumn.getType} should return C{text}.
        """
        self.assertEqual(MugshotURLColumn(None, None).getType(), 'text')
    def test_toComparableValue(self):
        """
        L{MugshotURLColumn.toComparableValue} should throw
        L{NotImplementedError}.
        """
        self.assertRaises(
            NotImplementedError,
            MugshotURLColumn(None, None).toComparableValue,
            u'/person/xyz/mugshot/smaller')
class ContactInfoOrganizerPluginTestCase(unittest.TestCase):
    """
    Tests for L{ContactInfoOrganizerPlugin}.
    """
    def test_name(self):
        """
        L{ContactInfoOrganizerPlugin.name} should be set.
        """
        self.assertEqual(ContactInfoOrganizerPlugin.name, u'Contact')
    def test_personalize(self):
        """
        L{ContactInfoOrganizerPlugin.personalize} should return a
        L{ReadOnlyContactInfoView}.
        """
        subject = Person()
        view = ContactInfoOrganizerPlugin().personalize(subject)
        self.assertTrue(isinstance(view, ReadOnlyContactInfoView))
        self.assertIdentical(view.person, subject)
    def test_getContactTypes(self):
        """
        L{ContactInfoOrganizerPlugin} shouldn't supply any contact types.
        """
        self.assertEqual(
            ContactInfoOrganizerPlugin().getContactTypes(), ())
    def test_getPeopleFilters(self):
        """
        L{ContactInfoOrganizerPlugin} shouldn't supply any people filters.
        """
        self.assertEqual(
            ContactInfoOrganizerPlugin().getPeopleFilters(), ())
class PersonPluginViewTestCase(unittest.TestCase):
    """
    Tests for L{PersonPluginView}.
    """
    def _doGetPluginWidgetTest(self, personalization):
        """
        Set up a L{PersonPluginView} and try to request the given personalized
        view from it using I{getPluginWidget}, returning the result.
        """
        person = Person()
        thePlugin = StubOrganizerPlugin(
            store=Store(), name=u'test_getPluginWidget2')
        thePlugin.personalization = personalization
        # Two plugins, so the lookup must pick the one matching the
        # requested name.
        plugins = [StubOrganizerPlugin(name=u'test_getPluginWidget1'),
                   thePlugin]
        view = PersonPluginView(plugins, person)
        getPluginWidget = expose.get(view, 'getPluginWidget')
        result = getPluginWidget('test_getPluginWidget2')
        self.assertEqual(thePlugin.personalizedPeople, [person])
        return result
    def test_getPluginWidget(self):
        """
        L{PersonPluginView}'s I{getPluginWidget} remote method should return
        the appropriate view.
        """
        personalization = LiveElement()
        self.assertIdentical(
            self._doGetPluginWidgetTest(personalization),
            personalization)
    def test_getPluginWidgetLegacy(self):
        """
        L{PersonPluginView}'s I{getPluginWidget} remote method should wrap the
        view with L{_ElementWrapper} if it's not a L{LiveElement}.
        """
        personalization = Element()
        result = self._doGetPluginWidgetTest(personalization)
        self.assertTrue(isinstance(result, _ElementWrapper))
        self.assertIdentical(result.wrapped, personalization)
    def test_pluginTabbbedPane(self):
        """
        L{PersonPluginView}'s I{pluginTabbedPane} renderer should return a
        correctly-configured L{tabbedPane.TabbedPaneFragment}.
        """
        store = Store()
        pluginNames = [
            u'test_pluginTabbbedPane1', u'test_pluginTabbbedPane2']
        view = PersonPluginView(
            [StubOrganizerPlugin(
                store=store, name=name) for name in pluginNames],
            Person())
        view.plugins[0].personalization = personalization = LiveElement()
        pluginTabbedPaneRenderer = renderer.get(
            view, 'pluginTabbedPane', None)
        tag = div[div(pattern='pane-body',
                      secret='test_pluginTabbbedPane')]
        frag = pluginTabbedPaneRenderer(None, tag)
        self.assertTrue(isinstance(frag, tabbedPane.TabbedPaneFragment))
        self.assertEqual(frag.jsClass, u'Mantissa.People.PluginTabbedPane')
        (tabNames, paneBodies) = zip(*frag.pages)
        self.assertEqual(list(tabNames), pluginNames)
        # First plugin has a personalization: used directly.  Second does
        # not: its pane body comes from the I{pane-body} pattern in the tag.
        self.assertIdentical(paneBodies[0], personalization)
        self.assertEqual(
            paneBodies[1].attributes['secret'],
            'test_pluginTabbbedPane')
class ElementWrapperTestCase(unittest.TestCase):
    """
    Tests for L{_ElementWrapper}.
    """
    def test_element(self):
        """
        L{_ElementWrapper}'s I{element} renderer should render the wrapped
        element.
        """
        wrapped = Element()
        wrapper = _ElementWrapper(wrapped)
        elementRendererMethod = renderer.get(wrapper, 'element', None)
        self.assertIdentical(elementRendererMethod(None, None), wrapped)
class SimpleReadOnlyViewTestCase(unittest.TestCase):
    """
    Tests for L{SimpleReadOnlyView}.
    """
    def test_attributeName(self):
        """
        L{SimpleReadOnlyView}'s C{attributeName} renderer should return the
        correct value.
        """
        view = SimpleReadOnlyView(Person.name, Person())
        attributeNameRenderer = renderer.get(view, 'attributeName')
        self.assertEqual(
            attributeNameRenderer(None, None),
            nameToLabel('Person'))
    def test_attributeValue(self):
        """
        L{SimpleReadOnlyView}'s C{attributeValue} renderer should return the
        correct value.
        """
        name = u'test_attributeValue'
        view = SimpleReadOnlyView(Person.name, Person(name=name))
        attributeValueRenderer = renderer.get(view, 'attributeValue')
        self.assertEqual(attributeValueRenderer(None, None), name)
| twisted/mantissa | xmantissa/test/test_people.py | Python | mit | 124,903 |
#!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2017 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import sys
import cgi
import codecs
import cPickle
import decimal
import os
import traceback
from datetime import datetime
from distutils.util import strtobool
from optparse import OptionParser
from time import mktime
from voltdbclient import VoltColumn, VoltTable, FastSerializer
# Module-level quiet flag: while True (the default), safe_print() emits
# nothing.  Presumably flipped by a command-line option elsewhere in this
# script -- TODO confirm.
__quiet = True
def highlight(s, flag):
    """
    Return C{s} (coerced to a string if necessary), wrapped in a red HTML
    span when C{flag} is true; otherwise return it unmodified.
    """
    if not isinstance(s, basestring):
        s = str(s)
    # Conditional expression instead of the old "flag and X or s" idiom:
    # the and/or trick silently returns the wrong branch whenever the
    # "true" value is falsy, so avoid it on principle.
    return "<span style=\"color: red\">%s</span>" % (s,) if flag else s
def as_html_unicode_string(s):
    """
    Format a single result value for embedding in the HTML report.

    Lists are formatted recursively as "[a, b, ...]"; strings are
    single-quoted with non-ASCII characters replaced by XML character
    references; anything else is rendered with str().
    """
    if isinstance(s, list):
        return '[' + ", ".join(as_html_unicode_string(x) for x in s) + ']'
    elif isinstance(s, basestring):
        return "'" + s.encode('ascii', 'xmlcharrefreplace') + "'"
    else:
        return str(s)
def generate_table_str(res, key):
    """
    Render the result table stored under C{key} ("jni" or "cmp") in C{res}
    as an HTML string, highlighting whatever is_different() flagged via the
    "highlight" / "highlight_<key>" annotations.
    """
    source = res[key].get("Result")
    if not source:
        return ""
    highlights = res.get("highlight")
    if isinstance(highlights, list):
        # A list of row indices: the same rows are flagged on both sides.
        key_highlights = highlights
    else:
        # Otherwise, per-side row highlights (set when row counts differ).
        key_highlights = res.get("highlight_" + key)
    result = []
    result.append(highlight("column count: %d" % (len(source.columns)), "Columns" == highlights))
    result.append(highlight("row count: %d" % (len(source.tuples)), "Tuples" == highlights))
    result.append("cols: " + ", ".join(map(lambda x: str(x), source.columns)))
    result.append("rows -")
    if isinstance(key_highlights, list):
        # Highlight exactly the flagged rows.
        for j in xrange(len(source.tuples)):
            result.append(highlight(as_html_unicode_string(source.tuples[j]), j in key_highlights))
    else:
        result.extend(map(lambda x: as_html_unicode_string(x), source.tuples))
    tablestr = "<br />".join(result)
    return tablestr
def generate_modified_query(cmpdb, sql, modified_sql):
    """
    Return an HTML snippet showing the modified version of C{sql} that was
    actually sent to the comparison database C{cmpdb}, or an empty string
    when no modified version is recorded in C{modified_sql}.
    """
    # The modified-SQL table is keyed on the statement without its
    # trailing semicolon.
    replacement = modified_sql.get(sql.rstrip(';'))
    if not replacement:
        return ''
    return ('<p>Modified SQL query, as sent to ' + str(cmpdb) +
            ':</p><h2>' + str(replacement) + '</h2>')
def generate_detail(name, item, output_dir, cmpdb, modified_sql):
    """
    Write an HTML detail page for a single statement C{item}, showing the
    VoltDB ("jni") and comparison-database ("cmp") responses side by side,
    and return the page's file name.  Returns None (writing nothing) when
    C{output_dir} is None.
    """
    if output_dir == None:
        return
    # Three side-by-side rows: status, info text, and the rendered result
    # tables.
    details = """
<html>
<head>
<title>Detail of "%s"</title>
<style>
td {width: 50%%}
</style>
</head>
<body>
<h2>%s</h2>
%s
<table cellpadding=3 cellspacing=1 border=1>
<tr>
<th>VoltDB Response</th>
<th>%s Response</th>
</tr>
<tr>
<td>%s</td>
<td>%s</td>
</tr>
<tr>
<td>%s</td>
<td>%s</td>
</tr>
<tr>
<td>%s</td>
<td>%s</td>
</tr>
</table>
</body>
</html>
""" % (cgi.escape(item["SQL"]).encode('ascii', 'xmlcharrefreplace'),
       cgi.escape(item["SQL"]).encode('ascii', 'xmlcharrefreplace'),
       generate_modified_query(cmpdb, cgi.escape(item["SQL"]).encode('ascii', 'xmlcharrefreplace'), modified_sql),
       cmpdb,
       highlight(item["jni"]["Status"], "Status" == item.get("highlight")),
       highlight(item["cmp"]["Status"], "Status" == item.get("highlight")),
       item["jni"].get("Info") or "",
       item["cmp"].get("Info") or "",
       generate_table_str(item, "jni"),
       generate_table_str(item, "cmp") )
    # One page per statement, named after the statement's id.
    filename = "%s.html" % (item["id"])
    fd = open(os.path.join(output_dir, filename), "w")
    fd.write(details.encode("utf-8"))
    fd.close()
    return filename
def safe_print(s):
    """
    Print C{s} unless the module-level quiet flag is set (it is by default).
    """
    if not __quiet:
        print s
def print_section(name, mismatches, output_dir, cmpdb, modified_sql):
    """
    Build and return the HTML for one report section titled C{name}, with
    one summary table row per statement in C{mismatches}.  As a side effect,
    writes a per-statement detail page (via generate_detail()) and echoes
    each SQL statement through safe_print().
    """
    result = """
<h2>%s: %d</h2>
<table cellpadding=3 cellspacing=1 border=1>
<tr>
<th>ID</th>
<th>SQL Statement</th>
<th>VoltDB Status</th>
<th>%s Status</th>
</tr>
""" % (name, len(mismatches), cmpdb)
    temp = []
    for i in mismatches:
        safe_print(i["SQL"])
        detail_page = generate_detail(name, i, output_dir, cmpdb, modified_sql)
        # Negative status codes indicate backend errors; label them so they
        # stand out in the summary table.
        jniStatus = i["jni"]["Status"]
        if jniStatus < 0:
            jniStatus = "Error: " + `jniStatus`
        cmpdbStatus = i["cmp"]["Status"]
        if cmpdbStatus < 0:
            cmpdbStatus = "Error: " + `cmpdbStatus`
        temp.append("""
<tr>
<td>%s</td>
<td><a href="%s">%s</a></td>
<td>%s</td>
<td>%s</td>
</tr>""" % (i["id"],
            detail_page,
            cgi.escape(i["SQL"]).encode('ascii', 'xmlcharrefreplace'),
            jniStatus,
            cmpdbStatus))
    result += ''.join(temp)
    result += """
</table>
"""
    return result
def time_diff_close_enough(time1, time2, within_minutes):
    """Test whether two datetimes (TIMESTAMP's) are:
    1. within a specified number of minutes of each other; and
    2. within a specified number (the same number) of minutes of right now.
    If both are true, then they are deemed to be "close enough", on the
    assumption that they were each set to NOW() or CURRENT_TIMESTAMP(), and
    the difference is because VoltDB and its comparison database (HSQL or
    PostgreSQL) called that function at slightly different times.
    """
    def _minutes_apart(a, b):
        # Absolute difference between two datetimes, in minutes.
        return abs((a - b).total_seconds()) / 60
    return (_minutes_apart(time1, time2) <= within_minutes and
            _minutes_apart(time2, datetime.now()) <= within_minutes)
def is_different(x, cntonly, within_minutes):
    """Notes the attributes that are different. Since the whole table will be
    printed out as a single string.
    the first line is column count,
    the second line is row count,
    the third line is column names and types,
    followed by rows.

    Returns True when the VoltDB ("jni") and comparison-database ("cmp")
    responses in C{x} differ.  As a side effect, annotates C{x} with
    "highlight" / "highlight_jni" / "highlight_cmp" keys describing what
    differed, for use by the HTML rendering code above.
    """
    jni = x["jni"]
    cmp = x["cmp"]
    # JNI returns a variety of negative error result values that we
    # can't easily match with the HSqlDB backend. Reject only pairs
    # of status values where one of them wasn't an error.
    if jni["Status"] != cmp["Status"]:
        if int(jni["Status"]) > 0 or int(cmp["Status"]) > 0:
            x["highlight"] = "Status"
            # print "DEBUG is_different -- one error (0 or less)"
            return True
        # Both sides errored (with different codes): treat as a match.
        # print "DEBUG is_different -- just different errors (0 or less)"
        return False;
    if int(jni["Status"]) <= 0:
        # print "DEBUG is_different -- same error (0 or less)"
        return False;
    # print "DEBUG is_different -- same non-error Status? : ", jni["Status"]
    jniResult = jni["Result"]
    cmpResult = cmp["Result"]
    if (not jniResult) or (not cmpResult):
        x["highlight"] = "Result"
        # print "DEBUG is_different -- lacked expected result(s)"
        return True
    # Disable column type checking for now because VoltDB and HSqlDB don't
    # promote int types in the same way.
    # if jniResult.columns != cmpResult.columns:
    #     x["highlight"] = "Columns"
    #     return True
    jniColumns = jniResult.columns
    cmpColumns = cmpResult.columns
    nColumns = len(jniColumns)
    if nColumns != len(cmpColumns):
        x["highlight"] = "Columns"
        return True;
    # print "DEBUG is_different -- got same column lengths? ", nColumns
    jniTuples = jniResult.tuples
    cmpTuples = cmpResult.tuples
    if len(jniTuples) != len(cmpTuples):
        x["highlight"] = "Tuples"
        x["highlight_jni"] = []
        x["highlight_cmp"] = []
        # print "DEBUG is_different -- got different numbers of tuples?"
        # Flag, on each side, the rows with no counterpart on the other
        # side.
        for ii in xrange(len(jniTuples)):
            if jniTuples[ii] not in cmpTuples:
                x["highlight_jni"].append(ii)
        for ii in xrange(len(cmpTuples)):
            if cmpTuples[ii] not in jniTuples:
                x["highlight_cmp"].append(ii)
        return True
    # print "DEBUG is_different -- got same numbers of tuples?", len(jniTuples), "namely ", jniTuples
    if cntonly:
        # print "DEBUG is_different -- count only got FALSE return"
        return False # The results are close enough to pass a count-only check
    for ii in xrange(len(jniTuples)):
        if jniTuples[ii] == cmpTuples[ii]:
            continue
        # Work around any false value differences caused by default type differences.
        # These differences are "properly" ignored by the
        # Decimal/float != implementation post-python 2.6.
        column_problem = False # hope for the best.
        for jj in xrange(nColumns):
            if jniTuples[ii][jj] == cmpTuples[ii][jj]:
                continue
            # TIMESTAMP columns set from NOW()/CURRENT_TIMESTAMP may
            # legitimately differ slightly between backends -- tolerate
            # them when a tolerance was requested.
            if (within_minutes and jniColumns[jj].type == FastSerializer.VOLTTYPE_TIMESTAMP and
                time_diff_close_enough(jniTuples[ii][jj], cmpTuples[ii][jj], within_minutes)):
                continue
            if (jniColumns[jj].type == FastSerializer.VOLTTYPE_FLOAT and
                cmpColumns[jj].type == FastSerializer.VOLTTYPE_DECIMAL):
                if decimal.Decimal(str(jniTuples[ii][jj])) == cmpTuples[ii][jj]:
                    ### print "INFO is_different -- Needed float-to-decimal help"
                    continue
                print "INFO is_different -- float-to-decimal conversion did not help convert between values:" , \
                    "jni:(" , jniTuples[ii][jj] , ") and cmp:(" , cmpTuples[ii][jj] , ")."
                print "INFO is_different -- float-to-decimal conversion stages:" , \
                    " from jniTuples[ii][jj] of type:" , type(jniTuples[ii][jj]) , \
                    " to cmpTuples[ii][jj] of type:" , type(cmpTuples[ii][jj]) , \
                    " via str(jniTuples[ii][jj]):" , str(jniTuples[ii][jj]) , " of type: " , type(str(jniTuples[ii][jj])) , \
                    " via decimal.Decimal(str(jniTuples[ii][jj])):" , decimal.Decimal(str(jniTuples[ii][jj])) , " of type: " , type(decimal.Decimal(str(jniTuples[ii][jj])))
            # Reaching here means a genuine, untolerated value difference.
            column_problem = True
        if column_problem:
            # print "DEBUG is_different -- appending difference highlight? ", ii
            if not x.get("highlight"):
                x["highlight"] = []
            x["highlight"].append(ii)
    if x.get("highlight"):
        return True
    # print "DEBUG is_different -- got FALSE return"
    return False
def usage(prog_name):
    """
    Print command-line usage help for this report generator.
    """
    print """
Usage:
\t%s report [-o output_dir] [-f true/false] [-a]
Generates HTML reports based on the given report files. The generated reports
contain the SQL statements which caused different responses on both backends.
""" % (prog_name)
def generate_html_reports(suite, seed, statements_path, cmpdb_path, jni_path,
                          output_dir, report_invalid, report_all, extra_stats='',
                          cmpdb='HSqlDB', modified_sql_path=None,
                          max_mismatches=0, within_minutes=0, cntonly=False):
    """Compare pickled per-statement results from VoltDB ("jni") and the
    comparison backend ("cmpdb") and write an HTML report to output_dir.

    The three *_path files are streams of pickled dicts, one per generated
    statement.  Returns a dict with the mismatch count ('mis') and the HTML
    summary row ('keyStats').  Raises IOError when the result files are
    truncated relative to the generated statements, or when the optional
    modified-SQL file cannot be read.
    """
    if output_dir != None and not os.path.exists(output_dir):
        os.makedirs(output_dir)
    statements_file = open(statements_path, "rb")
    cmpdb_file = open(cmpdb_path, "rb")
    jni_file = open(jni_path, "rb")
    if modified_sql_path:
        modified_sql_file = codecs.open(modified_sql_path, encoding='utf-8')
    else:
        modified_sql_file = None
    modified_sql = {}
    failures = 0
    count = 0
    mismatches = []
    crashed = []
    voltdb_npes = []
    cmpdb_npes = []
    invalid = []
    all_results = []
    try:
        # Walk the statement stream; EOF on the statements file ends the loop,
        # EOF on a result file means that backend crashed part-way through.
        while True:
            try:
                statement = cPickle.load(statements_file)
                # print "DEBUG loaded statement ", statement
            except EOFError:
                break
            notFound = False
            try:
                jni = cPickle.load(jni_file)
            except EOFError as e:
                # Synthesize a sentinel result (Status -99) for a missing
                # VoltDB answer so the report can still be generated.
                notFound = True
                jni = {'Status': -99, 'Exception': 'None', 'Result': None,
                       'Info': '<p style="color:red">RESULT NOT FOUND! Probably due to a VoltDB crash!</p>'}
            try:
                cdb = cPickle.load(cmpdb_file)
            except EOFError as e:
                # Sentinel result (Status -98) for a missing comparison-backend answer.
                notFound = True
                cdb = {'Status': -98, 'Exception': 'None', 'Result': None,
                       'Info': '<p style="color:red">RESULT NOT FOUND! Probably due to a ' + cmpdb + ' backend crash!</p>'}
            count += 1
            if int(jni["Status"]) != 1:
                failures += 1
                if report_invalid:
                    invalid.append(statement)
            statement["jni"] = jni
            statement["cmp"] = cdb
            # Categorize: crashes first, then genuine result mismatches
            # (also noting NPEs on either backend), and optionally keep everything.
            if notFound:
                crashed.append(statement)
            elif is_different(statement, cntonly, within_minutes):
                mismatches.append(statement)
                if ('NullPointerException' in str(jni)):
                    voltdb_npes.append(statement)
                if ('NullPointerException' in str(cdb)):
                    cmpdb_npes.append(statement)
            if report_all:
                all_results.append(statement)
    except EOFError as e:
        raise IOError("Not enough results for generated statements: %s" % str(e))
    statements_file.close()
    cmpdb_file.close()
    jni_file.close()
    if modified_sql_file:
        # The modified-SQL file holds "original SQL:"/"modified SQL:" line pairs;
        # build an HTML-escaped original -> modified mapping from them.
        try:
            while True:
                try:
                    orig_sql = modified_sql_file.readline().rstrip('\n').replace('original SQL: ', '')
                    mod_sql = modified_sql_file.readline().rstrip('\n').replace('modified SQL: ', '')
                    if orig_sql and mod_sql:
                        modified_sql[cgi.escape(orig_sql).encode('ascii', 'xmlcharrefreplace')] \
                            = cgi.escape(mod_sql).encode('ascii', 'xmlcharrefreplace')
                    else:
                        break
                except EOFError as e:
                    break
            modified_sql_file.close()
        except Exception as e:
            traceback.print_exc()
            raise IOError("Unable to read modified SQL file: %s\n %s" % (modified_sql_path, str(e)))
    topLines = getTopSummaryLines(cmpdb, False)
    currentTime = datetime.now().strftime("%A, %B %d, %I:%M:%S %p")
    keyStats = createSummaryInHTML(count, failures, len(mismatches), len(voltdb_npes),
                                   len(cmpdb_npes), extra_stats, seed, max_mismatches)
    report = """
<html>
<head>
<title>SQL Coverage Test Report</title>
<style>
h2 {text-transform: uppercase}
</style>
</head>
<body>
<h2>Test Suite Name: %s</h2>
<h4>Random Seed: <b>%d</b></h4>
<p>This report was generated on <b>%s</b></p>
<table border=1>
%s
""" % (suite, seed, currentTime, topLines)
    report += """
<tr>%s</tr>
</table>
""" % (keyStats)
    # Sort key: each statement dict carries a numeric "id".
    def key(x):
        return int(x["id"])
    # NOTE(review): sorted() returns a NEW list and its result is discarded in
    # each branch below, so these lists are never actually sorted;
    # list.sort(...) was presumably intended — confirm before relying on order.
    if(len(mismatches) > 0):
        sorted(mismatches, cmp=cmp, key=key)
        report += print_section("Mismatched Statements", mismatches, output_dir, cmpdb, modified_sql)
    if(len(crashed) > 0):
        sorted(crashed, cmp=cmp, key=key)
        report += print_section("Statements Missing Results, due to a Crash<br>(the first one probably caused the crash)", crashed, output_dir, cmpdb, modified_sql)
    if(len(voltdb_npes) > 0):
        sorted(voltdb_npes, cmp=cmp, key=key)
        report += print_section("Statements That Cause a NullPointerException (NPE) in VoltDB", voltdb_npes, output_dir, cmpdb, modified_sql)
    if(len(cmpdb_npes) > 0):
        sorted(cmpdb_npes, cmp=cmp, key=key)
        report += print_section("Statements That Cause a NullPointerException (NPE) in " + cmpdb, cmpdb_npes, output_dir, cmpdb, modified_sql)
    if report_invalid and (len(invalid) > 0):
        report += print_section("Invalid Statements", invalid, output_dir, cmpdb, modified_sql)
    if report_all:
        report += print_section("Total Statements", all_results, output_dir, cmpdb, modified_sql)
    report += """
</body>
</html>
"""
    if output_dir != None:
        summary = open(os.path.join(output_dir, "index.html"), "w")
        summary.write(report.encode("utf-8"))
        summary.close()
    results = {}
    results["mis"] = len(mismatches)
    results["keyStats"] = keyStats
    return results
def getTopSummaryLines(cmpdb, includeAll=True):
    """Build the two-row <tr> table header used atop the summary tables.

    ``cmpdb`` names the comparison backend (e.g. 'HSqlDB'); its first letter
    labels that backend's NPE column.  When ``includeAll`` is True, the
    per-suite name column and the 'Comparing'/'Total' time columns are
    included as well (the per-suite report omits them).
    """
    parts = ["<tr>"]
    if includeAll:
        parts.append("<td rowspan=2 align=center>Test Suite</td>")
    parts.append("""
<td colspan=5 align=center>SQL Statements</td>
<td colspan=5 align=center>Test Failures</td>
<td colspan=5 align=center>SQL Statements per Pattern</td>
<td colspan=5 align=center>Time (min:sec)</td>
</tr><tr>
<td>Valid</td><td>Valid %%</td>
<td>Invalid</td><td>Invalid %%</td>
<td>Total</td>
<td>Mismatched</td><td>Mismatched %%</td>
<td>NPE's(V)</td><td>NPE's(%s)</td><td>Crashes</td>
<td>Minimum</td><td>Maximum</td><td># Inserts</td><td># Patterns</td><td># Unresolved</td>
<td>Generating SQL</td><td>VoltDB</td><td>%s</td>
""" % (cmpdb[:1], cmpdb))
    if includeAll:
        parts.append("<td>Comparing</td><td>Total</td>")
    parts.append("</tr>")
    return "".join(parts)
def createSummaryInHTML(count, failures, misses, voltdb_npes, cmpdb_npes,
                        extra_stats, seed, max_misses=0):
    """Render one summary-table row (the cells after the suite name) as HTML.

    Cells are colored to flag problems: orange for an empty suite or
    "known"/tolerated issues, yellow for a high failure rate, red for real
    mismatches or VoltDB NPEs.  ``seed`` is accepted for signature
    compatibility but does not affect the output.
    """
    passed = count - (failures + misses)
    denominator = float(max(count, 1))  # avoid ZeroDivisionError when count == 0
    count_color = " bgcolor=#FFA500" if count < 1 else ""  # orange
    if failures == 0:
        fail_ps = "0.00%"
        fail_color = ""
    else:
        percent = (failures / denominator) * 100
        fail_ps = "{0:.2f}".format(percent) + "%"
        fail_color = " bgcolor=#FFFF00" if percent > 50 else ""  # yellow
    if misses == 0:
        cell4misCnt = "<td align=right>0</td>"
        cell4misPct = "<td align=right>0.00%</td>"
    else:
        # Red for real mismatches, orange when within the tolerated maximum.
        color = "#FFA500" if misses <= max_misses else "#FF0000"
        mis_ps = "{0:.2f}".format((misses / denominator) * 100)
        cell4misCnt = "<td align=right bgcolor=" + color + ">" + str(misses) + "</td>"
        cell4misPct = "<td align=right bgcolor=" + color + ">" + mis_ps + "%</td>"
    misRow = cell4misCnt + cell4misPct
    if voltdb_npes > 0:
        voltNpeRow = "<td align=right bgcolor=#FF0000>" + str(voltdb_npes) + "</td>"  # red
    else:
        voltNpeRow = "<td align=right>0</td>"
    if cmpdb_npes > 0:
        cmpNpeRow = "<td align=right bgcolor=#FFA500>" + str(cmpdb_npes) + "</td>"  # orange
    else:
        cmpNpeRow = "<td align=right>0</td>"
    if passed == count and passed > 0:
        passed_ps = "100.00%"
    else:
        passed_ps = "{0:.2f}".format((passed / denominator) * 100) + "%"
    stats = """
<td align=right>%d</td>
<td align=right>%s</td>
<td align=right>%d</td>
<td align=right%s>%s</td>
<td align=right%s>%d</td>
%s%s%s%s</tr>
""" % (passed, passed_ps, failures, fail_color, fail_ps, count_color, count, misRow, voltNpeRow, cmpNpeRow, extra_stats)
    return stats
def generate_summary(output_dir, statistics, cmpdb='HSqlDB'):
    """Write the cross-suite summary page (index.html) into output_dir.

    ``statistics`` maps suite names to pre-rendered HTML row fragments, plus
    the special keys "seed" (int) and "totals" (HTML fragment).
    Python 2 only: uses dict.iterkeys().
    """
    fd = open(os.path.join(output_dir, "index.html"), "w")
    topLines = getTopSummaryLines(cmpdb)
    content = """
<html>
<head>
<title>SQL Coverage Test Summary</title>
<style>
h2 {text-transform: uppercase}
</style>
</head>
<body>
<h2>SQL Coverage Test Summary Grouped By Suites:</h2>
<h3>Random Seed: %d</h3>
<table border=1>
%s
""" % (statistics["seed"], topLines)
    # One table row per suite, linking to that suite's own report directory.
    def bullets(name, stats):
        return "<tr><td><a href=\"%s/index.html\">%s</a></td>%s</tr>" % \
            (name, name, stats)
    for suiteName in sorted(statistics.iterkeys()):
        if(suiteName != "seed" and suiteName != "totals"):
            content += bullets(suiteName, statistics[suiteName])
    content += "<tr><td>Totals</td>%s</tr>\n</table>" % statistics["totals"]
    content += """
<table border=0><tr><td>Key:</td></tr>
<tr><td align=right bgcolor=#FF0000>Red</td><td>table elements indicate a test failure(s), due to a mismatch between VoltDB and %s results, a crash,
or an NPE in VoltDB (or, an <i>extremely</i> slow test suite).</td></tr>
<tr><td align=right bgcolor=#FFA500>Orange</td><td>table elements indicate a strong warning, for something that should be looked into (e.g. a pattern
that generated no SQL queries, an NPE in %s, or a <i>very</i> slow test suite), but no test failures
(or only "known" failures).</td></tr>
<tr><td align=right bgcolor=#FFFF00>Yellow</td><td>table elements indicate a mild warning, for something you might want to improve (e.g. a pattern
that generated a very large number of SQL queries, or a somewhat slow test suite).</td></tr>
<tr><td align=right bgcolor=#D3D3D3>Gray</td><td>table elements indicate data that was not computed, due to a crash.</td></tr>
<tr><td colspan=2>NPE's(V): number of NullPointerExceptions while running against VoltDB.</td></tr>
<tr><td colspan=2>NPE's(%s): number of NullPointerExceptions while running against %s (likely in VoltDB's %s backend code).</td></tr>
</table>
</body>
</html>
""" % (cmpdb, cmpdb, cmpdb[:1], cmpdb, cmpdb)
    fd.write(content)
    fd.close()
if __name__ == "__main__":
    # Minimal CLI driver used when the module is run stand-alone.
    parser = OptionParser()
    parser.add_option("-o", "--output", dest="output_dir",
                      help="The directory to put all the output HTML files.")
    parser.add_option("-f", "--flag", dest="flag",
                      help="true to print out matching statements, "
                           "false to print out mismatching statements")
    parser.add_option("-a", action="store_true", dest="all", default=False,
                      help="Whether or not to report all statements")
    (options, args) = parser.parse_args()
    if len(args) != 1:
        usage(sys.argv[0])
        exit(-1)
    is_matching = False
    fd = open(args[0], "rb")
    data = fd.read()
    fd.close()
    if options.flag != None:
        __quiet = False
        is_matching = strtobool(options.flag)
    # NOTE(review): this call does not match generate_html_reports' signature
    # (it passes raw file data and flags where seed/paths are expected) and
    # looks like dead/legacy code — verify before using this entry point.
    generate_html_reports("suite name", data, options.output_dir, options.all, is_matching)
| deerwalk/voltdb | tests/sqlcoverage/SQLCoverageReport.py | Python | agpl-3.0 | 22,887 |
# Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from tempest.api.compute import base
from tempest import exceptions
from tempest import test
class TenantUsagesNegativeTestJSON(base.BaseV2ComputeAdminTest):
    """Negative tests for the os-simple-tenant-usage admin API (JSON interface)."""
    @classmethod
    def setUpClass(cls):
        super(TenantUsagesNegativeTestJSON, cls).setUpClass()
        cls.adm_client = cls.os_adm.tenant_usages_client
        cls.client = cls.os.tenant_usages_client
        cls.identity_client = cls._get_identity_admin_client()
        # Query window: one day either side of "now".
        now = datetime.datetime.now()
        cls.start = cls._parse_strtime(now - datetime.timedelta(days=1))
        cls.end = cls._parse_strtime(now + datetime.timedelta(days=1))
    @classmethod
    def _parse_strtime(cls, at):
        # Returns the datetime formatted as an ISO-like string with microseconds.
        return at.strftime('%Y-%m-%dT%H:%M:%S.%f')
    @test.attr(type=['negative', 'gate'])
    def test_get_usage_tenant_with_empty_tenant_id(self):
        # Get usage for a specific tenant with an empty id: expect 404.
        params = {'start': self.start,
                  'end': self.end}
        self.assertRaises(exceptions.NotFound,
                          self.adm_client.get_tenant_usage,
                          '', params)
    @test.attr(type=['negative', 'gate'])
    def test_get_usage_tenant_with_invalid_date(self):
        # Get usage with start/end swapped (end before start): expect 400.
        params = {'start': self.end,
                  'end': self.start}
        self.assertRaises(exceptions.BadRequest,
                          self.adm_client.get_tenant_usage,
                          self.client.tenant_id, params)
    @test.attr(type=['negative', 'gate'])
    def test_list_usage_all_tenants_with_non_admin_user(self):
        # Listing usage for all tenants requires admin: expect 401/403.
        params = {'start': self.start,
                  'end': self.end,
                  'detailed': int(bool(True))}
        self.assertRaises(exceptions.Unauthorized,
                          self.client.list_tenant_usages, params)
class TenantUsagesNegativeTestXML(TenantUsagesNegativeTestJSON):
    # Re-run the same negative tests over the XML interface.
    _interface = 'xml'
| cloudbase/lis-tempest | tempest/api/compute/admin/test_simple_tenant_usage_negative.py | Python | apache-2.0 | 2,648 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import urllib.parse
from typing import List
from unittest import mock
import pytest
from airflow.models import DagBag, DagRun, Log, TaskInstance
from airflow.utils import dates, timezone
from airflow.utils.state import State
from airflow.utils.types import DagRunType
from airflow.www import app
from airflow.www.views import action_has_dag_edit_access
from tests.test_utils.db import clear_db_runs
from tests.test_utils.www import check_content_in_response
EXAMPLE_DAG_DEFAULT_DATE = dates.days_ago(2)
@pytest.fixture(scope="module")
def dagbag():
    """Sync the example DAGs to the DB once, then serve them from the DB."""
    DagBag(include_examples=True, read_dags_from_db=False).sync_to_db()
    return DagBag(include_examples=True, read_dags_from_db=True)
@pytest.fixture(scope="module")
def bash_dag(dagbag):
    """The example_bash_operator DAG, loaded from the DB-backed DagBag."""
    return dagbag.get_dag('example_bash_operator')
@pytest.fixture(scope="module")
def sub_dag(dagbag):
    """The example_subdag_operator DAG, loaded from the DB-backed DagBag."""
    return dagbag.get_dag('example_subdag_operator')
@pytest.fixture(scope="module")
def xcom_dag(dagbag):
    """The example_xcom DAG, loaded from the DB-backed DagBag."""
    return dagbag.get_dag('example_xcom')
@pytest.fixture(autouse=True)
def dagruns(bash_dag, sub_dag, xcom_dag):
    """Create one running scheduled DagRun per example DAG and clean up after.

    Yields a (bash_dagrun, sub_dagrun, xcom_dagrun) tuple; tests unpack it
    positionally, so the order must match the fixture arguments.  The three
    runs were previously created with three identical copy-pasted calls;
    a single loop keeps the parameters in one place.
    """
    runs = tuple(
        dag.create_dagrun(
            run_type=DagRunType.SCHEDULED,
            execution_date=EXAMPLE_DAG_DEFAULT_DATE,
            data_interval=(EXAMPLE_DAG_DEFAULT_DATE, EXAMPLE_DAG_DEFAULT_DATE),
            start_date=timezone.utcnow(),
            state=State.RUNNING,
        )
        for dag in (bash_dag, sub_dag, xcom_dag)
    )
    yield runs
    clear_db_runs()
@action_has_dag_edit_access
def some_view_action_which_requires_dag_edit_access(*args) -> bool:
    """Dummy view action used only to exercise the action_has_dag_edit_access decorator."""
    return True
def _check_last_log(session, dag_id, event, execution_date):
    """Assert a Log row matching dag_id/event/execution_date exists and that
    the most recent such entry carries non-empty extra data."""
    logs = (
        session.query(
            Log.dag_id,
            Log.task_id,
            Log.event,
            Log.execution_date,
            Log.owner,
            Log.extra,
        )
        .filter(
            Log.dag_id == dag_id,
            Log.event == event,
            Log.execution_date == execution_date,
        )
        .order_by(Log.dttm.desc())
        .limit(5)
        .all()
    )
    assert len(logs) >= 1
    assert logs[0].extra
def test_action_logging_get(session, admin_client):
    """A GET of the graph view should be recorded in the Log table."""
    url = (
        f'graph?dag_id=example_bash_operator&'
        f'execution_date={urllib.parse.quote_plus(str(EXAMPLE_DAG_DEFAULT_DATE))}'
    )
    resp = admin_client.get(url, follow_redirects=True)
    check_content_in_response('runme_1', resp)
    # In mysql backend, this commit() is needed to write down the logs
    session.commit()
    _check_last_log(
        session,
        dag_id="example_bash_operator",
        event="graph",
        execution_date=EXAMPLE_DAG_DEFAULT_DATE,
    )
def test_action_logging_post(session, admin_client):
    """A POST to the clear view should be recorded in the Log table."""
    form = dict(
        task_id="runme_1",
        dag_id="example_bash_operator",
        execution_date=EXAMPLE_DAG_DEFAULT_DATE,
        upstream="false",
        downstream="false",
        future="false",
        past="false",
        only_failed="false",
    )
    resp = admin_client.post("clear", data=form)
    check_content_in_response(['example_bash_operator', 'Wait a minute'], resp)
    # In mysql backend, this commit() is needed to write down the logs
    session.commit()
    _check_last_log(
        session,
        dag_id="example_bash_operator",
        event="clear",
        execution_date=EXAMPLE_DAG_DEFAULT_DATE,
    )
def test_calendar(admin_client, dagruns):
    """The calendar view should report the running dagrun for its date."""
    url = 'calendar?dag_id=example_bash_operator'
    resp = admin_client.get(url, follow_redirects=True)
    bash_dagrun, _, _ = dagruns
    datestr = bash_dagrun.execution_date.date().isoformat()
    # The calendar data is embedded as escaped JSON in the page source.
    expected = rf'{{\"date\":\"{datestr}\",\"state\":\"running\",\"count\":1}}'
    check_content_in_response(expected, resp)
@pytest.mark.parametrize(
    "class_type, no_instances, no_unique_dags",
    [
        (None, 0, 0),
        (TaskInstance, 0, 0),
        (TaskInstance, 1, 1),
        (TaskInstance, 10, 1),
        (TaskInstance, 10, 5),
        (DagRun, 0, 0),
        (DagRun, 1, 1),
        (DagRun, 10, 1),
        (DagRun, 10, 9),
    ],
)
def test_action_has_dag_edit_access(create_task_instance, class_type, no_instances, no_unique_dags):
    """The decorator should check edit permission once per distinct DAG,
    whether given None, a single item, or a list of TaskInstances/DagRuns."""
    unique_dag_ids = [f"test_dag_id_{nr}" for nr in range(no_unique_dags)]
    # Spread the instances round-robin over the unique dag ids.
    tis: List[TaskInstance] = [
        create_task_instance(
            task_id=f"test_task_instance_{nr}",
            execution_date=timezone.datetime(2021, 1, 1 + nr),
            dag_id=unique_dag_ids[nr % len(unique_dag_ids)],
            run_id=f"test_run_id_{nr}",
        )
        for nr in range(no_instances)
    ]
    if class_type is None:
        test_items = None
    else:
        test_items = tis if class_type == TaskInstance else [ti.get_dagrun() for ti in tis]
        # A single item is passed bare (not wrapped in a list), mirroring real callers.
        test_items = test_items[0] if len(test_items) == 1 else test_items
    with app.create_app(testing=True).app_context():
        with mock.patch("airflow.www.views.current_app.appbuilder.sm.can_edit_dag") as mocked_can_edit:
            mocked_can_edit.return_value = True
            assert not isinstance(test_items, list) or len(test_items) == no_instances
            assert some_view_action_which_requires_dag_edit_access(None, test_items) is True
            assert mocked_can_edit.call_count == no_unique_dags
    clear_db_runs()
def test_action_has_dag_edit_access_exception():
    """Unsupported item types passed to the decorated action must raise ValueError."""
    with pytest.raises(ValueError):
        some_view_action_which_requires_dag_edit_access(None, "some_incorrect_value")
| mistercrunch/airflow | tests/www/views/test_views_decorators.py | Python | apache-2.0 | 6,746 |
"""
All function in this module take and return :class:`bytes`
"""
import sys
from os import urandom as random_bytes
from struct import pack
from base64 import b64decode
from Cryptodome.Hash import MD5, SHA1, HMAC
from Cryptodome.PublicKey.RSA import import_key as rsa_import_key, construct as rsa_construct
from Cryptodome.Cipher import PKCS1_OAEP, PKCS1_v1_5
from Cryptodome.Cipher import AES as AES
class UniverseKey(object):
    """Public keys for Universes"""
    # RSA public key used to encrypt session keys for the public universe.
    Public = rsa_import_key(b64decode("""
MIGdMA0GCSqGSIb3DQEBAQUAA4GLADCBhwKBgQDf7BrWLBBmLBc1OhSwfFkRf53T
2Ct64+AVzRkeRuh7h3SiGEYxqQMUeYKO6UWiSRKpI2hzic9pobFhRr3Bvr/WARvY
gdTckPv+T1JzZsuVcNfFjrocejN1oWI0Rrtgt4Bo+hOneoo3S57G9F1fOpn5nsQ6
6WOiu4gZKODnFMBCiQIBEQ==
"""))
BS = 16  # AES block size in bytes

def pad(s):
    """Right-pad *s* to a multiple of BS bytes, PKCS#7-style.

    Each pad byte equals the number of bytes added; when len(s) is already
    a multiple of BS, a full extra block of padding is appended.
    """
    n = BS - len(s) % BS
    return s + n * pack('B', n)

# The last byte of a padded message encodes the pad length; on Python 2 a
# bytes item is a 1-char str (needs ord()), on Python 3 it is already an int.
if sys.version_info < (3,):
    def unpad(s):
        """Strip the padding added by :func:`pad` (Python 2 str input)."""
        return s[0:-ord(s[-1])]
else:
    def unpad(s):
        """Strip the padding added by :func:`pad` (bytes input)."""
        return s[0:-s[-1]]
def generate_session_key(hmac_secret=b''):
    """
    :param hmac_secret: optional HMAC
    :type hmac_secret: :class:`bytes`
    :return: (session_key, encrypted_session_key) tuple
    :rtype: :class:`tuple`
    """
    # 256-bit random session key, RSA-OAEP(SHA1)-encrypted to the universe public key.
    session_key = random_bytes(32)
    encrypted_session_key = PKCS1_OAEP.new(UniverseKey.Public, SHA1)\
                                .encrypt(session_key + hmac_secret)
    return (session_key, encrypted_session_key)
def symmetric_encrypt(message, key):
    """Encrypt *message* with AES-CBC under *key*, using a fresh random IV."""
    iv = random_bytes(BS)
    return symmetric_encrypt_with_iv(message, key, iv)
def symmetric_encrypt_ecb(message, key):
    """Encrypt padded *message* with AES-ECB (no IV) under *key*."""
    return AES.new(key, AES.MODE_ECB).encrypt(pad(message))
def symmetric_encrypt_HMAC(message, key, hmac_secret):
    """Encrypt *message* with an IV derived from an HMAC, so the receiver can verify it.

    The IV is the first 13 bytes of HMAC-SHA1(hmac_secret, prefix + message)
    followed by a random 3-byte prefix.
    """
    prefix = random_bytes(3)
    hmac = hmac_sha1(hmac_secret, prefix + message)
    iv = hmac[:13] + prefix
    return symmetric_encrypt_with_iv(message, key, iv)
def symmetric_encrypt_iv(iv, key):
    """Encrypt a single 16-byte IV block with AES-ECB under *key*."""
    return AES.new(key, AES.MODE_ECB).encrypt(iv)
def symmetric_encrypt_with_iv(message, key, iv):
    """Encrypt *message* with AES-CBC under *key*/*iv*; the ECB-encrypted IV is prepended."""
    encrypted_iv = symmetric_encrypt_iv(iv, key)
    cyphertext = AES.new(key, AES.MODE_CBC, iv).encrypt(pad(message))
    return encrypted_iv + cyphertext
def symmetric_decrypt(cyphertext, key):
    """Decrypt output of :func:`symmetric_encrypt`; recovers the IV from the first block."""
    iv = symmetric_decrypt_iv(cyphertext, key)
    return symmetric_decrypt_with_iv(cyphertext, key, iv)
def symmetric_decrypt_ecb(cyphertext, key):
    """Decrypt AES-ECB *cyphertext* under *key* and strip the padding."""
    return unpad(AES.new(key, AES.MODE_ECB).decrypt(cyphertext))
def symmetric_decrypt_HMAC(cyphertext, key, hmac_secret):
    """:raises: :class:`RuntimeError` when HMAC verification fails"""
    iv = symmetric_decrypt_iv(cyphertext, key)
    message = symmetric_decrypt_with_iv(cyphertext, key, iv)
    # Recompute the MAC from the 3-byte prefix carried in the IV tail; only the
    # first 13 MAC bytes were transmitted (inside the IV), so only those can be
    # checked.
    hmac = hmac_sha1(hmac_secret, iv[-3:] + message)
    # NOTE(review): != is not a constant-time comparison; consider a
    # compare_digest-style check if timing side channels are a concern.
    if iv[:13] != hmac[:13]:
        raise RuntimeError("Unable to decrypt message. HMAC does not match.")
    return message
def symmetric_decrypt_iv(cyphertext, key):
    """Recover the IV by ECB-decrypting the first block of *cyphertext*."""
    return AES.new(key, AES.MODE_ECB).decrypt(cyphertext[:BS])
def symmetric_decrypt_with_iv(cyphertext, key, iv):
    """Decrypt everything after the IV block with AES-CBC and strip the padding."""
    return unpad(AES.new(key, AES.MODE_CBC, iv).decrypt(cyphertext[BS:]))
def hmac_sha1(secret, data):
    """Return HMAC-SHA1(secret, data) as raw bytes."""
    return HMAC.new(secret, data, SHA1).digest()
def sha1_hash(data):
    """Return the raw SHA-1 digest of *data*."""
    return SHA1.new(data).digest()
def md5_hash(data):
    """Return the raw MD5 digest of *data*."""
    return MD5.new(data).digest()
def rsa_publickey(mod, exp):
    """Construct an RSA public key object from modulus and exponent."""
    return rsa_construct((mod, exp))
def pkcs1v15_encrypt(key, message):
    """Encrypt *message* to *key* using PKCS#1 v1.5 padding."""
    return PKCS1_v1_5.new(key).encrypt(message)
| ValvePython/steam | steam/core/crypto.py | Python | mit | 3,356 |
#!/usr/bin/env python
#
# Test cases for tournament.py
from tournament import *
def testCount():
    """
    Test for initial player count,
    player count after 1 and 2 players registered,
    player count after players deleted.
    """
    # Start from a clean database.
    deleteMatches()
    deletePlayers()
    c = countPlayers()
    if c == '0':
        raise TypeError(
            "countPlayers should return numeric zero, not string '0'.")
    if c != 0:
        raise ValueError("After deletion, countPlayers should return zero.")
    print "1. countPlayers() returns 0 after initial deletePlayers() execution."
    registerPlayer("Chandra Nalaar")
    c = countPlayers()
    if c != 1:
        raise ValueError(
            "After one player registers, countPlayers() should be 1. Got {c}".format(c=c))
    print "2. countPlayers() returns 1 after one player is registered."
    registerPlayer("Jace Beleren")
    c = countPlayers()
    if c != 2:
        raise ValueError(
            "After two players register, countPlayers() should be 2. Got {c}".format(c=c))
    print "3. countPlayers() returns 2 after two players are registered."
    deletePlayers()
    c = countPlayers()
    if c != 0:
        raise ValueError(
            "After deletion, countPlayers should return zero.")
    print "4. countPlayers() returns zero after registered players are deleted.\n5. Player records successfully deleted."
def testStandingsBeforeMatches():
    """
    Test to ensure players are properly represented in standings prior
    to any matches being reported.
    """
    deleteMatches()
    deletePlayers()
    registerPlayer("Melpomene Murray")
    registerPlayer("Randy Schwartz")
    standings = playerStandings()
    if len(standings) < 2:
        raise ValueError("Players should appear in playerStandings even before "
                         "they have played any matches.")
    elif len(standings) > 2:
        raise ValueError("Only registered players should appear in standings.")
    if len(standings[0]) != 4:
        raise ValueError("Each playerStandings row should have four columns.")
    # Expected row layout: (id, name, wins, matches).
    [(id1, name1, wins1, matches1), (id2, name2, wins2, matches2)] = standings
    if matches1 != 0 or matches2 != 0 or wins1 != 0 or wins2 != 0:
        raise ValueError(
            "Newly registered players should have no matches or wins.")
    if set([name1, name2]) != set(["Melpomene Murray", "Randy Schwartz"]):
        raise ValueError("Registered players' names should appear in standings, "
                         "even if they have no matches played.")
    print "6. Newly registered players appear in the standings with no matches."
def testReportMatches():
    """
    Test that matches are reported properly.
    Test to confirm matches are deleted properly.
    """
    deleteMatches()
    deletePlayers()
    registerPlayer("Bruno Walton")
    registerPlayer("Boots O'Neal")
    registerPlayer("Cathy Burton")
    registerPlayer("Diane Grant")
    standings = playerStandings()
    [id1, id2, id3, id4] = [row[0] for row in standings]
    # reportMatch(winner, loser): id1 and id3 win their matches.
    reportMatch(id1, id2)
    reportMatch(id3, id4)
    standings = playerStandings()
    for (i, n, w, m) in standings:
        if m != 1:
            raise ValueError("Each player should have one match recorded.")
        if i in (id1, id3) and w != 1:
            raise ValueError("Each match winner should have one win recorded.")
        elif i in (id2, id4) and w != 0:
            raise ValueError("Each match loser should have zero wins recorded.")
    print "7. After a match, players have updated standings."
    # Deleting matches must reset counts without removing the players.
    deleteMatches()
    standings = playerStandings()
    if len(standings) != 4:
        raise ValueError("Match deletion should not change number of players in standings.")
    for (i, n, w, m) in standings:
        if m != 0:
            raise ValueError("After deleting matches, players should have zero matches recorded.")
        if w != 0:
            raise ValueError("After deleting matches, players should have zero wins recorded.")
    print "8. After match deletion, player standings are properly reset.\n9. Matches are properly deleted."
def testPairings():
    """
    Test that pairings are generated properly both before and after match reporting.
    """
    deleteMatches()
    deletePlayers()
    registerPlayer("Twilight Sparkle")
    registerPlayer("Fluttershy")
    registerPlayer("Applejack")
    registerPlayer("Pinkie Pie")
    registerPlayer("Rarity")
    registerPlayer("Rainbow Dash")
    registerPlayer("Princess Celestia")
    registerPlayer("Princess Luna")
    standings = playerStandings()
    [id1, id2, id3, id4, id5, id6, id7, id8] = [row[0] for row in standings]
    pairings = swissPairings()
    if len(pairings) != 4:
        raise ValueError(
            "For eight players, swissPairings should return 4 pairs. Got {pairs}".format(pairs=len(pairings)))
    # Round 1: odd-indexed ids win, even-indexed ids lose.
    reportMatch(id1, id2)
    reportMatch(id3, id4)
    reportMatch(id5, id6)
    reportMatch(id7, id8)
    pairings = swissPairings()
    if len(pairings) != 4:
        raise ValueError(
            "For eight players, swissPairings should return 4 pairs. Got {pairs}".format(pairs=len(pairings)))
    [(pid1, pname1, pid2, pname2), (pid3, pname3, pid4, pname4), (pid5, pname5, pid6, pname6), (pid7, pname7, pid8, pname8)] = pairings
    # Swiss pairing must match winners with winners and losers with losers;
    # possible_pairs enumerates every legal 1-win/1-win and 0-win/0-win pair.
    possible_pairs = set([frozenset([id1, id3]), frozenset([id1, id5]),
                          frozenset([id1, id7]), frozenset([id3, id5]),
                          frozenset([id3, id7]), frozenset([id5, id7]),
                          frozenset([id2, id4]), frozenset([id2, id6]),
                          frozenset([id2, id8]), frozenset([id4, id6]),
                          frozenset([id4, id8]), frozenset([id6, id8])
                          ])
    actual_pairs = set([frozenset([pid1, pid2]), frozenset([pid3, pid4]), frozenset([pid5, pid6]), frozenset([pid7, pid8])])
    for pair in actual_pairs:
        if pair not in possible_pairs:
            raise ValueError(
                "After one match, players with one win should be paired.")
    print "10. After one match, players with one win are properly paired."
if __name__ == '__main__':
    # Run the full test sequence in order; each test prints its own progress.
    testCount()
    testStandingsBeforeMatches()
    testReportMatches()
    testPairings()
    print "Success! All tests pass!"
| ClareMorganRiddell/tournament | Vagrant/tournament_test.py | Python | mit | 6,297 |
# xmlpage.py
# Author: Richard Gibson
#
# Renders a page generated from an xml file. Currently renders /faq and /blog
#
import handler
import urllib2
import xml.etree.cElementTree as et
class XmlPage( handler.Handler ):
    """Renders a page whose content is loaded from an XML file named after the URL path."""
    def get( self ):
        """Handle GET: parse xml/<path>.xml and render <path>.html with its contents."""
        user = self.get_user( )
        if user == self.OVER_QUOTA_ERROR:
            user = None
        # Grab the optional query parameter and cast it as an int.
        # Currently only used for /faq
        q = self.request.get( 'q' )
        try:
            q = int( q )
        except ValueError:
            pass
        # Get the name of the xml file from the URL path
        path = self.request.path
        if path.endswith('/'):
            path = path[:-1] # Should now be, for example, '/faq' or '/blog'
        # Read the xml file and store in a string
        f = open( 'xml' + path + '.xml', 'r' )
        tree = et.fromstring( f.read( ) )
        f.close( )
        # Build the list of dictionaries of lists from the xml file.
        # Each child element named after the page (e.g. <faq>) becomes one
        # dict mapping tag name -> list of text values (repeated tags append).
        xml = [ ]
        for child in tree.findall( path[ 1: ] ):
            d = dict( )
            xml.append( d )
            for item in child:
                if d.get( item.tag ) is None:
                    d[ item.tag ] = [ item.text ]
                else:
                    d[ item.tag ].append( item.text )
        self.render( path[ 1: ] + ".html", user=user, xml=xml, q=q )
| rggibson/pb-tracker | xmlpage.py | Python | mit | 1,387 |
# -*- encoding: utf-8 -*-
"""Test class for UI functions against an isolated capsule"""
from robottelo.decorators import stubbed, tier3
from robottelo.test import UITestCase
class CapsuleTestCase(UITestCase):
    """Implements capsule tests in UI"""
    # All tests below are stubbed placeholders: the docstrings describe the
    # intended manual procedure and the bodies are intentionally empty.
    @stubbed()
    @tier3
    def test_positive_errata_push(self):
        """@Test: User can push errata through to a client on
        an isolated capsule
        @Feature: Capsules
        @Setup: Client on an isolated capsule; errata synced
        on server
        @Steps:
        1. Attempt to install errata via Sat UI against client on an
        isolated capsule - this is a satellite-initiated action.
        @Assert: Errata can be installed.
        @Status: Manual
        """
    @stubbed()
    @tier3
    def test_positive_rpm_push(self):
        """@Test: User can install a new errata on a client through
        an isolated capsule - this is a satellite-initiated action
        @Feature: Capsules
        @Setup: Client on an isolated capsule; rpms synced (RH,
        custom content)
        @Steps:
        1. attempt to push an RPM install onto client connected to
        isolated capsule - this is a satellite-initiated action.
        @Assert: Package is installed
        @Status: Manual
        """
    @stubbed()
    @tier3
    def test_positive_puppet_push(self):
        """@Test: user can install new puppet module on a client
        through an isolated capsule
        @Feature: Capsules
        @Setup: Client on an isolated capsule; puppet content synced
        @Steps:
        1. attempt to push a puppet module install initiated from
        Satellite
        @Assert: module is installed
        @Status: Manual
        """
    @stubbed()
    @tier3
    def test_positive_chost_selector(self):
        """@Test: User can choose, or is given an indication within
        the content hosts UI, any referenced capsule in order to
        learn/setup registration against said capsule(s).
        @Feature: Capsules
        @Setup: A satellite with at least one capsule configured.
        @Steps:
        1. Go to Hosts > Content Hosts > Register Content Host
        2. Observe the section labeled 'Consuming Content From
        A Capsule'
        @Assert: capsule(s) appear in dropdown and the instructions
        for using subscription-manager update accordingly when
        choosing said capsule(s).
        @Status: Manual
        """
| tkolhar/robottelo | tests/foreman/ui/test_capsule.py | Python | gpl-3.0 | 2,464 |
# Copyright (C) 2016 Fan Long, Martin Rianrd and MIT CSAIL
# Prophet
#
# This file is part of Prophet.
#
# Prophet is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Prophet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Prophet. If not, see <http://www.gnu.org/licenses/>.
#!/usr/bin/env python
from sys import argv
from os import system, getcwd, chdir
def setup_hg_repo(repo, rev):
    """Force-checkout *rev* inside working copy *repo* and purge untracked files.

    Despite the name, this operates on a git repository.  The caller's
    current directory is restored before returning.
    """
    saved_cwd = getcwd()
    chdir(repo)
    system("git checkout -f " + rev)
    system("git clean -f -d")
    chdir(saved_cwd)
def build_repo(repo, build_cmd):
    """Invoke the external build command, passing the repository path as its argument."""
    system(build_cmd + " " + repo)
def test_case(repo, test_dir, work_dir, test_cmd, i):
    """Run external test *i* against *repo*; return {i} if it passed, else an empty set.

    The test command writes the passing case id (or nothing) to __res.
    """
    ret = set();
    print "Testing: ", i,
    cmd = test_cmd + " " + repo + " " + test_dir + " " + work_dir + " " + str(i) + " > __res";
    #print cmd;
    system(cmd);
    f = open("__res", "r");
    line = f.readline();
    f.close();
    s = line.strip();
    if (s != ""):
        # A non-empty result must echo back the same case id.
        v = int(s);
        assert(v == i);
        ret.add(v);
        print "PASS";
    else:
        print "FAIL";
    system("rm -rf __res");
    return ret;
def test_repo(repo, test_dir, work_dir, test_cmd, num_cases=61):
    # Run test cases 1..num_cases against `repo` and return the set of
    # passing case ids.  `num_cases` was previously hard-coded to 61 (the
    # wireshark suite); it is now a backward-compatible parameter.
    ret = set();
    for i in range(1, num_cases + 1):
        a = test_case(repo, test_dir, work_dir, test_cmd, i);
        ret = ret | a;
    return ret;
# Driver: compare test results of two revisions of the wireshark source tree.
# argv: build_cmd test_cmd src_dir test_dir new_rev [old_rev]
# old_rev defaults to the parent of new_rev.  (Python 2 file: print >> syntax.)
build_cmd = argv[1];
test_cmd = argv[2];
src_dir = argv[3];
test_dir = argv[4];
new_rev = argv[5];
if (len(argv) < 7):
    old_rev = new_rev + "^1";
else:
    old_rev = argv[6];
out_file = "wireshark-rev-" + old_rev + "-" + new_rev + ".txt";
# Scratch layout: two checkouts (new/old revision) plus a copy of the tests.
work_dir = "__tmp_" + new_rev;
system("mkdir " + work_dir);
repo1 = work_dir + "/tmp1";
repo2 = work_dir + "/tmp2";
tmp_test_dir = work_dir + "/tests";
system("cp -rf " + src_dir + " " + repo1);
system("cp -rf " + src_dir + " " + repo2);
system("cp -rf " + test_dir + " " + tmp_test_dir);
setup_hg_repo(repo1, new_rev);
setup_hg_repo(repo2, old_rev);
build_repo(repo1, build_cmd);
build_repo(repo2, build_cmd);
s1 = test_repo(repo1, tmp_test_dir, work_dir, test_cmd);
s2 = test_repo(repo2, tmp_test_dir, work_dir, test_cmd);
# diff12: pass only on new rev; diff21: regressions; common: pass on both.
diff12 = s1 - s2;
diff21 = s2 - s1;
common = s1 & s2;
fout = open(out_file, "w");
print >>fout, "-";
print >>fout, "-";
# Re-run each candidate diff case 5 times to filter out flaky results:
# it must deterministically pass on repo1 and fail on repo2.
outdiff = [];
for i in diff12:
    bad = False;
    for j in range(0, 5):
        a = test_case(repo1, tmp_test_dir, work_dir, test_cmd, i);
        b = test_case(repo2, tmp_test_dir, work_dir, test_cmd, i);
        if (len(a) != 1):
            bad = True;
            break;
        if (len(b) != 0):
            bad = True;
            break;
    if not bad:
        outdiff.append(i);
print >>fout, "Diff Cases: Tot", len(outdiff);
for i in outdiff:
    print >>fout, i,
print >>fout;
print >>fout, "Positive Cases: Tot", len(common);
for i in common:
    print >>fout, i,
print >>fout;
print >>fout, "Regression Cases: Tot", len(diff21);
for i in diff21:
    print >>fout, i,
print >>fout;
fout.close();
system("rm -rf " + work_dir);
| jyi/ITSP | prophet-gpl/tools/wireshark-rev-test.py | Python | mit | 3,381 |
import datetime
import json
import random
import string
class RedisApiException(Exception):
    """Exception for Redis API failures; carries an HTTP-style status code."""

    def __init__(self, message, status_code, *args, **kwargs):
        self.status_code = status_code
        super(RedisApiException, self).__init__(message)
class DateTimeJSONEncoder(json.JSONEncoder):
    """JSONEncoder that additionally handles datetimes (ISO 8601 strings),
    sets (encoded as lists) and any object exposing a ``to_json()`` method."""

    def default(self, obj):
        if isinstance(obj, datetime.datetime):
            return obj.isoformat()
        if isinstance(obj, set):
            return list(obj)
        if obj and hasattr(obj, 'to_json'):
            # BUG FIX: the converted value must be returned directly.
            # Previously it was passed on to JSONEncoder.default(), which
            # unconditionally raises TypeError, so to_json() objects could
            # never actually be serialized.
            return obj.to_json()
        return super(DateTimeJSONEncoder, self).default(obj)
# Shared encoder instance.  NOTE(review): despite the name this is an encoder
# object (call json_dumps.encode(obj)), not a json.dumps-style function.
json_dumps = DateTimeJSONEncoder()
def generate_random_string(length=13, chars=string.ascii_letters + string.digits, ):
    """Return a random string of `length` characters drawn from `chars`.

    Uses the non-cryptographic `random` module; do not use for secrets.
    """
    picks = [random.choice(chars) for _ in range(length)]
    return ''.join(picks)
| voidfiles/lark | lark/ext/utils.py | Python | mit | 800 |
#!/usr/bin/python
from __future__ import print_function
#------------------------------------------------------------#
# This is the RCache class, used to cache the results of the #
# get_contents() method from the Entry class. #
#------------------------------------------------------------#
#------------------------------------------------------------#
# RCache memoizes Entry.get_contents() results per instance. #
#------------------------------------------------------------#
class RCache:
    """Cache for the results of Entry.get_contents(), keyed by the Entry
    object itself (entries must therefore be hashable)."""

    # FIX: the redundant class-level `__mem = {}` (a shared mutable class
    # attribute, immediately shadowed in __init__) has been removed.

    def __init__(self):
        self.__mem = {}

    # Called as a wrapper for the get_contents() method of Entry objects.
    # Returns the cached value if this object was seen before; otherwise
    # calls get_contents() once, memoizes the result and returns it.
    def execute(self, obj):
        if obj not in self.__mem:
            self.__mem[obj] = obj.get_contents()
        return self.__mem[obj]

    def clear(self):
        """Drop all memoized results."""
        self.__mem = {}
| djordje-kasagic/LHS-algorithm | cache.py | Python | mit | 864 |
#!/usr/bin/env python2.6
import lxml.html, re, HTMLParser
class InvalidArguments(Exception):
    """Raised when MetaHeaders is constructed without a URL or an HTML page."""
class MetaHeaders:
    """Collect <meta> tag name/content pairs from an HTML document.

    self.meta maps each meta name to the list of content values seen.
    (Python 2 file: py2 raise syntax, dict.has_key, print statements.)
    """
    def __init__(self, url=None, page=None,name='name',content='content', unescape_entities=False):
        # Parse either a raw HTML string (`page`) or fetch a URL; exactly one
        # of the two is required.
        if page:
            self.root = lxml.html.document_fromstring(page)
        elif url:
            self.root = lxml.html.parse(url).getroot()
        else:
            raise InvalidArguments, "Need a URL or an HTML page"
        meta = {}
        # Some sites (IEEE) triple escape entities, e.g., R&amp;#x0026;D
        if unescape_entities:
            htmldecoder = HTMLParser.HTMLParser()
        for m in self.root.cssselect("meta"):
            attr=m.attrib
            # Keep only tags that have both attributes and non-empty content.
            if attr.has_key(name) and attr.has_key(content) and attr[content] != "":
                k = attr[name]
                v = attr[content].strip()
                if unescape_entities:
                    # unescape twice: see triple-escaping note above
                    v = htmldecoder.unescape(htmldecoder.unescape(v))
                if not meta.has_key(k):
                    meta[k] = []
                meta[k].append(v)
        self.meta = meta
    def get_item(self, k):
        # First value recorded for meta name `k`, or None.
        items = self.get_multi_item(k)
        if items:
            return items[0]
        else:
            return None
    def get_multi_item(self, k):
        # All values recorded for meta name `k`, or None if absent.
        if self.meta.has_key(k):
            return self.meta[k]
        else:
            return None
    def print_item(self, entry, key):
        # Print "<entry>\t<value>" for every value of meta name `key`.
        el = self.get_multi_item(key)
        if not el:
            return
        for e in el:
            print "%s\t%s" % (entry, e)
    def print_date(self, key):
        # Best-effort parse of the date stored under meta name `key`;
        # prints year/month/day lines for whatever components were found.
        date = self.get_item(key)
        if not date:
            return
        year = None
        month = None
        day = None
        # Try YYYY-MM-DD (or YYYY/MM/DD), then YYYY-MM, then bare YYYY.
        m = re.search(r'(\d\d\d\d)(?:[-/])(\d+)(?:[-/])(\d+)', date)
        if m:
            year = m.group(1)
            month = m.group(2)
            day = m.group(3)
        if not year:
            m = re.search(r'(\d\d\d\d)(?:[-/])(\d+)', date)
            if m:
                year = m.group(1)
                month = m.group(2)
        if not year:
            m = re.search(r'(\d\d\d\d)', date)
            if m:
                year = m.group(1)
        # A month given by name ("Jan", "January") overrides a numeric month.
        m = re.search(r"([a-z]+)", date, re.IGNORECASE)
        if m:
            months = {
                'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
                'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12,
                'January': 1, 'February': 2, 'March': 3, 'April': 4,
                'May': 5, 'June': 6, 'July': 7, 'August': 8,
                'September': 9, 'October': 10, 'November': 11, 'December': 12
            }
            try:
                print "month\t%s" % months[m.group(1).capitalize()]
                month = None
            except:
                pass
        if year:
            print "year\t%s" % year
        if month:
            print "month\t%s" % month
        if day:
            print "day\t%s" % day
def test():
    # Smoke test: fetch a known IEEE abstract page twice -- once letting lxml
    # fetch the URL, once feeding a manually-fetched page -- and dump the
    # meta headers found each time.  (Python 2: print statements, urllib2.)
    url = "http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=4755987"
    print "getting %s " % url
    metaheaders = MetaHeaders(url=url)
    for (k,v) in metaheaders.meta.items():
        print "%s = %s" % (k,v)
    print "===============\nRepeat with manual fetch"
    from urllib2 import urlopen
    page = urlopen(url).read()
    metaheaders = MetaHeaders(page=page)
    for (k,v) in metaheaders.meta.items():
        print "%s = %s" % (k,v)
| OAButton/tricorder | plugins/python/metaheaders.py | Python | bsd-3-clause | 2,791 |
import os
from subprocess import check_call
from kernel.cli import main
# Fixture paths: hand-written inputs and expected outputs under examples/,
# plus a tmp/ scratch area the tests write into.
user_config = "examples/user.config"
dist_config = "examples/dist.config"
trim_config = "examples/trim.config"
comb_config = "examples/comb.config"
diff_dir = "examples/diff"
out_trim_config = "tmp/trim.config"
out_comb_config = "tmp/comb.config"
out_diff_dir = "tmp/diff"
# Ensure the scratch tree exists.  NOTE(review): literal "tmp/diff" here is
# presumably meant to equal out_diff_dir -- confirm if either is renamed.
os.makedirs("tmp/diff", exist_ok=True)
def test_userconfig_trim():
    """user-config --trim output must match the expected trim fixture."""
    argv = ["user-config", "--trim", user_config, dist_config, out_trim_config]
    main(argv)
    check_call(["diff", "-u", out_trim_config, trim_config])
def test_userconfig_combine():
    """user-config --combine output must match the expected combined fixture."""
    argv = ["user-config", "--combine", user_config, dist_config, out_comb_config]
    main(argv)
    check_call(["diff", "-u", out_comb_config, comb_config])
def test_userconfig_diff():
    """user-config --diff output tree must match the expected diff fixture."""
    argv = ["user-config", "--diff", user_config, dist_config, comb_config, out_diff_dir]
    main(argv)
    check_call(["diff", "-r", "-u", out_diff_dir, diff_dir])
| crossdistro/kernel-tools | tests/testuserconfig.py | Python | bsd-2-clause | 912 |
from eventlet import patcher
from eventlet.green import asyncore
from eventlet.green import select
from eventlet.green import socket
from eventlet.green import threading
from eventlet.green import time
patcher.inject("test.test_asyncore", globals())
def new_closeall_check(self, usedefault):
    # Check that close_all() closes everything in a given map
    # (eventlet-aware replacement for the stdlib test helper; dummychannel
    # comes from the injected test.test_asyncore globals).
    l = []
    testmap = {}
    for i in range(10):
        c = dummychannel()
        l.append(c)
        self.assertEqual(c.socket.closed, False)
        testmap[i] = c
    if usedefault:
        # the only change we make is to not assign to asyncore.socket_map
        # because doing so fails to assign to the real asyncore's socket_map
        # and thus the test fails
        socketmap = asyncore.socket_map.copy()
        try:
            asyncore.socket_map.clear()
            asyncore.socket_map.update(testmap)
            asyncore.close_all()
        finally:
            # restore the real map, keeping whatever close_all left behind
            testmap = asyncore.socket_map.copy()
            asyncore.socket_map.clear()
            asyncore.socket_map.update(socketmap)
    else:
        asyncore.close_all(testmap)
    self.assertEqual(len(testmap), 0)
    for c in l:
        self.assertEqual(c.socket.closed, True)
# Swap the stdlib test helper for the eventlet-aware variant defined above.
HelperFunctionTests.closeall_check = new_closeall_check
try:
    # Eventlet's select() emulation doesn't support the POLLPRI flag,
    # which this test relies on. Therefore, nuke it!
    BaseTestAPI.test_handle_expt = lambda *a, **kw: None
except NameError:
    # BaseTestAPI does not exist in every stdlib version of this test suite.
    pass
if __name__ == "__main__":
    test_main()
| simplegeo/eventlet | tests/stdlib/test_asyncore.py | Python | mit | 1,543 |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
def addroot():
    """Add the project root (three directories above this file) to sys.path.

    Idempotent: the path is only appended -- and the confirmation printed --
    the first time; repeated calls no longer grow sys.path with duplicates.
    """
    import sys
    import os.path
    print("__file__=", __file__)
    rootdir = os.path.abspath(os.path.join(
        os.path.split(__file__)[0], '..', '..', '..'))
    if rootdir not in sys.path:
        sys.path.append(rootdir)
        print('%s added to PATH' % rootdir)
if __name__ == "__main__":
addroot()
import pytools.build as b
b.build()
b.test()
| rboman/progs | apps/fractal/olc/build.py | Python | apache-2.0 | 400 |
# vim:set et ts=4 sw=4 ai ft=python:
# pylint: disable=superfluous-parens
"""
One Time JWT
Simple mechanism for cross service authorization. Usage:
Client side:
import onetimejwt
jwt = onetimejwt.generate('shared secret', 60) # shared secret, 60 second age
headers = {
"Authorization": "Bearer " + onetimejwt.generate('shared secret', 60)
}
Server side, create a single instance of Manager and use it for all threads:
import onetimejwt
# at startup, creates a cleanup thread
# note: you can include any number of secrets
JTM = onetimejwt.Manager('shared secret', maxage=60)
# during processing -- throws JwtFailed exception if not authorized
JTM.valid(headers.get('Authorization'))
Manager will keep a list of recognized JWTS, and uses logging of a warning level
to report problems.
You can store the secret in base64 format for full binary spectrum of random
data. If you do this, begin the secret with 'base64:...' and Manager init will
properly handle it. generate, however, does not pay attention to this--
for performance purposes it assumes you give it the final form of the secret.
To pre-process the secret for generate, call:
    decoded_secret = onetimejwt.decode_secret("base64:bm90IHJlYWxseSBiaW5hcnk=")
jwt = onetimejwt.generate(decoded_secret, 60)
------------------------------------------------------------------
Copyright 2016 Brandon Gillespie
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import threading
import base64
import uuid
import time
import jwt
import timeinterval # look elsewhere for this module
#import logging
__version__ = 1.0
def decode_secret(secret):
    """Return the raw secret, base64-decoding values of the form 'base64:...'.

    Allows binary (full-entropy) secrets to be stored as text.
    """
    # BUG FIX: the old check compared only the first 6 characters ("base64")
    # yet always stripped 7, silently mangling secrets that merely start with
    # "base64" and have no colon.  Match the full "base64:" prefix instead.
    if secret.startswith("base64:"):
        return base64.b64decode(secret[7:])
    return secret
def generate(secret, age, **payload):
    """Build a one-time JWT that expires `age` seconds from now.

    Extra keyword arguments become additional claims; `exp` and a random
    one-time `jti` are always set.  `secret` is used as-is (pre-decode it
    with decode_secret() if it is stored in 'base64:...' form).
    """
    if not payload:
        payload = {}
    payload['jti'] = str(uuid.uuid1())  # random one-time id
    payload['exp'] = int(time.time() + age)
    return jwt.encode(payload, decode_secret(secret))
def mutex(func):
    """Decorator: serialize calls to `func` under args[0].lock -- i.e. the
    owning instance's lock when applied to a method."""
    def wrapper(*args, **kwargs):
        """Acquire the lock, run the wrapped callable, always release."""
        with args[0].lock:
            return func(*args, **kwargs)
    return wrapper
class JwtFailed(Exception):
    """Raised when a presented JWT is missing, expired, malformed or reused."""
class Manager(object):
    """
    Threadsafe mechanism to have one-time jwts.

    Accepts any number of shared secrets.  Tokens are accepted for `age`
    seconds and each one-time id (jti) may be used only once per Manager.

    FIXES vs. the previous version:
    - secrets/jwts/lock were mutable class attributes shared by every
      Manager instance; they are now per-instance state.
    - _clean() deleted entries while iterating the live dict view, which
      raises RuntimeError on Python 3; it now iterates a snapshot.
    - the module docstring advertises a `maxage` keyword which was silently
      ignored; it is now accepted as an alias of `age`.
    """

    def __init__(self, *secrets, **kwargs):
        # `age` wins if both are given; `maxage` kept for documented usage.
        self.age = kwargs.get('age', kwargs.get('maxage', 60))
        self.secrets = []
        self.jwts = {}
        self.lock = threading.Lock()
        for secret in secrets:
            self.secrets.append(decode_secret(secret))
        # housekeeper: periodically purge remembered jtis
        timeinterval.start(self.age * 1000, self._clean)

    @mutex
    def _clean(self):
        """Run by housekeeper thread: drop jtis older than twice self.age."""
        now = time.time()
        # iterate a snapshot -- we delete from the dict as we go
        for jti in list(self.jwts.keys()):
            if (now - self.jwts[jti]) > (self.age * 2):
                del self.jwts[jti]

    @mutex
    def already_used(self, tok):
        """Record `tok`; return True iff it has already been seen."""
        if tok in self.jwts:
            return True
        self.jwts[tok] = time.time()
        return False

    def valid(self, token):
        """Validate `token` (optionally 'Bearer '-prefixed).

        Returns the decoded payload, or raises JwtFailed on any problem:
        undecodable, expired, too-old exp, missing jti, or jti reuse.
        """
        now = time.time()
        if token.startswith('Bearer '):
            token = token[7:]
        data = None
        for secret in self.secrets:
            try:
                data = jwt.decode(token, secret)
                break
            except jwt.DecodeError:
                continue  # wrong secret -- try the next one
            except jwt.ExpiredSignatureError:
                raise JwtFailed("Jwt expired")
        if not data:
            raise JwtFailed("Jwt cannot be decoded")
        exp = data.get('exp')
        if not exp:
            raise JwtFailed("Jwt missing expiration (exp)")
        if now - exp > self.age:
            raise JwtFailed("Jwt bad expiration - greater than I want to accept")
        jti = data.get('jti')
        if not jti:
            raise JwtFailed("Jwt missing one-time id (jti)")
        if self.already_used(jti):
            raise JwtFailed("Jwt re-use disallowed (jti={})".format(jti))
        return data
| srevenant/onetimejwt | onetimejwt/__init__.py | Python | agpl-3.0 | 4,972 |
import sys
import time
import logging
from scripts import utils as script_utils
from website.app import setup_django
from website.identifiers.utils import request_identifiers_from_ezid, parse_identifiers
setup_django()
logger = logging.getLogger(__name__)
def main(dry=True):
    """Assign EZID DOI/ARK identifiers to every published preprint that
    lacks one.  With dry=True only logs what would be done."""
    from osf.models import PreprintService
    # Published, non-deleted preprints that have no identifier yet.
    preprints_without_identifiers = PreprintService.objects.filter(identifiers__isnull=True, is_published=True, node__is_deleted=False)
    logger.info('About to add identifiers to {} preprints.'.format(preprints_without_identifiers.count()))
    for preprint in preprints_without_identifiers:
        logger.info('Saving identifier for preprint {} from source {}'.format(preprint._id, preprint.provider.name))
        if not dry:
            ezid_response = request_identifiers_from_ezid(preprint)
            id_dict = parse_identifiers(ezid_response)
            preprint.set_identifier_values(doi=id_dict['doi'], ark=id_dict['ark'])
            preprint.save()
            # sanity check: the minted DOI must embed the preprint guid
            doi = preprint.get_identifier('doi')
            assert preprint._id.upper() in doi.value
            logger.info('Created DOI {} for Preprint with guid {} from service {}'.format(doi.value, preprint._id, preprint.provider.name))
            # throttle requests to the EZID service
            time.sleep(1)
        else:
            logger.info('Dry run - would have created identifier for preprint {} from service {}'.format(preprint._id, preprint.provider.name))
    logger.info('Finished Adding identifiers to {} preprints.'.format(preprints_without_identifiers.count()))
if __name__ == '__main__':
    # --dry: log-only mode; any other run also logs to a file.
    dry = '--dry' in sys.argv
    if not dry:
        # If we're not running in dry mode log everything to a file
        script_utils.add_file_logger(logger, __file__)
    # Allow setting the log level just by appending the level to the command
    if '--debug' in sys.argv:
        logger.setLevel(logging.DEBUG)
    elif '--warning' in sys.argv:
        logger.setLevel(logging.WARNING)
    elif '--info' in sys.argv:
        logger.setLevel(logging.INFO)
    elif '--error' in sys.argv:
        logger.setLevel(logging.ERROR)
    # Finally run the migration
    main(dry=dry)
| chrisseto/osf.io | scripts/add_identifiers_to_existing_preprints.py | Python | apache-2.0 | 2,141 |
#!/usr/bin/env python3.2
"""
========
OVERVIEW
========
The script compares two gff3 files at base, exon/CDS and gene level and outputs the positive
predictive value(PPV) and sensitivity(SN). The first input will be treated as a known gff3 file
and second as the predicted gff3 file. The PPV/SN in the output will be for predicted gff3 with
respect to the known gff3.
=====
INPUT
=====
Options
-------
-a1 --ANNOTATION_1 required
Path to first annotation file(known).
-a2 --ANNOTATION_2 required
Path to second annotation file(predicted).
-o --output_dir required
Path to output directory.
-f --feature required
======
OUTPUT
======
Script generates a summary.txt file in the output directory.
Summary.txt is a tab delimited file with following headers :
Feature Known Predicted True_predicted SN PPV
Gene
Exon/CDS
Base
And final_stats.txt file :
1. No. of known gene : 4172
2. No. of predicted gene : 7654
3. No. of predicted gene overlapping 0 known gene (new gene): 337
4. No. of predicted gene overlapping > 1 known gene : 779
5. No. of predicted gene overlaping 1 known gene : 4667
6. No. of predicted gene overlapping >= 1 known gene in opp strand : 1871
7. No. of predicted gene overlapping 1 known gene (exact intron/exon boundaries) : 1
8. No. of predicted gene overlapping >= 1 known predicted gene in opp strand : 0
9. No. of known gene overlapping 0 predicted gene (gene missing): 174
10. No. of known gene overlapping > 1 predicted gene : 1548
11. No. of known gene overlaping 1 predicted gene : 2373
12. No. of known gene overlapping >= 1 predicted gene in opp strand : 77
The summary will also be printed in the console.
pred.found.txt : All the predicted gene overlaping 1 known gene
pred.no_overlap.txt : All the predicted genes not overlapping any known genes (new_gene)
pred.opposite.txt : All the predicted genes overlapping any known genes in opposite strand
pred.overlap_more_than_one.txt : All the predicted genes overlapping more than one known gene.(gene merge)
known.found.txt : All the known gene overlaping 1 predicted gene
known.no_overlap.txt : All the known genes not overlapping any predicted genes (genes_missing)
known.opposite.txt : All the known genes overlapping any predicted genes in opposite strand
known.overlap_more_than_one.txt : All the known genes overlapping more than one predicted gene. (gene split)
====
NOTE
====
Both the input annotation file should be in the correct gff3 format.
http://www.sequenceontology.org/gff3.shtml
Examples :
chr1 genbank gene 1 3000 . - . ID=gene1
chr1 genbank mRNA 10 2900 . - . ID=mrna1;Parent=gene1
chr1 genbank exon 10 1000 . - . ID=exon1;Parent=mrna1
======
Author
======
Priti Kumari
Bioinformatics Software Engineer
Institute for Genome Sciences
University of Maryland
Baltimore, Maryland 21201
"""
import argparse
import os
import fileinput
import biothings
import biocodegff
import sys
def interface():
    """Parse command-line arguments for the GFF3 comparison script."""
    parser = argparse.ArgumentParser( description='Compare a known and a predicted GFF3 annotation; report SN/PPV at base, exon/CDS and gene level')
    parser.add_argument('-a1', '--annotation_1',type=str, required=True,
                       help='The first (known) annotation file.')
    # BUG FIX: the predicted annotation is used unconditionally downstream and
    # the module docstring declares it required, but it was marked optional.
    parser.add_argument('-a2', '--annotation_2',type=str, required=True,
                       help='The second (predicted) annotation file.')
    # choices: the rest of the script only handles these two feature levels.
    parser.add_argument('-f', '--feature',type=str, required=True, choices=['Exon', 'CDS'],
                       help='Feature level to compare: Exon or CDS')
    ## output file to be written
    parser.add_argument('-o', '--output_dir', type=str, required=True, help='Path to an output directory' )
    parser = parser.parse_args()
    return parser
def base_comparison(p_exon, a_exon):
    """Compute base-level overlap between predicted and known exons.

    p_exon/a_exon are "chrom:start:stop:strand" strings.  Writes BED files
    under args.output_dir (module-level `args`), runs external `bedtools`
    merge/intersect via os.system, and returns (known_bases, predicted_bases,
    true_predicted_bases).  NOTE(review): requires bedtools on PATH.
    """
    a_base = 0
    p_base = 0
    true_base_value = 0
    # --- predicted exons -> BED, merged on strand ---
    exon2_bed = args.output_dir + '/exon_2.bed'
    e_bed = open(exon2_bed, 'w')
    for exon in p_exon :
        chrom = (exon.split(':'))[0]
        start = int((exon.split(':'))[1])
        stop = int((exon.split(':'))[2])
        strand = (exon.split(':'))[3]
        # internal strand encoding is 1/-1; BED wants +/-
        if (strand == str(1)) :
            strand = "+"
        else :
            strand = "-"
        e_bed.write(chrom+"\t"+str(start)+"\t"+str(stop)+"\texon\t"+str(0)+"\t"+strand+"\n")
    e_bed.close()
    out2 = args.output_dir + '/exon_2_merged.bed'
    cmd = "bedtools merge -nms -scores sum -i " + exon2_bed + " -s >"+out2
    #print(cmd)
    os.system(cmd)
    # --- known exons -> BED, merged on strand ---
    exon1_bed = args.output_dir + '/exon_1.bed'
    e_bed = open(exon1_bed, 'w')
    for exon in a_exon :
        chrom = (exon.split(':'))[0]
        start = int((exon.split(':'))[1])
        stop = int((exon.split(':'))[2])
        strand = (exon.split(':'))[3]
        if (strand == str(1)) :
            strand = "+"
        else :
            strand = "-"
        e_bed.write(chrom+"\t"+str(start)+"\t"+str(stop)+"\texon\t"+str(0)+"\t"+strand+"\n")
    e_bed.close()
    out1 = args.output_dir + '/exon_1_merged.bed'
    cmd = "bedtools merge -nms -scores sum -i " + exon1_bed + " -s >"+out1
    #print(cmd)
    os.system(cmd)
    # --- strand-aware intersection of the two merged sets ---
    out_intersect = args.output_dir + '/exon_1_2_intersect.bed'
    cmd = "bedtools intersect -s -wo -a " + out1 + " -b " + out2 + " >" + out_intersect
    #print(cmd)
    os.system(cmd)
    # Total known bases = sum of merged interval lengths.
    a_base_file = open(out1,'r')
    for line in a_base_file :
        arr = line.split("\t")
        a_base = a_base + (int(arr[2]) - int(arr[1]))
    a_base_file.close()
    # Total predicted bases.
    p_base_file = open(out2,'r')
    for line in p_base_file :
        arr = line.split("\t")
        p_base = p_base + (int(arr[2]) - int(arr[1]))
    p_base_file.close()
    # Column 13 of -wo output is the per-pair overlap length in bases.
    true_base_file = open(out_intersect,'r')
    for line in true_base_file :
        arr = line.split("\t")
        true_base_value = true_base_value + int(arr[12])
    true_base_file.close()
    return (a_base, p_base, true_base_value)
def compare_cds(cds1,cds2,tag) :
    """Classify each gene span in `cds1` by its overlaps against `cds2`.

    Entries are "chrom:gene_id:start:stop:strand" strings.  Writes four
    report files under args.output_dir prefixed by `tag` and returns
    [found, opposite_strand_only, no_overlap, overlaps_more_than_one].
    NOTE(review): per_overlap divides by (stop1 - start1); a zero-length
    span would raise ZeroDivisionError -- confirm inputs exclude those.
    """
    gene_found = 0
    gene_opp = 0
    gene_no_overlap = 0
    gene_more_than_one_overlap = 0
    temp_file1 = args.output_dir + "/" + tag + '.found.txt'
    ft1 = open(temp_file1,'w')
    temp_file2 = args.output_dir + "/" + tag + '.opposite.txt'
    ft2 = open(temp_file2,'w')
    temp_file3 = args.output_dir + "/" + tag + '.no_overlap.txt'
    ft3 = open(temp_file3,'w')
    temp_file4 = args.output_dir + "/" + tag + '.overlap_more_than_one.txt'
    ft4 = open(temp_file4,'w')
    for c1 in cds1 :
        # percentage overlaps with cds2 genes, split by strand agreement
        gene_overlap_same = []
        gene_overlap_opp = []
        chrom1 = (c1.split(':'))[0]
        cds_id1 = (c1.split(':'))[1]
        start1 = int((c1.split(':'))[2])
        stop1 = int((c1.split(':'))[3])
        strand1 = (c1.split(':'))[4]
        for c2 in cds2:
            chrom2 = (c2.split(':'))[0]
            cds_id2 = (c2.split(':'))[1]
            start2 = int((c2.split(':'))[2])
            stop2 = int((c2.split(':'))[3])
            strand2 = (c2.split(':'))[4]
            if (chrom1 != chrom2) :
                continue
            #if (start2 > stop1) :
            #    break
            if(start1 <= stop2 and start2 <= stop1) :
                # overlap length = distance between the two middle coordinates
                arr = [start1,stop1,start2,stop2]
                arr.sort()
                len_overlap = arr[2] - arr[1]
                #if ((stop1 - start1) == 0) :
                #    print(c1)
                per_overlap = (len_overlap/(stop1 - start1)) * 100
                if (strand1 == strand2 ) :
                    gene_overlap_same.append(per_overlap)
                else :
                    gene_overlap_opp.append(per_overlap)
        # classify by number of same-strand overlaps (opp-strand is a
        # fallback category only when there is no same-strand overlap)
        if (len(gene_overlap_same) == 1) :
            gene_found += 1
            ft1.write(chrom1 + "\t" + str(start1) + "\t" + str(stop1) + "\t" + strand1 + "\t" + cds_id1 + "\n")
        if (len(gene_overlap_same) == 0 and len(gene_overlap_opp) >= 1) :
            gene_opp += 1
            ft2.write(chrom1 + "\t" + str(start1) + "\t" + str(stop1) + "\t" + strand1 + "\t" + cds_id1 + "\n")
        if (len(gene_overlap_same) == 0 and len(gene_overlap_opp) == 0) :
            gene_no_overlap +=1
            ft3.write(chrom1 + "\t" + str(start1) + "\t" + str(stop1) + "\t" + strand1 + "\t" + cds_id1 + "\n")
        if (len(gene_overlap_same) > 1) :
            gene_more_than_one_overlap += 1
            ft4.write(chrom1 + "\t" + str(start1) + "\t" + str(stop1) + "\t" + strand1 + "\t" + cds_id1 + "\n")
    arr = [gene_found,gene_opp,gene_no_overlap,gene_more_than_one_overlap]
    return arr
def cordinate(id, loc):
    """Serialize a feature location as 'id:fmin:fmax:strand' (sic: coordinate)."""
    parts = (id, str(loc.fmin), str(loc.fmax), str(loc.strand))
    return ":".join(parts)
def process_files(args):
    """Compare known (annotation_1) vs predicted (annotation_2) GFF3 files.

    Collects unique gene/mRNA/exon(or CDS) coordinates from both files,
    computes SN/PPV at base, exon/CDS and gene level, classifies gene-level
    overlaps via compare_cds(), and writes summary.txt / final_stats.txt /
    true_pred.txt plus per-category reports under args.output_dir.
    NOTE(review): `chr` shadows the builtin and is initialized twice;
    `features_1/2` and `mrna_true` are collected but never used.
    """
    (assemblies_1, features_1) = biocodegff.get_gff3_features(args.annotation_1)
    (assemblies_2, features_2) = biocodegff.get_gff3_features(args.annotation_2)
    a_exons = [] ## Set contains only uniq exons from known annotation, since multiple same exons can appear in a gff file.
    p_exons = [] ## For predicted annotation
    a_gene = []
    p_gene = []
    a_mrna = []
    p_mrna = []
    exon_pred_all = set()
    gene_true = set()
    mrna_true = set()
    chr = []
    a_cds = []
    p_cds = []
    a_cd = []
    p_cd= []
    chr = []
    true_pred_file = args.output_dir + '/true_predicted_genes.txt'
    true_file = open(true_pred_file,'w')
    true_file.write("Known\tPredicted\n")
    for asm_id in assemblies_1: ## Iterate through each chromosome from the known ref annotation
        assembly_1 = assemblies_1[asm_id]
        assembly_2 = assemblies_2.get(asm_id,-1) ## Find that chromosome in the predicted gff file
        genes_1 = assembly_1.genes() ## All genes from known annotation
        anno_exons = set()
        for gene_1 in sorted(genes_1) : ## Add unique gene, mrna , exon features from known annotation to get each known feature total count
            gene_1_loc = gene_1.location_on(assembly_1)
            cord_a = cordinate(asm_id,gene_1_loc) ## Use chromosome id+start+stop+strand as a string to determine uniqueness.
            if (cord_a not in a_gene) :
                a_gene.append(cord_a)
            ex_start = []
            ex_stop = []
            for mrna_1 in sorted(gene_1.mRNAs()) :
                mrna_1_loc = mrna_1.location_on(assembly_1)
                cord = cordinate(asm_id,mrna_1_loc)
                if (cord not in a_mrna) :
                    a_mrna.append(cord)
                if (args.feature == "Exon") :
                    feat_1 = mrna_1.exons()
                if (args.feature == "CDS") :
                    feat_1 = mrna_1.CDSs()
                for exon_1 in sorted(feat_1) :
                    exon_1_loc = exon_1.location_on(assembly_1)
                    cord = cordinate(asm_id, exon_1_loc)
                    if (cord not in a_exons) :
                        a_exons.append(cord)
                    anno_exons.add(cord)
                    ex_start.append(exon_1_loc.fmin)
                    ex_stop.append(exon_1_loc.fmax)
            ex_start.sort()
            ex_stop.sort()
            # Gene span for compare_cds(): first exon start to last exon end,
            # falling back to the gene's own coordinates when it has no exons.
            if (len(ex_start) >= 1) :
                cds1 = asm_id + ":" + gene_1.id + ":" + str(ex_start[0]) + ":" + str(ex_stop[-1]) + ":" + str(gene_1_loc.strand)
            else :
                cds1 = asm_id + ":" + gene_1.id + ":" + str(gene_1_loc.fmin) + ":" + str(gene_1_loc.fmax) + ":" + str(gene_1_loc.strand)
            if (cord_a not in a_cd) :
                a_cds.append(cds1)
                a_cd.append(cord_a)
        if (type(assembly_2) is int) : ## If the chromosome is not found in prediected file, move to next chromosome.
            continue
        genes_2 = assembly_2.genes() ## All genes from predicted annotation.
        chr.append(asm_id) ## Append all found chromosome in a list.
        pred_exons = set()
        for gene_2 in sorted(genes_2) : ## Add unique gene, mrna , exon features from predicted annotation to get each predicted feature total count.
            gene_2_loc = gene_2.location_on(assembly_2)
            cord_p = cordinate(asm_id, gene_2_loc)
            if (cord_p not in p_gene) :
                p_gene.append(cord_p)
            ex_start = []
            ex_stop = []
            for mrna_2 in sorted(gene_2.mRNAs()) :
                mrna_2_loc = mrna_2.location_on(assembly_2)
                cord = cordinate(asm_id, mrna_2_loc)
                if (cord not in p_mrna) :
                    p_mrna.append(cord)
                if (args.feature == "Exon") :
                    feat_2 = mrna_2.exons()
                if (args.feature == "CDS") :
                    feat_2 = mrna_2.CDSs()
                for exon_2 in sorted(feat_2) :
                    exon_2_loc = exon_2.location_on(assembly_2)
                    cord = cordinate(asm_id ,exon_2_loc)
                    pred_exons.add(cord)
                    if (cord not in p_exons) :
                        p_exons.append(cord)
                    ex_start.append(exon_2_loc.fmin)
                    ex_stop.append(exon_2_loc.fmax)
            ex_start.sort()
            ex_stop.sort()
            if (len(ex_start) >= 1) :
                cds2 = asm_id + ":" + gene_2.id + ":" + str(ex_start[0]) + ":" + str(ex_stop[-1]) + ":" + str(gene_2_loc.strand)
            else :
                cds2 = asm_id + ":" + gene_2.id + ":" + str(gene_2_loc.fmin) + ":" + str(gene_2_loc.fmax) + ":" + str(gene_2_loc.strand)
            if (cord_p not in p_cd) :
                p_cds.append(cds2)
                p_cd.append(cord_p)
        exon_pred_all.update(pred_exons.intersection(anno_exons)) # true exons
        # A predicted gene is "true" when it overlaps a same-strand known gene
        # and their exon/CDS coordinate sets are identical.
        for gene_2 in sorted(genes_2) : ## From the predicted feature determine the true once. Iterate through each predicted gene sorted by cordinate
            gene_2_loc = gene_2.location_on(assembly_2)
            cord_g = cordinate(asm_id, gene_2_loc)
            if (cord_g in gene_true) : ## To prevent duplication, check if the feature already exists in the set of truly predicted gene.
                continue
            ex_mrna1 = set()
            ex_mrna2 = set()
            for gene_1 in sorted(genes_1) :
                gene_1_loc = gene_1.location_on(assembly_1)
                if (gene_1_loc.strand != gene_2_loc.strand) :
                    continue
                if (gene_2.overlaps_with(gene_1)) :
                    for mrna_2 in sorted(gene_2.mRNAs()) :
                        if (args.feature == "Exon") :
                            feat_2 = mrna_2.exons()
                        if (args.feature == "CDS") :
                            feat_2 = mrna_2.CDSs()
                        for exon_2 in sorted(feat_2) :
                            exon_2_loc = exon_2.location_on(assembly_2)
                            cord2 = cordinate(asm_id , exon_2_loc)
                            ex_mrna2.add(cord2)
                    for mrna_1 in sorted(gene_1.mRNAs()) :
                        if (args.feature == "Exon") :
                            feat_1 = mrna_1.exons()
                        if (args.feature == "CDS") :
                            feat_1 = mrna_1.CDSs()
                        for exon_1 in sorted(feat_1) :
                            exon_1_loc = exon_1.location_on(assembly_1)
                            cord1 = cordinate(asm_id, exon_1_loc)
                            ex_mrna1.add(cord1)
                    # identical sets <=> union is the same size as both
                    ex_union = ex_mrna1.union(ex_mrna2)
                    if (len(ex_union) == len(ex_mrna1) and len(ex_union) == len(ex_mrna2)) :
                        gene_true.add(cord_g)
                        true_file.write(gene_1.id+"\t"+gene_2.id+"\n")
                        break
    for asm_id in assemblies_2: ## Iterate through each chromosome from the predicted annotation
        if asm_id not in chr :
            assembly_2 = assemblies_2.get(asm_id,-1) ## Find that chromosome in the predicted gff file which is not found in known annotation
            genes_2 = assembly_2.genes() ## Add genes, mrna, exon features from predicted annotation to total predicted feature set.
            for gene_2 in sorted(genes_2) :
                gene_2_loc = gene_2.location_on(assembly_2)
                cord_p = cordinate(asm_id ,gene_2_loc)
                if (cord_p not in p_gene) :
                    p_gene.append(cord_p)
                ex_start = []
                ex_stop = []
                for mrna_2 in sorted(gene_2.mRNAs()) :
                    mrna_2_loc = mrna_2.location_on(assembly_2)
                    cord = cordinate(asm_id , mrna_2_loc)
                    if (cord not in p_mrna) :
                        p_mrna.append(cord)
                    if (args.feature == "Exon") :
                        feat_2 = mrna_2.exons()
                    if (args.feature == "CDS") :
                        feat_2 = mrna_2.CDSs()
                    for exon_2 in sorted(feat_2) :
                        exon_2_loc = exon_2.location_on(assembly_2)
                        cord = cordinate(asm_id ,exon_2_loc)
                        if (cord not in p_exons) :
                            p_exons.append(cord)
                        ex_start.append(exon_2_loc.fmin)
                        ex_stop.append(exon_2_loc.fmax)
                ex_start.sort()
                ex_stop.sort()
                if (len(ex_start) >= 1) :
                    cds2 = asm_id + ":" + gene_2.id + ":" + str(ex_start[0]) + ":" + str(ex_stop[-1]) + ":" + str(gene_2_loc.strand)
                else :
                    cds2 = asm_id + ":" + gene_2.id + ":" + str(gene_2_loc.fmin) + ":" + str(gene_2_loc.fmax) + ":" + str(gene_2_loc.strand)
                if (cord_p not in p_cd) :
                    p_cds.append(cds2)
                    p_cd.append(cord_p)
    #Calculate SN/SP for bases
    (a_base_val, p_base_val, true_base) = base_comparison(p_exons,a_exons)
    base_sn = (true_base/a_base_val) * 100
    base_sp = (true_base/p_base_val) * 100
    #Calculate SN/SP for exons
    annotated_exon = len(a_exons)
    predicted_exon = len(p_exons)
    true_pred_exon = len(exon_pred_all)
    exon_sn = (true_pred_exon/annotated_exon) * 100
    exon_sp = (true_pred_exon/predicted_exon) * 100
    #Calculate SN/SP for genes
    annotated_gene = len(a_gene)
    predicted_gene = len(p_gene)
    true_pred_gene = len(gene_true)
    gene_sn = (true_pred_gene/annotated_gene) * 100
    gene_sp = (true_pred_gene/predicted_gene) * 100
    print("Feature\tKnown\tPredicted\tTrue_Predicted\tSN\tPPV\n")
    print("Gene\t"+str(annotated_gene)+"\t"+str(predicted_gene)+"\t"+str(true_pred_gene)+"\t"+str(gene_sn)+"\t"+str(gene_sp))
    print(args.feature+"\t"+str(annotated_exon)+"\t"+str(predicted_exon)+"\t"+str(true_pred_exon)+"\t"+str(exon_sn)+"\t"+str(exon_sp))
    print("Base\t"+str(a_base_val)+"\t"+str(p_base_val)+"\t"+str(true_base)+"\t"+str(base_sn)+"\t"+str(base_sp))
    out_file = args.output_dir + '/summary.txt'
    if not (os.path.exists(args.output_dir)) :
        sys.exit("Directory does not exist.")
    fout = open(out_file,'w')
    fout.write("Feature\tKnown\tPredicted\tTrue_Predicted\tSN\tPPV\n")
    fout.write("Gene\t"+str(annotated_gene)+"\t"+str(predicted_gene)+"\t"+str(true_pred_gene)+"\t"+str(gene_sn)+"\t"+str(gene_sp)+"\n")
    fout.write(args.feature+"\t"+str(annotated_exon)+"\t"+str(predicted_exon)+"\t"+str(true_pred_exon)+"\t"+str(exon_sn)+"\t"+str(exon_sp)+"\n")
    fout.write("Base\t"+str(a_base_val)+"\t"+str(p_base_val)+"\t"+str(true_base)+"\t"+str(base_sn)+"\t"+str(base_sp)+"\n\n")
    # Gene-level overlap classification in both directions (and predicted
    # vs. itself, to count predicted genes overlapping other predictions
    # on the opposite strand).
    arr_pred = compare_cds(p_cds,a_cds,"pred")
    arr_known = compare_cds(a_cds,p_cds,"known")
    arr_pred_same = compare_cds(p_cds,p_cds,"pred_same")
    new_gene = arr_pred[2]
    gene_merge = arr_pred[3]
    gene_found = arr_pred[0]
    gene_opp = arr_pred[1]
    gene_missing = arr_known[2]
    gene = arr_known[0]
    gene_opp_known = arr_known[1]
    gene_split = arr_known[3]
    gene_pred_overlap_opp = arr_pred_same[1]
    print ("1. No. of known gene : ",len(a_cds))
    print ("2. No. of predicted gene : ",len(p_cds))
    print ("3. No. of predicted gene overlapping 0 known gene (new gene): ",new_gene)
    print ("4. No. of predicted gene overlapping > 1 known gene (gene merge) : ",gene_merge)
    print ("5. No. of predicted gene overlaping 1 known gene : ",gene_found)
    print ("6. No. of predicted gene overlapping >= 1 known gene in opp strand : ",gene_opp)
    print ("7. No. of predicted gene overlapping 1 known gene (exact intron/exon boundaries) : ",true_pred_gene)
    print ("8. No. of predicted gene overlapping >= 1 predicted gene in opp strand : ",gene_pred_overlap_opp)
    print ("9. No. of known gene overlapping 0 predicted gene (gene missing): ",gene_missing)
    print ("10. No. of known gene overlapping > 1 predicted gene(gene split) : ",gene_split)
    print ("11. No. of known gene overlaping 1 predicted gene : ",gene)
    print ("12. No. of known gene overlapping >= 1 predicted gene in opp strand : ",gene_opp_known)
    out_file = args.output_dir + '/final_stats.txt'
    if not (os.path.exists(args.output_dir)) :
        sys.exit("Directory does not exist.")
    fout = open(out_file,'w')
    fout.write ("1. No. of known gene : " + str(len(a_cds)) + "\n")
    fout.write ("2. No. of predicted gene : " + str(len(p_cds)) + "\n")
    fout.write ("3. No. of predicted gene overlapping 0 known gene (new gene): " + str(new_gene) + "\n")
    fout.write ("4. No. of predicted gene overlapping > 1 known gene (gene merge) : " + str(gene_merge) + "\n")
    fout.write ("5. No. of predicted gene overlaping 1 known gene : " + str(gene_found) + "\n")
    fout.write ("6. No. of predicted gene overlapping >= 1 known gene in opp strand : " + str(gene_opp) + "\n")
    fout.write ("7. No. of predicted gene overlapping 1 known gene (exact intron/exon boundary) : " + str(true_pred_gene) + "\n")
    fout.write ("8. No. of predicted gene overlapping >= 1 predicted gene in opp strand : " + str(gene_pred_overlap_opp) + "\n")
    fout.write ("9. No. of known gene overlapping 0 predicted gene (gene missing): " + str(gene_missing) + "\n")
    fout.write ("10. No. of known gene overlapping > 1 predicted gene (gene_split): " + str(gene_split) + "\n")
    fout.write ("11. No. of known gene overlaping 1 predicted gene : " + str(gene) + "\n")
    fout.write ("12. No. of known gene overlapping >= 1 predicted gene in opp strand : " + str(gene_opp_known) + "\n")
    true_pred_file = args.output_dir + '/true_pred.txt'
    fout_true = open(true_pred_file,'w')
    for true_gene in gene_true :
        fout_true.write(true_gene+"\n")
    #Clean up
    delete_file = ['exon_1.bed','exon_2.bed','exon_1_merged.bed','exon_2_merged.bed','exon_1_2_intersect.bed']
    for f in delete_file :
        cmd = "rm " + args.output_dir + "/" + f
        os.system(cmd)
if __name__ == '__main__':
    # Parse command-line arguments, then run the gene-comparison pipeline.
    args = interface()
    process_files(args)
| zctea/biocode | sandbox/priti.aries88/structural_comparison_gff3.py | Python | gpl-3.0 | 24,536 |
"""Compatibility shim exposing a unified ``dbm`` package layout on both
Python 2 and Python 3.

On Python 3 the stdlib already provides the ``dbm`` package, so the star
import below re-exports it directly.  On Python 2 the same functionality
is scattered across top-level modules (``anydbm``, ``whichdb``, plus the
local ``dumb``/``gnu``/``ndbm`` wrappers), which are pulled into this
namespace instead.
"""
from __future__ import absolute_import
# Re-export whatever ``dbm`` resolves to on the running interpreter.
from dbm import *
from ..version_info import PY2
if PY2:
    # Emulate the Python 3 submodule layout (dbm.dumb / dbm.gnu / dbm.ndbm).
    from . import dumb, gnu, ndbm
    # whichdb()/anydbm supply the open()/whichdb() helpers on Python 2.
    from whichdb import *
    from anydbm import *
| EnTeQuAk/dotfiles | sublime-text-3/Packages/isort/pies/dbm/__init__.py | Python | unlicense | 184 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""This file is part of the django ERP project.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__author__ = 'Emanuele Bertoldi <emanuele.bertoldi@gmail.com>'
__copyright__ = 'Copyright (c) 2013-2015, django ERP Team'
__version__ = '0.0.1'
from django.db import models
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from ..cache import LoggedInUserCache
from ..backends import *
from ..models import User, Group
# Swappable user-model reference in "<app_label>.<ModelName>" form
# (e.g. "auth.User", or a project-specific override).
user_model_string = settings.AUTH_USER_MODEL
# Split the dotted path into app label and model name.
auth_app, sep, user_model_name = user_model_string.rpartition('.')

# Shared fixtures reused by the test modules in this package.
ob = ObjectPermissionBackend()
logged_cache = LoggedInUserCache()

# True when the configured default database looks like a Django test
# database (test runs prefix the DB name with "test_").
IS_TEST_DB = settings.DATABASES.get('default', {}).get('NAME', '').startswith('test_')
class FakeRequest(object):
    """Minimal stand-in for a Django ``HttpRequest`` used by the tests.

    Exposes only the attributes the permission machinery touches: a
    ``user`` slot (``None`` until a test assigns one), empty ``GET`` and
    ``POST`` mappings, and a small fixed ``META`` header dict.
    """

    def __init__(self):
        # No authenticated user by default.
        self.user = None
        # Empty querystring / form payloads.
        self.GET = {}
        self.POST = {}
        # Fixed request metadata for code that inspects headers.
        meta = {}
        meta['HTTP_HOST'] = "myhost.com"
        meta['HTTP_REFERER'] = "http://www.test.com"
        meta['PATH_INFO'] = "/home/test/"
        self.META = meta
def clear_perm_caches(user):
    """Helper function which clears perms caches of the given user.

    Django's auth machinery memoizes permission lookups on a handful of
    private attributes; deleting them forces the next permission check
    to hit the backends again.

    Args:
        user: the user (or user-like) object whose cached permission
            sets should be dropped.  Attributes that are absent are
            silently skipped, so calling this twice is harmless.
    """
    # Single source of truth for every per-user permission cache slot
    # (the original repeated six hasattr/delattr pairs by hand).
    cache_attrs = (
        '_perm_cache',
        '_user_perm_cache',
        '_group_perm_cache',
        '_obj_perm_cache',
        '_user_obj_perm_cache',
        '_group_obj_perm_cache',
    )
    for attr in cache_attrs:
        if hasattr(user, attr):
            delattr(user, attr)
| mobb-io/django-erp | djangoerp/core/tests/__init__.py | Python | mit | 2,133 |
#!/usr/bin/env python
# -*- encoding: UTF8 -*-
# author: Philipp Klaus, philipp.klaus →AT→ gmail.com
# author: InterNetworX, info →AT→ inwx.de
# This file is part of python-inwx-xmlrpc.
#
# python-inwx-xmlrpc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-inwx-xmlrpc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-inwx-xmlrpc. If not, see <http://www.gnu.org/licenses/>.
#####################################################################
###### This the most important file of the project: #######
###### It contains the classes inwx and domrobot, which #######
###### implement the XML-RPC communication with the #######
###### InterNetworX API. #######
from xmlrpclib import ServerProxy, Fault, ProtocolError, _Method, SafeTransport
class domrobot (ServerProxy):
    """XML-RPC proxy for the InterNetworX DomRobot API (Python 2 code).

    Logs in immediately on construction; the session cookie is retained
    by the InwxTransport instance so later calls stay authenticated.
    Nested API names (e.g. ``client.domain.check``) are resolved through
    ``__getattr__`` and end up in ``__request``, which forwards a single
    optional dict argument as the request parameters.
    """

    def __init__ (self, address, username=None, password=None, language='en', verbose=False):
        # Keep the endpoint URL for error reporting in __request().
        self.__address = address
        #super(domrobot, self).__init__(address, transport=InwxTransport(), encoding = "UTF-8", verbose=verbose)
        # Direct base-class call -- presumably super() did not work with
        # xmlrpclib.ServerProxy (see commented line above); TODO confirm.
        ServerProxy.__init__(self, address, transport=InwxTransport(), encoding = "UTF-8", verbose=verbose)
        # Authenticate right away so subsequent calls carry the session.
        self.account.login({'lang': language, 'user': username, 'pass': password})

    def __getattr__(self,name):
        # Route unknown attribute access through __request; _Method
        # chaining lets dotted names like domain.check accumulate.
        return _Method(self.__request, name)

    def __request (self, methodname, params):
        # Resolve the real ServerProxy callable for the dotted method name.
        method_function = ServerProxy.__getattr__(self,methodname)
        self.__params = dict()
        # The API expects exactly one struct argument: accept an optional
        # leading dict from the caller and forward a copy of it.
        if params and type(params) is tuple and len(params)>0 and type(params[0]) is dict:
            self.__params.update(params[0])
        try:
            response = method_function(self.__params)
        except Fault, err:
            raise NameError("Fault", err)
        except ProtocolError, err:
            raise NameError("ProtocolError", err)
        except Exception, err:
            raise NameError("Some other error occured, presumably with the network connection to %s" % self.__address, err)
        # DomRobot result codes below 2000 indicate success.
        if response['code'] < 2000:
            try:
                return response['resData']
            except:
                # not all requests send a response
                return None
        else:
            raise NameError('There was a problem: %s (Error code %s)' % (response['msg'], response['code']), response)
##
# Adds Cookie support to the SafeTransport class:
class InwxTransport(SafeTransport):
    """HTTPS transport that captures the DomRobot session cookie from a
    successful response and replays it on every subsequent request."""

    user_agent = "DomRobot/1.0 Python python-inwx-xmlrpc"
    # Last Set-Cookie header seen.  NOTE(review): stored on the class,
    # so it is shared by all instances of this transport -- confirm that
    # only one session per process is intended.
    __cookie = None

    def single_request(self, host, handler, request_body, verbose=0):
        # This method is almost the same as:
        # http://hg.python.org/cpython/file/2.7/Lib/xmlrpclib.py#l1281
        h = self.make_connection(host)
        if verbose:
            h.set_debuglevel(1)

        try:
            self.send_request(h, handler, request_body)
            self.send_host(h, host)
            self.send_user_agent(h)
            self.send_content(h, request_body)

            response = h.getresponse(buffering=True)

            ## for debugging:
            #print(host, handler)
            #print(request_body)
            #print(response.getheaders())
            #print(response.read())

            if response.status == 200:
                self.verbose = verbose
                # Remember the session cookie so send_content() can
                # attach it to later requests.
                cookie_header = response.getheader('set-cookie')
                if cookie_header: self.__cookie = cookie_header
                return self.parse_response(response)
        except Fault:
            raise
        except Exception:
            # All unexpected errors leave connection in
            # a strange state, so we clear it.
            self.close()
            raise

        # Only reached for non-200 responses that did not raise.
        #discard any response data and raise exception
        if (response.getheader("content-length", 0)):
            response.read()
        raise ProtocolError(
            host + handler,
            response.status, response.reason,
            response.msg,
            )

    def send_content(self, connection, request_body):
        # This method is almost the same as:
        # http://hg.python.org/cpython/file/2.7/Lib/xmlrpclib.py#l1428
        connection.putheader("Content-Type", "text/xml")
        connection.putheader("Content-Length", str(len(request_body)))
        # Replay the session cookie captured in single_request().
        if self.__cookie:
            connection.putheader("Cookie", self.__cookie)
        connection.endheaders(request_body)
class prettyprint (object):
    """
    Static helpers that render XML-API responses as human-readable text.
    Each method returns a formatted string; nothing is printed directly.
    """

    @staticmethod
    def contacts(contacts):
        """
        iterable contacts: The list of contacts to be printed.
        """
        chunks = ["\nCurrently you have %i contacts set up for your account at InterNetworX:\n\n" % len(contacts)]
        for entry in contacts:
            chunks.append("ID: %s\nType: %s\n%s\n%s\n%s %s\n%s\n%s\nTel: %s\n------\n" % (entry['id'], entry['type'], entry['name'], entry['street'], entry['pc'], entry['city'], entry['cc'], entry['email'], entry['voice']))
        return "".join(chunks)

    @staticmethod
    def domains(domains):
        """
        list domains: The list of domains to be pretty printed.
        """
        chunks = ["\n%i domains:\n" % len(domains)]
        for entry in domains:
            chunks.append("Domain: %s (Type: %s)\n" % (entry['domain'], entry['type']))
        return "".join(chunks)

    @staticmethod
    def nameserversets(nameserversets):
        """
        list namerserversets: The list of nameserversets to be pretty printed.
        """
        total = len(nameserversets)
        chunks = ["\n%i nameserversets:\n" % total]
        for position, entry in enumerate(nameserversets, 1):
            chunks.append("%i of %i - ID: %i consisting of [%s]\n" % (position, total, entry['id'], ", ".join(entry['ns'])))
        return "".join(chunks)

    @staticmethod
    def domain_log(logs):
        """
        list logs: The list of nameserversets to be pretty printed.
        """
        total = len(logs)
        chunks = ["\n%i log entries:\n" % total]
        for position, entry in enumerate(logs, 1):
            chunks.append("%i of %i - %s status: '%s' price: %.2f invoice: %s date: %s remote address: %s\n" % (position, total, entry['domain'], entry['status'], entry['price'], entry['invoice'], entry['date'], entry['remoteAddr']))
            chunks.append(" user text: '%s'\n" % entry['userText'].replace("\n",'\n '))
        return "".join(chunks)

    @staticmethod
    def domain_check(checks):
        """
        list checks: The list of domain checks to be pretty printed.
        """
        total = len(checks)
        chunks = ["\n%i domain check(s):\n" % total]
        for entry in checks['domain']:
            chunks.append("%s = %s" % (entry['domain'], entry['status']))
        return "".join(chunks)
| pklaus/python-inwx-xmlrpc | inwx.py | Python | gpl-3.0 | 7,335 |
from ..metrics import mse, logloss, mae, hinge, binary_crossentropy
# Alias: categorical cross-entropy is the multi-class name for log loss.
categorical_crossentropy = logloss
def get_loss(name):
    """Return a loss function defined in this module by its name.

    Args:
        name (str): name of the loss function (e.g. ``'mse'``,
            ``'logloss'``, ``'categorical_crossentropy'``).

    Returns:
        callable: the loss function bound to ``name``.

    Raises:
        ValueError: if no loss function with that name exists.
    """
    # Only a missing key means "unknown loss" -- the original bare
    # ``except:`` would also have masked unrelated errors.
    try:
        return globals()[name]
    except KeyError:
        raise ValueError('Invalid metric function.')
| zhuhuifeng/PyML | mla/neuralnet/loss.py | Python | apache-2.0 | 277 |
#!/usr/bin/env python
import sys
# Usage text shown when the script is invoked without a filename.
usage = """
indent.py <filename>
Indent the given JS file to show the structure of context save/restore instances.
"""

if __name__ == "__main__":
    # NOTE(review): Python 2 script -- uses print statements throughout.
    args = sys.argv[1:]
    if len(args) < 1:
        print usage
        sys.exit(1)
    filename = args[0]

    # Current canvas save/restore nesting depth; print_line() reads it
    # as a module-level global.
    level = 0
    def print_line(line):
        # Emit the line indented one tab per currently-open ctx.save().
        print ("\t"*level) + line

    with open(filename) as f:
        for line in f:
            line = line.strip()
            if line.startswith("ctx.save()"):
                # Opening a save: annotate it and deepen the indent.
                print_line(line + "// [")
                level += 1
            elif line.startswith("ctx.restore()"):
                # Closing: shallow the indent *before* printing so the
                # restore lines up with its matching save.
                level -= 1
                print_line(line + "// ]")
            else:
                print_line(line)
| shaunlebron/PterodactylAttack | project/swf/pteros/baby/indent.py | Python | gpl-3.0 | 604 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api import extensions
from gbpservice._i18n import _
from gbpservice.neutron.extensions import group_policy as gp
# Alias under which this extension is advertised to API clients.
CISCO_APIC_GBP_REUSE_BD_EXT = 'cisco_apic_gbp_reuse_bd'

# Attribute map merged into the group-policy L2-policy resource for API
# v2.0: adds an optional ``reuse_bd`` attribute holding a UUID (or None).
EXTENDED_ATTRIBUTES_2_0 = {
    gp.L2_POLICIES: {
        'reuse_bd': {
            # Create-only: accepted on POST, immutable on PUT.
            'allow_post': True, 'allow_put': False, 'default': None,
            'validate': {'type:uuid_or_none': None},
            'is_visible': True},
    },
}
class Apic_reuse_bd(extensions.ExtensionDescriptor):
    """Extension descriptor advertising the ``reuse_bd`` L2-policy attribute."""

    @classmethod
    def get_name(cls):
        """Human-readable extension name."""
        return "APIC GBP Reuse BD Extension"

    @classmethod
    def get_alias(cls):
        """Machine-readable alias used in API requests."""
        return CISCO_APIC_GBP_REUSE_BD_EXT

    @classmethod
    def get_description(cls):
        """Translated one-line description of what the extension enables."""
        return _("This extension enables creating L2 policy objects that "
                 "use the same BridgeDomain on APIC")

    @classmethod
    def get_updated(cls):
        """Timestamp of the last change to this extension definition."""
        return "2016-11-11T04:20:00-00:00"

    def get_extended_resources(self, version):
        """Return the attribute map contributed for API *version* (2.0 only)."""
        return EXTENDED_ATTRIBUTES_2_0 if version == "2.0" else {}
| noironetworks/group-based-policy | gbpservice/neutron/extensions/apic_reuse_bd.py | Python | apache-2.0 | 1,646 |
from __future__ import absolute_import
from agms.response.response import Response
from agms.exception.response_exception import ResponseException
class HPPResponse(Response):
    """
    A class representing AGMS HPP Response objects.

    Unwraps the SOAP envelope of a hosted-payment-page request, keeps the
    returned hash, and raises if the gateway reported a failure.
    """

    # Substrings whose presence in the returned value marks a failed
    # HPP generation.
    _FAILURE_MARKERS = ('INVALID', 'ERROR', 'EXCEPTION', 'REQUIRED',
                        'IF USED', 'MUST BE', 'FAILED')

    def __init__(self, response, op):
        self._response = {}
        self._op = op
        self._mapping = {
            '0': 'hash',
        }
        # Dig through the SOAP envelope down to the operation result.
        payload = response['soap:Envelope']['soap:Body'][op + 'Response'][op + 'Result']
        self._response['0'] = payload
        self._hash = payload
        if not self.is_successful():
            raise ResponseException('HPP Generation failed with message ' + self._hash)

    def get_hash(self):
        """Return the hash extracted from the gateway response."""
        return self._hash

    def is_successful(self):
        """Return True unless the stored hash is empty or reads like an error."""
        self._hash = self.get_hash()
        if not self._hash or self._hash == 0:
            return False
        return not any(marker in self._hash for marker in self._FAILURE_MARKERS)
# Run this in Python 3.3 for the unicode to work correctly
codec = 'utf-8'
fp = open('HebrewReference.csv','wb')
fp.write(codec.encode(codec)+b'\n')
#fp.write(csv([codec], 'codec'))
def csv(values=[], prefix=''):
if prefix:
prefix += ','
prefix += ','.join(values)+'\n'
return prefix.encode(codec)
######################################################################
alephbet = 'א,ב,ג,ד,ה,ו,ז,ח,ט,י,כ,ל,מ,נ,ס,ע,פ,צ,ק,ר,ש,ת'.split(',')
a_first = 'א,בּ,גּ,דּ,ה,ו,ז,ח,ט,י,כּ,ל,מ,נ,ס,ע,פּ,צ,ק,ר,ש,תּ'.split(',')
a_final = 'א,ב,ג,ד,ה,ו,ז,ח,ט,י,ך,ל,ם,ן,ס,ע,ף,ץ,ק,ר,ש,ת'.split(',')
a_name = ('Aleph','Bet','Gimel','Dalet','Hey','Vav','Zayin','Chet','Tet','Yod','Kaf','Lamed','Mem',
'Nun','Samech','Ayin','Peh','Tsadi','Kof','Resh','Shin','Tav')
#a_speak = (u'\u00e1h-leph','beth',u'g\u00e9e-mel',u'd\u00e1h-leth','heh','vahv',u'z\u00e1h-yin',u'\u1e25eth','teht','yodh','kahf',u'l\u00e1h-med','mem',
# 'nun',u's\u00e1h-mekh',u'\u00e1h-yin','peh',u'ts\u00e1h-dee','kofh','rehsh','seen','taw')
a_english = ('›','b','g','d','h','v','z','h','t','y','k','l','m','n','s','‹','p','s','q','r','s','t')
fp.write(csv(alephbet, 'alephbet'))
fp.write(csv(a_first, 'a_first'))
fp.write(csv(a_final, 'a_final'))
fp.write(csv(a_name, 'a_name'))
fp.write(csv(a_english, 'a_english'))
######################################################################
vowels = 'בבַבֶבִבֻבָבֵבֵיבִיבוֿבוּ'
v_names = ('Patah','Seghol','Hiriq Qatan','Qubbus',
'Qamats','Sereh','Sereh','Hiriq Gadol','Holam','Shuruq')
v_translit = ('a','e','i','u',
'a','e','e','i','o','o','u')
v_word = ('card','pen','sit','pull',
'card','prey','prey','marine','lore','lore','flute')
# Remove Bet to make vowels generic
def addO(s=b''):
    # NOTE(review): this returns immediately, so everything after the
    # next line is unreachable.  The dotted-circle handling below looks
    # deliberately disabled (see its comment about rendering problems).
    # Despite the comment above, the live behaviour PREPENDS the letter
    # Bet (U+05D1, bytes b'\xd7\x91') to the vowel-mark bytes.
    return b'\xd7\x91'+s
    # The '◌' character doesn't display correctly with the markings
    if b'\xd7\x95' in s:
        return s+'◌'.encode(codec)
    else:
        return s
vowels_en = tuple([ addO(s).decode(codec) for s in bytes(vowels.encode(codec)).split(b'\xd7\x91') if s ])
print('\nHebrew Vowels: ',vowels)
for i in zip(v_names, vowels_en, v_translit, v_word):
#print(' {0:13s}{1:>3s} {3:s} as in {4:7s} {2:s}'.format(i[0], i[1].decode(codec), i[1], i[2], i[3]))
print(' {0:13s}{1:>3s} {3:s} as in {4:7s} {2:s}'.format(i[0], i[1], i[1].encode(codec), i[2], i[3]))
fp.write(csv(vowels_en, 'vowels'))
fp.write(csv(v_names, 'v_names'))
fp.write(csv(v_translit, 'v_english'))
fp.write(csv(v_word, 'v_word'))
fp.write(csv(v_names, 'v_names'))
######################################################################
# TODO
# Add example words for transliteration... h as in hair
# See p19 for similar letters
fp.close()
| jimaples/Moreh | hebrew_quiz/code/GenerateUnicode.py | Python | mit | 2,916 |
# Rebuild the project wheel from scratch and (re)install it locally.
import subprocess, shutil, glob

# uninstall and clean
whls = glob.glob('dist/no_you_talk_to_the_hand-*-py2-none-any.whl')
for whl in whls:
    # NOTE(review): no ``-y`` flag, so pip may prompt for confirmation.
    subprocess.call('pip uninstall {}'.format(whl), shell=True)
# NOTE(review): rmtree raises if dist/ is absent -- assumes a prior build.
shutil.rmtree('dist')

# build and install
subprocess.call('python setup.py build', shell=True)
subprocess.call('python setup.py bdist_wheel', shell=True)
# Exactly one freshly-built wheel is expected in dist/ at this point.
whl = glob.glob('dist/no_you_talk_to_the_hand-*-py2-none-any.whl')[0]
subprocess.call('pip install {}'.format(whl), shell=True)
| flashashen/no-YOU-talk-to-the-hand | rebuild_and_install.py | Python | mit | 488 |
# Value interface
from abc import ABCMeta, abstractmethod
class Value(object):
    """Abstract base for items appearing in a denotation (gold or predicted)."""

    # Python-2 style ABC declaration (has no effect on Python 3).
    __metaclass__ = ABCMeta

    @abstractmethod
    def match(self, other):
        """Decide whether this value equals ``other`` under the official
        evaluation criteria.

        Args:
            other (Value): the value to compare against.

        Returns:
            bool: True when the two values are considered equal.
        """
        pass

    def train_match(self, other):
        """Train-time notion of equality.

        Subclasses may override this to reward predictions of the right
        type; by default it simply defers to :meth:`match`.

        Args:
            other (Value): the value to compare against.

        Returns:
            bool
        """
        return self.match(other)
def check_denotation(target_values, predicted_values):
    """Return True if the predicted denotation is correct.

    Correct means: same number of values, and every target value is
    matched by at least one predicted value.

    Args:
        target_values (list[Value] or set[Value])
        predicted_values (list[Value] or set[Value])
    Returns:
        bool
    """
    # The executor can hand back an Exception as the "denotation" when
    # the logical form does not make sense; that is never correct.
    if isinstance(predicted_values, Exception):
        return False
    if len(target_values) != len(predicted_values):
        return False
    # Every gold value must find a matching prediction.
    return all(
        any(gold.match(pred) for pred in predicted_values)
        for gold in target_values
    )
| kelvinguu/lang2program | strongsup/value.py | Python | apache-2.0 | 1,474 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License
#
# Copyright 2012 Sony Mobile Communications. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" Unit tests for the Gerrit event stream handler and event objects. """
import json
import os
import unittest
from pygerrit.events import PatchsetCreatedEvent, \
RefUpdatedEvent, ChangeMergedEvent, CommentAddedEvent, \
ChangeAbandonedEvent, ChangeRestoredEvent, \
DraftPublishedEvent, GerritEventFactory, GerritEvent, UnhandledEvent, \
ErrorEvent, MergeFailedEvent, ReviewerAddedEvent, TopicChangedEvent
from pygerrit.client import GerritClient
from pygerrit import GerritReviewMessageFormatter
from pygerrit.rest import GerritReview
EXPECTED_TEST_CASE_FIELDS = ['header', 'footer', 'paragraphs', 'result']
TEST_CASES = [
{'header': None,
'footer': None,
'paragraphs': [],
'result': ""},
{'header': "Header",
'footer': "Footer",
'paragraphs': [],
'result': ""},
{'header': None,
'footer': None,
'paragraphs': ["Test"],
'result': "Test"},
{'header': None,
'footer': None,
'paragraphs': ["Test", "Test"],
'result': "Test\n\nTest"},
{'header': "Header",
'footer': None,
'paragraphs': ["Test"],
'result': "Header\n\nTest"},
{'header': "Header",
'footer': None,
'paragraphs': ["Test", "Test"],
'result': "Header\n\nTest\n\nTest"},
{'header': "Header",
'footer': "Footer",
'paragraphs': ["Test", "Test"],
'result': "Header\n\nTest\n\nTest\n\nFooter"},
{'header': "Header",
'footer': "Footer",
'paragraphs': [["One"]],
'result': "Header\n\n* One\n\nFooter"},
{'header': "Header",
'footer': "Footer",
'paragraphs': [["One", "Two"]],
'result': "Header\n\n* One\n* Two\n\nFooter"},
{'header': "Header",
'footer': "Footer",
'paragraphs': ["Test", ["One"], "Test"],
'result': "Header\n\nTest\n\n* One\n\nTest\n\nFooter"},
{'header': "Header",
'footer': "Footer",
'paragraphs': ["Test", ["One", "Two"], "Test"],
'result': "Header\n\nTest\n\n* One\n* Two\n\nTest\n\nFooter"},
{'header': "Header",
'footer': "Footer",
'paragraphs': ["Test", "Test", ["One"]],
'result': "Header\n\nTest\n\nTest\n\n* One\n\nFooter"},
{'header': None,
'footer': None,
'paragraphs': [["* One", "* Two"]],
'result': "* One\n* Two"},
{'header': None,
'footer': None,
'paragraphs': [["* One ", " * Two "]],
'result': "* One\n* Two"},
{'header': None,
'footer': None,
'paragraphs': [["*", "*"]],
'result': ""},
{'header': None,
'footer': None,
'paragraphs': [["", ""]],
'result': ""},
{'header': None,
'footer': None,
'paragraphs': [[" ", " "]],
'result': ""},
{'header': None,
'footer': None,
'paragraphs': [["* One", " ", "* Two"]],
'result': "* One\n* Two"}]
@GerritEventFactory.register("user-defined-event")
class UserDefinedEvent(GerritEvent):

    """ Dummy event class to test event registration. """

    def __init__(self, json_data):
        # Let the base event keep the raw json, then extract the two
        # fields asserted on by test_user_defined_event().
        super(UserDefinedEvent, self).__init__(json_data)
        self.title = json_data['title']
        self.description = json_data['description']
def _create_event(name, gerrit):
    """ Create a new event.

    Read the contents of the file specified by `name` (the file
    ``testdata/<name>.txt``), strip all newlines, and add the resulting
    JSON string as an event in the `gerrit` client.

    Args:
        name: basename (without extension) of the testdata file.
        gerrit: client exposing ``put_event(data)``.

    Returns:
        The newline-stripped file contents that were queued.

    """
    # Context manager closes the handle even if put_event raises
    # (the original opened the file and never closed it).
    with open(os.path.join("testdata", name + ".txt")) as testfile:
        data = testfile.read().replace("\n", "")
    gerrit.put_event(data)
    return data
class TestGerritEvents(unittest.TestCase):
def setUp(self):
self.gerrit = GerritClient("review")
def test_patchset_created(self):
_create_event("patchset-created-event", self.gerrit)
event = self.gerrit.get_event(False)
self.assertTrue(isinstance(event, PatchsetCreatedEvent))
self.assertEqual(event.name, "patchset-created")
self.assertEqual(event.change.project, "project-name")
self.assertEqual(event.change.branch, "branch-name")
self.assertEqual(event.change.topic, "topic-name")
self.assertEqual(event.change.change_id,
"Ideadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
self.assertEqual(event.change.number, "123456")
self.assertEqual(event.change.subject, "Commit message subject")
self.assertEqual(event.change.url, "http://review.example.com/123456")
self.assertEqual(event.change.owner.name, "Owner Name")
self.assertEqual(event.change.owner.email, "owner@example.com")
self.assertEqual(event.patchset.number, "4")
self.assertEqual(event.patchset.revision,
"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
self.assertEqual(event.patchset.ref, "refs/changes/56/123456/4")
self.assertEqual(event.patchset.uploader.name, "Uploader Name")
self.assertEqual(event.patchset.uploader.email, "uploader@example.com")
self.assertEqual(event.uploader.name, "Uploader Name")
self.assertEqual(event.uploader.email, "uploader@example.com")
def test_draft_published(self):
_create_event("draft-published-event", self.gerrit)
event = self.gerrit.get_event(False)
self.assertTrue(isinstance(event, DraftPublishedEvent))
self.assertEqual(event.name, "draft-published")
self.assertEqual(event.change.project, "project-name")
self.assertEqual(event.change.branch, "branch-name")
self.assertEqual(event.change.topic, "topic-name")
self.assertEqual(event.change.change_id,
"Ideadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
self.assertEqual(event.change.number, "123456")
self.assertEqual(event.change.subject, "Commit message subject")
self.assertEqual(event.change.url, "http://review.example.com/123456")
self.assertEqual(event.change.owner.name, "Owner Name")
self.assertEqual(event.change.owner.email, "owner@example.com")
self.assertEqual(event.patchset.number, "4")
self.assertEqual(event.patchset.revision,
"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
self.assertEqual(event.patchset.ref, "refs/changes/56/123456/4")
self.assertEqual(event.patchset.uploader.name, "Uploader Name")
self.assertEqual(event.patchset.uploader.email, "uploader@example.com")
self.assertEqual(event.uploader.name, "Uploader Name")
self.assertEqual(event.uploader.email, "uploader@example.com")
def test_ref_updated(self):
_create_event("ref-updated-event", self.gerrit)
event = self.gerrit.get_event(False)
self.assertTrue(isinstance(event, RefUpdatedEvent))
self.assertEqual(event.name, "ref-updated")
self.assertEqual(event.ref_update.project, "project-name")
self.assertEqual(event.ref_update.oldrev,
"0000000000000000000000000000000000000000")
self.assertEqual(event.ref_update.newrev,
"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
self.assertEqual(event.ref_update.refname, "refs/tags/refname")
self.assertEqual(event.submitter.name, "Submitter Name")
self.assertEqual(event.submitter.email, "submitter@example.com")
def test_change_merged(self):
_create_event("change-merged-event", self.gerrit)
event = self.gerrit.get_event(False)
self.assertTrue(isinstance(event, ChangeMergedEvent))
self.assertEqual(event.name, "change-merged")
self.assertEqual(event.change.project, "project-name")
self.assertEqual(event.change.branch, "branch-name")
self.assertEqual(event.change.topic, "topic-name")
self.assertEqual(event.change.change_id,
"Ideadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
self.assertEqual(event.change.number, "123456")
self.assertEqual(event.change.subject, "Commit message subject")
self.assertEqual(event.change.url, "http://review.example.com/123456")
self.assertEqual(event.change.owner.name, "Owner Name")
self.assertEqual(event.change.owner.email, "owner@example.com")
self.assertEqual(event.patchset.number, "4")
self.assertEqual(event.patchset.revision,
"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
self.assertEqual(event.patchset.ref, "refs/changes/56/123456/4")
self.assertEqual(event.patchset.uploader.name, "Uploader Name")
self.assertEqual(event.patchset.uploader.email, "uploader@example.com")
self.assertEqual(event.submitter.name, "Submitter Name")
self.assertEqual(event.submitter.email, "submitter@example.com")
def test_merge_failed(self):
_create_event("merge-failed-event", self.gerrit)
event = self.gerrit.get_event(False)
self.assertTrue(isinstance(event, MergeFailedEvent))
self.assertEqual(event.name, "merge-failed")
self.assertEqual(event.change.project, "project-name")
self.assertEqual(event.change.branch, "branch-name")
self.assertEqual(event.change.topic, "topic-name")
self.assertEqual(event.change.change_id,
"Ideadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
self.assertEqual(event.change.number, "123456")
self.assertEqual(event.change.subject, "Commit message subject")
self.assertEqual(event.change.url, "http://review.example.com/123456")
self.assertEqual(event.change.owner.name, "Owner Name")
self.assertEqual(event.change.owner.email, "owner@example.com")
self.assertEqual(event.patchset.number, "4")
self.assertEqual(event.patchset.revision,
"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
self.assertEqual(event.patchset.ref, "refs/changes/56/123456/4")
self.assertEqual(event.patchset.uploader.name, "Uploader Name")
self.assertEqual(event.patchset.uploader.email, "uploader@example.com")
self.assertEqual(event.submitter.name, "Submitter Name")
self.assertEqual(event.submitter.email, "submitter@example.com")
self.assertEqual(event.reason, "Merge failed reason")
def test_comment_added(self):
_create_event("comment-added-event", self.gerrit)
event = self.gerrit.get_event(False)
self.assertTrue(isinstance(event, CommentAddedEvent))
self.assertEqual(event.name, "comment-added")
self.assertEqual(event.change.project, "project-name")
self.assertEqual(event.change.branch, "branch-name")
self.assertEqual(event.change.topic, "topic-name")
self.assertEqual(event.change.change_id,
"Ideadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
self.assertEqual(event.change.number, "123456")
self.assertEqual(event.change.subject, "Commit message subject")
self.assertEqual(event.change.url, "http://review.example.com/123456")
self.assertEqual(event.change.owner.name, "Owner Name")
self.assertEqual(event.change.owner.email, "owner@example.com")
self.assertEqual(event.patchset.number, "4")
self.assertEqual(event.patchset.revision,
"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
self.assertEqual(event.patchset.ref, "refs/changes/56/123456/4")
self.assertEqual(event.patchset.uploader.name, "Uploader Name")
self.assertEqual(event.patchset.uploader.email, "uploader@example.com")
self.assertEqual(len(event.approvals), 2)
self.assertEqual(event.approvals[0].category, "CRVW")
self.assertEqual(event.approvals[0].description, "Code Review")
self.assertEqual(event.approvals[0].value, "1")
self.assertEqual(event.approvals[1].category, "VRIF")
self.assertEqual(event.approvals[1].description, "Verified")
self.assertEqual(event.approvals[1].value, "1")
self.assertEqual(event.author.name, "Author Name")
self.assertEqual(event.author.email, "author@example.com")
def test_reviewer_added(self):
_create_event("reviewer-added-event", self.gerrit)
event = self.gerrit.get_event(False)
self.assertTrue(isinstance(event, ReviewerAddedEvent))
self.assertEqual(event.name, "reviewer-added")
self.assertEqual(event.change.project, "project-name")
self.assertEqual(event.change.branch, "branch-name")
self.assertEqual(event.change.topic, "topic-name")
self.assertEqual(event.change.change_id,
"Ideadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
self.assertEqual(event.change.number, "123456")
self.assertEqual(event.change.subject, "Commit message subject")
self.assertEqual(event.change.url, "http://review.example.com/123456")
self.assertEqual(event.change.owner.name, "Owner Name")
self.assertEqual(event.change.owner.email, "owner@example.com")
self.assertEqual(event.patchset.number, "4")
self.assertEqual(event.patchset.revision,
"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
self.assertEqual(event.patchset.ref, "refs/changes/56/123456/4")
self.assertEqual(event.patchset.uploader.name, "Uploader Name")
self.assertEqual(event.patchset.uploader.email, "uploader@example.com")
self.assertEqual(event.reviewer.name, "Reviewer Name")
self.assertEqual(event.reviewer.email, "reviewer@example.com")
def test_change_abandoned(self):
_create_event("change-abandoned-event", self.gerrit)
event = self.gerrit.get_event(False)
self.assertTrue(isinstance(event, ChangeAbandonedEvent))
self.assertEqual(event.name, "change-abandoned")
self.assertEqual(event.change.project, "project-name")
self.assertEqual(event.change.branch, "branch-name")
self.assertEqual(event.change.topic, "topic-name")
self.assertEqual(event.change.change_id,
"Ideadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
self.assertEqual(event.change.number, "123456")
self.assertEqual(event.change.subject, "Commit message subject")
self.assertEqual(event.change.url, "http://review.example.com/123456")
self.assertEqual(event.change.owner.name, "Owner Name")
self.assertEqual(event.change.owner.email, "owner@example.com")
self.assertEqual(event.abandoner.name, "Abandoner Name")
self.assertEqual(event.abandoner.email, "abandoner@example.com")
self.assertEqual(event.reason, "Abandon reason")
def test_change_restored(self):
_create_event("change-restored-event", self.gerrit)
event = self.gerrit.get_event(False)
self.assertTrue(isinstance(event, ChangeRestoredEvent))
self.assertEqual(event.name, "change-restored")
self.assertEqual(event.change.project, "project-name")
self.assertEqual(event.change.branch, "branch-name")
self.assertEqual(event.change.topic, "topic-name")
self.assertEqual(event.change.change_id,
"Ideadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
self.assertEqual(event.change.number, "123456")
self.assertEqual(event.change.subject, "Commit message subject")
self.assertEqual(event.change.url, "http://review.example.com/123456")
self.assertEqual(event.change.owner.name, "Owner Name")
self.assertEqual(event.change.owner.email, "owner@example.com")
self.assertEqual(event.restorer.name, "Restorer Name")
self.assertEqual(event.restorer.email, "restorer@example.com")
self.assertEqual(event.reason, "Restore reason")
def test_topic_changed(self):
_create_event("topic-changed-event", self.gerrit)
event = self.gerrit.get_event(False)
self.assertTrue(isinstance(event, TopicChangedEvent))
self.assertEqual(event.name, "topic-changed")
self.assertEqual(event.change.project, "project-name")
self.assertEqual(event.change.branch, "branch-name")
self.assertEqual(event.change.topic, "topic-name")
self.assertEqual(event.change.change_id,
"Ideadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
self.assertEqual(event.change.number, "123456")
self.assertEqual(event.change.subject, "Commit message subject")
self.assertEqual(event.change.url, "http://review.example.com/123456")
self.assertEqual(event.change.owner.name, "Owner Name")
self.assertEqual(event.change.owner.email, "owner@example.com")
self.assertEqual(event.changer.name, "Changer Name")
self.assertEqual(event.changer.email, "changer@example.com")
self.assertEqual(event.oldtopic, "old-topic")
def test_user_defined_event(self):
_create_event("user-defined-event", self.gerrit)
event = self.gerrit.get_event(False)
self.assertTrue(isinstance(event, UserDefinedEvent))
self.assertEqual(event.title, "Event title")
self.assertEqual(event.description, "Event description")
def test_unhandled_event(self):
data = _create_event("unhandled-event", self.gerrit)
event = self.gerrit.get_event(False)
self.assertTrue(isinstance(event, UnhandledEvent))
self.assertEqual(event.json, json.loads(data))
def test_invalid_json(self):
_create_event("invalid-json", self.gerrit)
event = self.gerrit.get_event(False)
self.assertTrue(isinstance(event, ErrorEvent))
def test_add_duplicate_event(self):
try:
@GerritEventFactory.register("user-defined-event")
class AnotherUserDefinedEvent(GerritEvent):
pass
except:
return
self.fail("Did not raise exception when duplicate event registered")
class TestGerritReviewMessageFormatter(unittest.TestCase):

    """ Test that the GerritReviewMessageFormatter class behaves properly. """

    def _check_test_case_fields(self, test_case, i):
        """ Assert that test case number `i` has the expected layout. """
        for field in EXPECTED_TEST_CASE_FIELDS:
            self.assertTrue(field in test_case,
                            "field '%s' not present in test case #%d" %
                            (field, i))
        self.assertTrue(isinstance(test_case['paragraphs'], list),
                        "'paragraphs' field is not a list in test case #%d" % i)

    def test_is_empty(self):
        """ Test if message is empty for missing header and footer. """
        formatter = GerritReviewMessageFormatter(header=None, footer=None)
        self.assertTrue(formatter.is_empty())
        formatter.append(['test'])
        self.assertFalse(formatter.is_empty())

    def test_message_formatting(self):
        """ Test message formatter for different test cases. """
        for i, test_case in enumerate(TEST_CASES):
            self._check_test_case_fields(test_case, i)
            formatter = GerritReviewMessageFormatter(
                header=test_case['header'], footer=test_case['footer'])
            for paragraph in test_case['paragraphs']:
                formatter.append(paragraph)
            message = formatter.format()
            self.assertEqual(message, test_case['result'],
                             "Formatted message does not match expected "
                             "result in test case #%d:\n[%s]" % (i, message))
class TestGerritReview(unittest.TestCase):

    """ Test that the GerritReview class behaves properly. """

    def test_str(self):
        """ Test for str function. """
        # An empty review serializes to an empty JSON object.
        self.assertEqual(str(GerritReview()), '{}')
        # Labels only.
        labels_only = GerritReview(labels={'Verified': 1, 'Code-Review': -1})
        self.assertEqual(
            str(labels_only),
            '{"labels": {"Verified": 1, "Code-Review": -1}}')
        # Comments only.
        comments_only = GerritReview(
            comments=[{'filename': 'Makefile',
                       'line': 10, 'message': 'test'}])
        self.assertEqual(
            str(comments_only),
            '{"comments": {"Makefile": [{"line": 10, "message": "test"}]}}')
        # Labels and comments together.
        combined = GerritReview(labels={'Verified': 1, 'Code-Review': -1},
                                comments=[{'filename': 'Makefile', 'line': 10,
                                           'message': 'test'}])
        self.assertEqual(
            str(combined),
            '{"labels": {"Verified": 1, "Code-Review": -1},'
            ' "comments": {"Makefile": [{"line": 10, "message": "test"}]}}')
        # Comments on several files are grouped per filename.
        multi_file = GerritReview(
            comments=[{'filename': 'Makefile', 'line': 15,
                       'message': 'test'}, {'filename': 'Make',
                                            'line': 10,
                                            'message': 'test1'}])
        self.assertEqual(
            str(multi_file),
            '{"comments": {"Make": [{"line": 10, "message": "test1"}],'
            ' "Makefile": [{"line": 15, "message": "test"}]}}')
if __name__ == '__main__':
    # Allow running this test module directly: `python unittests.py`.
    unittest.main()
| benjiii/pygerrit | unittests.py | Python | mit | 22,115 |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Quota service definition and implementation.
Contains message and service definitions for a simple quota service. The
service maintains a set of quotas for users that can be deducted from in
a single transaction. The requests to do this can be configured so that if
one quota check fails, none of the quota changes will take effect.
The service is configured using a QuotaConfig object and can be passed an
existing quota state (useful for if the service quits unexpectedly and is
being restored from checkpoint). For this reason it is necessary to use
a factory instead of the default constructor. For example:
quota_config = QuotaConfig(
buckets = [ QuotaBucket('DISK', 1000000),
QuotaBucket('EMAILS', 100, refresh_every=24 * 60 * 60),
])
quota_state = {}
quota_service = QuotaService.new_factory(quota_config, quota_state)
Every on-going request to the quota service shares the same configuration and
state objects.
Individual quota buckets can be specified to refresh to their original amounts
at regular intervals. These intervals are specified in seconds. The example
above specifies that the email quota is refreshed to 100 emails every day.
It is up to the client using the quota service to respond correctly to the
response of the quota service.  It does not try to raise an exception on
quota denial.
"""
import threading
import time
from protorpc import messages
from protorpc import remote
from protorpc import util
class QuotaCheck(messages.Message):
  """Quota check or deduction for a single named bucket.

  Fields:
    name: Name of quota bucket to check.
    tokens: Number of tokens to check for quota or deduct.  A negative value
      can be used to credit quota buckets.
    mode: Quota check-mode.  See Mode enumeration class for more details.
  """

  class Mode(messages.Enum):
    """Mode for individual bucket quota check.

    Values:
      ALL: All tokens must be available for consumption or else quota check
        fails and all deductions/credits are ignored.
      SOME: At least some tokens must be available for consumption.  This
        check will only fail if the remaining tokens in the bucket are
        already at zero.
      CHECK_ALL: All tokens must be available in bucket or else quota check
        fails and all other deductions/credits are ignored.  This will not
        cause a deduction to occur for the indicated bucket.
      CHECK_SOME: At least some tokens must be available in bucket or else
        quota check fails and all other deductions/credits are ignored.
        This will not cause a deduction to occur for the indicated bucket.
    """
    ALL = 1
    SOME = 2
    CHECK_ALL = 3
    CHECK_SOME = 4

  name = messages.StringField(1, required=True)
  tokens = messages.IntegerField(2, required=True)
  mode = messages.EnumField(Mode, 3, default=Mode.ALL)
class QuotaRequest(messages.Message):
  """A request to check or deduct tokens from a user's buckets.

  Fields:
    user: User to check or deduct quota for.
    quotas: Quotas to check or deduct against, one QuotaCheck per bucket.
  """

  user = messages.StringField(1, required=True)
  quotas = messages.MessageField(QuotaCheck, 2, repeated=True)
class CheckResult(messages.Message):
  """Quota check result for a single bucket.

  Fields:
    status: Status of quota check for bucket.  See Status enum for details.
    available: Number of actual tokens available or consumed.  Will be less
      than the number of requested tokens when bucket has fewer tokens than
      requested.
  """

  class Status(messages.Enum):
    """Status of check result.

    Values:
      OK: All requested tokens are available or were deducted.
      SOME: Some requested tokens are available or were deducted.  This
        will cause any deductions to fail if the request mode is ALL or
        CHECK_ALL.
      NONE: No tokens were available.  Quota check is considered to have
        failed.
    """
    OK = 1
    SOME = 2
    NONE = 3

  status = messages.EnumField(Status, 1, required=True)
  available = messages.IntegerField(2, required=True)
class QuotaResponse(messages.Message):
  """Response to QuotaRequest.

  Fields:
    all_status: Overall status of quota request.  If no quota tokens were
      available at all, this will be NONE.  If some tokens were available,
      even if some buckets had no tokens, this will be SOME.  If all tokens
      were available this will be OK.
    denied: If true, it means that some required quota check has failed.
      Any deductions in the request will be ignored, even if those
      individual buckets had adequate tokens.
    results: Specific results of quota check for each requested bucket.
      The names are not included as they will have a one to one
      correspondence with buckets indicated in the request.
  """

  all_status = messages.EnumField(CheckResult.Status, 1, required=True)
  denied = messages.BooleanField(2, required=True)
  results = messages.MessageField(CheckResult, 3, repeated=True)
class QuotaConfig(messages.Message):
  """Quota configuration.

  Structure used for configuring quota server.  This message is not used
  directly in the service definition, but is used to configure the
  implementation.

  Fields:
    buckets: Individual bucket configurations.  Bucket configurations are
      specified per server and are configured for any user that is
      requested.
  """

  class Bucket(messages.Message):
    """Individual bucket configuration.

    Fields:
      name: Bucket name.
      initial_tokens: Number of tokens initially configured for this bucket.
      refresh_every: Number of seconds after which initial tokens are
        restored.  If this value is None, tokens are never restored once
        used, unless credited by the application.
    """
    name = messages.StringField(1, required=True)
    initial_tokens = messages.IntegerField(2, required=True)
    # NOTE(review): field number 3 is skipped (refresh_every uses 4) --
    # presumably reserved for a removed field; confirm before reusing 3.
    refresh_every = messages.IntegerField(4)

  buckets = messages.MessageField(Bucket, 1, repeated=True)
class QuotaStateRequest(messages.Message):
  """Request state of all quota buckets for a single user.

  Used for determining how many tokens remain in all the user's quota
  buckets.

  Fields:
    user: The user to get buckets for.
  """

  user = messages.StringField(1, required=True)
class BucketState(messages.Message):
  """State of an individual quota bucket.

  Fields:
    name: Name of bucket.
    remaining_tokens: Number of tokens that remain in bucket.
  """

  name = messages.StringField(1, required=True)
  remaining_tokens = messages.IntegerField(2, required=True)
class QuotaStateResponse(messages.Message):
  """Response to QuotaStateRequest containing set of bucket states for user.

  Fields:
    bucket_states: One BucketState per configured bucket.
  """

  bucket_states = messages.MessageField(BucketState, 1, repeated=True)
class QuotaState(object):
  """Quota state class, used by implementation of service.

  This class is responsible for managing all the bucket states for a user.
  Quota checks and deductions must be done in the context of a transaction.
  If a transaction fails, it can be rolled back so that the values of the
  individual buckets are preserved, even if previous checks and deductions
  succeeded.
  """

  @util.positional(3)
  def __init__(self, state, buckets):
    """Constructor.

    Args:
      state: A dictionary that is used to contain the state, mapping bucket
        names to tuples (remaining_tokens, next_refresh):
          remaining_tokens: Number of tokens remaining in the bucket.
          next_refresh: Time when bucket needs to be refilled with initial
            tokens, or None if the bucket is never refreshed.
      buckets: A dictionary that maps bucket names to QuotaConfig.Bucket
        instances.
    """
    self.__state = state
    self.__buckets = buckets
    self.__lock = threading.Lock()  # Held for the duration of a transaction.
    # Per-thread transaction bookkeeping.  Attributes assigned on a
    # threading.local instance are visible only to the thread that set them,
    # so threads other than the one running __init__ start with *no*
    # 'changes'/'time' attributes at all -- see in_transaction() below.
    self.__transaction = threading.local()
    self.__transaction.changes = None  # Dict bucket name -> token delta.
                                       # Negative values indicate credit.
    self.__transaction.time = None     # Time at which transaction began.

  def in_transaction(self):
    """Whether the calling thread currently has an open transaction."""
    # Bug fix: use getattr() with a default.  Plain attribute access raised
    # AttributeError for every thread except the one that ran __init__,
    # because threading.local attributes are per-thread.
    return getattr(self.__transaction, 'changes', None) is not None

  def begin_transaction(self):
    """Begin quota transaction for the calling thread.

    Acquires the state lock; it is released again by commit_transaction()
    or abort_transaction().
    """
    assert not self.in_transaction()
    self.__transaction.changes = {}
    self.__transaction.time = int(time.time())
    self.__lock.acquire()

  def commit_transaction(self):
    """Commit deductions of quota transaction."""
    assert self.in_transaction()
    # items() instead of the Python 2-only iteritems(): equivalent here and
    # keeps the class usable on both Python 2 and 3.
    for name, change in self.__transaction.changes.items():
      remaining_tokens, next_refresh = self.__state[name]
      # Bucket levels never drop below zero, even on over-deduction.
      new_tokens = max(0, remaining_tokens + change)
      self.__state[name] = new_tokens, next_refresh
    self.__transaction.changes = None
    self.__lock.release()

  def abort_transaction(self):
    """Roll back transaction ignoring quota changes."""
    assert self.in_transaction()
    self.__transaction.changes = None
    self.__lock.release()

  def get_remaining_tokens(self, name):
    """Get remaining tokens for a bucket.

    This function must be called within a transaction.  If the bucket has
    never been seen before, or its refresh time has arrived, it is
    (re)initialized with its configured number of initial tokens.

    Args:
      name: Bucket name.

    Returns:
      Integer of remaining tokens in the user's quota bucket (including any
      pending changes of the current transaction), or None if no bucket
      with that name is configured.
    """
    assert self.in_transaction()
    changes = self.__transaction.changes.get(name, 0)
    remaining_tokens, next_refresh = self.__state.get(name, (None, None))

    # Use the stored value while it exists and its refresh time (if any)
    # has not yet passed.
    if remaining_tokens is not None and (
        next_refresh is None or
        next_refresh >= self.__transaction.time):
      return remaining_tokens + changes

    bucket = self.__buckets.get(name, None)
    if bucket is None:
      return None

    # (Re)initialize the bucket from its configuration.
    if bucket.refresh_every:
      next_refresh = self.__transaction.time + bucket.refresh_every
    else:
      next_refresh = None
    self.__state[name] = bucket.initial_tokens, next_refresh

    return bucket.initial_tokens + changes

  def check_quota(self, name, tokens):
    """Check to determine if there are enough tokens in a bucket.

    Args:
      name: Name of bucket to check.
      tokens: Number of tokens to check for availability.  Can be negative.

    Returns:
      The count of requested tokens or, if insufficient, the number of
      tokens available; None if the bucket is unknown.
    """
    assert self.in_transaction()
    # Each bucket may only be checked/deducted once per transaction.
    assert name not in self.__transaction.changes

    remaining_tokens = self.get_remaining_tokens(name)
    if remaining_tokens is None:
      return None
    return min(tokens, remaining_tokens)

  def deduct_quota(self, name, tokens):
    """Add a quota deduction to the transaction.

    The deduction only takes effect when the transaction is committed.

    Args:
      name: Name of bucket to deduct from.
      tokens: Number of tokens to request.

    Returns:
      The count of requested tokens or, if insufficient, the number of
      tokens available that will be deducted upon transaction commit; None
      if the bucket is unknown.
    """
    available_tokens = self.check_quota(name, tokens)
    if available_tokens is None:
      return None
    # Never schedule a deduction larger than what is actually available.
    diff = max(0, tokens - available_tokens)
    self.__transaction.changes[name] = -(tokens - diff)
    return available_tokens
class QuotaService(remote.Service):
  """Quota service."""

  # Guards creation of per-user QuotaState objects in __get_state.
  __state_lock = threading.Lock()

  def __init__(self, config, states):
    """Constructor.

    NOTE: This constructor requires parameters which means a factory function
    must be used for instantiating the QuotaService.

    Args:
      config: An instance of QuotaConfig.
      states: Dictionary mapping user -> QuotaState objects.
    """
    self.__states = states
    self.__config = config
    self.__buckets = {}
    for bucket in self.__config.buckets:
      self.__buckets[bucket.name] = bucket

  def __get_state(self, user):
    """Get the quota state of a user.

    If no user state exists, this function will create one and store it
    for access later.

    Args:
      user: User string to get quota state for.

    Returns:
      The user's QuotaState instance.
    """
    state = self.__states.get(user, None)
    if state is None:
      # TODO: Potentially problematic bottleneck.
      self.__state_lock.acquire()
      try:
        # Bug fix: resolve again under the lock via setdefault.  The
        # original code unconditionally assigned a fresh QuotaState, so two
        # racing threads could each create a state and the later assignment
        # silently discarded the other thread's quota bookkeeping.
        state = self.__states.setdefault(user, QuotaState({}, self.__buckets))
      finally:
        self.__state_lock.release()
    return state

  @remote.method(QuotaRequest, QuotaResponse)
  def check_quota(self, request):
    """Perform a quota check for a user.

    All checks/deductions in the request are applied atomically: if the
    request ends up denied (or an error occurs), none of the deductions
    take effect.
    """
    state = self.__get_state(request.user)

    response = QuotaResponse(all_status=CheckResult.Status.OK)
    response.denied = False
    state.begin_transaction()
    try:
      for quota in request.quotas:
        # CHECK_* modes only test availability; the others also deduct.
        if quota.mode in (QuotaCheck.Mode.CHECK_ALL,
                          QuotaCheck.Mode.CHECK_SOME):
          func = state.check_quota
        else:
          func = state.deduct_quota
        available = func(quota.name, quota.tokens)
        if available is None:
          raise remote.ApplicationError(
            'Unknown quota %s requested' % quota.name)

        result = CheckResult(available=available)
        response.results.append(result)

        if available == quota.tokens:
          # Bucket fully satisfied.
          result.status = CheckResult.Status.OK
          # NOTE(review): this downgrades the per-bucket result (not
          # all_status) when an earlier bucket already failed -- confirm
          # this asymmetry is intentional.
          if response.all_status == CheckResult.Status.NONE:
            result.status = CheckResult.Status.SOME
        elif available == 0:
          # Nothing available in this bucket at all.
          result.status = CheckResult.Status.NONE
          if response.all_status == CheckResult.Status.OK:
            response.all_status = CheckResult.Status.NONE
          response.denied = True
        else:
          # Bucket only partially satisfied.
          result.status = CheckResult.Status.SOME
          response.all_status = CheckResult.Status.SOME
          if quota.mode in (QuotaCheck.Mode.ALL, QuotaCheck.Mode.CHECK_ALL):
            response.denied = True

      if response.denied:
        state.abort_transaction()
      else:
        state.commit_transaction()
    except:
      # Bare except is deliberate: abort the transaction (releasing its
      # lock) for *any* exception, then re-raise unchanged.
      state.abort_transaction()
      raise
    return response

  @remote.method(QuotaStateRequest, QuotaStateResponse)
  def get_quota_state(self, request):
    """Get current state of the user's quota buckets."""
    state = self.__get_state(request.user)

    state.begin_transaction()
    try:
      response = QuotaStateResponse()
      for name in sorted(self.__buckets.keys()):
        bucket_state = BucketState(
          name=name,
          remaining_tokens=state.get_remaining_tokens(name))
        response.bucket_states.append(bucket_state)
      return response
    finally:
      # Read-only endpoint: always roll back so no changes persist.
      state.abort_transaction()
| ltilve/ChromiumGStreamerBackend | tools/telemetry/third_party/gsutilz/third_party/protorpc/demos/quotas/backend/quotas/services.py | Python | bsd-3-clause | 14,871 |
"""
Graphical user interface functionalities for the
SampleResource Aggregate Manager.
@date: Jun 12, 2013
@author: CarolinaFernandez
"""
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import get_object_or_404
from expedient.clearinghouse.aggregate.models import Aggregate
from expedient.clearinghouse.slice.models import Slice
from expedient.common.messaging.models import DatedMessage
from expedient.common.utils.plugins.plugincommunicator import *
from expedient.common.utils.plugins.resources.link import Link
from expedient.common.utils.plugins.resources.node import Node
from expedient.common.utils.views import generic_crud
from sample_resource.controller.resource import SampleResource as SampleResourceController
from sample_resource.forms.SampleResource import SampleResource as SampleResourceModelForm
from sample_resource.models import SampleResource as SampleResourceModel,\
SampleResourceAggregate as SampleResourceAggregateModel
import copy
import logging
import xmlrpclib
def create_resource(request, slice_id, agg_id):
    """Show a page that allows user to add a SampleResource to the aggregate.

    Args:
        request: Django HttpRequest.
        slice_id: Primary key of the Slice the resource belongs to.
        agg_id: Primary key of the target Aggregate.

    Returns:
        HttpResponseRedirect either to the resource creation form or back
        to the slice detail page.
        NOTE(review): non-POST requests fall through and implicitly return
        None -- confirm this view is only ever reached via POST.
    """
    if request.method == "POST":
        # Shows error message when aggregate unreachable, disable SampleResource creation and get back to slice detail page
        agg = Aggregate.objects.get(id = agg_id)
        # NOTE(review): '== False' treats a None/0 return from
        # check_status() as "available"; confirm it always returns a bool.
        if agg.check_status() == False:
            DatedMessage.objects.post_message_to_user(
                "SampleResource Aggregate '%s' is not available" % agg.name,
                request.user, msg_type=DatedMessage.TYPE_ERROR,)
            return HttpResponseRedirect(reverse("slice_detail", args=[slice_id]))
        # The form posts 'create_resource' when the user confirmed creation.
        if 'create_resource' in request.POST:
            return HttpResponseRedirect(reverse("sample_resource_resource_crud", args=[slice_id, agg_id]))
        else:
            return HttpResponseRedirect(reverse("slice_detail", args=[slice_id]))
def resource_crud(request, slice_id, agg_id, resource_id = None):
    """
    Show a page that allows user to create/edit SampleResource's to the Aggregate.

    Args:
        request: Django HttpRequest.
        slice_id: Primary key of the Slice.
        agg_id: Primary key of the Aggregate.
        resource_id: Primary key of an existing SampleResource to edit, or
            None to create a new one.

    Returns:
        Whatever generic_crud returns (form page or redirect), or a redirect
        to the slice detail page on unexpected errors.
        NOTE(review): after a ValidationError the function falls through and
        implicitly returns None; error_crud is assigned there but never used
        again (generic_crud already received the old value) -- confirm.
    """
    slice = get_object_or_404(Slice, id = slice_id)
    aggregate = Aggregate.objects.get(id = agg_id)
    error_crud = ""
    def pre_save(instance, created):
        """
        Fills SampleResource instance prior to its saving.
        Used within the scope of the generic_crud method.
        """
        # NOTE(review): this rebinds the local name only; it is effective
        # only if SampleResourceController.fill mutates instance in place
        # -- confirm.
        instance = SampleResourceController.fill(instance, slice, agg_id, resource_id)
    try:
        # NOTE(review): in the breadcrumb below, '%' binds tighter than the
        # conditional, so the label is "Update SampleResource" or just
        # "Create" (without " SampleResource") -- probably missing parens.
        return generic_crud(request, obj_id=resource_id, model=SampleResourceModel,
                    form_class=SampleResourceModelForm,
                    template="sample_resource_resource_crud.html",
                    redirect=lambda inst: reverse("slice_detail", args=[slice_id]),
                    extra_context={"agg": aggregate, "slice": slice, "exception": error_crud, "breadcrumbs": (
                        ("Home", reverse("home")),
                        ("Project %s" % slice.project.name, reverse("project_detail", args=[slice.project.id])),
                        ("Slice %s" % slice.name, reverse("slice_detail", args=[slice_id])),
                        ("%s SampleResource" % "Update" if resource_id else "Create", reverse("sample_resource_resource_crud", args=[slice_id, agg_id])),)
                    }, extra_form_params={}, template_object_name="object", pre_save=pre_save,
                    post_save=None, success_msg=None)
    except ValidationError as e:
        # Django exception message handling is different to Python's...
        error_crud = ";".join(e.messages)
    except Exception as e:
        print "[WARNING] Could not create resource in plugin 'sample_resource'. Details: %s" % str(e)
        DatedMessage.objects.post_message_to_user(
            "SampleResource might have been created, but some problem ocurred: %s" % str(e),
            request.user, msg_type=DatedMessage.TYPE_ERROR)
        return HttpResponseRedirect(reverse("slice_detail", args=[slice_id]))
def manage_resource(request, resource_id, action_type):
    """
    Dispatch a management action over a SampleResource.

    Currently only the "delete" action is supported; any other action type
    is silently ignored.  Always answers with an empty HTTP response.
    """
    handlers = {"delete": SampleResourceController.delete}
    handler = handlers.get(action_type)
    if handler is not None:
        handler(resource_id)
    # Empty body: the client only needs the 200 status to refresh its view.
    return HttpResponse("")
###
# Topology to show in the Expedient
#
def get_sr_list(slice):
    """Return the SampleResource models that belong to the given slice."""
    slice_resources = SampleResourceModel.objects.filter(slice_id=slice.uuid)
    return slice_resources
def get_sr_aggregates(slice):
    """Return the slice's aggregates that are SampleResource aggregates.

    Best-effort: if the slice has no usable 'aggregates' relation, an empty
    list is returned instead of raising.
    """
    sr_aggs = []
    try:
        sr_aggs = slice.aggregates.filter(
            leaf_name=SampleResourceAggregateModel.__name__.lower())
    except Exception:
        # Deliberate best-effort fallback to [].  Narrowed from a bare
        # `except:` so SystemExit/KeyboardInterrupt are no longer swallowed.
        pass
    return sr_aggs
def get_node_description(node):
    """Build the HTML description shown for a SampleResource node.

    Args:
        node: SampleResource-like object exposing `name`,
            `get_temperature()`, `get_temperature_scale()` and
            `get_connections()` (connections expose `name`).

    Returns:
        HTML string with the node name, its temperature and the names of
        the nodes it is connected to (empty list yields an empty name list).
    """
    description = "<strong>Sample Resource: " + node.name + "</strong><br/><br/>"
    description += "• Temperature: %s (°%s)" % (str(node.get_temperature()),
                                                str(node.get_temperature_scale()))
    # str.join replaces the original quadratic `+=`-in-a-loop concatenation
    # and drops the manual last-element bookkeeping; output is identical.
    connections = ", ".join(
        connection.name for connection in node.get_connections())
    description += "<br/>• Connected to: %s" % str(connections)
    return description
def get_nodes_links(slice, chosen_group=None):
    """Collect topology Node and Link objects for the slice's resources.

    Args:
        slice: Slice whose SampleResource aggregates are inspected.
        chosen_group: Unused; kept for interface compatibility.

    Returns:
        [nodes, links]: two lists consumed by the Expedient topology view.
    """
    nodes = []
    links = []
    sr_aggs = get_sr_aggregates(slice)
    # Getting image for the nodes
    # FIXME: avoid to ask the user for the complete name of the method here! he should NOT know it
    try:
        image_url = reverse('img_media_sample_resource', args=("sensor-tiny.png",))
    except:
        # Fall back to the bare filename when URL reversing fails.
        image_url = 'sensor-tiny.png'
    # For every SampleResource AM
    for i, sr_agg in enumerate(sr_aggs):
        sr_agg = sr_agg.sampleresourceaggregate
        # Iterates over every SampleResource contained within the slice
        for sr in sr_agg.get_resources():
            sr = sr.sampleresource
            nodes.append(Node(
                # Users shall not be left the choice to choose group/island; otherwise collision may arise
                name = sr.name, value = sr.id, aggregate = sr.aggregate, type = "Sample resource",
                description = get_node_description(sr), image = image_url)
            )
            for connection in sr.get_connections():
                # Two-ways link
                # NOTE(review): both Link objects below use the same
                # source/target (only `value` is swapped); for a true
                # two-way link the second should probably swap source and
                # target as well -- confirm against the topology renderer.
                links.append(
                    Link(
                        target = str(sr.id), source = str(connection.id),
                        value = "rsc_id_%s-rsc_id_%s" % (connection.id, sr.id)
                    ),
                )
                links.append(
                    Link(
                        target = str(sr.id), source = str(connection.id),
                        value = "rsc_id_%s-rsc_id_%s" % (sr.id, connection.id)
                    ),
                )
    return [nodes, links]
#from expedient.common.utils.plugins.plugininterface import PluginInterface
#
#class Plugin(PluginInterface):
# @staticmethod
def get_ui_data(slice):
"""
Hook method. Use this very same name so Expedient can get the resources for every plugin.
"""
ui_context = dict()
try:
ui_context['sr_list'] = get_sr_list(slice)
ui_context['sr_aggs'] = get_sr_aggregates(slice)
ui_context['nodes'], ui_context['links'] = get_nodes_links(slice)
except Exception as e:
print "[ERROR] Problem loading UI data for plugin 'sample_resource'. Details: %s" % str(e)
return ui_context
| dana-i2cat/felix | expedient/doc/plugins/samples/plugin/sample_resource/controller/GUIdispatcher.py | Python | apache-2.0 | 7,485 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import sys
class OsuMicroBenchmarks(AutotoolsPackage):
    """The Ohio MicroBenchmark suite is a collection of independent MPI
    message passing performance microbenchmarks developed and written at
    The Ohio State University. It includes traditional benchmarks and
    performance measures such as latency, bandwidth and host overhead
    and can be used for both traditional and GPU-enhanced nodes."""

    homepage = "http://mvapich.cse.ohio-state.edu/benchmarks/"
    url = "http://mvapich.cse.ohio-state.edu/download/mvapich/osu-micro-benchmarks-5.3.tar.gz"

    version('5.3', '42e22b931d451e8bec31a7424e4adfc2')

    variant('cuda', default=False, description="Enable CUDA support")

    depends_on('mpi')
    depends_on('cuda', when='+cuda')

    def configure_args(self):
        """Assemble the argument list handed to ./configure."""
        spec = self.spec

        # Build with the MPI compiler wrappers.
        args = [
            'CC=%s' % spec['mpi'].mpicc,
            'CXX=%s' % spec['mpi'].mpicxx,
        ]

        # Optional GPU-enabled benchmarks.
        if '+cuda' in spec:
            args.append('--enable-cuda')
            args.append('--with-cuda=%s' % spec['cuda'].prefix)

        # librt is neither available nor required on macOS.
        if sys.platform != 'darwin':
            args.append('LDFLAGS=-lrt')

        return args
| skosukhin/spack | var/spack/repos/builtin/packages/osu-micro-benchmarks/package.py | Python | lgpl-2.1 | 2,532 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.