import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
MultiIndex,
Series,
)
import pandas._testing as tm
class TestMultiLevel:
def test_reindex_level(self, multiindex_year_month_day_dataframe_random_data):
# axis=0
ymd = multiindex_year_month_day_dataframe_random_data
with tm.assert_produces_warning(FutureWarning):
month_sums = ymd.sum(level="month")
result = month_sums.reindex(ymd.index, level=1)
expected = ymd.groupby(level="month").transform(np.sum)
tm.assert_frame_equal(result, expected)
# Series
result = month_sums["A"].reindex(ymd.index, level=1)
expected = ymd["A"].groupby(level="month").transform(np.sum)
tm.assert_series_equal(result, expected, check_names=False)
# axis=1
with tm.assert_produces_warning(FutureWarning):
month_sums = ymd.T.sum(axis=1, level="month")
result = month_sums.reindex(columns=ymd.index, level=1)
expected = ymd.groupby(level="month").transform(np.sum).T
tm.assert_frame_equal(result, expected)
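    # A minimal sketch of the non-deprecated spelling exercised above: the
    # reductions wrapped in tm.assert_produces_warning(FutureWarning) use the
    # deprecated ``level=`` keyword, whose groupby equivalent (assuming the
    # same ymd fixture) is:
    #
    #     month_sums = ymd.groupby(level="month").sum()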
@pytest.mark.parametrize("opname", ["sub", "add", "mul", "div"])
def test_binops_level(
self, opname, multiindex_year_month_day_dataframe_random_data
):
ymd = multiindex_year_month_day_dataframe_random_data
op = getattr(DataFrame, opname)
with tm.assert_produces_warning(FutureWarning):
month_sums = ymd.sum(level="month")
result = op(ymd, month_sums, level="month")
broadcasted = ymd.groupby(level="month").transform(np.sum)
expected = op(ymd, broadcasted)
tm.assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(ymd["A"], month_sums["A"], level="month")
broadcasted = ymd["A"].groupby(level="month").transform(np.sum)
expected = op(ymd["A"], broadcasted)
expected.name = "A"
tm.assert_series_equal(result, expected)
def test_reindex(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
expected = frame.iloc[[0, 3]]
reindexed = frame.loc[[("foo", "one"), ("bar", "one")]]
tm.assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(
self, multiindex_year_month_day_dataframe_random_data
):
ymd = multiindex_year_month_day_dataframe_random_data
new_index = ymd.index[::10]
chunk = ymd.reindex(new_index)
assert chunk.index is new_index
chunk = ymd.loc[new_index]
assert chunk.index.equals(new_index)
ymdT = ymd.T
chunk = ymdT.reindex(columns=new_index)
assert chunk.columns is new_index
chunk = ymdT.loc[:, new_index]
assert chunk.columns.equals(new_index)
def test_groupby_transform(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
s = frame["A"]
grouper = s.index.get_level_values(0)
grouped = s.groupby(grouper)
applied = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
result = applied.reindex(expected.index)
tm.assert_series_equal(result, expected, check_names=False)
def test_groupby_corner(self):
midx = MultiIndex(
levels=[["foo"], ["bar"], ["baz"]],
codes=[[0], [0], [0]],
names=["one", "two", "three"],
)
df = DataFrame([np.random.rand(4)], columns=["a", "b", "c", "d"], index=midx)
# should work
df.groupby(level="three")
def test_groupby_level_no_obs(self):
# #1697
midx = MultiIndex.from_tuples(
[
("f1", "s1"),
("f1", "s2"),
("f2", "s1"),
("f2", "s2"),
("f3", "s1"),
("f3", "s2"),
]
)
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], columns=midx)
df1 = df.loc(axis=1)[df.columns.map(lambda u: u[0] in ["f2", "f3"])]
grouped = df1.groupby(axis=1, level=0)
result = grouped.sum()
assert (result.columns == ["f2", "f3"]).all()
def test_setitem_with_expansion_multiindex_columns(
self, multiindex_year_month_day_dataframe_random_data
):
ymd = multiindex_year_month_day_dataframe_random_data
df = ymd[:5].T
df[2000, 1, 10] = df[2000, 1, 7]
assert isinstance(df.columns, MultiIndex)
assert (df[2000, 1, 10] == df[2000, 1, 7]).all()
def test_alignment(self):
x = Series(
data=[1, 2, 3], index=MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3)])
)
y = Series(
data=[4, 5, 6], index=MultiIndex.from_tuples([("Z", 1), ("Z", 2), ("B", 3)])
)
res = x - y
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
tm.assert_series_equal(res, exp)
# hit non-monotonic code path
res = x[::-1] - y[::-1]
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
tm.assert_series_equal(res, exp)
@pytest.mark.parametrize("level", [0, 1])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("sort", [True, False])
def test_series_group_min_max(
self, all_numeric_reductions, level, skipna, sort, series_with_multilevel_index
):
# GH 17537
ser = series_with_multilevel_index
op = all_numeric_reductions
grouped = ser.groupby(level=level, sort=sort)
# skipna=True
leftside = grouped.agg(lambda x: getattr(x, op)(skipna=skipna))
with tm.assert_produces_warning(FutureWarning):
rightside = getattr(ser, op)(level=level, skipna=skipna)
if sort:
rightside = rightside.sort_index(level=level)
tm.assert_series_equal(leftside, rightside)
@pytest.mark.parametrize("level", [0, 1])
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("sort", [True, False])
def test_frame_group_ops(
self,
all_numeric_reductions,
level,
axis,
skipna,
sort,
multiindex_dataframe_random_data,
):
# GH 17537
frame = multiindex_dataframe_random_data
frame.iloc[1, [1, 2]] = np.nan
frame.iloc[7, [0, 1]] = np.nan
level_name = frame.index.names[level]
if axis == 0:
frame = frame
else:
frame = frame.T
grouped = frame.groupby(level=level, axis=axis, sort=sort)
pieces = []
op = all_numeric_reductions
def aggf(x):
pieces.append(x)
return getattr(x, op)(skipna=skipna, axis=axis)
leftside = grouped.agg(aggf)
with tm.assert_produces_warning(FutureWarning):
rightside = getattr(frame, op)(level=level, axis=axis, skipna=skipna)
if sort:
rightside = rightside.sort_index(level=level, axis=axis)
frame = frame.sort_index(level=level, axis=axis)
# for good measure, groupby detail
level_index = frame._get_axis(axis).levels[level].rename(level_name)
tm.assert_index_equal(leftside._get_axis(axis), level_index)
tm.assert_index_equal(rightside._get_axis(axis), level_index)
tm.assert_frame_equal(leftside, rightside)
@pytest.mark.parametrize("meth", ["var", "std"])
def test_std_var_pass_ddof(self, meth):
index = MultiIndex.from_arrays(
[np.arange(5).repeat(10), np.tile(np.arange(10), 5)]
)
df = DataFrame(np.random.randn(len(index), 5), index=index)
ddof = 4
alt = lambda x: getattr(x, meth)(ddof=ddof)
with tm.assert_produces_warning(FutureWarning):
result = getattr(df[0], meth)(level=0, ddof=ddof)
expected = df[0].groupby(level=0).agg(alt)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = getattr(df, meth)(level=0, ddof=ddof)
expected = df.groupby(level=0).agg(alt)
tm.assert_frame_equal(result, expected)
def test_agg_multiple_levels(
self, multiindex_year_month_day_dataframe_random_data, frame_or_series
):
ymd = multiindex_year_month_day_dataframe_random_data
ymd = tm.get_obj(ymd, frame_or_series)
with tm.assert_produces_warning(FutureWarning):
result = ymd.sum(level=["year", "month"])
expected = ymd.groupby(level=["year", "month"]).sum()
tm.assert_equal(result, expected)
def test_groupby_multilevel(self, multiindex_year_month_day_dataframe_random_data):
ymd = multiindex_year_month_day_dataframe_random_data
result = ymd.groupby(level=[0, 1]).mean()
k1 = ymd.index.get_level_values(0)
k2 = ymd.index.get_level_values(1)
expected = ymd.groupby([k1, k2]).mean()
# TODO groupby with level_values drops names
tm.assert_frame_equal(result, expected, check_names=False)
assert result.index.names == ymd.index.names[:2]
result2 = ymd.groupby(level=ymd.index.names[:2]).mean()
tm.assert_frame_equal(result, result2)
def test_multilevel_consolidate(self):
index = MultiIndex.from_tuples(
[("foo", "one"), ("foo", "two"), ("bar", "one"), ("bar", "two")]
)
df = DataFrame(np.random.randn(4, 4), index=index, columns=index)
df["Totals", ""] = df.sum(1)
df = df._consolidate()
def test_level_with_tuples(self):
index = MultiIndex(
levels=[[("foo", "bar", 0), ("foo", "baz", 0), ("foo", "qux", 0)], [0, 1]],
codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
)
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[("foo", "bar", 0)]
result2 = series.loc[("foo", "bar", 0)]
expected = series[:2]
expected.index = expected.index.droplevel(0)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
with pytest.raises(KeyError, match=r"^\(\('foo', 'bar', 0\), 2\)$"):
series[("foo", "bar", 0), 2]
result = frame.loc[("foo", "bar", 0)]
result2 = frame.xs(("foo", "bar", 0))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
index = MultiIndex(
levels=[[("foo", "bar"), ("foo", "baz"), ("foo", "qux")], [0, 1]],
codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
)
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[("foo", "bar")]
result2 = series.loc[("foo", "bar")]
expected = series[:2]
expected.index = expected.index.droplevel(0)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = frame.loc[("foo", "bar")]
result2 = frame.xs(("foo", "bar"))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
def test_reindex_level_partial_selection(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
result = frame.reindex(["foo", "qux"], level=0)
expected = frame.iloc[[0, 1, 2, 7, 8, 9]]
tm.assert_frame_equal(result, expected)
result = frame.T.reindex(["foo", "qux"], axis=1, level=0)
tm.assert_frame_equal(result, expected.T)
result = frame.loc[["foo", "qux"]]
tm.assert_frame_equal(result, expected)
result = frame["A"].loc[["foo", "qux"]]
tm.assert_series_equal(result, expected["A"])
result = frame.T.loc[:, ["foo", "qux"]]
tm.assert_frame_equal(result, expected.T)
@pytest.mark.parametrize("d", [4, "d"])
def test_empty_frame_groupby_dtypes_consistency(self, d):
# GH 20888
group_keys = ["a", "b", "c"]
df = DataFrame({"a": [1], "b": [2], "c": [3], "d": [d]})
g = df[df.a == 2].groupby(group_keys)
result = g.first().index
expected = MultiIndex(
levels=[[1], [2], [3]], codes=[[], [], []], names=["a", "b", "c"]
)
tm.assert_index_equal(result, expected)
def test_duplicate_groupby_issues(self):
idx_tp = [
("600809", "20061231"),
("600809", "20070331"),
("600809", "20070630"),
("600809", "20070331"),
]
dt = ["demo", "demo", "demo", "demo"]
idx = MultiIndex.from_tuples(idx_tp, names=["STK_ID", "RPT_Date"])
s = Series(dt, index=idx)
result = s.groupby(s.index).first()
assert len(result) == 3
def test_subsets_multiindex_dtype(self):
# GH 20757
data = [["x", 1]]
columns = [("a", "b", np.nan), ("a", "c", 0.0)]
df = DataFrame(data, columns=MultiIndex.from_tuples(columns))
expected = df.dtypes.a.b
result = df.a.b.dtypes
tm.assert_series_equal(result, expected)
class TestSorted:
"""everything you wanted to test about sorting"""
def test_sort_non_lexsorted(self):
# degenerate case where we sort but don't
# have a satisfying result :<
# GH 15797
idx = MultiIndex(
[["A", "B", "C"], ["c", "b", "a"]], [[0, 1, 2, 0, 1, 2], [0, 2, 1, 1, 0, 2]]
)
df = DataFrame({"col": range(len(idx))}, index=idx, dtype="int64")
assert df.index.is_monotonic_increasing is False
sorted = df.sort_index()
assert sorted.index.is_monotonic_increasing is True
expected = DataFrame(
{"col": [1, 4, 5, 2]},
index=MultiIndex.from_tuples(
[("B", "a"), ("B", "c"), ("C", "a"), ("C", "b")]
),
dtype="int64",
)
result = sorted.loc[pd.IndexSlice["B":"C", "a":"c"], :]
tm.assert_frame_equal(result, expected)
"""
Linux on Hyper-V and Azure Test Code, ver. 1.0.0
Copyright (c) Microsoft Corporation
All rights reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
See the Apache Version 2.0 License for specific language governing
permissions and limitations under the License.
"""
import os
import time
import logging
import paramiko
import socket
from winrm import protocol
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%y/%m/%d %H:%M:%S', level=logging.INFO)
log = logging.getLogger(__name__)
class SSHClient(object):
"""
This class creates a paramiko.SSHClient() object that represents
a session with an SSH server. You can use the SSHClient object to send
commands to the remote host and manipulate files on the remote host.
:param server: A server hostname or ip.
:param host_key_file: The path to the user's .ssh key files.
:param user: The username for the SSH connection. Default = 'root'.
:param timeout: The optional timeout variable for the TCP connection.
:param ssh_pwd: An optional password to use for authentication or for
unlocking the private key.
    :param ssh_key_file: The path to the SSH private key file (PEM data).
"""
def __init__(self, server, host_key_file='~/.ssh/known_hosts', user='root', timeout=None,
ssh_pwd=None, ssh_key_file=None):
self.server = server
self.host_key_file = host_key_file
self.user = user
self._timeout = timeout
self._pkey = paramiko.RSAKey.from_private_key_file(ssh_key_file, password=ssh_pwd)
self._ssh_client = paramiko.SSHClient()
self._ssh_client.load_system_host_keys()
self._ssh_client.load_host_keys(os.path.expanduser(host_key_file))
self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.connect()
def connect(self, num_retries=5):
"""
Connect to an SSH server and authenticate with it.
:type num_retries: int
:param num_retries: The maximum number of connection attempts.
"""
retry = 0
while retry < num_retries:
try:
self._ssh_client.connect(self.server, username=self.user, pkey=self._pkey,
timeout=self._timeout)
return
except socket.error as se:
(value, message) = se.args
if value in (51, 61, 111):
log.error('SSH Connection refused, will retry in 5 seconds')
time.sleep(5)
retry += 1
else:
raise
except paramiko.BadHostKeyException:
log.error("{} has an entry in ~/.ssh/known_hosts and it doesn't match".format(
self.server))
retry += 1
except EOFError:
log.error('Unexpected Error from SSH Connection, retry in 5 seconds')
time.sleep(5)
retry += 1
log.error('Could not establish SSH connection')
def open_sftp(self):
"""
Open an SFTP session on the SSH server.
:rtype: :class:`paramiko.sftp_client.SFTPClient`
:return: An SFTP client object.
"""
return self._ssh_client.open_sftp()
def get_file(self, src, dst):
"""
Open an SFTP session on the remote host, and copy a file from
the remote host to the specified path on the local host.
:type src: string
:param src: The path to the target file on the remote host.
:type dst: string
:param dst: The path on your local host where you want to store the file.
"""
sftp_client = self.open_sftp()
sftp_client.get(src, dst)
def put_file(self, src, dst):
"""
Open an SFTP session on the remote host, and copy a file from
the local host to the specified path on the remote host.
:type src: string
:param src: The path to the target file on your local host.
:type dst: string
:param dst: The path on the remote host where you want to store the file.
"""
sftp_client = self.open_sftp()
sftp_client.put(src, dst)
def run(self, command, timeout=None):
"""
Run a command on the remote host.
:type command: string
:param command: The command that you want to send to the remote host.
:param timeout: pass timeout along the line.
:rtype: tuple
:return: This function returns a tuple that contains an integer status,
the stdout from the command, and the stderr from the command.
"""
        status = 0
        std_out = ''
        std_err = ''
        try:
            stdin, stdout, stderr = self._ssh_client.exec_command(command, timeout=timeout)
            std_out = stdout.read()
            std_err = stderr.read()
            stdin.close()
            stdout.close()
            stderr.close()
        except paramiko.SSHException:
            status = 1
        return status, std_out, std_err
def run_pty(self, command):
"""
Request a pseudo-terminal from a server, and execute a command on that server.
:type command: string
:param command: The command that you want to run on the remote host.
:rtype: :class:`paramiko.channel.Channel`
:return: An open channel object.
"""
channel = self._ssh_client.get_transport().open_session()
channel.get_pty()
channel.exec_command(command)
return channel
def close(self):
"""
Close an SSH session and any open channels that are tied to it.
"""
transport = self._ssh_client.get_transport()
transport.close()
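# A minimal usage sketch for SSHClient (the host address, key path and command
# below are placeholders; an unencrypted private key is assumed):
#
#     client = SSHClient('203.0.113.10', user='root',
#                        ssh_key_file='/path/to/id_rsa')
#     status, out, err = client.run('uname -a')
#     client.put_file('local.sh', '/tmp/remote.sh')
#     client.close()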
class WinRMClient(object):
"""
This class creates a WinRM object that represents a session with a Windows server.
:param host: A server hostname or ip.
:param user: The username for the winrm connection.
:param password: Password to use for authentication.
:param port: WinRM port used to connect. Default is 5986.
:param proto: Protocol used for communication. Default is https.
"""
def __init__(self, host=None, user=None, password=None, port=5986, proto='https'):
self.host = host
self.user = user
self.password = password
self.port = port
self.proto = proto
def run(self, cmd=None, ps=False, transport='ssl', server_cert_validation='ignore'):
"""
Run WinRM command.
:param cmd: Windows command to run
:param ps: <bool> to run powershell command instead
:param transport: Cryptographic protocol. Default is ssl.
:param server_cert_validation: Server side validation type. Default is ignore.
        :return: std_out of the executed command
"""
        if not cmd:
            log.error('Please provide a command to run remotely.')
            return None
if ps:
cmd = 'powershell -NoProfile -NonInteractive ' + cmd
secure_host = '{}://{}:{}/wsman'.format(self.proto, self.host, self.port)
protocol.Protocol.DEFAULT_TIMEOUT = "PT7200S"
try:
p = protocol.Protocol(endpoint=secure_host, transport=transport,
username=self.user, password=self.password,
server_cert_validation=server_cert_validation)
shell_id = p.open_shell()
command_id = p.run_command(shell_id, cmd)
std_out, std_err, exit_code = p.get_command_output(shell_id, command_id)
log.info('Output: {}'.format(std_out))
log.debug('Output: {}\nError: {}\nExit Code: {}'.format(std_out, std_err, exit_code))
if exit_code != 0:
log.error('{}.\nFailed to run command: {}'.format(std_err, cmd))
p.cleanup_command(shell_id, command_id)
p.close_shell(shell_id)
except Exception as e:
log.error(e)
raise
return std_out
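# A minimal usage sketch for WinRMClient (the host and credentials are
# placeholders; WinRM over HTTPS on port 5986 is assumed to be enabled):
#
#     winrm_client = WinRMClient(host='203.0.113.20', user='Administrator',
#                                password='secret')
#     out = winrm_client.run(cmd='ipconfig /all')
#     ps_out = winrm_client.run(cmd='Get-Service', ps=True)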
#
# Author: Pearu Peterson, March 2002
#
# w/ additions by Travis Oliphant, March 2002
# and Jake Vanderplas, August 2012
from __future__ import division, print_function, absolute_import
__all__ = ['solve', 'solve_triangular', 'solveh_banded', 'solve_banded',
'solve_toeplitz', 'solve_circulant', 'inv', 'det', 'lstsq',
'pinv', 'pinv2', 'pinvh']
import warnings
import numpy as np
from .flinalg import get_flinalg_funcs
from .lapack import get_lapack_funcs, _compute_lwork
from .misc import LinAlgError, _datacopied
from .decomp import _asarray_validated
from . import decomp, decomp_svd
from ._solve_toeplitz import levinson
# Linear equations
def solve(a, b, sym_pos=False, lower=False, overwrite_a=False,
overwrite_b=False, debug=False, check_finite=True):
"""
Solve the equation ``a x = b`` for ``x``.
Parameters
----------
a : (M, M) array_like
A square matrix.
b : (M,) or (M, N) array_like
Right-hand side matrix in ``a x = b``.
sym_pos : bool, optional
Assume `a` is symmetric and positive definite.
lower : bool, optional
Use only data contained in the lower triangle of `a`, if `sym_pos` is
true. Default is to use upper triangle.
overwrite_a : bool, optional
Allow overwriting data in `a` (may enhance performance).
Default is False.
overwrite_b : bool, optional
Allow overwriting data in `b` (may enhance performance).
Default is False.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : (M,) or (M, N) ndarray
Solution to the system ``a x = b``. Shape of the return matches the
shape of `b`.
Raises
------
LinAlgError
If `a` is singular.
ValueError
If `a` is not square
Examples
--------
Given `a` and `b`, solve for `x`:
>>> a = np.array([[3, 2, 0], [1, -1, 0], [0, 5, 1]])
>>> b = np.array([2, 4, -1])
>>> from scipy import linalg
>>> x = linalg.solve(a, b)
>>> x
array([ 2., -2., 9.])
>>> np.dot(a, x) == b
array([ True, True, True], dtype=bool)
"""
a1 = _asarray_validated(a, check_finite=check_finite)
b1 = _asarray_validated(b, check_finite=check_finite)
if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
raise ValueError('expected square matrix')
if a1.shape[0] != b1.shape[0]:
raise ValueError('incompatible dimensions')
overwrite_a = overwrite_a or _datacopied(a1, a)
overwrite_b = overwrite_b or _datacopied(b1, b)
if debug:
print('solve:overwrite_a=', overwrite_a)
print('solve:overwrite_b=', overwrite_b)
if sym_pos:
posv, = get_lapack_funcs(('posv',), (a1, b1))
c, x, info = posv(a1, b1, lower=lower,
overwrite_a=overwrite_a,
overwrite_b=overwrite_b)
else:
gesv, = get_lapack_funcs(('gesv',), (a1, b1))
lu, piv, x, info = gesv(a1, b1, overwrite_a=overwrite_a,
overwrite_b=overwrite_b)
if info == 0:
return x
if info > 0:
raise LinAlgError("singular matrix")
raise ValueError('illegal value in %d-th argument of internal gesv|posv' %
-info)
def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False,
overwrite_b=False, debug=False, check_finite=True):
"""
Solve the equation `a x = b` for `x`, assuming a is a triangular matrix.
Parameters
----------
a : (M, M) array_like
A triangular matrix
b : (M,) or (M, N) array_like
Right-hand side matrix in `a x = b`
lower : bool, optional
Use only data contained in the lower triangle of `a`.
Default is to use upper triangle.
trans : {0, 1, 2, 'N', 'T', 'C'}, optional
Type of system to solve:
======== =========
trans system
======== =========
0 or 'N' a x = b
1 or 'T' a^T x = b
2 or 'C' a^H x = b
======== =========
unit_diagonal : bool, optional
If True, diagonal elements of `a` are assumed to be 1 and
will not be referenced.
overwrite_b : bool, optional
Allow overwriting data in `b` (may enhance performance)
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : (M,) or (M, N) ndarray
Solution to the system `a x = b`. Shape of return matches `b`.
Raises
------
LinAlgError
If `a` is singular
Notes
-----
.. versionadded:: 0.9.0
"""
a1 = _asarray_validated(a, check_finite=check_finite)
b1 = _asarray_validated(b, check_finite=check_finite)
if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
raise ValueError('expected square matrix')
if a1.shape[0] != b1.shape[0]:
raise ValueError('incompatible dimensions')
overwrite_b = overwrite_b or _datacopied(b1, b)
if debug:
print('solve:overwrite_b=', overwrite_b)
trans = {'N': 0, 'T': 1, 'C': 2}.get(trans, trans)
trtrs, = get_lapack_funcs(('trtrs',), (a1, b1))
x, info = trtrs(a1, b1, overwrite_b=overwrite_b, lower=lower,
trans=trans, unitdiag=unit_diagonal)
if info == 0:
return x
if info > 0:
raise LinAlgError("singular matrix: resolution failed at diagonal %d" %
(info-1))
raise ValueError('illegal value in %d-th argument of internal trtrs' %
(-info))
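# A small worked sketch for solve_triangular: for an upper-triangular 2x2 system,
# back-substitution gives x[1] = 4/2 = 2 and x[0] = 1 - x[1] = -1.
#
#     >>> import numpy as np
#     >>> from scipy.linalg import solve_triangular
#     >>> a = np.array([[1., 1.], [0., 2.]])
#     >>> b = np.array([1., 4.])
#     >>> solve_triangular(a, b, lower=False)
#     array([-1.,  2.])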
def solve_banded(l_and_u, ab, b, overwrite_ab=False, overwrite_b=False,
debug=False, check_finite=True):
"""
Solve the equation a x = b for x, assuming a is banded matrix.
The matrix a is stored in `ab` using the matrix diagonal ordered form::
ab[u + i - j, j] == a[i,j]
Example of `ab` (shape of a is (6,6), `u` =1, `l` =2)::
* a01 a12 a23 a34 a45
a00 a11 a22 a33 a44 a55
a10 a21 a32 a43 a54 *
a20 a31 a42 a53 * *
Parameters
----------
(l, u) : (integer, integer)
Number of non-zero lower and upper diagonals
ab : (`l` + `u` + 1, M) array_like
Banded matrix
b : (M,) or (M, K) array_like
Right-hand side
overwrite_ab : bool, optional
Discard data in `ab` (may enhance performance)
overwrite_b : bool, optional
Discard data in `b` (may enhance performance)
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : (M,) or (M, K) ndarray
The solution to the system a x = b. Returned shape depends on the
shape of `b`.
"""
a1 = _asarray_validated(ab, check_finite=check_finite, as_inexact=True)
b1 = _asarray_validated(b, check_finite=check_finite, as_inexact=True)
# Validate shapes.
if a1.shape[-1] != b1.shape[0]:
raise ValueError("shapes of ab and b are not compatible.")
(l, u) = l_and_u
if l + u + 1 != a1.shape[0]:
raise ValueError("invalid values for the number of lower and upper "
"diagonals: l+u+1 (%d) does not equal ab.shape[0] "
"(%d)" % (l+u+1, ab.shape[0]))
overwrite_b = overwrite_b or _datacopied(b1, b)
if a1.shape[-1] == 1:
b2 = np.array(b1, copy=overwrite_b)
b2 /= a1[1, 0]
return b2
if l == u == 1:
overwrite_ab = overwrite_ab or _datacopied(a1, ab)
gtsv, = get_lapack_funcs(('gtsv',), (a1, b1))
du = a1[0, 1:]
d = a1[1, :]
dl = a1[2, :-1]
du2, d, du, x, info = gtsv(dl, d, du, b1, overwrite_ab, overwrite_ab,
overwrite_ab, overwrite_b)
else:
gbsv, = get_lapack_funcs(('gbsv',), (a1, b1))
a2 = np.zeros((2*l+u+1, a1.shape[1]), dtype=gbsv.dtype)
a2[l:, :] = a1
lu, piv, x, info = gbsv(l, u, a2, b1, overwrite_ab=True,
overwrite_b=overwrite_b)
if info == 0:
return x
if info > 0:
raise LinAlgError("singular matrix")
raise ValueError('illegal value in %d-th argument of internal gbsv/gtsv' %
-info)
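# A minimal sketch of the diagonal ordered form used by solve_banded, for a
# 3x3 tridiagonal matrix (l = 1, u = 1); the unused corner entries are stored
# here as zeros:
#
#     >>> import numpy as np
#     >>> from scipy.linalg import solve_banded
#     >>> a = np.array([[2., 1., 0.],
#     ...               [1., 2., 1.],
#     ...               [0., 1., 2.]])
#     >>> ab = np.array([[0., 1., 1.],    # upper diagonal: a01, a12
#     ...                [2., 2., 2.],    # main diagonal
#     ...                [1., 1., 0.]])   # lower diagonal: a10, a21
#     >>> b = np.array([1., 2., 3.])
#     >>> np.allclose(solve_banded((1, 1), ab, b), np.linalg.solve(a, b))
#     True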
def solveh_banded(ab, b, overwrite_ab=False, overwrite_b=False, lower=False,
check_finite=True):
"""
Solve equation a x = b. a is Hermitian positive-definite banded matrix.
The matrix a is stored in `ab` either in lower diagonal or upper
diagonal ordered form:
ab[u + i - j, j] == a[i,j] (if upper form; i <= j)
ab[ i - j, j] == a[i,j] (if lower form; i >= j)
Example of `ab` (shape of a is (6, 6), `u` =2)::
upper form:
* * a02 a13 a24 a35
* a01 a12 a23 a34 a45
a00 a11 a22 a33 a44 a55
lower form:
a00 a11 a22 a33 a44 a55
a10 a21 a32 a43 a54 *
a20 a31 a42 a53 * *
Cells marked with * are not used.
Parameters
----------
ab : (`u` + 1, M) array_like
Banded matrix
b : (M,) or (M, K) array_like
Right-hand side
overwrite_ab : bool, optional
Discard data in `ab` (may enhance performance)
overwrite_b : bool, optional
Discard data in `b` (may enhance performance)
lower : bool, optional
Is the matrix in the lower form. (Default is upper form)
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : (M,) or (M, K) ndarray
The solution to the system a x = b. Shape of return matches shape
of `b`.
"""
a1 = _asarray_validated(ab, check_finite=check_finite)
b1 = _asarray_validated(b, check_finite=check_finite)
# Validate shapes.
if a1.shape[-1] != b1.shape[0]:
raise ValueError("shapes of ab and b are not compatible.")
overwrite_b = overwrite_b or _datacopied(b1, b)
overwrite_ab = overwrite_ab or _datacopied(a1, ab)
if a1.shape[0] == 2:
ptsv, = get_lapack_funcs(('ptsv',), (a1, b1))
if lower:
d = a1[0, :].real
e = a1[1, :-1]
else:
d = a1[1, :].real
e = a1[0, 1:].conj()
d, du, x, info = ptsv(d, e, b1, overwrite_ab, overwrite_ab,
overwrite_b)
else:
pbsv, = get_lapack_funcs(('pbsv',), (a1, b1))
c, x, info = pbsv(a1, b1, lower=lower, overwrite_ab=overwrite_ab,
overwrite_b=overwrite_b)
if info > 0:
raise LinAlgError("%d-th leading minor not positive definite" % info)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal pbsv' %
-info)
return x
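# A minimal sketch of the two storage forms accepted by solveh_banded, for the
# symmetric positive-definite tridiagonal matrix with 2 on the main diagonal
# and 1 on the off-diagonals (u = 1):
#
#     >>> import numpy as np
#     >>> from scipy.linalg import solveh_banded
#     >>> ab_upper = np.array([[0., 1., 1.],   # a01, a12 (first entry unused)
#     ...                      [2., 2., 2.]])  # main diagonal
#     >>> ab_lower = np.array([[2., 2., 2.],   # main diagonal
#     ...                      [1., 1., 0.]])  # a10, a21 (last entry unused)
#     >>> b = np.array([1., 2., 3.])
#     >>> np.allclose(solveh_banded(ab_upper, b),
#     ...             solveh_banded(ab_lower, b, lower=True))
#     True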
def solve_toeplitz(c_or_cr, b, check_finite=True):
"""Solve a Toeplitz system using Levinson Recursion
The Toeplitz matrix has constant diagonals, with c as its first column
and r as its first row. If r is not given, ``r == conjugate(c)`` is
assumed.
Parameters
----------
c_or_cr : array_like or tuple of (array_like, array_like)
The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the
actual shape of ``c``, it will be converted to a 1-D array. If not
supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is
real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row
of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape
of ``r``, it will be converted to a 1-D array.
b : (M,) or (M, K) array_like
Right-hand side in ``T x = b``.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(result entirely NaNs) if the inputs do contain infinities or NaNs.
Returns
-------
x : (M,) or (M, K) ndarray
The solution to the system ``T x = b``. Shape of return matches shape
of `b`.
Notes
-----
The solution is computed using Levinson-Durbin recursion, which is faster
than generic least-squares methods, but can be less numerically stable.
"""
# If numerical stability of this algorithm is a problem, a future
# developer might consider implementing other O(N^2) Toeplitz solvers,
# such as GKO (http://www.jstor.org/stable/2153371) or Bareiss.
if isinstance(c_or_cr, tuple):
c, r = c_or_cr
c = _asarray_validated(c, check_finite=check_finite).ravel()
r = _asarray_validated(r, check_finite=check_finite).ravel()
else:
c = _asarray_validated(c_or_cr, check_finite=check_finite).ravel()
r = c.conjugate()
# Form a 1D array of values to be used in the matrix, containing a reversed
# copy of r[1:], followed by c.
vals = np.concatenate((r[-1:0:-1], c))
if b is None:
raise ValueError('illegal value, `b` is a required argument')
    b = _asarray_validated(b)
    if vals.shape[0] != (2*b.shape[0] - 1):
        raise ValueError('incompatible dimensions')
if np.iscomplexobj(vals) or np.iscomplexobj(b):
vals = np.asarray(vals, dtype=np.complex128, order='c')
b = np.asarray(b, dtype=np.complex128)
else:
vals = np.asarray(vals, dtype=np.double, order='c')
b = np.asarray(b, dtype=np.double)
if b.ndim == 1:
x, _ = levinson(vals, np.ascontiguousarray(b))
else:
b_shape = b.shape
b = b.reshape(b.shape[0], -1)
        x = np.column_stack(
            [levinson(vals, np.ascontiguousarray(b[:, i]))[0]
             for i in range(b.shape[1])])
x = x.reshape(*b_shape)
return x
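# A minimal consistency sketch for solve_toeplitz, using scipy.linalg.toeplitz
# to build the dense reference matrix (first column c, first row [c[0], r[1:]]):
#
#     >>> import numpy as np
#     >>> from scipy.linalg import solve_toeplitz, toeplitz, solve
#     >>> c = np.array([4., 1., 0.5])
#     >>> r = np.array([4., 2., 1.])
#     >>> b = np.array([1., 2., 3.])
#     >>> np.allclose(solve_toeplitz((c, r), b), solve(toeplitz(c, r), b))
#     True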
def _get_axis_len(aname, a, axis):
ax = axis
if ax < 0:
ax += a.ndim
if 0 <= ax < a.ndim:
return a.shape[ax]
raise ValueError("'%saxis' entry is out of bounds" % (aname,))
def solve_circulant(c, b, singular='raise', tol=None,
caxis=-1, baxis=0, outaxis=0):
"""Solve C x = b for x, where C is a circulant matrix.
`C` is the circulant matrix associated with the vector `c`.
The system is solved by doing division in Fourier space. The
calculation is::
x = ifft(fft(b) / fft(c))
where `fft` and `ifft` are the fast Fourier transform and its inverse,
respectively. For a large vector `c`, this is *much* faster than
solving the system with the full circulant matrix.
Parameters
----------
c : array_like
The coefficients of the circulant matrix.
b : array_like
Right-hand side matrix in ``a x = b``.
singular : str, optional
This argument controls how a near singular circulant matrix is
handled. If `singular` is "raise" and the circulant matrix is
near singular, a `LinAlgError` is raised. If `singular` is
"lstsq", the least squares solution is returned. Default is "raise".
tol : float, optional
If any eigenvalue of the circulant matrix has an absolute value
that is less than or equal to `tol`, the matrix is considered to be
near singular. If not given, `tol` is set to::
tol = abs_eigs.max() * abs_eigs.size * np.finfo(np.float64).eps
where `abs_eigs` is the array of absolute values of the eigenvalues
of the circulant matrix.
caxis : int
When `c` has dimension greater than 1, it is viewed as a collection
of circulant vectors. In this case, `caxis` is the axis of `c` that
holds the vectors of circulant coefficients.
baxis : int
When `b` has dimension greater than 1, it is viewed as a collection
of vectors. In this case, `baxis` is the axis of `b` that holds the
right-hand side vectors.
outaxis : int
When `c` or `b` are multidimensional, the value returned by
`solve_circulant` is multidimensional. In this case, `outaxis` is
the axis of the result that holds the solution vectors.
Returns
-------
x : ndarray
Solution to the system ``C x = b``.
Raises
------
LinAlgError
If the circulant matrix associated with `c` is near singular.
See Also
--------
circulant
Notes
-----
For a one-dimensional vector `c` with length `m`, and an array `b`
with shape ``(m, ...)``,
solve_circulant(c, b)
returns the same result as
solve(circulant(c), b)
where `solve` and `circulant` are from `scipy.linalg`.
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.linalg import solve_circulant, solve, circulant, lstsq
>>> c = np.array([2, 2, 4])
>>> b = np.array([1, 2, 3])
>>> solve_circulant(c, b)
array([ 0.75, -0.25, 0.25])
Compare that result to solving the system with `scipy.linalg.solve`:
>>> solve(circulant(c), b)
array([ 0.75, -0.25, 0.25])
A singular example:
>>> c = np.array([1, 1, 0, 0])
>>> b = np.array([1, 2, 3, 4])
Calling ``solve_circulant(c, b)`` will raise a `LinAlgError`. For the
least square solution, use the option ``singular='lstsq'``:
>>> solve_circulant(c, b, singular='lstsq')
array([ 0.25, 1.25, 2.25, 1.25])
Compare to `scipy.linalg.lstsq`:
>>> x, resid, rnk, s = lstsq(circulant(c), b)
>>> x
array([ 0.25, 1.25, 2.25, 1.25])
A broadcasting example:
Suppose we have the vectors of two circulant matrices stored in an array
with shape (2, 5), and three `b` vectors stored in an array with shape
(3, 5). For example,
>>> c = np.array([[1.5, 2, 3, 0, 0], [1, 1, 4, 3, 2]])
>>> b = np.arange(15).reshape(-1, 5)
We want to solve all combinations of circulant matrices and `b` vectors,
with the result stored in an array with shape (2, 3, 5). When we
disregard the axes of `c` and `b` that hold the vectors of coefficients,
the shapes of the collections are (2,) and (3,), respectively, which are
not compatible for broadcasting. To have a broadcast result with shape
(2, 3), we add a trivial dimension to `c`: ``c[:, np.newaxis, :]`` has
shape (2, 1, 5). The last dimension holds the coefficients of the
circulant matrices, so when we call `solve_circulant`, we can use the
default ``caxis=-1``. The coefficients of the `b` vectors are in the last
dimension of the array `b`, so we use ``baxis=-1``. If we use the
default `outaxis`, the result will have shape (5, 2, 3), so we'll use
``outaxis=-1`` to put the solution vectors in the last dimension.
>>> x = solve_circulant(c[:, np.newaxis, :], b, baxis=-1, outaxis=-1)
>>> x.shape
(2, 3, 5)
>>> np.set_printoptions(precision=3) # For compact output of numbers.
>>> x
array([[[-0.118, 0.22 , 1.277, -0.142, 0.302],
[ 0.651, 0.989, 2.046, 0.627, 1.072],
[ 1.42 , 1.758, 2.816, 1.396, 1.841]],
[[ 0.401, 0.304, 0.694, -0.867, 0.377],
[ 0.856, 0.758, 1.149, -0.412, 0.831],
[ 1.31 , 1.213, 1.603, 0.042, 1.286]]])
Check by solving one pair of `c` and `b` vectors (cf. ``x[1, 1, :]``):
>>> solve_circulant(c[1], b[1, :])
array([ 0.856, 0.758, 1.149, -0.412, 0.831])
"""
c = np.atleast_1d(c)
nc = _get_axis_len("c", c, caxis)
b = np.atleast_1d(b)
nb = _get_axis_len("b", b, baxis)
if nc != nb:
raise ValueError('Incompatible c and b axis lengths')
fc = np.fft.fft(np.rollaxis(c, caxis, c.ndim), axis=-1)
abs_fc = np.abs(fc)
if tol is None:
# This is the same tolerance as used in np.linalg.matrix_rank.
tol = abs_fc.max(axis=-1) * nc * np.finfo(np.float64).eps
if tol.shape != ():
tol.shape = tol.shape + (1,)
else:
tol = np.atleast_1d(tol)
near_zeros = abs_fc <= tol
is_near_singular = np.any(near_zeros)
if is_near_singular:
if singular == 'raise':
raise LinAlgError("near singular circulant matrix.")
else:
# Replace the small values with 1 to avoid errors in the
# division fb/fc below.
fc[near_zeros] = 1
fb = np.fft.fft(np.rollaxis(b, baxis, b.ndim), axis=-1)
q = fb / fc
if is_near_singular:
# `near_zeros` is a boolean array, same shape as `c`, that is
# True where `fc` is (near) zero. `q` is the broadcasted result
# of fb / fc, so to set the values of `q` to 0 where `fc` is near
# zero, we use a mask that is the broadcast result of an array
# of True values shaped like `b` with `near_zeros`.
mask = np.ones_like(b, dtype=bool) & near_zeros
q[mask] = 0
x = np.fft.ifft(q, axis=-1)
if not (np.iscomplexobj(c) or np.iscomplexobj(b)):
x = x.real
if outaxis != -1:
x = np.rollaxis(x, -1, outaxis)
return x
# matrix inversion
def inv(a, overwrite_a=False, check_finite=True):
"""
Compute the inverse of a matrix.
Parameters
----------
a : array_like
Square matrix to be inverted.
overwrite_a : bool, optional
Discard data in `a` (may improve performance). Default is False.
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
ainv : ndarray
Inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is singular.
ValueError
If `a` is not square, or not 2-dimensional.
Examples
--------
>>> from scipy import linalg
>>> a = np.array([[1., 2.], [3., 4.]])
>>> linalg.inv(a)
array([[-2. , 1. ],
[ 1.5, -0.5]])
>>> np.dot(a, linalg.inv(a))
array([[ 1., 0.],
[ 0., 1.]])
"""
a1 = _asarray_validated(a, check_finite=check_finite)
if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
raise ValueError('expected square matrix')
overwrite_a = overwrite_a or _datacopied(a1, a)
#XXX: I found no advantage or disadvantage of using finv.
## finv, = get_flinalg_funcs(('inv',),(a1,))
## if finv is not None:
## a_inv,info = finv(a1,overwrite_a=overwrite_a)
## if info==0:
## return a_inv
## if info>0: raise LinAlgError, "singular matrix"
## if info<0: raise ValueError,\
## 'illegal value in %d-th argument of internal inv.getrf|getri'%(-info)
getrf, getri, getri_lwork = get_lapack_funcs(('getrf', 'getri',
'getri_lwork'),
(a1,))
lu, piv, info = getrf(a1, overwrite_a=overwrite_a)
if info == 0:
lwork = _compute_lwork(getri_lwork, a1.shape[0])
# XXX: the following line fixes curious SEGFAULT when
# benchmarking 500x500 matrix inverse. This seems to
# be a bug in LAPACK ?getri routine because if lwork is
# minimal (when using lwork[0] instead of lwork[1]) then
# all tests pass. Further investigation is required if
# more such SEGFAULTs occur.
lwork = int(1.01 * lwork)
inv_a, info = getri(lu, piv, lwork=lwork, overwrite_lu=1)
if info > 0:
raise LinAlgError("singular matrix")
if info < 0:
raise ValueError('illegal value in %d-th argument of internal '
'getrf|getri' % -info)
return inv_a
### Determinant
def det(a, overwrite_a=False, check_finite=True):
"""
Compute the determinant of a matrix
The determinant of a square matrix is a value derived arithmetically
from the coefficients of the matrix.
The determinant for a 3x3 matrix, for example, is computed as follows::
a b c
d e f = A
g h i
det(A) = a*e*i + b*f*g + c*d*h - c*e*g - b*d*i - a*f*h
Parameters
----------
a : (M, M) array_like
A square matrix.
overwrite_a : bool, optional
Allow overwriting data in a (may enhance performance).
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
det : float or complex
Determinant of `a`.
Notes
-----
The determinant is computed via LU factorization, LAPACK routine z/dgetrf.
Examples
--------
>>> from scipy import linalg
>>> a = np.array([[1,2,3], [4,5,6], [7,8,9]])
>>> linalg.det(a)
0.0
>>> a = np.array([[0,2,3], [4,5,6], [7,8,9]])
>>> linalg.det(a)
3.0
"""
a1 = _asarray_validated(a, check_finite=check_finite)
if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
raise ValueError('expected square matrix')
overwrite_a = overwrite_a or _datacopied(a1, a)
fdet, = get_flinalg_funcs(('det',), (a1,))
a_det, info = fdet(a1, overwrite_a=overwrite_a)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal '
'det.getrf' % -info)
return a_det
### Linear Least Squares
class LstsqLapackError(LinAlgError):
pass
def lstsq(a, b, cond=None, overwrite_a=False, overwrite_b=False,
check_finite=True, lapack_driver=None):
"""
Compute least-squares solution to equation Ax = b.
Compute a vector x such that the 2-norm ``|b - A x|`` is minimized.
Parameters
----------
a : (M, N) array_like
Left hand side matrix (2-D array).
b : (M,) or (M, K) array_like
Right hand side matrix or vector (1-D or 2-D array).
cond : float, optional
Cutoff for 'small' singular values; used to determine effective
rank of a. Singular values smaller than
``rcond * largest_singular_value`` are considered zero.
overwrite_a : bool, optional
Discard data in `a` (may enhance performance). Default is False.
overwrite_b : bool, optional
Discard data in `b` (may enhance performance). Default is False.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
lapack_driver: str, optional
Which LAPACK driver is used to solve the least-squares problem.
Options are ``'gelsd'``, ``'gelsy'``, ``'gelss'``. Default
(``'gelsd'``) is a good choice. However, ``'gelsy'`` can be slightly
faster on many problems. ``'gelss'`` was used historically. It is
generally slow but uses less memory.
.. versionadded:: 0.17.0
Returns
-------
x : (N,) or (N, K) ndarray
Least-squares solution. Return shape matches shape of `b`.
residues : () or (1,) or (K,) ndarray
Sums of residues, squared 2-norm for each column in ``b - a x``.
If rank of matrix a is ``< N`` or ``> M``, or ``'gelsy'`` is used,
this is an empty array. If b was 1-D, this is an (1,) shape array,
otherwise the shape is (K,).
rank : int
Effective rank of matrix `a`.
s : (min(M,N),) ndarray or None
Singular values of `a`. The condition number of a is
``abs(s[0] / s[-1])``. None is returned when ``'gelsy'`` is used.
Raises
------
LinAlgError
If computation does not converge.
ValueError
When parameters are wrong.
See Also
--------
optimize.nnls : linear least squares with non-negativity constraint
"""
a1 = _asarray_validated(a, check_finite=check_finite)
b1 = _asarray_validated(b, check_finite=check_finite)
if len(a1.shape) != 2:
raise ValueError('expected matrix')
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
if m != b1.shape[0]:
raise ValueError('incompatible dimensions')
driver = lapack_driver
if driver is None:
driver = lstsq.default_lapack_driver
if driver not in ('gelsd', 'gelsy', 'gelss'):
raise ValueError('LAPACK driver "%s" is not found' % driver)
lapack_func, lapack_lwork = get_lapack_funcs((driver,
'%s_lwork' % driver), (a1, b1))
real_data = True if (lapack_func.dtype.kind == 'f') else False
if m < n:
# need to extend b matrix as it will be filled with
# a larger solution matrix
if len(b1.shape) == 2:
b2 = np.zeros((n, nrhs), dtype=lapack_func.dtype)
b2[:m, :] = b1
else:
b2 = np.zeros(n, dtype=lapack_func.dtype)
b2[:m] = b1
b1 = b2
overwrite_a = overwrite_a or _datacopied(a1, a)
overwrite_b = overwrite_b or _datacopied(b1, b)
if cond is None:
cond = np.finfo(lapack_func.dtype).eps
if driver in ('gelss', 'gelsd'):
if driver == 'gelss':
lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
v, x, s, rank, work, info = lapack_func(a1, b1, cond, lwork,
overwrite_a=overwrite_a,
overwrite_b=overwrite_b)
elif driver == 'gelsd':
if real_data:
lwork, iwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
if iwork == 0:
# this is LAPACK bug 0038: dgelsd does not provide the
# size of the iwork array in query mode. This bug was
# fixed in LAPACK 3.2.2, released July 21, 2010.
mesg = ("internal gelsd driver lwork query error, "
"required iwork dimension not returned. "
"This is likely the result of LAPACK bug "
"0038, fixed in LAPACK 3.2.2 (released "
"July 21, 2010). ")
if lapack_driver is None:
# restart with gelss
lstsq.default_lapack_driver = 'gelss'
mesg += "Falling back to 'gelss' driver."
warnings.warn(mesg, RuntimeWarning)
return lstsq(a, b, cond, overwrite_a, overwrite_b,
check_finite, lapack_driver='gelss')
# can't proceed, bail out
mesg += ("Use a different lapack_driver when calling lstsq "
"or upgrade LAPACK.")
raise LstsqLapackError(mesg)
x, s, rank, info = lapack_func(a1, b1, lwork,
iwork, cond, False, False)
else: # complex data
lwork, rwork, iwork = _compute_lwork(lapack_lwork, m, n,
nrhs, cond)
x, s, rank, info = lapack_func(a1, b1, lwork, rwork, iwork,
cond, False, False)
if info > 0:
raise LinAlgError("SVD did not converge in Linear Least Squares")
if info < 0:
raise ValueError('illegal value in %d-th argument of internal %s'
% (-info, lapack_driver))
resids = np.asarray([], dtype=x.dtype)
if m > n:
x1 = x[:n]
if rank == n:
resids = np.sum(np.abs(x[n:])**2, axis=0)
x = x1
return x, resids, rank, s
elif driver == 'gelsy':
lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
jptv = np.zeros((a1.shape[1],1), dtype=np.int32)
v, x, j, rank, info = lapack_func(a1, b1, jptv, cond,
lwork, False, False)
if info < 0:
raise ValueError("illegal value in %d-th argument of internal "
"gelsy" % -info)
if m > n:
x1 = x[:n]
x = x1
return x, np.array([], x.dtype), rank, None
lstsq.default_lapack_driver = 'gelsd'
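# A small worked sketch for lstsq: fitting y = c0 + c1*x through points that lie
# exactly on the line y = 1 + x, so the least-squares solution is exact and easy
# to check by hand:
#
#     >>> import numpy as np
#     >>> from scipy.linalg import lstsq
#     >>> A = np.array([[1., 0.], [1., 1.], [1., 2.]])
#     >>> y = np.array([1., 2., 3.])
#     >>> coef, resid, rank, sv = lstsq(A, y)
#     >>> np.allclose(coef, [1., 1.])
#     True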
def pinv(a, cond=None, rcond=None, return_rank=False, check_finite=True):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate a generalized inverse of a matrix using a least-squares
solver.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
cond, rcond : float, optional
Cutoff for 'small' singular values in the least-squares solver.
Singular values smaller than ``rcond * largest_singular_value``
are considered zero.
return_rank : bool, optional
if True, return the effective rank of the matrix
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of matrix `a`.
rank : int
The effective rank of the matrix. Returned if return_rank == True
Raises
------
LinAlgError
If computation does not converge.
Examples
--------
>>> from scipy import linalg
>>> a = np.random.randn(9, 6)
>>> B = linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = _asarray_validated(a, check_finite=check_finite)
b = np.identity(a.shape[0], dtype=a.dtype)
if rcond is not None:
cond = rcond
x, resids, rank, s = lstsq(a, b, cond=cond, check_finite=False)
if return_rank:
return x, rank
else:
return x
def pinv2(a, cond=None, rcond=None, return_rank=False, check_finite=True):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate a generalized inverse of a matrix using its
singular-value decomposition and including all 'large' singular
values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
cond, rcond : float or None
Cutoff for 'small' singular values.
Singular values smaller than ``rcond*largest_singular_value``
are considered zero.
If None or -1, suitable machine precision is used.
return_rank : bool, optional
if True, return the effective rank of the matrix
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of matrix `a`.
rank : int
The effective rank of the matrix. Returned if return_rank == True
Raises
------
LinAlgError
If SVD computation does not converge.
Examples
--------
>>> from scipy import linalg
>>> a = np.random.randn(9, 6)
>>> B = linalg.pinv2(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = _asarray_validated(a, check_finite=check_finite)
u, s, vh = decomp_svd.svd(a, full_matrices=False, check_finite=False)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
rank = np.sum(s > cond * np.max(s))
u = u[:, :rank]
u /= s[:rank]
B = np.transpose(np.conjugate(np.dot(u, vh[:rank])))
if return_rank:
return B, rank
else:
return B
def pinvh(a, cond=None, rcond=None, lower=True, return_rank=False,
check_finite=True):
"""
Compute the (Moore-Penrose) pseudo-inverse of a Hermitian matrix.
Calculate a generalized inverse of a Hermitian or real symmetric matrix
using its eigenvalue decomposition and including all eigenvalues with
'large' absolute value.
Parameters
----------
a : (N, N) array_like
        Real symmetric or complex Hermitian matrix to be pseudo-inverted
cond, rcond : float or None
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : bool, optional
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
return_rank : bool, optional
if True, return the effective rank of the matrix
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
B : (N, N) ndarray
The pseudo-inverse of matrix `a`.
rank : int
The effective rank of the matrix. Returned if return_rank == True
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> from scipy.linalg import pinvh
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = _asarray_validated(a, check_finite=check_finite)
s, u = decomp.eigh(a, lower=lower, check_finite=False)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# For Hermitian matrices, singular values equal abs(eigenvalues)
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = 1.0 / s[above_cutoff]
u = u[:, above_cutoff]
B = np.dot(u * psigma_diag, np.conjugate(u).T)
if return_rank:
return B, len(psigma_diag)
else:
return B
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Ryota MIBU
import mox
import unittest
from quantum.openstack.common import uuidutils
from quantum.plugins.nec.common import ofc_client
from quantum.plugins.nec.db import models as nmodels
from quantum.plugins.nec import drivers
class TestConfig(object):
"""Configuration for this test"""
host = '127.0.0.1'
port = 8888
class TremaDriverTestBase():
driver_name = "trema"
def setUp(self):
self.mox = mox.Mox()
self.driver = drivers.get_driver(self.driver_name)(TestConfig)
self.mox.StubOutWithMock(ofc_client.OFCClient, 'do_request')
def tearDown(self):
self.mox.UnsetStubs()
def get_ofc_item_random_params(self):
"""create random parameters for ofc_item test"""
tenant_id = uuidutils.generate_uuid()
network_id = uuidutils.generate_uuid()
port_id = uuidutils.generate_uuid()
portinfo = nmodels.PortInfo(id=port_id, datapath_id="0x123456789",
port_no=1234, vlan_id=321,
mac="11:22:33:44:55:66")
return tenant_id, network_id, portinfo
class TremaDriverNetworkTestBase(TremaDriverTestBase):
def testa_create_network(self):
t, n, p = self.get_ofc_item_random_params()
description = "desc of %s" % n
body = {'id': n, 'description': description}
ofc_client.OFCClient.do_request("POST", "/networks", body=body)
self.mox.ReplayAll()
self.driver.create_network(t, description, n)
self.mox.VerifyAll()
def testb_update_network(self):
t, n, p = self.get_ofc_item_random_params()
description = "desc of %s" % n
body = {'description': description}
ofc_client.OFCClient.do_request("PUT", "/networks/%s" % n, body=body)
self.mox.ReplayAll()
self.driver.update_network(t, n, description)
self.mox.VerifyAll()
def testc_delete_network(self):
t, n, p = self.get_ofc_item_random_params()
ofc_client.OFCClient.do_request("DELETE", "/networks/%s" % n)
self.mox.ReplayAll()
self.driver.delete_network(t, n)
self.mox.VerifyAll()
class TremaPortBaseDriverTest(TremaDriverNetworkTestBase, unittest.TestCase):
driver_name = "trema_port"
def testd_create_port(self):
t, n, p = self.get_ofc_item_random_params()
body = {'id': p.id,
'datapath_id': p.datapath_id,
'port': str(p.port_no),
'vid': str(p.vlan_id)}
ofc_client.OFCClient.do_request("POST",
"/networks/%s/ports" % n, body=body)
self.mox.ReplayAll()
self.driver.create_port(t, n, p, p.id)
self.mox.VerifyAll()
def testd_delete_port(self):
t, n, p = self.get_ofc_item_random_params()
ofc_client.OFCClient.do_request("DELETE",
"/networks/%s/ports/%s" % (n, p.id))
self.mox.ReplayAll()
self.driver.delete_port(t, n, p.id)
self.mox.VerifyAll()
class TremaPortMACBaseDriverTest(TremaDriverNetworkTestBase,
unittest.TestCase):
driver_name = "trema_portmac"
def testd_create_port(self):
t, n, p = self.get_ofc_item_random_params()
dummy_port = "dummy-%s" % p.id
path_1 = "/networks/%s/ports" % n
body_1 = {'id': dummy_port,
'datapath_id': p.datapath_id,
'port': str(p.port_no),
'vid': str(p.vlan_id)}
ofc_client.OFCClient.do_request("POST", path_1, body=body_1)
path_2 = "/networks/%s/ports/%s/attachments" % (n, dummy_port)
body_2 = {'id': p.id, 'mac': p.mac}
ofc_client.OFCClient.do_request("POST", path_2, body=body_2)
path_3 = "/networks/%s/ports/%s" % (n, dummy_port)
ofc_client.OFCClient.do_request("DELETE", path_3)
self.mox.ReplayAll()
self.driver.create_port(t, n, p, p.id)
self.mox.VerifyAll()
def testd_delete_port(self):
t, n, p = self.get_ofc_item_random_params()
dummy_port = "dummy-%s" % p.id
path = "/networks/%s/ports/%s/attachments/%s" % (n, dummy_port, p.id)
ofc_client.OFCClient.do_request("DELETE", path)
self.mox.ReplayAll()
self.driver.delete_port(t, n, p.id)
self.mox.VerifyAll()
class TremaMACBaseDriverTest(TremaDriverNetworkTestBase, unittest.TestCase):
driver_name = "trema_mac"
def testd_create_port(self):
t, n, p = self.get_ofc_item_random_params()
path = "/networks/%s/attachments" % n
body = {'id': p.id, 'mac': p.mac}
ofc_client.OFCClient.do_request("POST", path, body=body)
self.mox.ReplayAll()
self.driver.create_port(t, n, p, p.id)
self.mox.VerifyAll()
def testd_delete_port(self):
t, n, p = self.get_ofc_item_random_params()
path = "/networks/%s/attachments/%s" % (n, p.id)
ofc_client.OFCClient.do_request("DELETE", path)
self.mox.ReplayAll()
self.driver.delete_port(t, n, p.id)
self.mox.VerifyAll()
class TremaFilterDriverTest(TremaDriverTestBase, unittest.TestCase):
def get_ofc_item_random_params(self):
"""create random parameters for ofc_item test"""
t, n, p = (super(TremaFilterDriverTest, self).
get_ofc_item_random_params())
filter_id = uuidutils.generate_uuid()
filter_dict = {'tenant_id': t,
'id': filter_id,
'network_id': n,
'priority': 123,
'action': "ACCEPT",
'in_port': p.id,
'src_mac': p.mac,
'dst_mac': "",
'eth_type': 0,
'src_cidr': "",
'dst_cidr': "",
'src_port': 0,
'dst_port': 0,
'protocol': "TCP",
'admin_state_up': True,
'status': "ACTIVE"}
filter_item = nmodels.PacketFilter(**filter_dict)
return t, n, p, filter_item
def testa_create_filter(self):
t, n, p, f = self.get_ofc_item_random_params()
ofp_wildcards = 'dl_vlan,dl_vlan_pcp,nw_tos,dl_dst,' + \
'nw_src:32,nw_dst:32,tp_src,tp_dst'
body = {'id': f.id,
'action': 'ALLOW',
'priority': 123,
'slice': n,
'in_datapath_id': '0x123456789',
'in_port': 1234,
'nw_proto': '0x6',
'dl_type': '0x800',
'dl_src': p.mac,
'ofp_wildcards': ofp_wildcards}
ofc_client.OFCClient.do_request("POST", "/filters", body=body)
self.mox.ReplayAll()
self.driver.create_filter(t, n, f, p, f.id)
self.mox.VerifyAll()
def testb_delete_filter(self):
t, n, p, f = self.get_ofc_item_random_params()
ofc_client.OFCClient.do_request("DELETE", "/filters/%s" % f.id)
self.mox.ReplayAll()
self.driver.delete_filter(t, n, f.id)
self.mox.VerifyAll()
|
|
# sapid lisp
# python implementation
# gilles.arcas@gmail.com, https://github.com/GillesArcas/sapid-lisp
# MIT license
from __future__ import print_function
import sys
import os
import time
# Configuration
TailRecursionOptimization = 'MutualRecursion' # None | 'TailRecursion' | 'MutualRecursion'
# 2/3 compatibility
if sys.version_info < (3,):
integer_types = (int, long,)
else:
integer_types = (int,)
if sys.version_info < (3,):
func_name_attribute = 'func_name'
else:
func_name_attribute = '__name__'
if sys.version_info < (3,):
input_func = raw_input
else:
input_func = input
# Items
# sapid provides 4 types of items: symbol, cons, int and str.
# int (including long) and str are native.
class symbol:
def __init__(self, pname, ftype=None, fval=None):
self.pname = pname
self.cval = None
self.ftype = ftype
self.fval = fval
self.plist = Nil
self.isvar = True
def __lt__(self, x):
return self.pname < x.pname
def __gt__(self, x):
return self.pname > x.pname
def __str__(self):
return self.pname
class cons:
def __init__(self, car, cdr):
self.car = car
self.cdr = cdr
def __str__(self):
return '(' + str(self.car) + ' . ' + str(self.cdr) + ')'
# Symbols
Oblist = {}
def intern(x, ftype=None, fval=None):
# case sensitive
if x in Oblist:
y = Oblist[x]
if ftype != None: y.ftype = ftype
if fval != None: y.fval = fval
return y
else:
Oblist[x] = symbol(x, ftype, fval)
return Oblist[x]
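# Illustrative sketch (not used by the interpreter; the helper name is made up):
# interning the same print name twice returns the identical symbol object, so
# `eq` on symbols reduces to Python object identity.
def _intern_identity_example():
    a = intern('example-symbol')
    b = intern('example-symbol')
    return a is b  # expected: True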
# Builtin symbols
def InitSymbols():
global Nil, T, FSubr, SSubr, Subr0, Subr1, Subr2, Subr12, Lambda, Macro, DMacro
FunctionTypeSymbols = [
'Subr0', 'Subr1', 'Subr2', 'Subr01', 'Subr12', 'NSubr', 'SSubr',
'FSubr', 'Lambda', 'Macro', 'DMacro']
ErrorSymbols = [
'UndefSymError', 'UndefFuncError', 'UndefTagError', 'NotSymbolArgError',
'NotNumberArgError', 'NotStringArgError', 'NotConsArgError', 'NotListArgError',
'NotCharArgError', 'NotVarError', 'ArgNumberError', 'ArgError',
'BindingError', 'SyntaxError', 'IoError', 'IndexError']
# Dummy definition for Nil required to intern Nil and give value to its plist.
# Nil plist is updated later. Other symbols get the correct plist value.
Nil = None
# Intern standard symbols in lower case
for sym in ['Nil', 'T', 'TopError', 'Eof']:
globals()[sym] = intern(sym.lower())
# Intern error symbols in camel case
for sym in ErrorSymbols:
globals()[sym] = intern(sym)
# Make Nil and T constant, complete Nil plist, and make Nil a list.
Nil.cval = Nil
Nil.isvar = False
T.cval = T
T.isvar = False
Nil.plist = Nil
Nil.car = Nil
Nil.cdr = Nil
# Make function definition forms return their own value
SSubr = intern('ssubr')
for sym in FunctionTypeSymbols:
globals()[sym] = intern(sym.lower(), SSubr, subr_selfvalue)
# Add eval function to each ftype symbol
FSubr.evalfunc = evalfsubr
SSubr.evalfunc = evalssubr
Subr0.evalfunc = evalsubr0
Subr1.evalfunc = evalsubr1
Subr2.evalfunc = evalsubr2
Subr01.evalfunc = evalsubr01
Subr12.evalfunc = evalsubr12
NSubr.evalfunc = evalnsubr
Lambda.evalfunc = evalexpr
Macro.evalfunc = evalmacro
DMacro.evalfunc = evaldmacro
# Add apply function to each ftype symbol
FSubr.applyfunc = evalfsubr
SSubr.applyfunc = evalssubr
Subr0.applyfunc = evalsubr0
Subr1.applyfunc = applysubr1
Subr2.applyfunc = applysubr2
Subr01.applyfunc = applysubr01
Subr12.applyfunc = applysubr12
NSubr.applyfunc = applynsubr
Lambda.applyfunc = applyexpr
Macro.applyfunc = evalmacro
DMacro.applyfunc = evalmacro # same as macro, no form to rplac
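# Sketch (assumption, never called): a new builtin function type could be
# registered the same way the standard ones are above, by interning its type
# symbol with the self-value SSubr and attaching eval/apply handlers to it.
def _register_ftype_sketch(name, evalfunc, applyfunc):
    ftype = intern(name, SSubr, subr_selfvalue)
    ftype.evalfunc = evalfunc
    ftype.applyfunc = applyfunc
    return ftype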
# Predicates
def symbolp(x):
return isinstance(x, symbol)
def numberp(x):
return isinstance(x, integer_types)
def stringp(x):
return isinstance(x, str)
def consp(x):
return isinstance(x, cons)
def atom(x):
return not consp(x)
def listp(x):
return x == Nil or consp(x)
def null(x):
return x == Nil
def variablep(x):
return symbolp(x) and x.isvar
def unboundp(x):
return x.cval == None
def charp(x):
return stringp(x) and len(x) == 1
# Evaluation stack
Stack = []
# Evaluation
def eval(x):
if symbolp(x):
if unboundp(x):
error(UndefSymError, 'eval', x)
else:
return x.cval
if numberp(x) or stringp(x):
return x
if consp(x):
return evalform(x, x.car, x.cdr)
# Form evaluation
def evalform(form, func, larg):
if symbolp(func):
return evalfn(form, func, func.ftype, func.fval, larg)
elif consp(func):
if stringp(func.cdr):
return evalfn(form, func, func.car, globals()[func.cdr], larg)
else:
return evalfn(form, func, func.car, func.cdr, larg)
else:
error(UndefFuncError, 'eval', func)
# Function evaluation
def evalfn(form, func, ftype, fval, larg):
if hasattr(ftype, 'evalfunc'):
return ftype.evalfunc(func, fval, larg, form)
else:
error(UndefFuncError, 'eval', func)
# Evaluation of builtin functions
# The minimum number of arguments is tested, extra arguments are ignored.
def evalssubr(func, fval, larg, form):
return form
def evalfsubr(func, fval, larg, form):
return fval(larg)
def evalsubr0(func, fval, larg, form):
return fval()
def evalsubr1(func, fval, larg, form):
assert_condition(consp(larg), ArgNumberError, func, 0)
x = evalprotect(larg.car)
return fval(x)
def evalsubr2(func, fval, larg, form):
assert_condition(consp(larg), ArgNumberError, func, 0)
assert_condition(consp(larg.cdr), ArgNumberError, func, 1)
x = evalprotect(larg.car)
y = evalprotect(larg.cdr.car)
return fval(x, y)
def evalnsubr(func, fval, larg, form):
evargs = []
while consp(larg):
evargs.append(evalprotect(larg.car))
larg = larg.cdr
return fval(evargs)
def evalsubr01(func, fval, larg, form):
x = evalprotect(larg.car) if consp(larg) else None
return fval(x)
def evalsubr12(func, fval, larg, form):
assert_condition(consp(larg), ArgNumberError, func, 0)
x = evalprotect(larg.car)
y = evalprotect(larg.cdr.car) if consp(larg.cdr) else None
return fval(x, y)
# Evaluation of expr function calls without recursion optimization
if TailRecursionOptimization == None:
def evalexpr(func, fval, larg, form, evalargs=True):
bind(func, fval, fval.car, larg, evalargs)
try:
x = eprogn(fval.cdr)
finally:
unbind()
return x
if TailRecursionOptimization == None:
def evalprotect(expr):
return eval(expr)
# Evaluation of expr function calls with tail recursion optimization
class TailRecException(Exception):
def __init__(self, fval):
self.fval = fval
if TailRecursionOptimization == 'TailRecursion':
def evalexpr(func, fval, larg, form, evalargs=True):
bind(func, fval, fval.car, larg, evalargs)
if istailrec(fval):
Stack.pop()
raise TailRecException(fval)
try:
while True:
try:
x = eprogn(fval.cdr)
break
except TailRecException:
pass
finally:
unbind()
return x
if TailRecursionOptimization == 'TailRecursion':
def istailrec(fv):
# the binding record of the call to test has already been pushed,
# so test the previous one
i = len(Stack) - 2
stackrec = Stack[i]
if 'lambda' not in stackrec:
return False
if stackrec['lambda'] == fv:
return True
else:
return False
if TailRecursionOptimization != None:
def evalprotect(expr):
# push a marker record on the stack so that tail recursion is not detected here
try:
Stack.append({'eval': expr})
return eval(expr)
finally:
Stack.pop()
# Evaluation of expr function calls with tail and mutual recursion optimization
if TailRecursionOptimization == 'MutualRecursion':
def evalexpr(func, fval, larg, form, evalargs=True):
bind(func, fval, fval.car, larg, evalargs)
if istailrec(fval):
Stack.pop()
raise TailRecException(fval)
try:
while True:
try:
x = eprogn(fval.cdr)
break
except TailRecException as e:
if e.fval == fval:
pass # tail recursion
else:
bindmerge() # mutual recursion
Stack.pop()
raise
except TailRecException:
raise
except:
unbind()
raise
else:
unbind()
return x
if TailRecursionOptimization == 'MutualRecursion':
def istailrec(fv):
# the binding record of the call to test has already been pushed,
# so compare with the previous ones
i = len(Stack) - 2
while i >= 0:
stackrec = Stack[i]
if 'lambda' not in stackrec:
return False
if stackrec['lambda'] == fv:
return True
i -= 1
else:
return False
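# Illustrative note: with mutually recursive definitions such as
#   (de even (n) (if (= n 0) t (odd (- n 1))))
#   (de odd (n) (if (= n 0) nil (even (- n 1))))
# the stack holds a run of consecutive lambda records; istailrec finds the
# earlier record for the same lambda, and bindmerge folds the intermediate
# bindings downwards while the raised TailRecException unwinds the stack.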
# Binding
def bind(func, fval, lpar, larg, evalargs=True):
# evaluates and saves arguments in stack record
stackrec = {}
stackrec['lambda'] = fval
# store argument values in stack record
while consp(lpar):
if atom(larg):
error(BindingError, func, larg)
elif not variablep(lpar.car):
error(NotVarError, func, lpar.car)
elif evalargs:
stackrec[lpar.car] = evalprotect(larg.car)
else:
stackrec[lpar.car] = larg.car
lpar = lpar.cdr
larg = larg.cdr
# if the parameter list ends in a single symbol, bind that symbol to the list of remaining argument values
if variablep(lpar) and listp(larg):
if evalargs:
stackrec[lpar] = evlis(larg)
else:
stackrec[lpar] = larg
# swap cell values and values in stack record
for key in stackrec.keys():
if symbolp(key):
key.cval, stackrec[key] = stackrec[key], key.cval
# push stack record
Stack.append(stackrec)
def unbind():
# pop record
stackrec = Stack.pop()
# restore environment
for key in stackrec.keys():
if symbolp(key):
key.cval = stackrec[key]
def bindmerge():
# merge bindings from top record with previous one
stackrec1 = Stack[len(Stack) - 1]
stackrec2 = Stack[len(Stack) - 2]
for x in stackrec1:
if x != 'lambda' and x not in stackrec2:
stackrec2[x] = stackrec1[x]
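# Minimal sketch of the shallow-binding scheme implemented by bind/unbind
# (illustration only, never called; the variable name is made up). The symbol's
# value cell always holds the current binding and the stack record keeps the
# saved outer value, so variable lookup is O(1) and unbinding is a plain swap.
def _shallow_binding_sketch():
    x = intern('sketch-var')
    outer = x.cval
    record = {x: outer}          # what bind() saves
    x.cval = 42                  # install the new binding
    x.cval = record[x]           # what unbind() restores
    return x.cval == outer       # expected: True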
# Evaluation of macros
def evalmacro(func, fval, larg, form):
x = applyexpr(func, fval, larg, form)
return eval(x)
def evaldmacro(func, fval, larg, form):
x = applyexpr(func, fval, larg, form)
if atom(x):
return eval(x)
else:
# displaces calling form with expansion
form.car = x.car
form.cdr = x.cdr
return eval(form)
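# Illustrative note on DMacro: the expansion physically replaces the calling
# form (via the form.car/form.cdr assignments above), so a displacement macro
# is expanded at most once per call site; later evaluations of the same form
# run the already-expanded code directly.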
# Apply
def applyform(func, larg):
if symbolp(func):
return applyfn(func, func.ftype, func.fval, larg)
elif consp(func):
if stringp(func.cdr):
return applyfn(func, func.car, globals()[func.cdr], larg)
else:
return applyfn(func, func.car, func.cdr, larg)
else:
error(UndefFuncError, 'apply', func)
def applyfn(func, ftype, fval, larg):
if hasattr(ftype, 'applyfunc'):
return ftype.applyfunc(func, fval, larg, None)
else:
error(UndefFuncError, 'apply', func)
def applysubr1(func, fval, larg, form):
assert_condition(consp(larg), ArgNumberError, func, 0)
return fval(larg.car)
def applysubr2(func, fval, larg, form):
assert_condition(consp(larg), ArgNumberError, func, 0)
assert_condition(consp(larg.cdr), ArgNumberError, func, 1)
return fval(larg.car, larg.cdr.car)
def applynsubr(func, fval, larg, form):
return fval(hostlist(larg))
def applysubr01(func, fval, larg, form):
return fval(larg.car if consp(larg) else None)
def applysubr12(func, fval, larg, form):
assert_condition(consp(larg), ArgNumberError, func, 0)
return fval(larg.car, larg.cdr.car if consp(larg.cdr) else None)
def applyexpr(func, fval, larg, form):
return evalexpr(func, fval, larg, form, evalargs=False)
# Evaluation of lists
def eprogn(x):
if atom(x):
if x == Nil:
return Nil
else:
error(NotListArgError, 'eprogn', x)
else:
while consp(x.cdr):
evalprotect(x.car)
x = x.cdr
if x.cdr == Nil:
return eval(x.car)
else:
error(NotListArgError, 'eprogn', x.cdr)
def evlis(x):
if atom(x):
if x == Nil:
return Nil
else:
error(NotListArgError, 'evlis', x)
else:
head = cons(Nil, Nil)
tail = head
while consp(x):
tail.cdr = cons(evalprotect(x.car), Nil)
tail = tail.cdr
x = x.cdr
if x == Nil:
return head.cdr
else:
error(NotListArgError, 'evlis', x)
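# Illustrative examples (assuming a proper list as input):
# eprogn of the list (1 2 3) evaluates each form and returns the last value, 3;
# evlis of the same list returns a fresh list of the evaluated elements, (1 2 3).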
# Exceptions
class EvalException(Exception):
def __init__(self, tag, result):
self.tag = tag
self.result = result
def itag(sym, x):
Stack.append({'tag':sym})
try:
if consp(x):
r = eprogn(x)
else:
r = x()
except EvalException as e:
if e.tag == sym:
return e.result
else:
raise
finally:
Stack.pop()
return r
def iexit(sym, seq):
raise EvalException(sym, eprogn(seq))
def lock(func, seq):
Stack.append({'lock':None})
try:
r = eprogn(seq)
except EvalException as e:
# uses (e.tag e.result) as list of arguments for the lock function
r = applyfn(func, func.car, func.cdr, cons(e.tag, cons(e.result, Nil)))
else:
# uses (nil r) as list of arguments for the lock function
r = applyfn(func, func.car, func.cdr, cons(Nil, cons(r, Nil)))
finally:
Stack.pop()
return r
# Exception subroutines
def subr_tag(larg): # FSubr
assert_symbolp(larg.car, 'tag')
return itag(larg.car, larg.cdr)
def subr_throw(larg): # FSubr
assert_listp(larg, 'throw')
tag = evalprotect(larg.car)
try:
Stack.append({'eval':None})
result = eprogn(larg.cdr)
finally:
Stack.pop()
raise EvalException(tag, result)
def subr_exit(larg): # FSubr
assert_symbolp(larg.car, 'exit')
return iexit(larg.car, larg.cdr)
def subr_lock(larg): # FSubr
assert_listp(larg, 'lock')
return lock(evalprotect(larg.car), larg.cdr)
# Streams
class TInputStream:
def __init__(self):
self.mode = 'r'
self.inputbuffer = ''
self.current = 1
self.last = 0
self.readinglist = 0
def readline(self):
self.inputbuffer += '\n'
self.current = 0
self.last = len(self.inputbuffer) - 1
class TInputStreamKeyboard(TInputStream):
def __init__(self):
TInputStream.__init__(self)
def readline(self):
sys.stdout.write(Prompt)
self.inputbuffer = input_func()
TInputStream.readline(self)
class TInputStreamFile(TInputStream):
def __init__(self, x):
try:
self.inputfile = open(findfile(x))
except:
error(IoError, 'openi', x)
TInputStream.__init__(self)
def close(self):
self.inputfile.close()
def readline(self):
try:
self.inputbuffer = self.inputfile.readline()
if self.inputbuffer == '':
raise StopIteration
else:
TInputStream.readline(self)
except StopIteration:
if self.readinglist:
readerror(1)
else:
iexit(Eof, Nil)
except Exception as e:
error(IoError, 'read', e.args[0])
def findfile(filename):
if os.path.exists(filename):
return filename
else:
pathname = os.path.join(os.path.dirname(__file__), filename)
if os.path.exists(pathname):
return pathname
else:
return filename
class TOutputStream:
def __init__(self):
self.mode = 'w'
def flush(self):
pass
class TOutputStreamConsole(TOutputStream):
def prinstring(self, x):
sys.stdout.write(x)
class TOutputStreamFile(TOutputStream):
def __init__(self, x):
try:
self.outputfile = open(x, 'w')
except:
error(IoError, 'openo', x)
TOutputStream.__init__(self)
def flush(self):
self.outputfile.flush()
def close(self):
self.outputfile.close()
def prinstring(self, x):
try:
self.outputfile.write(x)
except Exception as e:
error(IoError, 'print', e.args[0])
def InitStreams():
global Streams, inputhandle, inputstream, outputhandle, outputstream, Prompt
Streams = {}
Prompt = '? '
# create and set default streams
Streams[0] = TInputStreamKeyboard()
Streams[1] = TOutputStreamConsole()
inputhandle = 0
outputhandle = 1
inputstream = Streams[0]
outputstream = Streams[1]
def internstream(x):
global Streams
handle = max(Streams.keys()) + 1
Streams[handle] = x
return handle
def subr_openi(filename): # Subr1
return internstream(TInputStreamFile(filename))
def subr_openo(filename): # Subr1
return internstream(TOutputStreamFile(filename))
def subr_close(handle): # Subr1
try:
Streams[handle].close()
Streams[handle] = None
return T
except:
error(IoError, 'close', handle)
def subr_input(handle): # Subr01
global inputhandle, inputstream
if handle != None:
assert_numberp(handle, 'input')
assert_condition(is_handle_valid(handle, 'r'), ArgError, 'input', handle)
inputhandle = handle
inputstream = Streams[handle]
return inputhandle
def subr_output(handle): # Subr01
global outputhandle, outputstream
if handle != None:
assert_numberp(handle, 'output')
assert_condition(is_handle_valid(handle, 'w'), ArgError, 'output', handle)
outputhandle = handle
outputstream = Streams[handle]
return outputhandle
def is_handle_valid(handle, mode):
return (0 <= handle < len(Streams) and
Streams[handle] != None and
Streams[handle].mode == mode)
def subr_prompt(arg): # Subr01
global Prompt
if arg != None:
Prompt = arg
return Prompt
# Access to input buffer
def readchar(): # Subr0
x = peekchar()
inputstream.current += 1
return x
def peekchar(): # Subr0
if inputstream.current > inputstream.last:
inputstream.readline()
return inputstream.inputbuffer[inputstream.current]
def subr_readline(): # Subr0
if inputstream.current > inputstream.last:
inputstream.readline()
r = inputstream.inputbuffer[inputstream.current:].rstrip()
inputstream.current = len(inputstream.inputbuffer)
return r
# Character types
def InitChars():
global typechar
global NullCh, QuoteCh, BComCh, EComCh, SepCh, MacroCh
global StringCh, SymbolCh, SpecialCh, LParCh, RParCh
(NullCh, QuoteCh, BComCh, EComCh, SepCh, MacroCh,
StringCh, SymbolCh, SpecialCh, LParCh, RParCh) = range(11)
typechar = dict(zip([chr(x) for x in range(256)], [NullCh] * 32 + [SymbolCh] * (256 - 32)))
typechar[';' ] = BComCh
typechar['\n'] = EComCh
typechar[' ' ] = SepCh
typechar['\t'] = SepCh
typechar['(' ] = LParCh
typechar[')' ] = RParCh
typechar['"' ] = StringCh
typechar['|' ] = SpecialCh
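# Illustrative note: no character is mapped to MacroCh by default; lisp code
# can promote one at run time with the typech subroutine defined below, after
# which readmacro calls the function definition of the symbol whose name is
# that character.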
# Reading and access to valid characters
def readcv():
ch = readchar()
tc = typechar[ch]
if tc == BComCh:
return readcom()
elif tc == EComCh:
return ' '
elif tc == NullCh:
return readcv()
else:
return ch
def peekcv():
ch = peekchar()
tc = typechar[ch]
if tc == BComCh:
return readcom()
elif tc == EComCh:
return ' '
elif tc == NullCh:
readchar()
return peekcv()
else:
return ch
def nextcv():
cv = readcv()
while typechar[cv] == SepCh:
cv = readcv()
return cv
def readcom():
while typechar[readchar()] != EComCh:
pass
return ' '
# Dot may be used in symbol names but not alone: a lone dot is
# read as the dotted pair marker.
def isdottedpair(cv):
return cv == '.' and typechar[peekcv()] != SymbolCh
# Reading
def readitem():
return readinternal(nextcv())
# Reading an item, cv is the first valid character
def readinternal(cv):
tc = typechar[cv]
if tc == LParCh:
return readlist(nextcv())
elif tc == RParCh:
return readinternal(nextcv())
elif tc == StringCh:
return readstring()
elif tc == SpecialCh:
return readspecial()
elif tc == MacroCh:
return readmacro(cv)
elif isdottedpair(cv):
readerror(2)
else:
return readatom(cv)
# Reading symbols and numbers
def readatom(cv):
x = cv
while typechar[peekcv()] == SymbolCh:
x += readcv()
try:
return int(x)
except ValueError:
return intern(x)
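# Illustrative note: readatom first tries to parse the token as an integer, so
# "123" reads as the number 123 while "123abc" or "abc" reads as a symbol.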
# Reading special symbols and strings
def readspecial():
return intern(readuntil(SpecialCh))
def readstring():
return readuntil(StringCh)
def readuntil(delim):
x = ''
while True:
ch = readchar()
if ch == '\n':
readerror(3)
elif typechar[ch] != delim:
x += ch
elif typechar[peekchar()] == delim:
x += readchar()
else:
return x
# Reading macro characters
def readmacro(c):
sym = intern(c)
func = sym.fval
return evalexpr(sym, func, Nil, Nil)
# Reading lists
def readlist(cv):
if typechar[cv] == RParCh:
return Nil
elif isdottedpair(cv):
readerror(2)
else:
inputstream.readinglist += 1
x = readinternal(cv)
y = readcdr(nextcv())
inputstream.readinglist -= 1
return cons(x, y)
def readcdr(cv):
if typechar[cv] == RParCh:
return Nil
elif isdottedpair(cv):
return readdot(nextcv())
else:
x = readinternal(cv)
y = readcdr(nextcv())
return cons(x, y)
def readdot(cv):
if typechar[cv] == RParCh:
readerror(2)
else:
x = readinternal(cv)
if typechar[nextcv()] != RParCh:
readerror(2)
else:
return x
# Reading error
def readerror(errornum):
error(SyntaxError, 'read', errornum)
# Loading
def load(filename):
global inputstream
inputstreambak = inputstream
inputstreamnew = TInputStreamFile(filename)
try:
inputstream = inputstreamnew
itag(Eof, loadloop)
finally:
inputstream = inputstreambak
return filename
def loadloop():
while True:
eval(readitem())
return Nil
# Printing atoms
def prinatom(x):
outputstream.prinstring(str(x))
# Printing lists
def princons(x):
outputstream.prinstring('(')
while consp(x.cdr):
prin(x.car)
outputstream.prinstring(' ')
x = x.cdr
prin(x.car)
if not null(x.cdr):
outputstream.prinstring(' . ')
prin(x.cdr)
outputstream.prinstring(')')
# Printing functions
def prin(x):
if atom(x):
prinatom(x)
else:
princons(x)
return x
def terpri():
outputstream.prinstring('\n')
return Nil
def printitem(x):
prin(x)
terpri()
return x
# Subr evaluation
# Evaluation functions
def subr_selfvalue(): # SSubr, handled in evalfn
return None
def subr_quote(x): # FSubr
return x.car
# Control functions
def subr_if(larg): # FSubr
if evalprotect(larg.car) != Nil:
return eval(larg.cdr.car)
else:
return eprogn(larg.cdr.cdr)
def subr_while(larg): # FSubr
try:
Stack.append({'eval':None})
while eval(larg.car) != Nil:
eprogn(larg.cdr)
return Nil
finally:
Stack.pop()
# Type predicates
def subr_symbolp(x): # Subr1
return T if symbolp(x) else Nil
def subr_numberp(x): # Subr1
return x if numberp(x) else Nil
def subr_stringp(x): # Subr1
return x if stringp(x) else Nil
def subr_consp(x): # Subr1
return x if consp(x) else Nil
# Equality
def subr_eq(x, y): # Subr2
return T if id(x) == id(y) else Nil
# Symbols
def oblist(): # Subr0
head = cons(Nil, Nil)
tail = head
for x in Oblist:
tail.cdr = cons(intern(x), Nil)
tail = tail.cdr
return head.cdr
def subr_value(sym, val=None): # Subr12
if val == None:
assert_symbolp(sym, 'value')
assert_boundp(sym, 'value')
else:
assert_variablep(sym, 'value' )
sym.cval = val
return sym.cval
def subr_boundp(sym): # Subr1
assert_symbolp(sym, 'boundp')
return T if sym.cval != None else Nil
def subr_makunbound(sym): # Subr1
assert_variablep(sym, 'makunbound')
sym.cval = None
return sym
def subr_fvalue(sym, val=None): # Subr12
assert_symbolp(sym, 'fvalue')
if val == None:
return getfvalue(sym)
else:
return setfvalue(sym, val)
def getfvalue(sym):
if sym.ftype == None:
return Nil
elif sym.ftype in [Lambda, Macro, DMacro]:
return cons(sym.ftype, sym.fval)
elif hasattr(sym.fval, func_name_attribute):
return cons(sym.ftype, getattr(sym.fval, func_name_attribute))
else:
error(ArgError, 'valfn', sym)
def setfvalue(sym, val):
assert_listp(val, 'fvalue')
if val == Nil:
sym.ftype = None
else:
assert_symbolp(val.car, 'fvalue')
sym.ftype = val.car
if stringp(val.cdr):
sym.fval = globals()[val.cdr]
else:
sym.fval = val.cdr
return val
def subr_de (x): # FSubr
return defun(x, Lambda)
def subr_dm (x): # FSubr
return defun(x, Macro)
def subr_dmd(x): # FSubr
return defun(x, DMacro)
def defun(x, type):
func = x.car
func.ftype = type
func.fval = x.cdr
return func
def subr_plist(sym, val=None): # Subr12
assert_symbolp(sym, 'plist')
if val != None:
assert_listp(val, 'plist')
sym.plist = val
return sym.plist
def subr_memprop(sym, ind): # Subr2
assert_symbolp(sym, 'memprop')
list = sym.plist
while list != Nil:
if list.car == ind:
return list
else:
list = list.cdr.cdr
# not found
return Nil
# Lists
def subr_car(x): # Subr1
assert_listp(x, 'car')
return x.car
def subr_cdr(x): # Subr1
assert_listp(x, 'cdr')
return x.cdr
def subr_cons(x, y): # Subr2
return cons(x, y)
def subr_rplaca(x, y): # Subr2
assert_consp(x, 'rplaca')
x.car = y
return x
def subr_rplacd(x, y): # Subr2
assert_consp(x, 'rplacd')
x.cdr = y
return x
def subr_nthcdr(n, l): # Subr2
assert_numberp(n, 'nthcdr')
assert_listp(l, 'nthcdr')
i = 0
while i < n and consp(l):
l = l.cdr
i += 1
return l
# Polymorphic functions
def subr_eqval(x, y): # Subr2
return T if x == y else Nil
def subr_lt(x, y): # Subr2
assert_sametype(x, y, '<')
return T if x < y else Nil
def subr_gt(x, y): # Subr2
assert_sametype(x, y, '>')
return T if x > y else Nil
def subr_add(x, y): # Subr2
if numberp(x):
assert_numberp(y, '+')
return x + y
if stringp(x):
assert_stringp(y, '+')
return x + y
else:
error(ArgError, '+', cons(x, y))
# Numbers
def subr_sub(x, y): # Subr2
assert_numberp(x, '-')
assert_numberp(y, '-')
return x - y
def subr_mul(x, y): # Subr2
assert_numberp(x, '*')
assert_numberp(y, '*')
return x * y
def subr_div(x, y): # Subr2
assert_numberp(x, '/')
assert_numberp(y, '/')
return x // y
def subr_mod(x, y): # Subr2
assert_numberp(x, '%')
assert_numberp(y, '%')
return x % y
# Characters
def subr_typech(char, type=None): # Subr12
assert_charp(char, 'typech')
c = char[0]
if type != None:
typechar[c] = type
return typechar[c]
# Strings
def subr_strlen(x): # Subr1
assert_stringp(x, 'strlen')
return len(x)
def subr_strnth(x, n): # Subr2
assert_stringp(x, 'strnth')
assert_numberp(n, 'strnth')
assert_condition(0 <= n < len(x), IndexError, 'strnth', n)
return x[n]
def subr_strpos(x, y): # Subr2
assert_stringp(x, 'strpos')
assert_stringp(y, 'strpos')
return x.find(y)
def subr_strsub(larg): # NSubr
assert_condition(2 <= len(larg) <= 3, ArgNumberError, 'strsub', len(larg))
assert_stringp(larg[0], 'strsub')
assert_numberp(larg[1], 'strsub')
if len(larg) == 2:
return larg[0][larg[1]:]
else:
assert_numberp(larg[2], 'strsub')
return larg[0][larg[1]:larg[2]]
def subr_string(x): # Subr1
return str(x)
def subr_symbol(x): # Subr1
assert_stringp(x, 'symbol')
return intern(x)
def subr_unique(x): # Subr1
assert_stringp(x, 'unique')
return symbol(x)
def subr_number(x): # Subr1
assert_stringp(x, 'number')
try:
y = int(x)
return y
except ValueError:
error(NotNumberArgError, 'number', x)
# System
def subr_toplevel(): # Subr0
x = eval(readitem())
print('= ', end=' ')
printitem(x)
return x
def subr_end(): # Subr0
sys.exit()
def subr_time(): # Subr0
return int(time.time())
def subr_cls(): # Subr0
print(os.system('cls'), chr(13), ' ', chr(13), end=' ')
return T
def format_stackrec(stackrec):
if 'eval' in stackrec:
return cons('eval', cons(stackrec['eval'], Nil))
if 'tag' in stackrec:
return cons('tag', cons(stackrec['tag'], Nil))
if 'lock' in stackrec:
return cons('lock', Nil)
if 'lambda' in stackrec:
return cons('lambda', cons(stackrec['lambda'], Nil))
return Nil
def subr_stack(): # Subr0
return lisplist([format_stackrec(x) for x in Stack])
# Helpers
def lisplist(iterable):
head = cons(Nil, Nil)
tail = head
for x in iterable:
tail.cdr = cons(x, Nil)
tail = tail.cdr
return head.cdr
def hostlist(list):
r = []
while consp(list):
r.append(list.car)
list = list.cdr
return r
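# Hypothetical round-trip sketch (not called anywhere): converting a host list
# to a lisp list and back should preserve the elements once Nil has been
# interned by InitSymbols.
def _list_roundtrip_example():
    xs = [1, 2, 3]
    return hostlist(lisplist(xs)) == xs  # expected: True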
# Initialisation of builtin functions
def InitSubrs():
intern('eval' , Subr1 , eval)
intern('evlis' , Subr1 , evlis)
intern('eprogn' , Subr1 , eprogn)
intern('progn' , FSubr , eprogn)
intern('quote' , FSubr , subr_quote)
intern('apply' , Subr2 , applyform)
intern('if' , FSubr , subr_if)
intern('while' , FSubr , subr_while)
intern('tag' , FSubr , subr_tag)
intern('exit' , FSubr , subr_exit)
intern('lock' , FSubr , subr_lock)
intern('throw' , FSubr , subr_throw)
intern('symbolp' , Subr1 , subr_symbolp)
intern('numberp' , Subr1 , subr_numberp)
intern('stringp' , Subr1 , subr_stringp)
intern('consp' , Subr1 , subr_consp)
intern('eq' , Subr2 , subr_eq)
intern('oblist' , Subr0 , oblist)
intern('value' , Subr12, subr_value)
intern('boundp' , Subr1 , subr_boundp)
intern('makunbound', Subr1, subr_makunbound)
intern('fvalue' , Subr12, subr_fvalue)
intern('de' , FSubr , subr_de)
intern('dm' , FSubr , subr_dm)
intern('dmd' , FSubr , subr_dmd)
intern('plist' , Subr12, subr_plist)
intern('memprop' , Subr2, subr_memprop)
intern('car' , Subr1 , subr_car)
intern('cdr' , Subr1 , subr_cdr)
intern('rplaca' , Subr2 , subr_rplaca)
intern('rplacd' , Subr2 , subr_rplacd)
intern('cons' , Subr2 , subr_cons)
intern('nthcdr' , Subr2 , subr_nthcdr)
intern('=' , Subr2 , subr_eqval)
intern('<' , Subr2 , subr_lt)
intern('>' , Subr2 , subr_gt)
intern('+' , Subr2 , subr_add)
intern('-' , Subr2 , subr_sub)
intern('*' , Subr2 , subr_mul)
intern('/' , Subr2 , subr_div)
intern('%' , Subr2 , subr_mod)
intern('strlen' , Subr1 , subr_strlen)
intern('strnth' , Subr2 , subr_strnth)
intern('strpos' , Subr2 , subr_strpos)
intern('strsub' , NSubr , subr_strsub)
intern('symbol' , Subr1 , subr_symbol)
intern('unique' , Subr1 , subr_unique)
intern('number' , Subr1 , subr_number)
intern('string' , Subr1 , subr_string)
intern('openi' , Subr1 , subr_openi)
intern('openo' , Subr1 , subr_openo)
intern('close' , Subr1 , subr_close)
intern('input' , Subr01, subr_input)
intern('output' , Subr01, subr_output)
intern('prompt' , Subr01, subr_prompt)
intern('readline', Subr0 , subr_readline)
intern('typech' , Subr12, subr_typech)
intern('readchar', Subr0 , readchar)
intern('peekchar', Subr0 , peekchar)
intern('read' , Subr0 , readitem)
intern('load' , Subr1 , load)
intern('print' , Subr1 , printitem)
intern('prin' , Subr1 , prin)
intern('terpri' , Subr0 , terpri)
intern('toplevel', Subr0 , subr_toplevel)
intern('error' , NSubr , subr_error)
intern('time' , Subr0 , subr_time)
intern('end' , Subr0 , subr_end)
intern('cls' , Subr0 , subr_cls)
intern('stack' , Subr0 , subr_stack)
# Assertions
def assert_condition(condition, error_symbol, func, arg):
if not condition:
error(error_symbol, func, arg)
def assert_symbolp(arg, func):
assert_condition(symbolp(arg), NotSymbolArgError, func, arg)
def assert_numberp(arg, func):
assert_condition(numberp(arg), NotNumberArgError, func, arg)
def assert_stringp(arg, func):
assert_condition(stringp(arg), NotStringArgError, func, arg)
def assert_consp(arg, func):
assert_condition(consp(arg), NotConsArgError, func, arg)
def assert_listp(arg, func):
assert_condition(listp(arg), NotListArgError, func, arg)
def assert_variablep(arg, func):
assert_condition(variablep(arg), NotVarError, func, arg)
def assert_boundp(arg, func):
assert_condition(not unboundp(arg), UndefSymError, func, arg)
def assert_sametype(x, y, func):
if (x.__class__ == y.__class__) or (numberp(x) and numberp(y)):
return
else:
error(ArgError, func, cons(x, y))
def assert_charp(arg, func):
assert_condition(charp(arg), NotCharArgError, func, arg)
# Error handling
def printerror(error_symbol, func, arg):
print('\n** ', end=' ')
prin(error_symbol if unboundp(error_symbol) else error_symbol.cval)
print(' : ', end=' ')
prin(func)
print(' : ', end=' ')
prin(arg)
print('\n')
def error(error_symbol, func, arg):
# goes through applyform so that the error function can be redefined in lisp
applyform(intern('error'), lisplist([error_symbol, func, arg]))
def subr_error(larg): # NSubr
assert_condition(len(larg) == 3, ArgNumberError, 'error', len(larg))
error_symbol, func, arg = larg
printerror(error_symbol, func, arg)
# the result of the top-level exit is the name of the error;
# since the body of the exit form is evaluated, construct ('error_symbol)
iexit(TopError, cons(cons(intern('quote'), cons(error_symbol, Nil)), Nil))
# Main loop
def mainloop():
while True:
try:
itag(TopError, calltoplevel)
except SystemExit:
return
except EvalException as e:
printerror(UndefTagError, 'eval', e.tag)
except (RuntimeError, ZeroDivisionError) as e:
print('\n** Runtime error: %s\n' % e.args)
except KeyboardInterrupt:
print('\n** Interrupt by user.\n')
except:
print('\n** Host error.\n')
raise
def calltoplevel():
# goes through applyform so that the toplevel function can be redefined in lisp
applyform(intern('toplevel'), Nil)
# Main
def init():
InitChars()
InitStreams()
InitSymbols()
InitSubrs()
sys.setrecursionlimit(8000)
if os.path.exists(findfile('sapid.ini')):
itag(TopError, loadini)
def loadini():
load('sapid.ini')
init()
if __name__ == '__main__':
mainloop()
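# Example session (illustrative; exact spacing of the printed output may differ):
#   ? (de square (x) (* x x))
#   =  square
#   ? (square 12)
#   =  144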
|
|
################################################################################
# The Neural Network (NN) based Speech Synthesis System
# https://svn.ecdf.ed.ac.uk/repo/inf/dnn_tts/
#
# Centre for Speech Technology Research
# University of Edinburgh, UK
# Copyright (c) 2014-2015
# All Rights Reserved.
#
# The system as a whole and most of the files in it are distributed
# under the following copyright and conditions
#
# Permission is hereby granted, free of charge, to use and distribute
# this software and its documentation without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this work, and to
# permit persons to whom this work is furnished to do so, subject to
# the following conditions:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# - The authors' names may not be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THE UNIVERSITY OF EDINBURGH AND THE CONTRIBUTORS TO THIS WORK
# DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT
# SHALL THE UNIVERSITY OF EDINBURGH NOR THE CONTRIBUTORS BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
# THIS SOFTWARE.
################################################################################
import math
import ConfigParser
import os
import logging
import StringIO
import sys
import textwrap
import datetime
class configuration(object):
"""Configuration settings. Any user-specific values are read from an external file
and parsed by an instance of the built-in ConfigParser class"""
def __init__(self):
# doesn't do anything
pass
def configure(self, configFile=None, use_logging=True):
# get a logger
logger = logging.getLogger("configuration")
# this (and only this) logger needs to be configured immediately, otherwise it won't work
# we can't use the full user-supplied configuration mechanism in this particular case,
# because we haven't loaded it yet!
#
# so, just use simple console-only logging
logger.setLevel(logging.DEBUG) # this level is hardwired here - should change it to INFO
# add a handler & its formatter - will write only to console
ch = logging.StreamHandler()
logger.addHandler(ch)
formatter = logging.Formatter('%(asctime)s %(levelname)8s%(name)15s: %(message)s')
ch.setFormatter(formatter)
# first, set up some default configuration values
self.initial_configuration()
# next, load in any user-supplied configuration values
# that might over-ride the default values
self.user_configuration(configFile)
# now that we have loaded the user's configuration, we can load the
# separate config file for logging (the name of that file will be specified in the config file)
if use_logging:
self.logging_configuration()
# finally, set up all remaining configuration values
# that depend upon either default or user-supplied values
self.complete_configuration()
logger.debug('configuration completed')
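# Typical usage (illustrative sketch; the file name below is an assumption):
#   cfg = configuration()
#   cfg.configure('experiment.conf')
#   print cfg.work_dir, cfg.hidden_layer_size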
def initial_configuration(self):
# to be called before loading any user specific values
# things to put here are
# 1. variables that the user cannot change
# 2. variables that need to be set before loading the user's config file
self.UTTID_REGEX = '(.*)\..*'
def user_configuration(self,configFile=None):
# get a logger
logger = logging.getLogger("configuration")
# load and parse the provided configFile, if provided
if not configFile:
logger.warn('no user configuration file provided; using only built-in default settings')
return
# load the config file
try:
configparser = ConfigParser.ConfigParser()
configparser.readfp(open(configFile))
logger.debug('successfully read and parsed user configuration file %s' % configFile)
except:
logger.fatal('error reading user configuration file %s' % configFile)
raise
# work_dir must be provided before initialising the other directories
self.work_dir = None
if self.work_dir == None:
try:
self.work_dir = configparser.get('Paths', 'work')
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
if self.work_dir == None:
logger.critical('Paths:work has no value!')
raise Exception
# look for those items that are user-configurable, and get their values
# sptk_bindir= ....
# a list instead of a dict because OrderedDict is not available until 2.7
# and I don't want to import theano here just for that one class
# each entry is a tuple of (variable name, default value, section in config file, option name in config file)
#
# the type of the default value is important and controls the type that the corresponding
# variable will have
#
# to set a default value of 'undefined' use an empty string
# or the special value 'impossible', as appropriate
#
impossible_int=int(-99999)
impossible_float=float(-99999.0)
user_options = [
('work_dir', self.work_dir, 'Paths','work'),
('data_dir', '', 'Paths','data'),
('plot_dir', '', 'Paths','plot'),
('plot', True, 'Utility', 'plot'),
('profile', False, 'Utility', 'profile'),
('file_id_scp' , os.path.join(self.work_dir, 'data/file_id_list.scp') , 'Paths', 'file_id_list'),
('test_id_scp' , os.path.join(self.work_dir, 'data/test_id_list.scp') , 'Paths', 'test_id_list'),
('GV_dir' , os.path.join(self.work_dir, 'data/GV' ) , 'Paths', 'GV_dir'),
('in_stepw_dir' , os.path.join(self.work_dir, 'data/stepw'), 'Paths', 'in_stepw_dir'),
('in_mgc_dir' , os.path.join(self.work_dir, 'data/mgc') , 'Paths', 'in_mgc_dir'),
('in_fft_dir' , os.path.join(self.work_dir, 'data/fft') , 'Paths', 'in_fft_dir'),
('in_samp_dir' , os.path.join(self.work_dir, 'data/samp') , 'Paths', 'in_samp_dir'),
('in_lf0_dir' , os.path.join(self.work_dir, 'data/lf0') , 'Paths', 'in_lf0_dir'),
('in_bap_dir' , os.path.join(self.work_dir, 'data/bap') , 'Paths', 'in_bap_dir'),
('in_sp_dir' , os.path.join(self.work_dir, 'data/sp' ) , 'Paths', 'in_sp_dir'),
('in_seglf0_dir', os.path.join(self.work_dir, 'data/lf03') , 'Paths', 'in_seglf0_dir'),
## for glottHMM
('in_F0_dir' , os.path.join(self.work_dir, 'data/F0') , 'Paths', 'in_F0_dir'),
('in_Gain_dir' , os.path.join(self.work_dir, 'data/Gain') , 'Paths', 'in_Gain_dir'),
('in_HNR_dir' , os.path.join(self.work_dir, 'data/HNR') , 'Paths', 'in_HNR_dir'),
('in_LSF_dir' , os.path.join(self.work_dir, 'data/LSF') , 'Paths', 'in_LSF_dir'),
('in_LSFsource_dir' , os.path.join(self.work_dir, 'data/LSFsource') , 'Paths', 'in_LSFsource_dir'),
## for joint duration
('in_seq_dur_dir' , os.path.join(self.work_dir, 'data/S2S_dur') , 'Paths', 'in_seq_dur_dir'),
('in_dur_dir' , os.path.join(self.work_dir, 'data/dur') , 'Paths', 'in_dur_dir'),
('nn_norm_temp_dir', os.path.join(self.work_dir, 'data/step_hidden9'), 'Paths', 'nn_norm_temp_dir'),
('process_labels_in_work_dir', False, 'Labels', 'process_labels_in_work_dir'),
('label_style' , 'HTS' , 'Labels', 'label_style'),
('label_type' , 'state_align' , 'Labels', 'label_type'),
('in_label_align_dir' , os.path.join(self.work_dir, 'data/label_state_align') , 'Labels', 'label_align'),
('question_file_name' , os.path.join(self.work_dir, 'data/questions.hed') , 'Labels', 'question_file_name'),
('silence_pattern' , ['*-#+*'] , 'Labels', 'silence_pattern'),
('subphone_feats' , 'full' , 'Labels', 'subphone_feats'),
('additional_features', {} , 'Labels', 'additional_features'),
('xpath_file_name', os.path.join(self.work_dir, 'data/xml_labels/xpaths.txt'), 'Labels', 'xpath_file_name'),
('label_config_file', 'configuration/examplelabelconfigfile.py', 'Labels', 'label_config'),
('add_frame_features', True, 'Labels', 'add_frame_features'),
('fill_missing_values', False, 'Labels', 'fill_missing_values'),
('xpath_label_align_dir', os.path.join(self.work_dir, 'data/label_state_align'), 'Labels', 'xpath_label_align'),
('enforce_silence', False, 'Labels', 'enforce_silence'),
('remove_silence_using_binary_labels', False, 'Labels', 'remove_silence_using_binary_labels'),
('precompile_xpaths', True, 'Labels', 'precompile_xpaths'),
('iterate_over_frames', True, 'Labels', 'iterate_over_frames'),
('appended_input_dim' , 0 , 'Labels' , 'appended_input_dim'),
('buffer_size', 200000, 'Data', 'buffer_size'),
('train_file_number', impossible_int, 'Data','train_file_number'),
('valid_file_number', impossible_int, 'Data','valid_file_number'),
('test_file_number' , impossible_int, 'Data','test_file_number'),
('log_path', os.path.join(self.work_dir, 'log'), 'Paths', 'log_path'),
('log_file', '', 'Paths','log_file'),
('log_config_file', 'configuration/exampleloggingconfigfile.conf', 'Paths', 'log_config_file'),
('sptk_bindir', 'tools/bin/SPTK-3.9', 'Paths','sptk'),
('straight_bindir', 'tools/bin/straight', 'Paths','straight'),
('world_bindir', 'tools/bin/WORLD', 'Paths','world'),
('network_type' , 'RNN' , 'Architecture', 'network_type'),
('model_type' , 'DNN' , 'Architecture', 'model_type'),
('hidden_layer_type' , ['TANH', 'TANH', 'TANH', 'TANH', 'TANH', 'TANH'] , 'Architecture', 'hidden_layer_type'),
('output_layer_type' , 'LINEAR' , 'Architecture', 'output_layer_type'),
('sequential_training' , False , 'Architecture', 'sequential_training'),
('dropout_rate' , 0.0 , 'Architecture', 'dropout_rate'),
## some config variables for token projection DNN
('scheme' , 'stagewise' , 'Architecture', 'scheme'),
('index_to_project' , 0 , 'Architecture', 'index_to_project'),
('projection_insize' , 10000 , 'Architecture', 'projection_insize'),
('projection_outsize' , 10 , 'Architecture', 'projection_outsize'),
('initial_projection_distrib' , 'gaussian' , 'Architecture', 'initial_projection_distrib'),
('projection_weights_output_dir' , 'some_path', 'Architecture', 'projection_weights_output_dir'),
('layers_with_projection_input' , [0], 'Architecture', 'layers_with_projection_input'),
('projection_learning_rate_scaling' , 1.0, 'Architecture', 'projection_learning_rate_scaling'),
('learning_rate' , 0.0002 , 'Architecture', 'learning_rate'),
('l2_reg' , 0.00001 , 'Architecture', 'L2_regularization'),
('l1_reg' , 0.0 , 'Architecture', 'L1_regularization'),
('batch_size' , 16 , 'Architecture', 'batch_size'),
('training_epochs' , 25 , 'Architecture', 'training_epochs'),
('hidden_activation' , 'tanh' , 'Architecture', 'hidden_activation'),
('output_activation' , 'linear' , 'Architecture', 'output_activation'),
('do_pretraining' , False , 'Architecture', 'do_pretraining'),
('pretraining_epochs' , 10 , 'Architecture', 'pretraining_epochs'),
('pretraining_lr' , 0.0001 , 'Architecture', 'pretraining_lr'),
('hidden_layer_size' , [1024, 1024, 1024, 1024, 1024, 1024], 'Architecture', 'hidden_layer_size'),
('private_hidden_sizes' , [1024] , 'Architecture', 'private_hidden_sizes'),
('stream_weights' , [1.0] , 'Architecture', 'stream_weights'),
('private_l2_reg' , 0.00001 , 'Architecture', 'private_l2_reg'),
('warmup_epoch' , 5 , 'Architecture', 'warmup_epoch'),
('warmup_momentum' , 0.3 , 'Architecture', 'warmup_momentum'),
('momentum' , 0.9 , 'Architecture', 'momentum'),
('mdn_component', 1 , 'Architecture', 'mdn_component'),
('var_floor', 0.01 , 'Architecture', 'var_floor'),
('beta_opt', False , 'Architecture', 'beta_opt'),
('eff_sample_size', 0.8 , 'Architecture', 'eff_sample_size'),
('mean_log_det', -100.0 , 'Architecture', 'mean_log_det'),
('start_from_trained_model', '_' , 'Architecture', 'start_from_trained_model'),
('use_rprop', 0 , 'Architecture', 'use_rprop'),
('mgc_dim' ,60 ,'Outputs','mgc'),
('fft_dim' ,512 ,'Outputs','fft'),
('samp_dim' ,180 ,'Outputs','samp'),
('dmgc_dim',60 * 3 ,'Outputs','dmgc'),
('vuv_dim' ,1 ,'Outputs','vuv'),
('lf0_dim' ,1 ,'Outputs','lf0'),
('dlf0_dim',1 * 3 ,'Outputs','dlf0'),
('bap_dim' ,25 ,'Outputs','bap'),
('dbap_dim',25 * 3 ,'Outputs','dbap'),
('cmp_dim' ,(60 * 3) + 1 + (1 * 3) + (25 * 3) ,'Outputs','cmp'),
('stepw_dim' , 55, 'Outputs', 'stepw_dim'),
('temp_sp_dim' , 1025, 'Outputs', 'temp_sp_dim'),
('seglf0_dim' , 7 , 'Outputs', 'seglf0_dim'),
('delta_win' , [-0.5, 0.0, 0.5] , 'Outputs', 'delta_win'),
('acc_win' , [1.0, -2.0, 1.0] , 'Outputs', 'acc_win'),
('do_MLPG' , True , 'Outputs', 'do_MLPG'),
## for GlottHMM
('F0_dim' ,1 ,'Outputs','F0'),
('dF0_dim',1 * 3 ,'Outputs','dF0'),
('Gain_dim' ,1 ,'Outputs','Gain'),
('dGain_dim',1 * 3 ,'Outputs','dGain'),
('HNR_dim' ,5 ,'Outputs','HNR'),
('dHNR_dim',5 * 3 ,'Outputs','dHNR'),
('LSF_dim' ,30 ,'Outputs','LSF'),
('dLSF_dim',30 * 3 ,'Outputs','dLSF'),
('LSFsource_dim' ,10 ,'Outputs','LSFsource'),
('dLSFsource_dim',10 * 3 ,'Outputs','dLSFsource'),
## for joint dur:-
('seq_dur_dim' ,1 ,'Outputs','seq_dur'),
('remove_silence_from_dur' , True , 'Outputs', 'remove_silence_from_dur'),
('dur_dim' ,5 ,'Outputs','dur'),
('dur_feature_type' , 'numerical' , 'Outputs', 'dur_feature_type'),
('output_feature_normalisation', 'MVN', 'Outputs', 'output_feature_normalisation'),
('multistream_switch' , False , 'Streams', 'multistream_switch'),
# ('use_private_hidden' , False, 'Streams', 'use_private_hidden'),
('output_features' , ['mgc','lf0', 'vuv', 'bap'], 'Streams', 'output_features'),
('gen_wav_features', ['mgc', 'bap', 'lf0'] , 'Streams', 'gen_wav_features'),
# ('stream_mgc_hidden_size' , 192 , 'Streams', 'stream_mgc_hidden_size'),
# ('stream_lf0_hidden_size' , 32 , 'Streams', 'stream_lf0_hidden_size'),
# ('stream_vuv_hidden_size' , 32 , 'Streams', 'stream_vuv_hidden_size'),
# ('stream_bap_hidden_size' , 128 , 'Streams', 'stream_bap_hidden_size'),
# ('stream_stepw_hidden_size' , 64 , 'Streams', 'stream_stepw_hidden_size'),
# ('stream_seglf0_hidden_size', 64 , 'Streams', 'stream_seglf0_hidden_size'),
# ('stream_cmp_hidden_size' , 256 , 'Streams', 'stream_cmp_hidden_size'), #when multi-stream is disabled, use this to indicate the final hidden layer size
#if this is also not provided, use the top common hidden layer size
## Glott HMM -- dummy values -- haven't used private streams:--
# ('stream_F0_hidden_size' , 192 , 'Streams', 'stream_F0_hidden_size'),
# ('stream_Gain_hidden_size' , 192 , 'Streams', 'stream_Gain_hidden_size'),
# ('stream_HNR_hidden_size' , 192 , 'Streams', 'stream_HNR_hidden_size'),
# ('stream_LSF_hidden_size' , 192 , 'Streams', 'stream_LSF_hidden_size'),
# ('stream_LSFsource_hidden_size' , 192 , 'Streams', 'stream_LSFsource_hidden_size'),
## joint dur -- dummy values -- haven't used private streams:--
# ('stream_dur_hidden_size' , 192 , 'Streams', 'stream_dur_hidden_size'),
# ('stream_sp_hidden_size' , 1024, 'Streams', 'stream_sp_hidden_size'),
# ('stream_weight_mgc' , 1.0, 'Streams', 'stream_weight_mgc'),
# ('stream_weight_lf0' , 3.0, 'Streams', 'stream_weight_lf0'),
# ('stream_weight_vuv' , 1.0, 'Streams', 'stream_weight_vuv'),
# ('stream_weight_bap' , 1.0, 'Streams', 'stream_weight_bap'),
# ('stream_weight_stepw' , 0.0, 'Streams', 'stream_weight_stepw'),
# ('stream_weight_seglf0', 1.0, 'Streams', 'stream_weight_seglf0'),
# ('stream_weight_sp' , 1.0, 'Streams', 'stream_weight_sp'),
## Glott HMM - unused?
# ('stream_weight_F0' , 1.0, 'Streams', 'stream_weight_F0'),
# ('stream_weight_Gain' , 1.0, 'Streams', 'stream_weight_Gain'),
# ('stream_weight_HNR' , 1.0, 'Streams', 'stream_weight_HNR'),
# ('stream_weight_LSF' , 1.0, 'Streams', 'stream_weight_LSF'),
# ('stream_weight_LSFsource' , 1.0, 'Streams', 'stream_weight_LSFsource'),
## dur - unused?
# ('stream_weight_dur' , 1.0, 'Streams', 'stream_weight_dur'),
# ('stream_lf0_lr' , 0.5, 'Streams', 'stream_lf0_lr'),
# ('stream_vuv_lr' , 0.5, 'Streams', 'stream_vuv_lr'),
('vocoder_type' ,'STRAIGHT' ,'Waveform' , 'vocoder_type'),
('sr' ,48000 ,'Waveform' , 'samplerate'),
('fl' ,4096 ,'Waveform' , 'framelength'),
('shift' ,1000 * 240 / 48000 ,'Waveform' , 'frameshift'),
('sp_dim' ,(4096 / 2) + 1 ,'Waveform' , 'sp_dim'),
# fw_alpha: 'Bark' or 'ERB' allowing deduction of alpha, or an explicit float value (e.g. 0.77)
('fw_alpha' ,0.77 ,'Waveform' , 'fw_alpha'),
('pf_coef' ,1.4 ,'Waveform' , 'postfilter_coef'),
('co_coef' ,2047 ,'Waveform' , 'minimum_phase_order'),
('use_cep_ap' ,True ,'Waveform' , 'use_cep_ap'),
('do_post_filtering',True ,'Waveform' , 'do_post_filtering'),
('apply_GV' ,False ,'Waveform' , 'apply_GV'),
('test_synth_dir' ,'test_synthesis/wav' ,'Waveform' , 'test_synth_dir'),
('DurationModel' , False, 'Processes', 'DurationModel'),
('AcousticModel' , False, 'Processes', 'AcousticModel'),
('GenTestList' , False, 'Processes', 'GenTestList'),
('NORMLAB' , False, 'Processes', 'NORMLAB'),
('MAKEDUR' , False, 'Processes', 'MAKEDUR'),
('MAKECMP' , False, 'Processes', 'MAKECMP'),
('NORMCMP' , False, 'Processes', 'NORMCMP'),
('TRAINDNN' , False, 'Processes', 'TRAINDNN'),
('DNNGEN' , False, 'Processes', 'DNNGEN'),
('GENWAV' , False, 'Processes', 'GENWAV'),
('CALMCD' , False, 'Processes', 'CALMCD'),
('NORMSTEP' , False, 'Processes', 'NORMSTEP'),
('GENBNFEA' , False, 'Processes', 'GENBNFEA'),
('mgc_ext' , '.mgc' , 'Extensions', 'mgc_ext'),
('bap_ext' , '.bap' , 'Extensions', 'bap_ext'),
('lf0_ext' , '.lf0' , 'Extensions', 'lf0_ext'),
('cmp_ext' , '.cmp' , 'Extensions', 'cmp_ext'),
('lab_ext' , '.lab' , 'Extensions', 'lab_ext'),
('utt_ext' , '.utt' , 'Extensions', 'utt_ext'),
('stepw_ext' , '.stepw' , 'Extensions', 'stepw_ext'),
('sp_ext' , '.sp' , 'Extensions', 'sp_ext'),
##Ashish
('fft_ext' , '.fft' , 'Extensions', 'fft_ext'),
('samp_ext' , '.samp' , 'Extensions', 'samp_ext'),
## GlottHMM
('F0_ext' , '.F0' , 'Extensions', 'F0_ext'),
('Gain_ext' , '.Gain' , 'Extensions', 'Gain_ext'),
('HNR_ext' , '.HNR' , 'Extensions', 'HNR_ext'),
('LSF_ext' , '.LSF' , 'Extensions', 'LSF_ext'),
('LSFsource_ext' , '.LSFsource' , 'Extensions', 'LSFsource_ext'),
## joint dur
('dur_ext' , '.dur' , 'Extensions', 'dur_ext'),
]
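# Illustrative example (assumption, not a file shipped with the system): a
# minimal INI-style configuration file that the conversion loop below would
# accept for a few of the options listed above.
#
#   [Paths]
#   work = /path/to/experiment
#
#   [Architecture]
#   hidden_layer_size = [1024, 1024, 1024, 1024, 1024, 1024]
#
#   [Processes]
#   TRAINDNN = True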
# this uses exec(...) which is potentially dangerous since arbitrary code could be executed
for (variable,default,section,option) in user_options:
value=None
try:
# first, look for a user-set value for this variable in the config file
value = configparser.get(section,option)
user_or_default='user'
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
# use default value, if there is one
if (default == None) or \
(default == '') or \
((type(default) == int) and (default == impossible_int)) or \
((type(default) == float) and (default == impossible_float)) :
logger.critical('%20s has no value!' % (section+":"+option) )
raise Exception
else:
value = default
user_or_default='default'
if type(default) == str:
exec('self.%s = "%s"' % (variable,value))
elif type(default) == int:
exec('self.%s = int(%s)' % (variable,value))
elif type(default) == float:
exec('self.%s = float(%s)' % (variable,value))
elif type(default) == bool:
exec('self.%s = bool(%s)' % (variable,value))
elif type(default) == list:
exec('self.%s = list(%s)' % (variable,value))
elif type(default) == dict:
exec('self.%s = dict(%s)' % (variable,value))
else:
logger.critical('Variable %s has default value of unsupported type %s',variable,type(default))
raise Exception('Internal error in configuration settings: unsupported default type')
logger.info('%20s has %7s value %s' % (section+":"+option,user_or_default,value) )
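# Note (suggestion, not part of the original behaviour): the exec() calls above
# could be replaced by setattr, together with ast.literal_eval for the numeric
# and container types, which avoids executing arbitrary strings read from the
# config file, e.g.
#   setattr(self, variable, ast.literal_eval(value))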
self.combined_feature_name = ''
for feature_name in self.output_features:
self.combined_feature_name += '_'
self.combined_feature_name += feature_name
self.combined_model_name = self.model_type
for hidden_type in self.hidden_layer_type:
self.combined_model_name += '_' + hidden_type
self.combined_model_name += '_' + self.output_layer_type
def complete_configuration(self):
# to be called after reading any user-specific settings
# because the values set here depend on those user-specific settings
# get a logger
logger = logging.getLogger("configuration")
# tools
self.SPTK = {
'X2X' : os.path.join(self.sptk_bindir,'x2x'),
'MERGE' : os.path.join(self.sptk_bindir,'merge'),
'BCP' : os.path.join(self.sptk_bindir,'bcp'),
'MLPG' : os.path.join(self.sptk_bindir,'mlpg'),
'MGC2SP' : os.path.join(self.sptk_bindir,'mgc2sp'),
'VSUM' : os.path.join(self.sptk_bindir,'vsum'),
'VSTAT' : os.path.join(self.sptk_bindir,'vstat'),
'SOPR' : os.path.join(self.sptk_bindir,'sopr'),
'VOPR' : os.path.join(self.sptk_bindir,'vopr'),
'FREQT' : os.path.join(self.sptk_bindir,'freqt'),
'C2ACR' : os.path.join(self.sptk_bindir,'c2acr'),
'MC2B' : os.path.join(self.sptk_bindir,'mc2b'),
'B2MC' : os.path.join(self.sptk_bindir,'b2mc')
}
# self.NND = {
# 'FEATN' : os.path.join(self.nndata_bindir,'FeatureNormalization'),
# 'LF0IP' : os.path.join(self.nndata_bindir,'F0Interpolation'),
# 'F0VUV' : os.path.join(self.nndata_bindir,'F0VUVComposition')
# }
self.STRAIGHT = {
'SYNTHESIS_FFT' : os.path.join(self.straight_bindir, 'synthesis_fft'),
'BNDAP2AP' : os.path.join(self.straight_bindir, 'bndap2ap'),
}
self.WORLD = {
'SYNTHESIS' : os.path.join(self.world_bindir, 'synth'),
'ANALYSIS' : os.path.join(self.world_bindir, 'analysis'),
}
# STILL TO DO - test that all the above tools exist and are executable
### dimensions for the output features
### each key name must match a key of self.in_dimension_dict.
### if dynamic features are not wanted, reuse the same dimension as in self.in_dimension_dict
### if lf0 is one of the acoustic features, out_dimension_dict must also contain a 'vuv' key
### note: the ordering of the keys is not controlled here
self.in_dir_dict = {} ## input directory for each raw acoustic (output of NN) feature
self.out_dimension_dict = {}
self.in_dimension_dict = {}
self.private_hidden_sizes = []
self.stream_weights = []
logger.debug('setting up output features')
self.cmp_dim = 0
for feature_name in self.output_features:
logger.debug(' %s' % feature_name)
in_dimension = 0
out_dimension = 0
in_directory = ''
# current_stream_hidden_size = 0
# current_stream_weight = 0.0
# stream_lr_ratio = 0.0
if feature_name == 'mgc':
in_dimension = self.mgc_dim
out_dimension = self.dmgc_dim
in_directory = self.in_mgc_dir
elif feature_name == 'fft':
in_dimension = self.fft_dim
out_dimension = self.fft_dim
in_directory = self.in_fft_dir
elif feature_name == 'samp':
in_dimension = self.samp_dim
out_dimension = self.samp_dim
in_directory = self.in_samp_dir
# current_stream_hidden_size = self.stream_mgc_hidden_size
# current_stream_weight = self.stream_weight_mgc
elif feature_name == 'bap':
in_dimension = self.bap_dim
out_dimension = self.dbap_dim
in_directory = self.in_bap_dir
# current_stream_hidden_size = self.stream_bap_hidden_size
# current_stream_weight = self.stream_weight_bap
elif feature_name == 'lf0':
in_dimension = self.lf0_dim
out_dimension = self.dlf0_dim
in_directory = self.in_lf0_dir
# current_stream_hidden_size = self.stream_lf0_hidden_size
# current_stream_weight = self.stream_weight_lf0
elif feature_name == 'vuv':
out_dimension = 1
# current_stream_hidden_size = self.stream_vuv_hidden_size
# current_stream_weight = self.stream_weight_vuv
elif feature_name == 'stepw':
in_dimension = self.stepw_dim
out_dimension = self.stepw_dim
in_directory = self.in_stepw_dir
# current_stream_hidden_size = self.stream_stepw_hidden_size
# current_stream_weight = self.stream_weight_stepw
elif feature_name == 'sp':
in_dimension = self.sp_dim
out_dimension = self.sp_dim
in_directory = self.in_sp_dir
# current_stream_hidden_size = self.stream_sp_hidden_size
# current_stream_weight = self.stream_weight_sp
elif feature_name == 'seglf0':
in_dimension = self.seglf0_dim
out_dimension = self.seglf0_dim
in_directory = self.in_seglf0_dir
# current_stream_hidden_size = self.stream_seglf0_hidden_size
# current_stream_weight = self.stream_weight_seglf0
## for GlottHMM (start)
elif feature_name == 'F0':
in_dimension = self.F0_dim
out_dimension = self.dF0_dim
in_directory = self.in_F0_dir
# current_stream_hidden_size = self.stream_F0_hidden_size
# current_stream_weight = self.stream_weight_F0
elif feature_name == 'Gain':
in_dimension = self.Gain_dim
out_dimension = self.dGain_dim
in_directory = self.in_Gain_dir
# current_stream_hidden_size = self.stream_Gain_hidden_size
# current_stream_weight = self.stream_weight_Gain
elif feature_name == 'HNR':
in_dimension = self.HNR_dim
out_dimension = self.dHNR_dim
in_directory = self.in_HNR_dir
# current_stream_hidden_size = self.stream_HNR_hidden_size
# current_stream_weight = self.stream_weight_HNR
elif feature_name == 'LSF':
in_dimension = self.LSF_dim
out_dimension = self.dLSF_dim
in_directory = self.in_LSF_dir
# current_stream_hidden_size = self.stream_LSF_hidden_size
# current_stream_weight = self.stream_weight_LSF
elif feature_name == 'LSFsource':
in_dimension = self.LSFsource_dim
out_dimension = self.dLSFsource_dim
in_directory = self.in_LSFsource_dir
# current_stream_hidden_size = self.stream_LSFsource_hidden_size
# current_stream_weight = self.stream_weight_LSFsource
## for GlottHMM (end)
## for joint dur (start)
elif feature_name == 'dur':
in_dimension = self.dur_dim
out_dimension = self.dur_dim
in_directory = self.in_dur_dir
# current_stream_hidden_size = self.stream_dur_hidden_size
# current_stream_weight = self.stream_weight_dur
## for joint dur (end)
else:
logger.critical('%s feature is not supported right now. Please change the configuration.py to support it' %(feature_name))
raise Exception
logger.info(' in_dimension: %d' % in_dimension)
logger.info(' out_dimension : %d' % out_dimension)
logger.info(' in_directory : %s' % in_directory)
# logger.info(' current_stream_hidden_size: %d' % current_stream_hidden_size)
# logger.info(' current_stream_weight: %d' % current_stream_weight)
if in_dimension > 0:
self.in_dimension_dict[feature_name] = in_dimension
if in_directory == '':
logger.critical('please provide the path for %s feature' %(feature_name))
raise Exception
if out_dimension < in_dimension:
logger.critical('the dimensionality setting for %s feature is not correct!' %(feature_name))
raise Exception
self.in_dir_dict[feature_name] = in_directory
if out_dimension > 0:
self.out_dimension_dict[feature_name] = out_dimension
# if (current_stream_hidden_size <= 0 or current_stream_weight <= 0.0) and self.multistream_switch:
# logger.critical('the hidden layer size or stream weight is not correctly set for %s feature' %(feature_name))
# raise
# if self.multistream_switch:
# self.private_hidden_sizes.append(current_stream_hidden_size)
# self.stream_weights.append(current_stream_weight)
self.cmp_dim += out_dimension
# if not self.multistream_switch:
# self.private_hidden_sizes = []
# if self.stream_cmp_hidden_size > 0:
# self.private_hidden_sizes.append(self.stream_cmp_hidden_size)
# else:
# self.private_hidden_sizes.append(self.hidden_layer_size[-1]) ## use the same number of hidden layers if multi-stream is not supported
# self.stream_weights = []
# self.stream_weights.append(1.0)
self.stream_lr_weights = []
self.multistream_outs = []
if self.multistream_switch:
for feature_name in self.out_dimension_dict.keys():
self.multistream_outs.append(self.out_dimension_dict[feature_name])
# stream_lr_ratio = 0.5
# if feature_name == 'lf0':
# stream_lr_ratio = self.stream_lf0_lr
# if feature_name == 'vuv':
# stream_lr_ratio = self.stream_vuv_lr
# self.stream_lr_weights.append(stream_lr_ratio)
else:
### the new cmp is not the HTS one; it includes all the features, i.e. those for the main task together with those for any additional tasks
self.multistream_outs.append(self.cmp_dim)
# self.stream_lr_weights.append(0.5)
logger.info('multistream dimensions: %s' %(self.multistream_outs))
# to check whether all the input and output features' file extensions are here
self.file_extension_dict = {}
self.file_extension_dict['mgc'] = self.mgc_ext
self.file_extension_dict['samp'] = self.samp_ext
self.file_extension_dict['fft'] = self.fft_ext
self.file_extension_dict['lf0'] = self.lf0_ext
self.file_extension_dict['bap'] = self.bap_ext
self.file_extension_dict['stepw'] = self.stepw_ext
self.file_extension_dict['cmp'] = self.cmp_ext
self.file_extension_dict['seglf0'] = self.lf0_ext
## gHMM:
self.file_extension_dict['F0'] = self.F0_ext
self.file_extension_dict['Gain'] = self.Gain_ext
self.file_extension_dict['HNR'] = self.HNR_ext
self.file_extension_dict['LSF'] = self.LSF_ext
self.file_extension_dict['LSFsource'] = self.LSFsource_ext
## joint dur
self.file_extension_dict['dur'] = self.dur_ext
## hyper parameters for the DNN. These need to be set by the user, as they depend on the architecture
self.hyper_params = { 'learning_rate' : '0.0002', ###
'l2_reg' : '0.00001',
'l1_reg' : '0.0',
'batch_size' : '16',
'training_epochs' : '25',
'early_stop_epochs' : '5',
'hidden_activation' : 'tanh',
'output_activation' : 'linear',
'do_pretraining' : False,
'pretraining_epochs' : '10',
'pretraining_lr' : '0.0001'}
self.hyper_params['warmup_momentum'] = self.warmup_momentum
self.hyper_params['momentum'] = self.momentum
self.hyper_params['warmup_epoch'] = self.warmup_epoch
self.hyper_params['learning_rate'] = self.learning_rate
self.hyper_params['l2_reg'] = self.l2_reg
self.hyper_params['l1_reg'] = self.l1_reg
self.hyper_params['batch_size'] = self.batch_size
self.hyper_params['training_epochs'] = self.training_epochs
self.hyper_params['hidden_activation'] = self.hidden_activation
self.hyper_params['output_activation'] = self.output_activation
self.hyper_params['do_pretraining'] = self.do_pretraining
self.hyper_params['pretraining_epochs'] = self.pretraining_epochs
self.hyper_params['pretraining_lr'] = self.pretraining_lr
self.hyper_params['hidden_layer_size'] = self.hidden_layer_size
self.hyper_params['use_rprop'] = self.use_rprop
# self.hyper_params['private_hidden_sizes'] = self.private_hidden_sizes
# self.hyper_params['stream_weights'] = self.stream_weights
# self.hyper_params['private_l2_reg'] = self.private_l2_reg
# self.hyper_params['stream_lr_weights'] = self.stream_lr_weights
# self.hyper_params['use_private_hidden'] = self.use_private_hidden
self.hyper_params['model_type'] = self.model_type
self.hyper_params['hidden_layer_type'] = self.hidden_layer_type
self.hyper_params['index_to_project'] = self.index_to_project
self.hyper_params['projection_insize'] = self.projection_insize
self.hyper_params['projection_outsize'] = self.projection_outsize
self.hyper_params['initial_projection_distrib'] = self.initial_projection_distrib
self.hyper_params['layers_with_projection_input'] = self.layers_with_projection_input
self.hyper_params['projection_learning_rate_scaling'] = self.projection_learning_rate_scaling
self.hyper_params['sequential_training'] = self.sequential_training
self.hyper_params['dropout_rate'] = self.dropout_rate
for hidden_type in self.hidden_layer_type:
if 'LSTM' in hidden_type or 'RNN' in hidden_type or 'GRU' in hidden_type:
self.hyper_params['sequential_training'] = True    ## recurrent layers require sequential training
#To be recorded in the logging file for reference
for param_name in self.hyper_params.keys():
logger.info('%s : %s' %(param_name, str(self.hyper_params[param_name])))
# input files
# set up the label processing
# currently must be one of the label styles handled below
if self.label_style == 'HTS':
# xpath_file_name is now obsolete - to remove
self.xpath_file_name=None
elif self.label_style == 'HTS_duration':
self.xpath_file_name=None
elif self.label_style == 'composed':
self.question_file_name=None
else:
logger.critical('unsupported label style requested: %s' % self.label_style)
raise Exception
def logging_configuration(self):
# get a logger
logger = logging.getLogger("configuration")
# logging configuration, see here for format description
# https://docs.python.org/2/library/logging.config.html#logging-config-fileformat
# what we really want to do is this dictionary-based configuration, but it's only available from Python 2.7 onwards
# logging.config.dictConfig(cfg.logging_configuration)
# so we will settle for this file-based configuration procedure instead
try:
# open the logging configuration file
fp = open(self.log_config_file,'r')
logger.debug("loading logging configuration from %s" % self.log_config_file)
# load the logging configuration file into a string
config_string = fp.read()
fp.close()
except ValueError:
# this means that cfg.log_config_file does not exist and that no default was provided
# NOTE: currently this will never run
logging.warning('no logging configuration file provided - using default (console only, DEBUG level)')
# set up a default level and default handlers
# first, get the root logger - all other loggers will inherit its configuration
rootlogger = logging.getLogger("")
# default logging level is DEBUG (a highly-verbose level)
rootlogger.setLevel(logging.DEBUG)
# add a handler to write to console
ch = logging.StreamHandler()
rootlogger.addHandler(ch)
# and a formatter
formatter = logging.Formatter('%(asctime)s %(levelname)8s%(name)15s: %(message)s')
ch.setFormatter(formatter)
except IOError:
# this means that open(...) threw an error
logger.critical('could not load logging configuration file %s' % self.log_config_file)
raise
else:
# inject the config lines for the file handler, now that we know the name of the file it will write to
if not os.path.exists(self.log_path):
os.makedirs(self.log_path, 0755)
log_file_name = '%s_%s_%d_%d_%d_%d_%f_%s.log' %(self.combined_model_name, self.combined_feature_name, self.train_file_number,
self.cmp_dim, len(self.hidden_layer_size),
self.hidden_layer_size[-1], self.learning_rate,
datetime.datetime.now().strftime("%I_%M%p_%B_%d_%Y"))
self.log_file = os.path.join(self.log_path, log_file_name)
to_inject="""
[handler_file]
class=FileHandler
formatter=file
args=('"""+self.log_file+"""', 'w')
"""
# config file format doesn't allow leading white space on lines, so remove it with dedent
config_string = config_string + textwrap.dedent(to_inject)
try:
# pass that string as a filehandle
fh = StringIO.StringIO(config_string)
logging.config.fileConfig(fh)
fh.close()
logger.info("logging is now fully configured")
except IOError:
logger.critical('could not configure logging: perhaps log file path is wrong?')
sys.exit(1)
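# ---------------------------------------------------------------------------
# Standalone sketch (not part of the configuration class above) of the
# fileConfig-injection pattern used in logging_configuration(): a base config
# string that already declares a 'file' handler key is completed at runtime by
# appending a [handler_file] section naming the log file, and the combined
# text is loaded through logging.config.fileConfig. The base configuration
# below is a minimal illustrative example, not the file the toolkit ships.
def _demo_inject_file_handler(log_file_name):
    import logging.config
    import textwrap
    try:
        from StringIO import StringIO      # Python 2, as used above
    except ImportError:
        from io import StringIO            # Python 3
    base_config = textwrap.dedent("""\
        [loggers]
        keys=root
        [handlers]
        keys=console,file
        [formatters]
        keys=file
        [logger_root]
        level=DEBUG
        handlers=console,file
        [handler_console]
        class=StreamHandler
        formatter=file
        args=(sys.stderr,)
        [formatter_file]
        format=%(asctime)s %(levelname)8s%(name)15s: %(message)s
        """)
    # the injected section must not have leading white space, hence dedent
    injected = textwrap.dedent("""\
        [handler_file]
        class=FileHandler
        formatter=file
        args=('""" + log_file_name + """', 'w')
        """)
    logging.config.fileConfig(StringIO(base_config + injected))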
|
|
#!/usr/bin/python3 -tt
# -*- coding: utf-8 -*-
# from twisted.internet import reactor
from twisted.internet import task
from twisted.python import log
from html.parser import HTMLParser
from requests.auth import HTTPBasicAuth
from requests.exceptions import RequestException
import requests
import re
import datetime
from builtins import KeyError
__all__ = ['TopologyAgent']
class ConMapHTMLParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.table = []
self.phase = 0
self.start_tags = {'table', 'tr', 'td'}
self.classes = {
'connmap-clr',
'connmap-dis',
'connmap-con',
'connmap-lock',
}
self.patch_pannel = None
def handle_starttag(self, tag, attrs):
if self.phase == 0 and tag == 'tr':
self.phase = 1
elif self.phase == 1:
if tag == 'td':
my_attrs = dict(attrs)
if (
'class' in my_attrs and
my_attrs['class'] in self.classes and
'title' in my_attrs
):
if self.patch_pannel is None:
self.patch_pannel = []
self.patch_pannel.append(my_attrs)
else:
self.phase_to_zero()
else:
self.phase_to_zero()
def phase_to_zero(self):
self.phase = 0
if self.patch_pannel:
self.table.append(self.patch_pannel)
self.patch_pannel = None
def handle_endtag(self, tag):
if self.phase == 1 and tag == 'tr':
self.phase_to_zero()
def get_table(self):
return self.table
class AGUpdateException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class AGUpdater():
def __init__(self, db, ag):
self.db = db
self.ag = ag
def update(self):
group = dict()
tables = dict()
ans = self.db.analyzer.filter_by(analyzer_group=self.ag.uuid).all()
for an in ans:
group[an.analyzer_id_in_group] = dict()
tables[an.analyzer_id_in_group] = self.get_data(
an.data_url, an.username, an.password
)
for pp in self.db.patch_panel.filter_by(analyzer=an.uuid).all():
group[an.analyzer_id_in_group][pp.pp_id_in_analyzer] = {
port.position_on_pp: port for port in self.db.port.filter_by(patch_panel=pp.uuid).all()
}
connections = self.get_connections(ans, tables)
for an_key, an in group.items():
for pp_key, pp in an.items():
for port_key, port in pp.items():
try:
connection = connections[an_key][pp_key][port_key]
except KeyError:
log.msg(
'No data about port analyzer group: {0}, '
'unit: {1}, patch panel: {2}, port: {3}'
.format(self.ag.name, an_key, pp_key, port_key)
)
continue
if connection is not None:
try:
port.connect_to = group[connection[0]][connection[1]][connection[2]].uuid
except KeyError:
log.msg(
'No data in database about port analyzer group: {0}, '
'unit: {1}, patch panel: {2}, port: {3}'
.format(self.ag.name, connection[0], connection[1], connection[2])
)
else:
port.connect_to = None
self.ag.last_update = datetime.datetime.now()
self.db.commit()
def get_data(self, url, username, password):
s = requests.Session()
r = s.get(url, timeout=(2, 5))
if r.status_code != 401:
self.db.commit()
raise AGUpdateException((
'Invalid response from server {0} in phase {1} of '
'authentication, status code {2}, expected {3}'
).format(url, 1, r.status_code, 401)
)
r = s.get(url, auth=HTTPBasicAuth(username, password))
if r.status_code != 200:
self.db.commit()
raise AGUpdateException((
'Invalid response from server {0} in phase {1} of '
'authentication, status code {2}, expected {3}'
).format(url, 2, r.status_code, 200)
)
p = ConMapHTMLParser()
p.feed(r.text)
return p.get_table()
def get_connections(self, sources, tables):
analyzers = dict()
for source in sources:
table = tables[source.analyzer_id_in_group]
analyzers[source.analyzer_id_in_group] = dict()
for db in range(len(table)):
patch_panel = dict()
for port in range(len(table[db])):
patch_panel[port + 1] = None
analyzers[source.analyzer_id_in_group][db + 1] = patch_panel
for source in sources:
table = tables[source.analyzer_id_in_group]
for db in range(len(table)):
for port in range(len(table[db])):
if table[db][port]['class'] == 'connmap-con':
connection = re.split(
r'[ #]', table[db][port]['title']
)[2:]
analyzer_id = int(connection[1])
patch_panel = int(connection[3]) - 1
port_id = int(connection[5]) - 1
analyzers[source.analyzer_id_in_group][db + 1][port + 1] = \
(analyzer_id, patch_panel + 1, port_id + 1)
return analyzers
class TopologyAgent(object):
def __init__(self, db, update_period):
self.db = db
self.update_period = int(update_period)
def start(self):
"""Start the periodic checking."""
self.periodic = task.LoopingCall(self.update)
self.periodic.start(self.update_period, True)
def update_analyzer_group(self, ag_updater):
try:
ag_updater.update()
except (RequestException, AGUpdateException, Exception) as e:
log.msg(
'Failed to update analyzer group {0}, error: {1!r}'
.format(ag_updater.ag.name, e)
)
finally:
ag_updater.db.commit()
def update(self):
log.msg('Topology sync started')
for ag in self.db.analyzer_group.all():
# thread pool over ag, access to db can't be threaded in twisted
# (sqlalchemy is not compatible with twisted thread pool)
# reactor.callInThread(
# self.update_analyzer_group, AGUpdater(self.db, ag)
# )
self.update_analyzer_group(AGUpdater(self.db, ag))
log.msg('Topology sync finished')
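# Usage sketch (illustrative, not part of the original module): wiring the
# agent into the twisted reactor. The `db` object below is a placeholder for
# the SQLAlchemy-backed facade the surrounding application normally provides
# (something exposing the .analyzer_group, .analyzer, .patch_panel and .port
# query helpers plus .commit()); it is not defined in this file.
if __name__ == '__main__':
    import sys
    from twisted.internet import reactor
    log.startLogging(sys.stdout)                 # route twisted.python.log to stdout
    db = None                                    # placeholder: supply the real database facade here
    agent = TopologyAgent(db, update_period=300)
    agent.start()                                # schedules update() every 300 s via task.LoopingCall
    reactor.run()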
# vim:set sw=4 ts=4 et:
# -*- coding: utf-8 -*-
|
|
# Generated by Django 2.2.4 on 2019-08-23 17:35
import apps.adjudication.fields
from django.conf import settings
import django.contrib.postgres.fields
import django.contrib.postgres.fields.jsonb
import django.contrib.postgres.fields.ranges
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import django_fsm
import model_utils.fields
import phonenumber_field.modelfields
import timezone_field.fields
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Appearance',
fields=[
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('status', django_fsm.FSMIntegerField(choices=[(-30, 'Disqualified'), (-20, 'Scratched'), (-10, 'Completed'), (0, 'New'), (7, 'Built'), (10, 'Started'), (20, 'Finished'), (25, 'Variance'), (30, 'Verified'), (40, 'Advanced')], default=0, help_text='DO NOT CHANGE MANUALLY unless correcting a mistake. Use the buttons to change state.')),
('num', models.IntegerField(help_text='The order of appearance for this round.')),
('draw', models.IntegerField(blank=True, help_text='The draw for the next round.', null=True)),
('is_private', models.BooleanField(default=False, help_text='Copied from entry.')),
('is_single', models.BooleanField(default=False, help_text='Single-round group')),
('participants', models.CharField(blank=True, default='', help_text='Director(s) or Members (listed TLBB)', max_length=255)),
('representing', models.CharField(blank=True, default='', help_text='Representing entity', max_length=255)),
('onstage', models.DateTimeField(blank=True, help_text='\n The actual appearance datetime.', null=True)),
('actual_start', models.DateTimeField(blank=True, help_text='\n The actual appearance datetime.', null=True)),
('actual_finish', models.DateTimeField(blank=True, help_text='\n The actual appearance datetime.', null=True)),
('pos', models.IntegerField(blank=True, help_text='Actual Participants-on-Stage', null=True)),
('stats', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
('base', models.FloatField(blank=True, help_text='\n The incoming base score used to determine most-improved winners.', null=True)),
('variance_report', models.FileField(blank=True, default='', upload_to=apps.adjudication.fields.UploadPath('variance_report'))),
('csa_report', models.FileField(blank=True, default='', upload_to=apps.adjudication.fields.UploadPath('csa_report'))),
('group_id', models.UUIDField(blank=True, null=True)),
('name', models.CharField(blank=True, default='', help_text='\n The name of the resource.\n ', max_length=255)),
('kind', models.IntegerField(blank=True, choices=[(32, 'Chorus'), (41, 'Quartet'), (46, 'VLQ')], help_text='\n The kind of group.\n ', null=True)),
('gender', models.IntegerField(blank=True, choices=[(10, 'Male'), (20, 'Female'), (30, 'Mixed')], help_text='\n The gender of group.\n ', null=True)),
('district', models.IntegerField(blank=True, choices=[(110, 'BHS'), (200, 'CAR'), (205, 'CSD'), (210, 'DIX'), (215, 'EVG'), (220, 'FWD'), (225, 'ILL'), (230, 'JAD'), (235, 'LOL'), (240, 'MAD'), (345, 'NED'), (350, 'NSC'), (355, 'ONT'), (360, 'PIO'), (365, 'RMD'), (370, 'SLD'), (375, 'SUN'), (380, 'SWD')], null=True)),
('division', models.IntegerField(blank=True, choices=[('EVG', [(10, 'EVG Division I'), (20, 'EVG Division II'), (30, 'EVG Division III'), (40, 'EVG Division IV'), (50, 'EVG Division V')]), ('FWD', [(60, 'FWD Arizona'), (70, 'FWD Northeast'), (80, 'FWD Northwest'), (90, 'FWD Southeast'), (100, 'FWD Southwest')]), ('LOL', [(110, 'LOL 10000 Lakes'), (120, 'LOL Division One'), (130, 'LOL Northern Plains'), (140, 'LOL Packerland'), (150, 'LOL Southwest')]), ('MAD', [(170, 'MAD Central'), (180, 'MAD Northern'), (190, 'MAD Southern')]), ('NED', [(210, 'NED Granite and Pine'), (220, 'NED Mountain'), (230, 'NED Patriot'), (240, 'NED Sunrise'), (250, 'NED Yankee')]), ('SWD', [(260, 'SWD Northeast'), (270, 'SWD Northwest'), (280, 'SWD Southeast'), (290, 'SWD Southwest')])], null=True)),
('bhs_id', models.IntegerField(blank=True, null=True)),
('code', models.CharField(blank=True, default='', help_text='\n Short-form code.', max_length=255)),
],
options={
'ordering': ['num'],
},
),
migrations.CreateModel(
name='Panelist',
fields=[
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('status', django_fsm.FSMIntegerField(choices=[(-10, 'Inactive'), (-5, 'Released'), (0, 'New'), (10, 'Active')], default=10, help_text='DO NOT CHANGE MANUALLY unless correcting a mistake. Use the buttons to change state.')),
('num', models.IntegerField(blank=True, null=True)),
('kind', models.IntegerField(choices=[(10, 'Official'), (20, 'Practice'), (30, 'Observer')])),
('category', models.IntegerField(blank=True, choices=[(5, 'DRCJ'), (10, 'CA'), (30, 'Music'), (40, 'Performance'), (50, 'Singing')], null=True)),
('psa_report', models.FileField(blank=True, default='', upload_to=apps.adjudication.fields.UploadPath('psa_report'))),
('person_id', models.UUIDField(blank=True, null=True)),
('name', models.CharField(blank=True, default='', help_text='\n The prefix of the person.', max_length=255)),
('first_name', models.CharField(blank=True, default='', help_text='\n The first name of the person.', max_length=255)),
('last_name', models.CharField(blank=True, default='', help_text='\n The last name of the person.', max_length=255)),
('district', models.IntegerField(blank=True, choices=[(110, 'BHS'), (200, 'CAR'), (205, 'CSD'), (210, 'DIX'), (215, 'EVG'), (220, 'FWD'), (225, 'ILL'), (230, 'JAD'), (235, 'LOL'), (240, 'MAD'), (345, 'NED'), (350, 'NSC'), (355, 'ONT'), (360, 'PIO'), (365, 'RMD'), (370, 'SLD'), (375, 'SUN'), (380, 'SWD')], null=True)),
('representing', models.CharField(blank=True, default='', help_text='\n District', max_length=10)),
('email', apps.adjudication.fields.LowerEmailField(blank=True, help_text='\n The contact email of the resource.', max_length=254, null=True)),
('cell_phone', phonenumber_field.modelfields.PhoneNumberField(blank=True, help_text='\n The cell phone number of the resource. Include country code.', max_length=128, null=True, region=None)),
('airports', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(blank=True, max_length=3), blank=True, default=list, null=True, size=None)),
('image', models.ImageField(blank=True, null=True, upload_to=apps.adjudication.fields.UploadPath('image'))),
('bhs_id', models.IntegerField(blank=True, null=True)),
('owners', models.ManyToManyField(related_name='panelists', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Song',
fields=[
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('status', django_fsm.FSMIntegerField(choices=[(0, 'New')], default=0, help_text='DO NOT CHANGE MANUALLY unless correcting a mistake. Use the buttons to change state.')),
('num', models.IntegerField()),
('asterisks', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), blank=True, default=list, size=None)),
('dixons', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), blank=True, default=list, size=None)),
('penalties', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(choices=[(10, 'Primarily Patriotic/Religious Intent'), (30, 'Instrumental Accompaniment'), (40, 'Chorus Exceeding 4-Part Texture'), (50, 'Sound Equipment or Electronic Enhancement')]), blank=True, default=list, size=None)),
('stats', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
('chart_id', models.UUIDField(blank=True, null=True)),
('title', models.CharField(blank=True, default='', max_length=255)),
('arrangers', models.CharField(blank=True, default='', max_length=255)),
('appearance', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='songs', to='adjudication.Appearance')),
],
options={
'get_latest_by': ['num'],
},
),
migrations.CreateModel(
name='Score',
fields=[
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('status', django_fsm.FSMIntegerField(choices=[(0, 'New'), (10, 'Verified'), (25, 'Cleared'), (30, 'Flagged'), (35, 'Revised'), (40, 'Confirmed')], default=0, help_text='DO NOT CHANGE MANUALLY unless correcting a mistake. Use the buttons to change state.')),
('points', models.IntegerField(blank=True, help_text='\n The number of points (0-100)', null=True, validators=[django.core.validators.MaxValueValidator(100, message='Points must be between 0 - 100'), django.core.validators.MinValueValidator(0, message='Points must be between 0 - 100')])),
('panelist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='scores', to='adjudication.Panelist')),
('song', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='scores', to='adjudication.Song')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Round',
fields=[
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('status', django_fsm.FSMIntegerField(choices=[(0, 'New'), (10, 'Built'), (20, 'Started'), (25, 'Completed'), (27, 'Verified'), (30, 'Published')], default=0, help_text='DO NOT CHANGE MANUALLY unless correcting a mistake. Use the buttons to change state.')),
('kind', models.IntegerField(choices=[(1, 'Finals'), (2, 'Semi-Finals'), (3, 'Quarter-Finals')])),
('num', models.IntegerField(default=0)),
('spots', models.IntegerField(default=0)),
('date', models.DateField(blank=True, null=True)),
('footnotes', models.TextField(blank=True, help_text='\n Freeform text field; will print on OSS.')),
('oss_report', models.FileField(blank=True, default='', upload_to=apps.adjudication.fields.UploadPath('oss_report'))),
('sa_report', models.FileField(blank=True, default='', upload_to=apps.adjudication.fields.UploadPath('sa_report'))),
('legacy_oss', models.FileField(blank=True, default='', upload_to=apps.adjudication.fields.UploadPath('legacy_oss'))),
('is_reviewed', models.BooleanField(default=False, help_text='Reviewed for history app')),
('convention_id', models.UUIDField(blank=True, null=True)),
('nomen', models.CharField(blank=True, default='', max_length=255)),
('timezone', timezone_field.fields.TimeZoneField(blank=True, help_text='\n The local timezone of the convention.', null=True)),
('image', models.ImageField(blank=True, max_length=255, null=True, upload_to=apps.adjudication.fields.UploadPath('image'))),
('session_id', models.UUIDField(blank=True, null=True)),
('session_kind', models.IntegerField(blank=True, choices=[(32, 'Chorus'), (41, 'Quartet'), (42, 'Mixed'), (43, 'Senior'), (44, 'Youth'), (45, 'Unknown'), (46, 'VLQ')], help_text='\n The kind of session. Generally this will be either quartet or chorus.\n ', null=True)),
('owners', models.ManyToManyField(related_name='rounds', to=settings.AUTH_USER_MODEL)),
],
options={
'get_latest_by': ['num'],
},
),
migrations.AddField(
model_name='panelist',
name='round',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='panelists', to='adjudication.Round'),
),
migrations.CreateModel(
name='Outcome',
fields=[
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('status', models.IntegerField(choices=[(-10, 'Inactive'), (0, 'New'), (10, 'Active')], default=0)),
('num', models.IntegerField(blank=True, null=True)),
('winner', models.CharField(blank=True, max_length=1024, null=True)),
('award_id', models.UUIDField(blank=True, null=True)),
('name', models.CharField(blank=True, help_text='Award Name.', max_length=255, null=True)),
('kind', models.IntegerField(blank=True, choices=[(32, 'Chorus'), (41, 'Quartet')], null=True)),
('gender', models.IntegerField(blank=True, choices=[(10, 'Male'), (20, 'Female'), (30, 'Mixed')], help_text='\n The gender to which the award is restricted. If unselected, this award is open to all combinations.\n ', null=True)),
('level', models.IntegerField(blank=True, choices=[(10, 'Championship'), (30, 'Qualifier'), (45, 'Representative'), (50, 'Deferred'), (60, 'Manual'), (70, 'Improved - Raw'), (80, 'Improved - Standard')], null=True)),
('season', models.IntegerField(blank=True, choices=[(1, 'Summer'), (2, 'Midwinter'), (3, 'Fall'), (4, 'Spring')], null=True)),
('description', models.TextField(blank=True, help_text='\n The Public description of the award.', max_length=1000, null=True)),
('district', models.CharField(blank=True, max_length=255, null=True)),
('division', models.IntegerField(blank=True, choices=[(10, 'EVG Division I'), (20, 'EVG Division II'), (30, 'EVG Division III'), (40, 'EVG Division IV'), (50, 'EVG Division V'), (60, 'FWD Arizona'), (70, 'FWD Northeast'), (80, 'FWD Northwest'), (90, 'FWD Southeast'), (100, 'FWD Southwest'), (110, 'LOL 10000 Lakes'), (120, 'LOL Division One'), (130, 'LOL Northern Plains'), (140, 'LOL Packerland'), (150, 'LOL Southwest'), (170, 'MAD Central'), (180, 'MAD Northern'), (190, 'MAD Southern'), (210, 'NED Granite and Pine'), (220, 'NED Mountain'), (230, 'NED Patriot'), (240, 'NED Sunrise'), (250, 'NED Yankee'), (260, 'SWD Northeast'), (270, 'SWD Northwest'), (280, 'SWD Southeast'), (290, 'SWD Southwest')], null=True)),
('age', models.IntegerField(blank=True, choices=[(10, 'Seniors'), (20, 'Novice'), (30, 'Youth')], null=True)),
('is_novice', models.BooleanField(blank=True, default=False, null=True)),
('size', models.IntegerField(blank=True, choices=[(100, 'Plateau 1'), (110, 'Plateau 2'), (120, 'Plateau 3'), (130, 'Plateau 4'), (140, 'Plateau A'), (150, 'Plateau AA'), (160, 'Plateau AAA'), (170, 'Plateau AAAA'), (180, 'Plateau B'), (190, 'Plateau I'), (200, 'Plateau II'), (210, 'Plateau III'), (220, 'Plateau IV'), (230, 'Small')], null=True)),
('size_range', django.contrib.postgres.fields.ranges.IntegerRangeField(blank=True, null=True)),
('scope', models.IntegerField(blank=True, choices=[(100, 'Plateau 1'), (110, 'Plateau 2'), (120, 'Plateau 3'), (130, 'Plateau 4'), (140, 'Plateau A'), (150, 'Plateau AA'), (160, 'Plateau AAA'), (170, 'Plateau AAAA'), (175, 'Plateau AAAAA')], null=True)),
('scope_range', django.contrib.postgres.fields.ranges.DecimalRangeField(blank=True, null=True)),
('tree_sort', models.IntegerField(blank=True, editable=False, null=True)),
('round', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='outcomes', to='adjudication.Round')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='appearance',
name='outcomes',
field=models.ManyToManyField(blank=True, related_name='appearances', to='adjudication.Outcome'),
),
migrations.AddField(
model_name='appearance',
name='owners',
field=models.ManyToManyField(related_name='appearances', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='appearance',
name='round',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='appearances', to='adjudication.Round'),
),
]
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VpnConnectionsOperations(object):
"""VpnConnectionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
gateway_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VpnConnection"
"""Retrieves the details of a vpn connection.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the vpn connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_09_01.models.VpnConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
gateway_name, # type: str
connection_name, # type: str
vpn_connection_parameters, # type: "_models.VpnConnection"
**kwargs # type: Any
):
# type: (...) -> "_models.VpnConnection"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_connection_parameters, 'VpnConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VpnConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VpnConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
gateway_name, # type: str
connection_name, # type: str
vpn_connection_parameters, # type: "_models.VpnConnection"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VpnConnection"]
"""Creates a vpn connection to a scalable vpn gateway if it doesn't exist else updates the
existing connection.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the connection.
:type connection_name: str
:param vpn_connection_parameters: Parameters supplied to create or Update a VPN Connection.
:type vpn_connection_parameters: ~azure.mgmt.network.v2019_09_01.models.VpnConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VpnConnection or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_09_01.models.VpnConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
connection_name=connection_name,
vpn_connection_parameters=vpn_connection_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
gateway_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
gateway_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a vpn connection.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
connection_name=connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'} # type: ignore
def list_by_vpn_gateway(
self,
resource_group_name, # type: str
gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVpnConnectionsResult"]
"""Retrieves all vpn connections for a particular virtual wan vpn gateway.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnConnectionsResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_09_01.models.ListVpnConnectionsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnConnectionsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_vpn_gateway.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnConnectionsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_vpn_gateway.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections'} # type: ignore
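# Usage sketch (illustrative, not generated code): in application code these
# operations are normally reached through azure.mgmt.network's
# NetworkManagementClient rather than by instantiating VpnConnectionsOperations
# directly. The subscription and resource names below are placeholders, and
# DefaultAzureCredential comes from the separate azure-identity package.
if __name__ == "__main__":
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient
    client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    # Synchronous GET of a single connection.
    conn = client.vpn_connections.get("<resource-group>", "<gateway-name>", "<connection-name>")
    print(conn.name, conn.provisioning_state)
    # Paged listing: ItemPaged lazily fetches pages of VpnConnection objects.
    for c in client.vpn_connections.list_by_vpn_gateway("<resource-group>", "<gateway-name>"):
        print(c.name)
    # Long-running delete: begin_* returns an LROPoller; .result() blocks until the LRO finishes.
    client.vpn_connections.begin_delete(
        "<resource-group>", "<gateway-name>", "<connection-name>"
    ).result()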
|
|
'''
The MIT License (MIT)
Copyright (C) 2014, 2015 Seven Watt <info@sevenwatt.com>
<http://www.sevenwatt.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import sys
import codecs
import struct
import errno, socket
import threading
import traceback
import posixpath
import os
from urllib.parse import unquote
from base64 import b64encode
from hashlib import sha1
from http.server import SimpleHTTPRequestHandler
from io import StringIO
from email.message import Message
class WebSocketError(Exception):
pass
class RootedHTTPRequestHandler(SimpleHTTPRequestHandler):
# The implementation of RootedHTTPRequestHandler refers to:
# http://louistiao.me/posts/python-simplehttpserver-recipe-serve-specific-directory/
def translate_path(self, path):
path = posixpath.normpath(unquote(path))
words = path.split('/')
words = filter(None, words)
path = self.base_path
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir):
continue
path = os.path.join(path, word)
print('trying to access {}'.format(path))
return path
class HTTPWebSocketsHandler(RootedHTTPRequestHandler):
_ws_GUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
_opcode_continu = 0x0
_opcode_text = 0x1
_opcode_binary = 0x2
_opcode_close = 0x8
_opcode_ping = 0x9
_opcode_pong = 0xa
mutex = threading.Lock()
def on_ws_message(self, message):
'''Override this handler to process incoming websocket messages.'''
pass
def on_ws_connected(self):
'''Override this handler.'''
pass
def on_ws_closed(self):
'''Override this handler.'''
pass
def send_message(self, message):
self._send_message(self._opcode_text, message)
def setup(self):
SimpleHTTPRequestHandler.setup(self)
self.connected = False
# def finish(self):
# #needed when wfile is used, or when self.close_connection is not used
# #
# #catch errors in SimpleHTTPRequestHandler.finish() after socket disappeared
# #due to loss of network connection
# try:
# SimpleHTTPRequestHandler.finish(self)
# except (socket.error, TypeError) as err:
# self.log_message('finish(): Exception: in SimpleHTTPRequestHandler.finish(): %s' % str(err.args))
# def handle(self):
# #needed when wfile is used, or when self.close_connection is not used
# #
# #catch errors in SimpleHTTPRequestHandler.handle() after socket disappeared
# #due to loss of network connection
# try:
# SimpleHTTPRequestHandler.handle(self)
# except (socket.error, TypeError) as err:
# self.log_message('handle(): Exception: in SimpleHTTPRequestHandler.handle(): %s' % str(err.args))
def checkAuthentication(self):
auth = self.headers.get('Authorization')
if auth != 'Basic %s' % self.server.auth:
self.send_response(401)
self.send_header('WWW-Authenticate', 'Basic realm="Plugwise"')
self.end_headers()
return False
return True
def do_GET(self):
if self.server.auth and not self.checkAuthentication():
return
if self.headers.get('Upgrade', None) == 'websocket':
self._handshake()
#This handler is in websocket mode now.
#do_GET only returns after client close or socket error.
self._read_messages()
else:
SimpleHTTPRequestHandler.do_GET(self)
def _read_messages(self):
while self.connected:
try:
self._read_next_message()
except (socket.error, WebSocketError) as e:
#websocket content error, time-out or disconnect.
self.log_message('RCV: Close connection: Socket Error %s' % str(e.args))
self._ws_close()
except Exception as err:
#unexpected error in websocket connection.
traceback.print_exc()
self.log_error('RCV: Exception: in _read_messages: %s' % str(err.args))
self._ws_close()
def _read_bytes(self, length):
raw_data = self.rfile.read(length)
return raw_data
def _read_next_message(self):
#self.rfile.read(n) is blocking.
#it returns however immediately when the socket is closed.
try:
byte = self._read_bytes(1)
self.opcode = ord(byte) & 0x0F
length = ord(self._read_bytes(1)) & 0x7F
if length == 126:
length = struct.unpack('>H', self._read_bytes(2))[0]
elif length == 127:
length = struct.unpack('>Q', self._read_bytes(8))[0]
masks = self._read_bytes(4)
decoded = bytearray()
datastream = self._read_bytes(length)
for char in datastream:
decoded.append(char ^ masks[len(decoded) % 4])
decoded = bytes(decoded)
self._on_message(decoded)
except (struct.error, TypeError) as e:
traceback.print_exc()
#catch exceptions from ord() and struct.unpack()
if self.connected:
raise WebSocketError('Websocket read aborted while listening')
else:
#the socket was closed while waiting for input
self.log_error('RCV: _read_next_message aborted after closed connection')
pass
def _send_impl(self, msg):
data = bytearray()
if type(msg) == int:
data = bytes([msg])
elif type(msg) == bytes:
data = msg
elif type(msg) == str:
data = msg.encode()
self.request.send(data)
def _send_message(self, opcode, message):
try:
#use of self.wfile.write gives socket exception after socket is closed. Avoid.
self._send_impl(0x80 + opcode)
length = len(message)
if length <= 125:
self._send_impl(length)
elif length >= 126 and length <= 65535:
self._send_impl(126)
self._send_impl(struct.pack('>H', length))
else:
self._send_impl(127)
self._send_impl(struct.pack('>Q', length))
if length > 0:
self._send_impl(message)
except socket.error as e:
#websocket content error, time-out or disconnect.
traceback.print_exc()
self.log_message('SND: Close connection: Socket Error %s' % str(e.args))
self._ws_close()
except Exception as err:
#unexpected error in websocket connection.
traceback.print_exc()
self.log_error('SND: Exception: in _send_message: %s' % str(err.args))
self._ws_close()
def _handshake(self):
headers=self.headers
if headers.get('Upgrade', None) != 'websocket':
return
key = headers['Sec-WebSocket-Key']
coded_ID = (key + self._ws_GUID).encode('ascii')
hexed = sha1(coded_ID).hexdigest()
hex_decoded = codecs.decode(hexed, 'hex_codec')
digest = b64encode(hex_decoded).decode()
self.send_response(101, 'Switching Protocols')
self.send_header('Upgrade', 'websocket')
self.send_header('Connection', 'Upgrade')
self.send_header('Sec-WebSocket-Accept', digest)
self.end_headers()
self.connected = True
#self.close_connection = 0
self.on_ws_connected()
def _ws_close(self):
#avoid closing a single socket twice, once from the send path and once from the receive path.
self.mutex.acquire()
try:
if self.connected:
self.connected = False
#Terminate BaseHTTPRequestHandler.handle() loop:
self.close_connection = 1
#send close and ignore exceptions. An error may already have occurred.
try:
self._send_close()
except:
pass
self.on_ws_closed()
else:
self.log_message('_ws_close websocket in closed state. Ignore.')
pass
finally:
self.mutex.release()
def _on_message(self, message):
#self.log_message('_on_message: opcode: %02X msg: %s' % (self.opcode, message))
# close
if self.opcode == self._opcode_close:
self.connected = False
#Terminate BaseHTTPRequestHandler.handle() loop:
self.close_connection = 1
try:
self._send_close()
except:
pass
self.on_ws_closed()
# ping
elif self.opcode == self._opcode_ping:
self._send_message(self._opcode_pong, message)
# pong
elif self.opcode == self._opcode_pong:
pass
# data
elif (self.opcode == self._opcode_continu or
self.opcode == self._opcode_text or
self.opcode == self._opcode_binary):
self.on_ws_message(message)
def _send_close(self):
#Dedicated _send_close allows for catch all exception handling
msg = bytearray()
msg.append(0x80 + self._opcode_close)
msg.append(0x00)
self._send_impl(msg)
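# Usage sketch (illustrative, not part of the original module): a minimal echo
# server built on HTTPWebSocketsHandler. The EchoHandler class, bind address,
# port and served directory below are assumptions for demonstration only.
if __name__ == '__main__':
    from http.server import HTTPServer
    class EchoHandler(HTTPWebSocketsHandler):
        def on_ws_connected(self):
            self.log_message('websocket connected')
        def on_ws_message(self, message):
            # echo the decoded frame payload straight back to the client
            self.send_message(message)
        def on_ws_closed(self):
            self.log_message('websocket closed')
    # RootedHTTPRequestHandler serves plain HTTP GETs relative to base_path;
    # websocket upgrades are detected in do_GET via the Upgrade header.
    EchoHandler.base_path = os.getcwd()
    server = HTTPServer(('127.0.0.1', 8000), EchoHandler)
    server.auth = None        # do_GET checks server.auth; a falsy value disables basic auth
    server.serve_forever()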
|
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
import os
from contextlib import contextmanager
from typing import List, Mapping, Optional, Tuple
from pants.base.build_environment import get_buildroot
from pants.base.cmd_line_spec_parser import CmdLineSpecParser
from pants.base.exception_sink import ExceptionSink
from pants.base.exiter import PANTS_FAILED_EXIT_CODE, PANTS_SUCCEEDED_EXIT_CODE, Exiter
from pants.base.specs import Specs
from pants.base.workunit import WorkUnit
from pants.bin.goal_runner import GoalRunner
from pants.build_graph.build_configuration import BuildConfiguration
from pants.engine.native import Native
from pants.engine.rules import UnionMembership
from pants.engine.scheduler import SchedulerSession
from pants.goal.run_tracker import RunTracker
from pants.help.help_printer import HelpPrinter
from pants.init.engine_initializer import EngineInitializer, LegacyGraphSession
from pants.init.logging import setup_logging_from_options
from pants.init.options_initializer import BuildConfigInitializer, OptionsInitializer
from pants.init.repro import Reproducer
from pants.init.specs_calculator import SpecsCalculator
from pants.option.arg_splitter import UnknownGoalHelp
from pants.option.options import Options
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.reporting.reporting import Reporting
from pants.reporting.streaming_workunit_handler import StreamingWorkunitHandler
from pants.subsystem.subsystem import Subsystem
from pants.util.contextutil import maybe_profiled
logger = logging.getLogger(__name__)
class LocalExiter(Exiter):
@classmethod
@contextmanager
def wrap_global_exiter(cls, run_tracker, repro):
with ExceptionSink.exiter_as(
lambda previous_exiter: cls(run_tracker, repro, previous_exiter)
):
yield
def __init__(self, run_tracker, repro, previous_exiter: Exiter) -> None:
self._run_tracker = run_tracker
self._repro = repro
super().__init__(previous_exiter)
def exit(self, result=PANTS_SUCCEEDED_EXIT_CODE, msg=None, *args, **kwargs):
# These strings are prepended to the existing exit message when calling the superclass .exit().
additional_messages = []
try:
if not self._run_tracker.has_ended():
if result == PANTS_SUCCEEDED_EXIT_CODE:
outcome = WorkUnit.SUCCESS
elif result == PANTS_FAILED_EXIT_CODE:
outcome = WorkUnit.FAILURE
else:
run_tracker_msg = (
"unrecognized exit code {} provided to {}.exit() -- "
"interpreting as a failure in the run tracker".format(
result, type(self).__name__
)
)
# Log the unrecognized exit code to the fatal exception log.
ExceptionSink.log_exception(run_tracker_msg)
# Ensure the unrecognized exit code message is also logged to the terminal.
additional_messages.append(run_tracker_msg)
outcome = WorkUnit.FAILURE
self._run_tracker.set_root_outcome(outcome)
run_tracker_result = self._run_tracker.end()
assert (
result == run_tracker_result
), "pants exit code not correctly recorded by run tracker"
except ValueError as e:
# If we have been interrupted by a signal, calling .end() sometimes writes to a closed file,
# so we just log that fact here and keep going.
exception_string = str(e)
ExceptionSink.log_exception(exception_string)
additional_messages.append(exception_string)
finally:
if self._repro:
# TODO: Have Repro capture the 'after' state (as a diff) as well? (in reference to the below
# 'before' state comment)
# NB: this writes to the logger, which is expected to still be alive if we are exiting from
# a signal.
self._repro.log_location_of_repro_file()
if additional_messages:
msg = "{}\n\n{}".format("\n".join(additional_messages), msg or "")
super().exit(result=result, msg=msg, *args, **kwargs)
class LocalPantsRunner(ExceptionSink.AccessGlobalExiterMixin):
"""Handles a single pants invocation running in the process-local context."""
@staticmethod
def parse_options(
args: List[str],
env: Mapping[str, str],
options_bootstrapper: Optional[OptionsBootstrapper] = None,
) -> Tuple[Options, BuildConfiguration, OptionsBootstrapper]:
options_bootstrapper = options_bootstrapper or OptionsBootstrapper.create(
args=args, env=env
)
build_config = BuildConfigInitializer.get(options_bootstrapper)
options = OptionsInitializer.create(options_bootstrapper, build_config)
return options, build_config, options_bootstrapper
@staticmethod
def _maybe_init_graph_session(
graph_session: Optional[LegacyGraphSession],
options_bootstrapper: OptionsBootstrapper,
build_config: BuildConfiguration,
options: Options,
) -> Tuple[LegacyGraphSession, SchedulerSession]:
if not graph_session:
native = Native()
native.set_panic_handler()
graph_scheduler_helper = EngineInitializer.setup_legacy_graph(
native, options_bootstrapper, build_config
)
v2_ui = options.for_global_scope().get("v2_ui", False)
zipkin_trace_v2 = options.for_scope("reporting").zipkin_trace_v2
# TODO(#8658) This should_report_workunits flag must be set to True for
# StreamingWorkunitHandler to receive WorkUnits. It should eventually
# be merged with the zipkin_trace_v2 flag, since they both involve most
# of the same engine functionality, but for now is separate to avoid
# breaking functionality associated with zipkin tracing while iterating on streaming workunit reporting.
stream_workunits = len(options.for_global_scope().streaming_workunits_handlers) != 0
graph_session = graph_scheduler_helper.new_session(
zipkin_trace_v2,
RunTracker.global_instance().run_id,
v2_ui,
should_report_workunits=stream_workunits,
)
return graph_session, graph_session.scheduler_session
@staticmethod
def _maybe_init_specs(
specs: Optional[Specs],
graph_session: LegacyGraphSession,
options: Options,
build_root: str,
) -> Specs:
if specs:
return specs
global_options = options.for_global_scope()
return SpecsCalculator.create(
options=options,
build_root=build_root,
session=graph_session.scheduler_session,
exclude_patterns=tuple(global_options.exclude_target_regexp),
tags=tuple(global_options.tag),
)
@classmethod
def create(
cls,
args: List[str],
env: Mapping[str, str],
specs: Optional[Specs] = None,
daemon_graph_session: Optional[LegacyGraphSession] = None,
options_bootstrapper: Optional[OptionsBootstrapper] = None,
) -> "LocalPantsRunner":
"""Creates a new LocalPantsRunner instance by parsing options.
:param args: The arguments (e.g. sys.argv) for this run.
:param env: The environment (e.g. os.environ) for this run.
:param specs: The specs for this run, i.e. either the address or filesystem specs.
:param daemon_graph_session: The graph helper for this session.
:param options_bootstrapper: The OptionsBootstrapper instance to reuse.
"""
build_root = get_buildroot()
options, build_config, options_bootstrapper = cls.parse_options(
args, env, options_bootstrapper=options_bootstrapper,
)
global_options = options.for_global_scope()
# This works as expected due to the encapsulated_logger in DaemonPantsRunner and
# we don't have to gate logging setup anymore.
setup_logging_from_options(global_options)
# Option values are usually computed lazily on demand,
# but command line options are eagerly computed for validation.
for scope in options.scope_to_flags.keys():
options.for_scope(scope)
# Verify configs.
if global_options.verify_config:
options_bootstrapper.verify_configs_against_options(options)
union_membership = UnionMembership(build_config.union_rules())
# If we're running with the daemon, we'll be handed a session from the
# resident graph helper - otherwise initialize a new one here.
graph_session, scheduler_session = cls._maybe_init_graph_session(
daemon_graph_session, options_bootstrapper, build_config, options
)
specs = cls._maybe_init_specs(specs, graph_session, options, build_root)
profile_path = env.get("PANTS_PROFILE")
return cls(
build_root=build_root,
options=options,
options_bootstrapper=options_bootstrapper,
build_config=build_config,
specs=specs,
graph_session=graph_session,
scheduler_session=scheduler_session,
union_membership=union_membership,
is_daemon=daemon_graph_session is not None,
profile_path=profile_path,
)
def __init__(
self,
build_root: str,
options: Options,
options_bootstrapper: OptionsBootstrapper,
build_config: BuildConfiguration,
specs: Specs,
graph_session: LegacyGraphSession,
scheduler_session: SchedulerSession,
union_membership: UnionMembership,
is_daemon: bool,
profile_path: Optional[str],
) -> None:
"""
:param build_root: The build root for this run.
:param options: The parsed options for this run.
:param options_bootstrapper: The OptionsBootstrapper instance to use.
:param build_config: The parsed build configuration for this run.
:param specs: The specs for this run, i.e. either the address or filesystem specs.
:param graph_session: A LegacyGraphSession instance for graph reuse.
:param is_daemon: Whether or not this run was launched with a daemon graph helper.
:param profile_path: The profile path - if any (from the `PANTS_PROFILE` env var).
"""
self._build_root = build_root
self._options = options
self._options_bootstrapper = options_bootstrapper
self._build_config = build_config
self._specs = specs
self._graph_session = graph_session
self._scheduler_session = scheduler_session
self._union_membership = union_membership
self._is_daemon = is_daemon
self._profile_path = profile_path
self._run_start_time = None
self._run_tracker = None
self._reporting = None
self._repro = None
self._global_options = options.for_global_scope()
def set_start_time(self, start_time):
# Launch RunTracker as early as possible (before .run() is called).
self._run_tracker = RunTracker.global_instance()
# Propagates parent_build_id to pants runs that may be called from this pants run.
os.environ["PANTS_PARENT_BUILD_ID"] = self._run_tracker.run_id
self._reporting = Reporting.global_instance()
self._run_start_time = start_time
self._reporting.initialize(
self._run_tracker, self._options, start_time=self._run_start_time
)
spec_parser = CmdLineSpecParser(get_buildroot())
specs = [spec_parser.parse_spec(spec).to_spec_string() for spec in self._options.specs]
# Note: This will not include values from `--changed-*` flags.
self._run_tracker.run_info.add_info("specs_from_command_line", specs, stringify=False)
# Capture a repro of the 'before' state for this build, if needed.
self._repro = Reproducer.global_instance().create_repro()
if self._repro:
self._repro.capture(self._run_tracker.run_info.get_as_dict())
def run(self):
with LocalExiter.wrap_global_exiter(self._run_tracker, self._repro), maybe_profiled(
self._profile_path
):
self._run()
def _maybe_handle_help(self):
"""Handle requests for `help` information."""
if self._options.help_request:
help_printer = HelpPrinter(
options=self._options, union_membership=self._union_membership
)
result = help_printer.print_help()
return result
def _maybe_run_v1(self):
v1_goals, ambiguous_goals, _ = self._options.goals_by_version
if not self._global_options.v1:
if v1_goals:
HelpPrinter(
options=self._options,
help_request=UnknownGoalHelp(v1_goals),
union_membership=self._union_membership,
).print_help()
return PANTS_FAILED_EXIT_CODE
return PANTS_SUCCEEDED_EXIT_CODE
if not v1_goals and not ambiguous_goals:
return PANTS_SUCCEEDED_EXIT_CODE
# Setup and run GoalRunner.
return (
GoalRunner.Factory(
self._build_root,
self._options_bootstrapper,
self._options,
self._build_config,
self._run_tracker,
self._reporting,
self._graph_session,
self._specs,
self._exiter,
)
.create()
.run()
)
def _maybe_run_v2(self):
# N.B. For daemon runs, @goal_rules are invoked pre-fork -
# so this path only serves the non-daemon run mode.
if self._is_daemon:
return PANTS_SUCCEEDED_EXIT_CODE
_, ambiguous_goals, v2_goals = self._options.goals_by_version
goals = v2_goals + (ambiguous_goals if self._global_options.v2 else tuple())
self._run_tracker.set_v2_goal_rule_names(goals)
if not goals:
return PANTS_SUCCEEDED_EXIT_CODE
return self._graph_session.run_goal_rules(
options_bootstrapper=self._options_bootstrapper,
union_membership=self._union_membership,
options=self._options,
goals=goals,
specs=self._specs,
)
@staticmethod
def _compute_final_exit_code(*codes):
"""Returns the exit code with higher abs value in case of negative values."""
max_code = None
for code in codes:
if max_code is None or abs(max_code) < abs(code):
max_code = code
return max_code
def _update_stats(self):
metrics = self._scheduler_session.metrics()
self._run_tracker.pantsd_stats.set_scheduler_metrics(metrics)
engine_workunits = self._scheduler_session.engine_workunits(metrics)
if engine_workunits:
self._run_tracker.report.bulk_record_workunits(engine_workunits)
def _run(self):
global_options = self._options.for_global_scope()
streaming_handlers = global_options.streaming_workunits_handlers
report_interval = global_options.streaming_workunits_report_interval
callbacks = Subsystem.get_streaming_workunit_callbacks(streaming_handlers)
streaming_reporter = StreamingWorkunitHandler(
self._scheduler_session, callbacks=callbacks, report_interval_seconds=report_interval
)
help_output = self._maybe_handle_help()
if help_output is not None:
self._exiter.exit(help_output)
with streaming_reporter.session():
try:
engine_result = self._maybe_run_v2()
goal_runner_result = self._maybe_run_v1()
finally:
run_tracker_result = self._finish_run()
final_exit_code = self._compute_final_exit_code(
engine_result, goal_runner_result, run_tracker_result
)
self._exiter.exit(final_exit_code)
def _finish_run(self):
try:
self._update_stats()
return self._run_tracker.end()
except ValueError as e:
# Calling .end() sometimes writes to a closed file, so we return a dummy result here.
logger.exception(e)
return PANTS_SUCCEEDED_EXIT_CODE
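# Illustrative note (not part of the runner above): _compute_final_exit_code()
# keeps whichever code has the largest absolute value, so a negative engine
# result is not masked by a successful (zero) v1 or run-tracker result, e.g.:
#
#     LocalPantsRunner._compute_final_exit_code(0, -17, 1)  # returns -17
#     max((0, -17, 1), key=abs)                             # the same rule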
|
|
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoin-wallet."""
import hashlib
import os
import stat
import subprocess
import textwrap
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
BUFFER_SIZE = 16 * 1024
class ToolWalletTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.rpc_timeout = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def bitcoin_wallet_process(self, *args):
binary = self.config["environment"]["BUILDDIR"] + '/src/bitcoin-wallet' + self.config["environment"]["EXEEXT"]
args = ['-datadir={}'.format(self.nodes[0].datadir), '-chain=%s' % self.chain] + list(args)
return subprocess.Popen([binary] + args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
def assert_raises_tool_error(self, error, *args):
p = self.bitcoin_wallet_process(*args)
stdout, stderr = p.communicate()
assert_equal(p.poll(), 1)
assert_equal(stdout, '')
assert_equal(stderr.strip(), error)
def assert_tool_output(self, output, *args):
p = self.bitcoin_wallet_process(*args)
stdout, stderr = p.communicate()
assert_equal(stderr, '')
assert_equal(stdout, output)
assert_equal(p.poll(), 0)
def wallet_shasum(self):
h = hashlib.sha1()
mv = memoryview(bytearray(BUFFER_SIZE))
with open(self.wallet_path, 'rb', buffering=0) as f:
for n in iter(lambda : f.readinto(mv), 0):
h.update(mv[:n])
return h.hexdigest()
def wallet_timestamp(self):
return os.path.getmtime(self.wallet_path)
def wallet_permissions(self):
return oct(os.lstat(self.wallet_path).st_mode)[-3:]
def log_wallet_timestamp_comparison(self, old, new):
result = 'unchanged' if new == old else 'increased!'
self.log.debug('Wallet file timestamp {}'.format(result))
def test_invalid_tool_commands_and_args(self):
self.log.info('Testing that various invalid commands raise with specific error messages')
self.assert_raises_tool_error('Invalid command: foo', 'foo')
# `bitcoin-wallet help` raises an error. Use `bitcoin-wallet -help`.
self.assert_raises_tool_error('Invalid command: help', 'help')
self.assert_raises_tool_error('Error: two methods provided (info and create). Only one method should be provided.', 'info', 'create')
self.assert_raises_tool_error('Error parsing command line arguments: Invalid parameter -foo', '-foo')
self.assert_raises_tool_error('Error loading wallet.dat. Is wallet being used by other process?', '-wallet=wallet.dat', 'info')
self.assert_raises_tool_error('Error: no wallet file at nonexistent.dat', '-wallet=nonexistent.dat', 'info')
def test_tool_wallet_info(self):
# Stop the node to close the wallet to call the info command.
self.stop_node(0)
self.log.info('Calling wallet tool info, testing output')
#
# TODO: Wallet tool info should work with wallet file permissions set to
# read-only without raising:
# "Error loading wallet.dat. Is wallet being used by another process?"
# The following lines should be uncommented and the tests still succeed:
#
# self.log.debug('Setting wallet file permissions to 400 (read-only)')
# os.chmod(self.wallet_path, stat.S_IRUSR)
# assert(self.wallet_permissions() in ['400', '666']) # Sanity check. 666 because Appveyor.
# shasum_before = self.wallet_shasum()
timestamp_before = self.wallet_timestamp()
self.log.debug('Wallet file timestamp before calling info: {}'.format(timestamp_before))
out = textwrap.dedent('''\
Wallet info
===========
Encrypted: no
HD (hd seed available): yes
Keypool Size: 2
Transactions: 0
Address Book: 3
''')
self.assert_tool_output(out, '-wallet=wallet.dat', 'info')
timestamp_after = self.wallet_timestamp()
self.log.debug('Wallet file timestamp after calling info: {}'.format(timestamp_after))
self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after)
self.log.debug('Setting wallet file permissions back to 600 (read/write)')
os.chmod(self.wallet_path, stat.S_IRUSR | stat.S_IWUSR)
assert(self.wallet_permissions() in ['600', '666']) # Sanity check. 666 because Appveyor.
#
# TODO: Wallet tool info should not write to the wallet file.
# The following lines should be uncommented and the tests still succeed:
#
# assert_equal(timestamp_before, timestamp_after)
# shasum_after = self.wallet_shasum()
# assert_equal(shasum_before, shasum_after)
# self.log.debug('Wallet file shasum unchanged\n')
def test_tool_wallet_info_after_transaction(self):
"""
Mutate the wallet with a transaction to verify that the info command
output changes accordingly.
"""
self.start_node(0)
self.log.info('Generating transaction to mutate wallet')
self.nodes[0].generate(1)
self.stop_node(0)
self.log.info('Calling wallet tool info after generating a transaction, testing output')
shasum_before = self.wallet_shasum()
timestamp_before = self.wallet_timestamp()
self.log.debug('Wallet file timestamp before calling info: {}'.format(timestamp_before))
out = textwrap.dedent('''\
Wallet info
===========
Encrypted: no
HD (hd seed available): yes
Keypool Size: 2
Transactions: 1
Address Book: 3
''')
self.assert_tool_output(out, '-wallet=wallet.dat', 'info')
shasum_after = self.wallet_shasum()
timestamp_after = self.wallet_timestamp()
self.log.debug('Wallet file timestamp after calling info: {}'.format(timestamp_after))
self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after)
#
# TODO: Wallet tool info should not write to the wallet file.
# This assertion should be uncommented and succeed:
# assert_equal(timestamp_before, timestamp_after)
assert_equal(shasum_before, shasum_after)
self.log.debug('Wallet file shasum unchanged\n')
def test_tool_wallet_create_on_existing_wallet(self):
self.log.info('Calling wallet tool create on an existing wallet, testing output')
shasum_before = self.wallet_shasum()
timestamp_before = self.wallet_timestamp()
self.log.debug('Wallet file timestamp before calling create: {}'.format(timestamp_before))
out = textwrap.dedent('''\
Topping up keypool...
Wallet info
===========
Encrypted: no
HD (hd seed available): yes
Keypool Size: 2000
Transactions: 0
Address Book: 0
''')
self.assert_tool_output(out, '-wallet=foo', 'create')
shasum_after = self.wallet_shasum()
timestamp_after = self.wallet_timestamp()
self.log.debug('Wallet file timestamp after calling create: {}'.format(timestamp_after))
self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after)
assert_equal(timestamp_before, timestamp_after)
assert_equal(shasum_before, shasum_after)
self.log.debug('Wallet file shasum unchanged\n')
def test_getwalletinfo_on_different_wallet(self):
self.log.info('Starting node with arg -wallet=foo')
self.start_node(0, ['-wallet=foo'])
self.log.info('Calling getwalletinfo on a different wallet ("foo"), testing output')
shasum_before = self.wallet_shasum()
timestamp_before = self.wallet_timestamp()
self.log.debug('Wallet file timestamp before calling getwalletinfo: {}'.format(timestamp_before))
out = self.nodes[0].getwalletinfo()
self.stop_node(0)
shasum_after = self.wallet_shasum()
timestamp_after = self.wallet_timestamp()
self.log.debug('Wallet file timestamp after calling getwalletinfo: {}'.format(timestamp_after))
assert_equal(0, out['txcount'])
assert_equal(1000, out['keypoolsize'])
assert_equal(1000, out['keypoolsize_hd_internal'])
assert_equal(True, 'hdseedid' in out)
self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after)
assert_equal(timestamp_before, timestamp_after)
assert_equal(shasum_after, shasum_before)
self.log.debug('Wallet file shasum unchanged\n')
def run_test(self):
self.wallet_path = os.path.join(self.nodes[0].datadir, self.chain, 'wallets', 'wallet.dat')
self.test_invalid_tool_commands_and_args()
# Warning: The following tests are order-dependent.
self.test_tool_wallet_info()
self.test_tool_wallet_info_after_transaction()
self.test_tool_wallet_create_on_existing_wallet()
self.test_getwalletinfo_on_different_wallet()
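# Illustrative sketch (hypothetical helper, not used by the test above):
# wallet_shasum() hashes the wallet file in fixed-size chunks through a
# reusable memoryview and readinto(), avoiding one allocation per read.
# The same idiom in isolation:
def _example_sha1_of_file(path, buffer_size=BUFFER_SIZE):
    h = hashlib.sha1()
    mv = memoryview(bytearray(buffer_size))
    with open(path, 'rb', buffering=0) as f:
        for n in iter(lambda: f.readinto(mv), 0):
            h.update(mv[:n])
    return h.hexdigest()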
if __name__ == '__main__':
ToolWalletTest().main()
|
|
#!/usr/bin/env python
import datetime as tm
import re
import sys
import os
from pandas import read_csv, to_numeric, concat
from poplerGUI.logiclayer.class_metaverify import MetaVerifier
from poplerGUI.logiclayer.class_helpers import check_registration
from poplerGUI.logiclayer.class_tablebuilder import (
Study_Site_Table_Builder, Table_Builder_Director,
Project_Table_Builder, Taxa_Table_Builder,
Observation_Table_Builder, UpdaterTableBuilder
)
from poplerGUI.logiclayer import class_logconfig as log
from poplerGUI.logiclayer import class_mergedtoupload as mrg
from poplerGUI.logiclayer.datalayer import config as orm
from poplerGUI.logiclayer.datalayer.class_filehandles import (
Caretaker, DataFileOriginator, DataOriginator, Memento
)
if getattr(sys, 'frozen', False):
# we are running in a bundle
rootpath = os.path.dirname(sys.executable)
else:
# we are running in a normal Python environment
rootpath = os.path.dirname(os.path.dirname(os.path.dirname( __file__ )))
__all__ = ['Facade']
class Facade:
'''
This is the facade class to handle the interaction
between the user inputs and the data
'''
# Class attributes are related to managing
# various commands from user
data_caretaker = Caretaker()
data_originator = DataOriginator(None, 'Initializing')
def __init__(self):
'''
Initialize facade with a dictionary to track
user inputs (for logging and session management).
Class instances will be registered with the
input dictionary.
In addition, a file caretaker will be instantiated
when a raw data file is loaded; this helps track
changes to the data.
'''
self.clsinstance = None
self._inputs = {}
self._valueregister = {
'globalid': None,
'lterid': None,
'siteid': None,
'sitelevels': None,
'study_site_key': None
}
self._data = None
self._dbtabledict = {
'study_site_table': Study_Site_Table_Builder(),
'project_table': Project_Table_Builder(),
'taxa_table': Taxa_Table_Builder(),
'timetable': None,
'count_table': Observation_Table_Builder(),
'biomass_table': Observation_Table_Builder(),
'density_table': Observation_Table_Builder(),
'percent_cover_table': Observation_Table_Builder(),
'individual_table': Observation_Table_Builder(),
'covartable': None,
'updatetable': UpdaterTableBuilder()
}
self._datamerged = {
'raw_main': None,
'raw_main_taxa': None
}
self._tablelog = {
'study_site_table': None,
'project_table': None,
'maintable': None,
'maintable_update': None,
'timetable': None,
'taxa_table': None,
'count_table': None,
'biomass_table': None,
'density_table': None,
'percent_cover_table': None,
'individual_table': None,
'covartable': None,
'climatesite': None,
'climateobs': None,
'addsite': None,
'widetolong': None,
'changecolumn': None,
'changecell': None,
'replacevalue': None,
'splitcolumn': None
}
self._colinputlog = {
'siteinfo': None,
'maininfo': None,
'taxainfo': None,
'timeinfo': None,
'rawinfo': None,
'covarinfo': None
}
self.push_tables = {
'study_site_table': None,
'project_table': None,
'taxa_table': None,
'timetable': None,
'count_table': None,
'biomass_table': None,
'density_table': None,
'percent_cover_table': None,
'individual_table': None,
'covariates': None,
'covartable': None
}
self.pushtables = None
self.sitepushed = None
self.mainpushed = None
self.siteinproject = None
self.taxapushed = None
self.rawpushed = None
def input_register(self, clsinstance):
'''
Sets a user-instantiated class into the facade
_inputs dictionary.
All other operations performed by the program
read the _inputs dictionary within their methods
to direct the behavior of the program.
'''
self.clsinstance = clsinstance
try:
self._inputs[self.clsinstance.name] = self.clsinstance
except:
raise AttributeError(
'Wrong class input for program facade.')
def meta_verify(self):
'''
Adapter method:
Takes 'fileoption' input and MetaVerifier class
for logic checks.
'''
check_registration(self, 'metacheck')
verifier = MetaVerifier(self._inputs['metacheck'])
if self._inputs['metacheck'].verify is None:
pass
else:
verifier._meta = read_csv(os.path.join(
rootpath, 'Cataloged_Data_Current_sorted.csv'),
encoding='iso-8859-11')
try:
assert verifier.verify_entries()
except Exception as e:
raise AttributeError(str(e))
self._valueregister['globalid'] = (
self._inputs['metacheck'].lnedentry['globalid']
)
self._valueregister['lterid'] = (
self._inputs['metacheck'].lnedentry['lter']
)
def load_data(self):
''' Using commander classes to perform the following
commands. Note: all commands are executed by the
self.input_manager attribute. This means all
commands are registered with the invoker and all
loaded data is registered with the file caretaker.
1) Load data via the LoadDataCommand (register it with
the invoker and register the loaded data with the file caretaker)
2) Generate proxy data from MakeProxyCommander (
register the command with the invoker and register the proxy
data with the file caretaker)
Returns a proxy of the original dataset loaded.
'''
try:
assert self._inputs[
'fileoptions'].filename is not None
except:
raise AttributeError('No file selected to load.')
data_file_originator = DataFileOriginator(
self._inputs['fileoptions']
)
self.data_caretaker.save(
data_file_originator.save_to_memento()
)
self.data_originator.restore_from_memento(
self.data_caretaker.restore()
)
self._data = self.data_originator._data.copy()
def register_site_levels(self, sitelevels):
'''
Method to store the unique site levels in the
facade class.
'''
try:
assert isinstance(sitelevels, list)
except Exception as e:
print(str(e))
raise TypeError('Site levels input is not a list')
sitelevels.sort()
self._valueregister['sitelevels'] = sitelevels
def create_log_record(self, tablename):
'''
Method to initialize a logger; the log file it writes to is
named with relevant information about which table and what
type of information is being recorded.
'''
try:
globalid = self._inputs['metacheck'].lnedentry['globalid']
filename = os.path.split(
self._inputs[
'fileoptions'].filename)[1]
dt = (str(
tm.datetime.now()).split()[0]).replace("-", "_")
except Exception as e:
print(str(e))
raise AttributeError(
'Global ID and data file not set')
self._tablelog[tablename] =(
log.configure_logger('tableformat', os.path.join(
rootpath, 'logs', '{}_{}_{}_{}.log'.format(
globalid, tablename,filename,dt))))
def make_table(self, inputname):
'''
Method to take user inputs and create dataframes
that contain information that will be pushed into
the database. The formatting of the tables is handled by
the class_tablebuilder.py module.
Additionally, logging of table-specific information
is initiated here.
'''
uniqueinput = self._inputs[inputname]
print('uqinput facade:', uniqueinput)
tablename = self._inputs[inputname].tablename
print('tbl name facade: ', tablename)
globalid = self._inputs['metacheck'].lnedentry['globalid']
print('globalid facade: ', globalid)
sitecol = self._inputs['siteinfo'].lnedentry['study_site_key']
uqsitelevels = self._valueregister['sitelevels']
director = Table_Builder_Director()
builder = self._dbtabledict[tablename]
director.set_user_input(uniqueinput)
director.set_globalid(globalid)
director.set_builder(builder)
if tablename != 'project_table':
director.set_data(self._data)
else:
metaverify = MetaVerifier(self._inputs['metacheck'])
metadata = metaverify._meta
director.set_data(metadata[metadata['global_id'] == globalid].copy())
director.set_sitelevels(uqsitelevels)
director.set_siteid(sitecol)
return director.get_database_table()
def push_merged_data(self):
'''
Method in the facade class to check that all data tables
have been completed by the user and to push them to the
database (the site table can be empty if its records are
already in the database).
'''
# Tables created from user input
study_site_table_df = self.push_tables['study_site_table']
project_table_df = self.push_tables['project_table']
taxa_table_df = self.push_tables['taxa_table']
time_table_df = self.push_tables['timetable']
print('facade time table: ', time_table_df)
print('facade time table col: ', time_table_df.columns)
observation_table_df = concat(
[time_table_df,self.push_tables[
self._inputs['rawinfo'].tablename]], axis=1)
covariate_table_df = self.push_tables['covariates']
site_levels = self._valueregister['sitelevels']
print('facade site levels: ', site_levels)
site_location = self._valueregister['study_site_key']
print('facade site label: ', site_location)
lter = self._valueregister['lterid']
# -------------------------------------- #
# --- Pushing study site table data --- #
# -------------------------------------- #
try:
assert study_site_table_df is not None
assert project_table_df is not None
assert taxa_table_df is not None
assert time_table_df is not None
assert observation_table_df is not None
except Exception as e:
print(str(e))
raise ValueError('Not all tables have been made')
if study_site_table_df.loc[0, 'study_site_key'] != 'NULL':
if self.sitepushed is None:
study_site_table_numeric_columns = [
'lat_study_site', 'lng_study_site'
]
# convert datatype to string/object
study_site_table_df[
study_site_table_df.columns.difference(study_site_table_numeric_columns)] = study_site_table_df[
study_site_table_df.columns.difference(study_site_table_numeric_columns)].applymap(str)
# Strip strings of leading and trailing whitespace
study_site_table_df[
study_site_table_df.columns.difference(study_site_table_numeric_columns)] = study_site_table_df[
study_site_table_df.columns.difference(study_site_table_numeric_columns)].applymap(
lambda x: x.strip())
try:
study_site_table_df.to_sql(
'study_site_table',
orm.conn, if_exists='append', index=False)
self.sitepushed = True
print('PUSHED STUDY')
except Exception as e:
print(str(e))
self._tablelog['study_site_table'].debug(str(e))
raise ValueError(
'Could not push study site table data: ' + str(e)
)
else:
pass
else:
pass
# -------------------------------------- #
# --- Pushing project table data --- #
# -------------------------------------- #
if self.mainpushed is None:
try:
project_table_numeric_columns = [
'studystartyr', 'studyendyr',
'spatial_replication_level_1_extent',
'spatial_replication_level_1_number_of_unique_reps',
'spatial_replication_level_2_extent',
'spatial_replication_level_2_number_of_unique_reps',
'spatial_replication_level_3_extent',
'spatial_replication_level_3_number_of_unique_reps',
'spatial_replication_level_4_extent',
'spatial_replication_level_4_number_of_unique_reps',
'spatial_replication_level_5_extent',
'spatial_replication_level_5_number_of_unique_reps',
]
# Converting data types
project_table_df.loc[
:, project_table_df.columns.difference(project_table_numeric_columns)] = project_table_df.loc[
:, project_table_df.columns.difference(project_table_numeric_columns)].applymap(str).values
# Striping strings
project_table_df.loc[
:, project_table_df.columns.difference(project_table_numeric_columns)] = project_table_df.loc[
:, project_table_df.columns.difference(project_table_numeric_columns)].applymap(
lambda x: x.strip()).values
project_table_df.loc[:, project_table_numeric_columns] = project_table_df.loc[:, project_table_numeric_columns].apply(
to_numeric, errors='ignore'
)
project_table_df['lter_project_fkey'] = lter
project_table_df.to_sql(
'project_table', orm.conn,
if_exists='append', index=False
)
self.mainpushed = True
print('PUSHED PROJECT')
except Exception as e:
print(str(e))
#self._tablelog['project_table'].debug(str(e))
raise ValueError(
'Could not push project table data: ' + str(e)
)
else:
pass
# -------------------------------------- #
# --- Pushing site in project table data --- #
# -------------------------------------- #
if self.siteinproject is None:
pass
else:
pass
merge_object = mrg.MergeToUpload()
site_in_project_key_df = merge_object.site_in_proj_key_df(
studysitetabledf=study_site_table_df,
projecttabledf=project_table_df,
observationtabledf=time_table_df,
lterlocation=lter,
studysitelabel=site_location,
studysitelevels=site_levels
)
merge_object.merge_for_taxa_table_upload(
formated_taxa_table=taxa_table_df,
siteinprojkeydf=site_in_project_key_df,
sitelabel=site_location
)
taxa_column_in_data = [
x[1] for x in
list(self._inputs['taxainfo'].lnedentry.items())
]
taxa_column_in_push_table = [
x[0] for x in
list(self._inputs['taxainfo'].lnedentry.items())
]
print('past taxa')
merge_object.merge_for_datatype_table_upload(
raw_dataframe=time_table_df,
formated_dataframe=observation_table_df,
formated_dataframe_name=(
'{}'.format(
re.sub(
'_table', '', self._inputs['rawinfo'].tablename))
),
covariate_dataframe=covariate_table_df,
siteinprojkeydf=site_in_project_key_df,
raw_data_taxa_columns=taxa_column_in_data,
uploaded_taxa_columns=taxa_column_in_push_table
)
obs_columns_in_data = [
x[1] for x in
list(self._inputs['rawinfo'].lnedentry.items())
]
obs_columns_in_push_table = [
x[0] for x in
list(self._inputs['rawinfo'].lnedentry.items())
]
merge_object.update_project_table(
spatial_rep_columns_from_og_df=obs_columns_in_data,
spatial_rep_columns_from_formated_df=obs_columns_in_push_table
)
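# Illustrative sketch (hypothetical helper, not used by the facade above):
# push_merged_data() repeatedly casts every non-numeric column to str and
# strips whitespace before calling to_sql(). The same idiom in isolation,
# assuming a generic DataFrame `df` and a list `numeric_cols` of column names:
def _example_clean_text_columns(df, numeric_cols):
    text_cols = df.columns.difference(numeric_cols)
    df[text_cols] = df[text_cols].applymap(str).applymap(lambda x: x.strip())
    return df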
|
|
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.storage import (
object_access_control_pb2,
)
from google3.cloud.graphite.mmv2.services.google.storage import (
object_access_control_pb2_grpc,
)
from typing import List
class ObjectAccessControl(object):
def __init__(
self,
project: str = None,
bucket: str = None,
domain: str = None,
email: str = None,
entity: str = None,
entity_id: str = None,
project_team: dict = None,
role: str = None,
id: str = None,
object: str = None,
generation: int = None,
service_account_file: str = "",
):
channel.initialize()
self.project = project
self.bucket = bucket
self.entity = entity
self.role = role
self.object = object
self.service_account_file = service_account_file
def apply(self):
stub = object_access_control_pb2_grpc.StorageObjectAccessControlServiceStub(
channel.Channel()
)
request = object_access_control_pb2.ApplyStorageObjectAccessControlRequest()
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.bucket):
request.resource.bucket = Primitive.to_proto(self.bucket)
if Primitive.to_proto(self.entity):
request.resource.entity = Primitive.to_proto(self.entity)
if ObjectAccessControlRoleEnum.to_proto(self.role):
request.resource.role = ObjectAccessControlRoleEnum.to_proto(self.role)
if Primitive.to_proto(self.object):
request.resource.object = Primitive.to_proto(self.object)
request.service_account_file = self.service_account_file
response = stub.ApplyStorageObjectAccessControl(request)
self.project = Primitive.from_proto(response.project)
self.bucket = Primitive.from_proto(response.bucket)
self.domain = Primitive.from_proto(response.domain)
self.email = Primitive.from_proto(response.email)
self.entity = Primitive.from_proto(response.entity)
self.entity_id = Primitive.from_proto(response.entity_id)
self.project_team = ObjectAccessControlProjectTeam.from_proto(
response.project_team
)
self.role = ObjectAccessControlRoleEnum.from_proto(response.role)
self.id = Primitive.from_proto(response.id)
self.object = Primitive.from_proto(response.object)
self.generation = Primitive.from_proto(response.generation)
def delete(self):
stub = object_access_control_pb2_grpc.StorageObjectAccessControlServiceStub(
channel.Channel()
)
request = object_access_control_pb2.DeleteStorageObjectAccessControlRequest()
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.bucket):
request.resource.bucket = Primitive.to_proto(self.bucket)
if Primitive.to_proto(self.entity):
request.resource.entity = Primitive.to_proto(self.entity)
if ObjectAccessControlRoleEnum.to_proto(self.role):
request.resource.role = ObjectAccessControlRoleEnum.to_proto(self.role)
if Primitive.to_proto(self.object):
request.resource.object = Primitive.to_proto(self.object)
response = stub.DeleteStorageObjectAccessControl(request)
@classmethod
def list(self, project, bucket, object, service_account_file=""):
stub = object_access_control_pb2_grpc.StorageObjectAccessControlServiceStub(
channel.Channel()
)
request = object_access_control_pb2.ListStorageObjectAccessControlRequest()
request.service_account_file = service_account_file
request.Project = project
request.Bucket = bucket
request.Object = object
return stub.ListStorageObjectAccessControl(request).items
def to_proto(self):
resource = object_access_control_pb2.StorageObjectAccessControl()
if Primitive.to_proto(self.project):
resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.bucket):
resource.bucket = Primitive.to_proto(self.bucket)
if Primitive.to_proto(self.entity):
resource.entity = Primitive.to_proto(self.entity)
if ObjectAccessControlRoleEnum.to_proto(self.role):
resource.role = ObjectAccessControlRoleEnum.to_proto(self.role)
if Primitive.to_proto(self.object):
resource.object = Primitive.to_proto(self.object)
return resource
class ObjectAccessControlProjectTeam(object):
def __init__(self, project_number: str = None, team: str = None):
self.project_number = project_number
self.team = team
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = object_access_control_pb2.StorageObjectAccessControlProjectTeam()
if Primitive.to_proto(resource.project_number):
res.project_number = Primitive.to_proto(resource.project_number)
if ObjectAccessControlProjectTeamTeamEnum.to_proto(resource.team):
res.team = ObjectAccessControlProjectTeamTeamEnum.to_proto(resource.team)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return ObjectAccessControlProjectTeam(
project_number=Primitive.from_proto(resource.project_number),
team=ObjectAccessControlProjectTeamTeamEnum.from_proto(resource.team),
)
class ObjectAccessControlProjectTeamArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [ObjectAccessControlProjectTeam.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [ObjectAccessControlProjectTeam.from_proto(i) for i in resources]
class ObjectAccessControlProjectTeamTeamEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return object_access_control_pb2.StorageObjectAccessControlProjectTeamTeamEnum.Value(
"StorageObjectAccessControlProjectTeamTeamEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return object_access_control_pb2.StorageObjectAccessControlProjectTeamTeamEnum.Name(
resource
)[
len("StorageObjectAccessControlProjectTeamTeamEnum") :
]
class ObjectAccessControlRoleEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return object_access_control_pb2.StorageObjectAccessControlRoleEnum.Value(
"StorageObjectAccessControlRoleEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return object_access_control_pb2.StorageObjectAccessControlRoleEnum.Name(
resource
)[len("StorageObjectAccessControlRoleEnum") :]
class Primitive(object):
@classmethod
def to_proto(self, s):
if not s:
return ""
return s
@classmethod
def from_proto(self, s):
return s
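# Illustrative sketch (hypothetical helper, not used by the client above):
# The *Enum helpers round-trip between the short names used in Python (for
# example "READER") and the prefixed proto enum names by prepending the prefix
# in to_proto() and slicing it back off in from_proto(). The slicing step in
# isolation, with the role-enum prefix taken from the code above:
def _example_strip_enum_prefix(name, prefix="StorageObjectAccessControlRoleEnum"):
    return name[len(prefix):] if name.startswith(prefix) else name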
|
|
import os
import glob
import os.path
import shutil
import subprocess
import time
import unittest
import tempfile
import re
def my_check_output(*popenargs, **kwargs):
"""
If we had Python 2.7, we could simply use subprocess.check_output.
This is a stop-gap solution for Python 2.6.
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stderr=subprocess.PIPE, stdout=subprocess.PIPE,
*popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise Exception("Exit code is not 0. It is %d. Command: %s" %
(retcode, cmd))
return output
def run_err_null(cmd):
return os.system(cmd + " 2>/dev/null ")
class LDBTestCase(unittest.TestCase):
def setUp(self):
self.TMP_DIR = tempfile.mkdtemp(prefix="ldb_test_")
self.DB_NAME = "testdb"
def tearDown(self):
assert(self.TMP_DIR.strip() != "/"
and self.TMP_DIR.strip() != "/tmp"
and self.TMP_DIR.strip() != "/tmp/") #Just some paranoia
shutil.rmtree(self.TMP_DIR)
def dbParam(self, dbName):
return "--db=%s" % os.path.join(self.TMP_DIR, dbName)
def assertRunOKFull(self, params, expectedOutput, unexpected=False,
isPattern=False):
"""
All command-line params must be specified.
Allows full flexibility in testing; for example: missing db param.
"""
output = my_check_output("./ldb %s |grep -v \"Created bg thread\"" %
params, shell=True)
if not unexpected:
if isPattern:
self.assertNotEqual(expectedOutput.search(output.strip()),
None)
else:
self.assertEqual(output.strip(), expectedOutput.strip())
else:
if isPattern:
self.assertEqual(expectedOutput.search(output.strip()), None)
else:
self.assertNotEqual(output.strip(), expectedOutput.strip())
def assertRunFAILFull(self, params):
"""
All command-line params must be specified.
Allows full flexibility in testing; for example: missing db param.
"""
try:
my_check_output("./ldb %s >/dev/null 2>&1 |grep -v \"Created bg \
thread\"" % params, shell=True)
except Exception, e:
return
self.fail(
"Exception should have been raised for command with params: %s" %
params)
def assertRunOK(self, params, expectedOutput, unexpected=False):
"""
Uses the default test db.
"""
self.assertRunOKFull("%s %s" % (self.dbParam(self.DB_NAME), params),
expectedOutput, unexpected)
def assertRunFAIL(self, params):
"""
Uses the default test db.
"""
self.assertRunFAILFull("%s %s" % (self.dbParam(self.DB_NAME), params))
def testSimpleStringPutGet(self):
print "Running testSimpleStringPutGet..."
self.assertRunFAIL("put x1 y1")
self.assertRunOK("put --create_if_missing x1 y1", "OK")
self.assertRunOK("get x1", "y1")
self.assertRunFAIL("get x2")
self.assertRunOK("put x2 y2", "OK")
self.assertRunOK("get x1", "y1")
self.assertRunOK("get x2", "y2")
self.assertRunFAIL("get x3")
self.assertRunOK("scan --from=x1 --to=z", "x1 : y1\nx2 : y2")
self.assertRunOK("put x3 y3", "OK")
self.assertRunOK("scan --from=x1 --to=z", "x1 : y1\nx2 : y2\nx3 : y3")
self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3")
self.assertRunOK("scan --from=x", "x1 : y1\nx2 : y2\nx3 : y3")
self.assertRunOK("scan --to=x2", "x1 : y1")
self.assertRunOK("scan --from=x1 --to=z --max_keys=1", "x1 : y1")
self.assertRunOK("scan --from=x1 --to=z --max_keys=2",
"x1 : y1\nx2 : y2")
self.assertRunOK("scan --from=x1 --to=z --max_keys=3",
"x1 : y1\nx2 : y2\nx3 : y3")
self.assertRunOK("scan --from=x1 --to=z --max_keys=4",
"x1 : y1\nx2 : y2\nx3 : y3")
self.assertRunOK("scan --from=x1 --to=x2", "x1 : y1")
self.assertRunOK("scan --from=x2 --to=x4", "x2 : y2\nx3 : y3")
self.assertRunFAIL("scan --from=x4 --to=z") # No results => FAIL
self.assertRunFAIL("scan --from=x1 --to=z --max_keys=foo")
self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3")
self.assertRunOK("delete x1", "OK")
self.assertRunOK("scan", "x2 : y2\nx3 : y3")
self.assertRunOK("delete NonExistentKey", "OK")
# It is weird that GET and SCAN raise exception for
# non-existent key, while delete does not
self.assertRunOK("checkconsistency", "OK")
def dumpDb(self, params, dumpFile):
return 0 == run_err_null("./ldb dump %s > %s" % (params, dumpFile))
def loadDb(self, params, dumpFile):
return 0 == run_err_null("cat %s | ./ldb load %s" % (dumpFile, params))
def testStringBatchPut(self):
print "Running testStringBatchPut..."
self.assertRunOK("batchput x1 y1 --create_if_missing", "OK")
self.assertRunOK("scan", "x1 : y1")
self.assertRunOK("batchput x2 y2 x3 y3 \"x4 abc\" \"y4 xyz\"", "OK")
self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 abc : y4 xyz")
self.assertRunFAIL("batchput")
self.assertRunFAIL("batchput k1")
self.assertRunFAIL("batchput k1 v1 k2")
def testCountDelimDump(self):
print "Running testCountDelimDump..."
self.assertRunOK("batchput x.1 x1 --create_if_missing", "OK")
self.assertRunOK("batchput y.abc abc y.2 2 z.13c pqr", "OK")
self.assertRunOK("dump --count_delim", "x => count:1\tsize:5\ny => count:2\tsize:12\nz => count:1\tsize:8")
self.assertRunOK("dump --count_delim=\".\"", "x => count:1\tsize:5\ny => count:2\tsize:12\nz => count:1\tsize:8")
self.assertRunOK("batchput x,2 x2 x,abc xabc", "OK")
self.assertRunOK("dump --count_delim=\",\"", "x => count:2\tsize:14\nx.1 => count:1\tsize:5\ny.2 => count:1\tsize:4\ny.abc => count:1\tsize:8\nz.13c => count:1\tsize:8")
def testCountDelimIDump(self):
print "Running testCountDelimIDump..."
self.assertRunOK("batchput x.1 x1 --create_if_missing", "OK")
self.assertRunOK("batchput y.abc abc y.2 2 z.13c pqr", "OK")
self.assertRunOK("dump --count_delim", "x => count:1\tsize:5\ny => count:2\tsize:12\nz => count:1\tsize:8")
self.assertRunOK("dump --count_delim=\".\"", "x => count:1\tsize:5\ny => count:2\tsize:12\nz => count:1\tsize:8")
self.assertRunOK("batchput x,2 x2 x,abc xabc", "OK")
self.assertRunOK("dump --count_delim=\",\"", "x => count:2\tsize:14\nx.1 => count:1\tsize:5\ny.2 => count:1\tsize:4\ny.abc => count:1\tsize:8\nz.13c => count:1\tsize:8")
def testHexPutGet(self):
print "Running testHexPutGet..."
self.assertRunOK("put a1 b1 --create_if_missing", "OK")
self.assertRunOK("scan", "a1 : b1")
self.assertRunOK("scan --hex", "0x6131 : 0x6231")
self.assertRunFAIL("put --hex 6132 6232")
self.assertRunOK("put --hex 0x6132 0x6232", "OK")
self.assertRunOK("scan --hex", "0x6131 : 0x6231\n0x6132 : 0x6232")
self.assertRunOK("scan", "a1 : b1\na2 : b2")
self.assertRunOK("get a1", "b1")
self.assertRunOK("get --hex 0x6131", "0x6231")
self.assertRunOK("get a2", "b2")
self.assertRunOK("get --hex 0x6132", "0x6232")
self.assertRunOK("get --key_hex 0x6132", "b2")
self.assertRunOK("get --key_hex --value_hex 0x6132", "0x6232")
self.assertRunOK("get --value_hex a2", "0x6232")
self.assertRunOK("scan --key_hex --value_hex",
"0x6131 : 0x6231\n0x6132 : 0x6232")
self.assertRunOK("scan --hex --from=0x6131 --to=0x6133",
"0x6131 : 0x6231\n0x6132 : 0x6232")
self.assertRunOK("scan --hex --from=0x6131 --to=0x6132",
"0x6131 : 0x6231")
self.assertRunOK("scan --key_hex", "0x6131 : b1\n0x6132 : b2")
self.assertRunOK("scan --value_hex", "a1 : 0x6231\na2 : 0x6232")
self.assertRunOK("batchput --hex 0x6133 0x6233 0x6134 0x6234", "OK")
self.assertRunOK("scan", "a1 : b1\na2 : b2\na3 : b3\na4 : b4")
self.assertRunOK("delete --hex 0x6133", "OK")
self.assertRunOK("scan", "a1 : b1\na2 : b2\na4 : b4")
self.assertRunOK("checkconsistency", "OK")
def testTtlPutGet(self):
print "Running testTtlPutGet..."
self.assertRunOK("put a1 b1 --ttl --create_if_missing", "OK")
self.assertRunOK("scan --hex", "0x6131 : 0x6231", True)
self.assertRunOK("dump --ttl ", "a1 ==> b1", True)
self.assertRunOK("dump --hex --ttl ",
"0x6131 ==> 0x6231\nKeys in range: 1")
self.assertRunOK("scan --hex --ttl", "0x6131 : 0x6231")
self.assertRunOK("get --value_hex a1", "0x6231", True)
self.assertRunOK("get --ttl a1", "b1")
self.assertRunOK("put a3 b3 --create_if_missing", "OK")
# fails because the timestamp's length is greater than the value's
self.assertRunFAIL("get --ttl a3")
self.assertRunOK("checkconsistency", "OK")
def testInvalidCmdLines(self):
print "Running testInvalidCmdLines..."
# db not specified
self.assertRunFAILFull("put 0x6133 0x6233 --hex --create_if_missing")
# No param called he
self.assertRunFAIL("put 0x6133 0x6233 --he --create_if_missing")
# max_keys is not applicable for put
self.assertRunFAIL("put 0x6133 0x6233 --max_keys=1 --create_if_missing")
# hex has invalid boolean value
self.assertRunFAIL("put 0x6133 0x6233 --hex=Boo --create_if_missing")
def testDumpLoad(self):
print "Running testDumpLoad..."
self.assertRunOK("batchput --create_if_missing x1 y1 x2 y2 x3 y3 x4 y4",
"OK")
self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
origDbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
# Dump and load without any additional params specified
dumpFilePath = os.path.join(self.TMP_DIR, "dump1")
loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump1")
self.assertTrue(self.dumpDb("--db=%s" % origDbPath, dumpFilePath))
self.assertTrue(self.loadDb(
"--db=%s --create_if_missing" % loadedDbPath, dumpFilePath))
self.assertRunOKFull("scan --db=%s" % loadedDbPath,
"x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
# Dump and load in hex
dumpFilePath = os.path.join(self.TMP_DIR, "dump2")
loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump2")
self.assertTrue(self.dumpDb("--db=%s --hex" % origDbPath, dumpFilePath))
self.assertTrue(self.loadDb(
"--db=%s --hex --create_if_missing" % loadedDbPath, dumpFilePath))
self.assertRunOKFull("scan --db=%s" % loadedDbPath,
"x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
# Dump only a portion of the key range
dumpFilePath = os.path.join(self.TMP_DIR, "dump3")
loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump3")
self.assertTrue(self.dumpDb(
"--db=%s --from=x1 --to=x3" % origDbPath, dumpFilePath))
self.assertTrue(self.loadDb(
"--db=%s --create_if_missing" % loadedDbPath, dumpFilePath))
self.assertRunOKFull("scan --db=%s" % loadedDbPath, "x1 : y1\nx2 : y2")
# Dump up to max_keys rows
dumpFilePath = os.path.join(self.TMP_DIR, "dump4")
loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump4")
self.assertTrue(self.dumpDb(
"--db=%s --max_keys=3" % origDbPath, dumpFilePath))
self.assertTrue(self.loadDb(
"--db=%s --create_if_missing" % loadedDbPath, dumpFilePath))
self.assertRunOKFull("scan --db=%s" % loadedDbPath,
"x1 : y1\nx2 : y2\nx3 : y3")
# Load into an existing db, create_if_missing is not specified
self.assertTrue(self.dumpDb("--db=%s" % origDbPath, dumpFilePath))
self.assertTrue(self.loadDb("--db=%s" % loadedDbPath, dumpFilePath))
self.assertRunOKFull("scan --db=%s" % loadedDbPath,
"x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
# Dump and load with WAL disabled
dumpFilePath = os.path.join(self.TMP_DIR, "dump5")
loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump5")
self.assertTrue(self.dumpDb("--db=%s" % origDbPath, dumpFilePath))
self.assertTrue(self.loadDb(
"--db=%s --disable_wal --create_if_missing" % loadedDbPath,
dumpFilePath))
self.assertRunOKFull("scan --db=%s" % loadedDbPath,
"x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
# Dump and load with lots of extra params specified
extraParams = " ".join(["--bloom_bits=14", "--block_size=1024",
"--auto_compaction=true",
"--write_buffer_size=4194304",
"--file_size=2097152"])
dumpFilePath = os.path.join(self.TMP_DIR, "dump6")
loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump6")
self.assertTrue(self.dumpDb(
"--db=%s %s" % (origDbPath, extraParams), dumpFilePath))
self.assertTrue(self.loadDb(
"--db=%s %s --create_if_missing" % (loadedDbPath, extraParams),
dumpFilePath))
self.assertRunOKFull("scan --db=%s" % loadedDbPath,
"x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
# Dump with count_only
dumpFilePath = os.path.join(self.TMP_DIR, "dump7")
loadedDbPath = os.path.join(self.TMP_DIR, "loaded_from_dump7")
self.assertTrue(self.dumpDb(
"--db=%s --count_only" % origDbPath, dumpFilePath))
self.assertTrue(self.loadDb(
"--db=%s --create_if_missing" % loadedDbPath, dumpFilePath))
# DB should have at least one value for scan to work
self.assertRunOKFull("put --db=%s k1 v1" % loadedDbPath, "OK")
self.assertRunOKFull("scan --db=%s" % loadedDbPath, "k1 : v1")
# Dump command fails because of typo in params
dumpFilePath = os.path.join(self.TMP_DIR, "dump8")
self.assertFalse(self.dumpDb(
"--db=%s --create_if_missing" % origDbPath, dumpFilePath))
def testMiscAdminTask(self):
print "Running testMiscAdminTask..."
# These tests need to be improved; for example with asserts about
# whether compaction or level reduction actually took place.
self.assertRunOK("batchput --create_if_missing x1 y1 x2 y2 x3 y3 x4 y4",
"OK")
self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
origDbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
self.assertTrue(0 == run_err_null(
"./ldb compact --db=%s" % origDbPath))
self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
self.assertTrue(0 == run_err_null(
"./ldb reduce_levels --db=%s --new_levels=2" % origDbPath))
self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
self.assertTrue(0 == run_err_null(
"./ldb reduce_levels --db=%s --new_levels=3" % origDbPath))
self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
self.assertTrue(0 == run_err_null(
"./ldb compact --db=%s --from=x1 --to=x3" % origDbPath))
self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
self.assertTrue(0 == run_err_null(
"./ldb compact --db=%s --hex --from=0x6131 --to=0x6134"
% origDbPath))
self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
# TODO(dilip): Not sure what should be passed to WAL. Currently corrupted.
self.assertTrue(0 == run_err_null(
"./ldb dump_wal --db=%s --walfile=%s --header" % (
origDbPath, os.path.join(origDbPath, "LOG"))))
self.assertRunOK("scan", "x1 : y1\nx2 : y2\nx3 : y3\nx4 : y4")
def testCheckConsistency(self):
print "Running testCheckConsistency..."
dbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
self.assertRunOK("put x1 y1 --create_if_missing", "OK")
self.assertRunOK("put x2 y2", "OK")
self.assertRunOK("get x1", "y1")
self.assertRunOK("checkconsistency", "OK")
sstFilePath = my_check_output("ls %s" % os.path.join(dbPath, "*.sst"),
shell=True)
# Modify the file
my_check_output("echo 'evil' > %s" % sstFilePath, shell=True)
self.assertRunFAIL("checkconsistency")
# Delete the file
my_check_output("rm -f %s" % sstFilePath, shell=True)
self.assertRunFAIL("checkconsistency")
def dumpLiveFiles(self, params, dumpFile):
return 0 == run_err_null("./ldb dump_live_files %s > %s" % (
params, dumpFile))
def testDumpLiveFiles(self):
print "Running testDumpLiveFiles..."
dbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
self.assertRunOK("put x1 y1 --create_if_missing", "OK")
self.assertRunOK("put x2 y2", "OK")
dumpFilePath = os.path.join(self.TMP_DIR, "dump1")
self.assertTrue(self.dumpLiveFiles("--db=%s" % dbPath, dumpFilePath))
self.assertRunOK("delete x1", "OK")
self.assertRunOK("put x3 y3", "OK")
dumpFilePath = os.path.join(self.TMP_DIR, "dump2")
self.assertTrue(self.dumpLiveFiles("--db=%s" % dbPath, dumpFilePath))
def getManifests(self, directory):
return glob.glob(directory + "/MANIFEST-*")
def copyManifests(self, src, dest):
return 0 == run_err_null("cp " + src + " " + dest)
def testManifestDump(self):
print "Running testManifestDump..."
dbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
self.assertRunOK("put 1 1 --create_if_missing", "OK")
self.assertRunOK("put 2 2", "OK")
self.assertRunOK("put 3 3", "OK")
# Pattern to expect from manifest_dump.
num = "[0-9]+"
st = ".*"
subpat = st + " @ " + num + ": " + num
regex = num + ":" + num + "\[" + subpat + ".." + subpat + "\]"
expected_pattern = re.compile(regex)
cmd = "manifest_dump --db=%s"
manifest_files = self.getManifests(dbPath)
self.assertTrue(len(manifest_files) == 1)
# Test with the default manifest file in dbPath.
self.assertRunOKFull(cmd % dbPath, expected_pattern,
unexpected=False, isPattern=True)
self.copyManifests(manifest_files[0], manifest_files[0] + "1")
manifest_files = self.getManifests(dbPath)
self.assertTrue(len(manifest_files) == 2)
# Test with multiple manifest files in dbPath.
self.assertRunFAILFull(cmd % dbPath)
# Running it with the copy we just created should pass.
self.assertRunOKFull((cmd + " --path=%s")
% (dbPath, manifest_files[1]),
expected_pattern, unexpected=False,
isPattern=True)
def testListColumnFamilies(self):
print "Running testListColumnFamilies..."
dbPath = os.path.join(self.TMP_DIR, self.DB_NAME)
self.assertRunOK("put x1 y1 --create_if_missing", "OK")
cmd = "list_column_families %s | grep -v \"Column families\""
# Test on valid dbPath.
self.assertRunOKFull(cmd % dbPath, "{default}")
# Test on empty path.
self.assertRunFAILFull(cmd % "")
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/env python
"""
The Requester module is in charge of simplifying HTTP requests and
automatically log HTTP transactions by calling the DB module.
"""
import sys
import httplib
import logging
import urllib
import urllib2
from framework.dependency_management.dependency_resolver import BaseComponent
from framework.dependency_management.interfaces import RequesterInterface
from framework.http import transaction
from framework.lib.general import *
# Intercept raw request trick from:
# http://stackoverflow.com/questions/6085709/get-headers-sent-in-urllib2-http-request
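# The connection subclasses below override send() so that every raw request is
# appended to the module-level `raw_request` list, which Requester.Request()
# later reads back to record the exact bytes that were sent on the wire.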
class MyHTTPConnection(httplib.HTTPConnection):
def send(self, s):
global raw_request
# Saving to global variable for Requester class to see.
raw_request.append(s)
httplib.HTTPConnection.send(self, s)
class MyHTTPHandler(urllib2.HTTPHandler):
def http_open(self, req):
try:
return self.do_open(MyHTTPConnection, req)
except KeyboardInterrupt:
raise KeyboardInterrupt # Not handled here.
except Exception:
# Can't have OWTF crash because of a library exception (e.g. raise BadStatusLine(line)).
return ''
class MyHTTPSConnection(httplib.HTTPSConnection):
def send(self, s):
global raw_request
# Saving to global variable for Requester class to see.
raw_request.append(s)
httplib.HTTPSConnection.send(self, s)
class MyHTTPSHandler(urllib2.HTTPSHandler):
def https_open(self, req):
try:
return self.do_open(MyHTTPSConnection, req)
except KeyboardInterrupt:
raise KeyboardInterrupt # Not handled here.
except Exception:
# Can't have OWTF crash because of a library exception (e.g. raise BadStatusLine(line)).
return ''
# SmartRedirectHandler is courtesy of:
# http://www.diveintopython.net/http_web_services/redirects.html
class SmartRedirectHandler(urllib2.HTTPRedirectHandler):
def http_error_301(self, req, fp, code, msg, headers):
result = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
result.status = code
return result
def http_error_302(self, req, fp, code, msg, headers):
result = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
result.status = code
return result
class Requester(BaseComponent, RequesterInterface):
COMPONENT_NAME = "requester"
def __init__(self, proxy):
self.register_in_service_locator()
self.db_config = self.get_component("db_config")
self.target = self.get_component("target")
self.transaction = self.get_component("transaction")
self.url_manager = self.get_component("url_manager")
self.error_handler = self.get_component("error_handler")
self.plugin_handler = self.get_component("plugin_handler")
self.timer = self.get_component("timer")
self.http_transaction = None
self.Headers = {'User-Agent': self.db_config.Get('USER_AGENT')}
self.RequestCountRefused = 0
self.RequestCountTotal = 0
self.LogTransactions = False
self.Proxy = proxy
if proxy is None:
logging.debug(
"WARNING: No outbound proxy selected. It is recommended to "
"use an outbound proxy for tactical fuzzing later")
# FIXME: "Smart" redirect handler not really working.
self.Opener = urllib2.build_opener(MyHTTPHandler, MyHTTPSHandler, SmartRedirectHandler)
else: # All requests must use the outbound proxy.
logging.debug("Setting up proxy(inbound) for OWTF requests..")
ip, port = proxy
proxy_conf = {'http': 'http://%s:%s' % (ip, port), 'https': 'http://%s:%s' % (ip, port)}
proxy_handler = urllib2.ProxyHandler(proxy_conf)
# FIXME: Works except no raw request on https.
self.Opener = urllib2.build_opener(proxy_handler, MyHTTPHandler, MyHTTPSHandler, SmartRedirectHandler)
urllib2.install_opener(self.Opener)
def log_transactions(self, log_transactions=True):
backup = self.LogTransactions
self.LogTransactions = log_transactions
return backup
def NeedToAskBeforeRequest(self):
return not self.plugin_handler.NormalRequestsAllowed()
def IsTransactionAlreadyAdded(self, url):
return self.transaction.IsTransactionAlreadyAdded({'url': url.strip()})
def is_request_possible(self):
return self.plugin_handler.RequestsPossible()
def ProxyCheck(self):
# Verify proxy works! www.google.com might not work in a restricted network, try target URL :)
if self.Proxy is not None and self.is_request_possible():
url = self.db_config.Get('PROXY_CHECK_URL')
refused_before = self.RequestCountRefused
cprint("Proxy Check: Avoid logging request again if already in DB..")
log_setting_backup = False
if self.IsTransactionAlreadyAdded(url):
log_setting_backup = self.log_transactions(False)
# Make the actual check request so that RequestCountRefused gets updated.
self.GET(url)
if log_setting_backup:
self.log_transactions(log_setting_backup)
refused_after = self.RequestCountRefused
if refused_before < refused_after: # Proxy is refusing connections.
return [False, "ERROR: Proxy Check error: The proxy is not listening or is refusing connections"]
else:
return [True, "Proxy Check OK: The proxy appears to be working"]
return [True, "Proxy Check OK: No proxy is setup or no HTTP requests will be made"]
def GetHeaders(self):
return self.Headers
def SetHeaders(self, headers):
self.Headers = headers
def SetHeader(self, header, value):
self.Headers[header] = value
def StringToDict(self, string):
params = defaultdict(list)
count = 0
prev_item = ''
for item in string.strip().split('='):
if count % 2 == 1:  # Odd-positioned items are values for the preceding key.
params[prev_item] = item
else:  # Even-positioned items are keys.
params[item] = ''
prev_item = item
count += 1
return params
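# For example (based on the splitting logic above):
#   StringToDict("username=admin") -> {'username': 'admin'}
# Note that only '=' is used as a separator.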
def DerivePOSTToStr(self, post=None):
post = self.DerivePOST(post)
if post is None:
return ''
return post
def DerivePOST(self, post=None):
if '' == post:
post = None
if post is not None:
if isinstance(post, str) or isinstance(post, unicode):
# Must be a dictionary prior to urlencode.
post = self.StringToDict(post)
post = urllib.urlencode(post)
return post
def perform_request(self, request):
return urllib2.urlopen(request)
def set_succesful_transaction(self, raw_request, response):
return self.http_transaction.SetTransaction(True, raw_request[0], response)
def log_transaction(self):
self.transaction.LogTransaction(self.http_transaction)
def Request(self, url, method=None, post=None):
# kludge: necessary to get around urllib2 limitations: Need this to get the exact request that was sent.
global raw_request
url = str(url)
raw_request = [] # Init Raw Request to blank list.
post = self.DerivePOST(post)
method = DeriveHTTPMethod(method, post)
url = url.strip() # Clean up URL.
request = urllib2.Request(url, post, self.Headers) # GET request.
if method is not None:
# kludge: necessary to do anything other than GET or POST with urllib2
request.get_method = lambda: method
# MUST create a new Transaction object each time so that lists of
# transactions can be created and processed at plugin level.
# Pass the timer object to avoid instantiating each time.
self.http_transaction = transaction.HTTP_Transaction(self.timer)
self.http_transaction.Start(url, post, method, self.target.IsInScopeURL(url))
self.RequestCountTotal += 1
try:
response = self.perform_request(request)
self.set_succesful_transaction(raw_request, response)
except urllib2.HTTPError as Error: # page NOT found.
# Error is really a response for anything other than 200 OK in urllib2 :)
self.http_transaction.SetTransaction(False, raw_request[0], Error)
except urllib2.URLError as Error: # Connection refused?
err_message = self.ProcessHTTPErrorCode(Error, url)
self.http_transaction.SetError(err_message)
except IOError:
err_message = "ERROR: Requester Object -> Unknown HTTP Request error: %s\n%s" % (url, str(sys.exc_info()))
self.http_transaction.SetError(err_message)
if self.LogTransactions:
# Log transaction in DB for analysis later and return modified Transaction with ID.
self.log_transaction()
return self.http_transaction
def ProcessHTTPErrorCode(self, error, url):
message = ""
if str(error.reason).startswith("[Errno 111]"):
message = "ERROR: The connection was refused!: %s" % str(error)
self.RequestCountRefused += 1
elif str(error.reason).startswith("[Errno -2]"):
self.error_handler.FrameworkAbort("ERROR: cannot resolve hostname!: %s" % str(error))
else:
message = "ERROR: The connection was not refused, unknown error!"
log = logging.getLogger('general')
log.info(message)
return "%s (Requester Object): %s\n%s" % (message, url, str(sys.exc_info()))
def GET(self, url):
return self.Request(url)
def POST(self, url, data):
return self.Request(url, 'POST', data)
def TRACE(self, url):
return self.Request(url, 'TRACE', None)
def OPTIONS(self, url):
return self.Request(url, 'OPTIONS', None)
def HEAD(self, url):
return self.Request(url, 'HEAD', None)
def DEBUG(self, url):
self.BackupHeaders()
self.Headers['Command'] = 'start-debug'
result = self.Request(url, 'DEBUG', None)
self.RestoreHeaders()
return result
def PUT(self, url, content_type='text/plain'):
self.BackupHeaders()
self.Headers['Content-Type'] = content_type
self.Headers['Content-Length'] = "0"
result = self.Request(url, 'PUT', None)
self.RestoreHeaders()
return result
def BackupHeaders(self):
self.HeadersBackup = dict.copy(self.Headers)
def RestoreHeaders(self):
self.Headers = dict.copy(self.HeadersBackup)
def GetTransaction(self, use_cache, url, method=None, data=None):
criteria = {'url': url.strip()}
if method is not None:
criteria['method'] = method
# Must clean-up data to ensure match is found.
if data is not None:
criteria['data'] = self.DerivePOSTToStr(data)
# Visit URL if not already visited.
if (not use_cache or not self.transaction.IsTransactionAlreadyAdded(criteria)):
if method in ['', 'GET', 'POST', 'HEAD', 'TRACE', 'OPTIONS']:
return self.Request(url, method, data)
elif method == 'DEBUG':
return self.DEBUG(url)
elif method == 'PUT':
return self.PUT(url, data)
else: # Retrieve from DB = faster.
# Important since there is no transaction ID with transaction objects created by Requester.
return self.transaction.GetFirst(criteria)
def GetTransactions(self, use_cache, url_list, method=None, data=None, unique=True):
transactions = []
if unique:
url_list = set(url_list)
for url in url_list:
url = url.strip() # Clean up the URL first.
if not url:
continue # Skip blank lines.
if not self.url_manager.IsURL(url):
self.error_handler.Add("Minor issue: %s is not a valid URL and has been ignored, processing continues" %
str(url))
continue # Skip garbage URLs.
transaction = self.GetTransaction(use_cache, url, method=method, data=data)
if transaction is not None:
transactions.append(transaction)
return transactions
|
|
# -*- encoding: utf-8 -*-
"""
Sends Slack notifications for alarm events.
"""
import datetime as dt
import json
import os
import attr
import boto3
import requests
from wellcome_aws_utils.lambda_utils import log_on_error
from cloudwatch_alarms import (
build_cloudwatch_url,
datetime_to_cloudwatch_ts,
ThresholdMessage,
)
from platform_alarms import (
get_human_message,
guess_cloudwatch_log_group,
guess_cloudwatch_search_terms,
is_critical_error,
simplify_message,
)
@attr.s
class Interval:
start = attr.ib()
end = attr.ib()
class MessageHasNoDateError(Exception):
pass
@attr.s
class Alarm:
message = attr.ib(converter=json.loads)
@property
def name(self):
return self.message["AlarmName"]
@property
def state_reason(self):
return self.message["NewStateReason"]
@property
def should_be_sent_to_main_channel(self):
return self.name not in [
"lambda-reporting_miro_transformer-errors",
"lambda-reporting_miro_inventory_transformer-errors",
"lambda-reporting_sierra_transformer-errors",
]
# Sometimes there's enough data in the alarm to make an educated guess
# about useful CloudWatch logs to check, so we include that in the notification.
# The methods and properties below pull out the relevant info.
def cloudwatch_timeframe(self):
"""
Try to work out a likely timeframe for CloudWatch errors.
"""
threshold = ThresholdMessage.from_message(self.state_reason)
try:
return Interval(
start=threshold.date - dt.timedelta(seconds=300),
end=threshold.date + dt.timedelta(seconds=300),
)
except TypeError:
# Raised when threshold.date is None.
raise MessageHasNoDateError()
def cloudwatch_urls(self):
"""
Return some CloudWatch URLs that might be useful to check.
"""
try:
log_group_name = guess_cloudwatch_log_group(alarm_name=self.name)
timeframe = self.cloudwatch_timeframe()
return [
build_cloudwatch_url(
search_term=search_term,
log_group_name=log_group_name,
start_date=timeframe.start,
end_date=timeframe.end,
)
for search_term in guess_cloudwatch_search_terms(alarm_name=self.name)
]
except MessageHasNoDateError:
pass
except ValueError as err:
print(f"Error in cloudwatch_urls: {err}")
return []
def cloudwatch_messages(self):
"""
Try to find some CloudWatch messages that might be relevant.
"""
client = boto3.client("logs")
messages = []
try:
log_group_name = guess_cloudwatch_log_group(alarm_name=self.name)
# CloudWatch wants these parameters specified as seconds since
# 1 Jan 1970 00:00:00, so convert to that first.
timeframe = self.cloudwatch_timeframe()
startTime = datetime_to_cloudwatch_ts(timeframe.start)
endTime = datetime_to_cloudwatch_ts(timeframe.end)
# We only get the first page of results. If there's more than
# one page, we have so many errors that not getting them all
# in the Slack alarm is the least of our worries!
for term in guess_cloudwatch_search_terms(alarm_name=self.name):
resp = client.filter_log_events(
logGroupName=log_group_name,
startTime=startTime,
endTime=endTime,
filterPattern=term,
)
messages.extend([e["message"] for e in resp["events"]])
except MessageHasNoDateError:
pass
except Exception as err:
print(f"Error in cloudwatch_messages: {err!r}")
return messages
def to_bitly(sess, url, access_token):
"""
Try to shorten a URL with bit.ly. If it fails, just return the
original URL.
"""
resp = sess.get(
"https://api-ssl.bitly.com/v3/user/link_save",
params={"access_token": access_token, "longUrl": url},
)
try:
return resp.json()["data"]["link_save"]["link"]
except TypeError: # thrown if "data" = null
print(f"response from bit.ly: {resp.json()}")
return url
def prepare_slack_payload(alarm, bitly_access_token, sess=None):
if is_critical_error(alarm_name=alarm.name):
slack_data = {"username": "cloudwatch-alarm", "icon_emoji": ":rotating_light:"}
alarm_color = "danger"
else:
slack_data = {"username": "cloudwatch-warning", "icon_emoji": ":warning:"}
alarm_color = "warning"
slack_data["attachments"] = [
{
"color": alarm_color,
"fallback": alarm.name,
"title": alarm.name,
"fields": [
{
"value": get_human_message(
alarm_name=alarm.name, state_reason=alarm.state_reason
)
}
],
}
]
messages = alarm.cloudwatch_messages()
if messages:
cloudwatch_message_str = "\n".join(set([simplify_message(m) for m in messages]))
slack_data["attachments"][0]["fields"].append(
{"title": "CloudWatch messages", "value": cloudwatch_message_str}
)
cloudwatch_urls = alarm.cloudwatch_urls()
if cloudwatch_urls:
sess = sess or requests.Session()
cloudwatch_url_str = " / ".join(
[
to_bitly(sess=sess, url=url, access_token=bitly_access_token)
for url in cloudwatch_urls
]
)
slack_data["attachments"][0]["fields"].append({"value": cloudwatch_url_str})
return slack_data
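# For illustration only -- the payload built above has roughly this shape
# (values are examples, not literal output):
#   {"username": "cloudwatch-alarm", "icon_emoji": ":rotating_light:",
#    "attachments": [{"color": "danger", "fallback": "<alarm name>",
#                     "title": "<alarm name>",
#                     "fields": [{"value": "<human-readable message>"},
#                                {"title": "CloudWatch messages", "value": "..."},
#                                {"value": "<shortened CloudWatch URLs>"}]}]}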
@log_on_error
def main(event, _ctxt=None):
bitly_access_token = os.environ["BITLY_ACCESS_TOKEN"]
alarm = Alarm(event["Records"][0]["Sns"]["Message"])
if alarm.should_be_sent_to_main_channel:
webhook_url = os.environ["CRITICAL_SLACK_WEBHOOK"]
else:
webhook_url = os.environ["NONCRITICAL_SLACK_WEBHOOK"]
slack_data = prepare_slack_payload(alarm, bitly_access_token)
print("Sending message %s" % json.dumps(slack_data))
response = requests.post(
webhook_url,
data=json.dumps(slack_data),
headers={"Content-Type": "application/json"},
)
response.raise_for_status()
|
|
# stdlib
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Type
from typing import Union
# third party
from nacl.encoding import HexEncoder
from nacl.signing import SigningKey
from nacl.signing import VerifyKey
# syft absolute
import syft as sy
# relative
from ......grid import GridURL
from ......logger import error
from ......logger import info
from .....common.message import ImmediateSyftMessageWithReply
from .....common.message import SignedImmediateSyftMessageWithReply
from ....domain.domain_interface import DomainInterface
from ....domain.enums import AssociationRequestResponses
from ...exceptions import AuthorizationError
from ...exceptions import MissingRequestKeyError
from ...node_service.vpn.vpn_messages import VPNStatusMessageWithReply
from ...node_table.association_request import AssociationRequest
from ..auth import service_auth
from ..node_service import ImmediateNodeServiceWithReply
from ..success_resp_message import SuccessResponseMessage
from .association_request_messages import DeleteAssociationRequestMessage
from .association_request_messages import GetAssociationRequestMessage
from .association_request_messages import GetAssociationRequestResponse
from .association_request_messages import GetAssociationRequestsMessage
from .association_request_messages import GetAssociationRequestsResponse
from .association_request_messages import ReceiveAssociationRequestMessage
from .association_request_messages import RespondAssociationRequestMessage
from .association_request_messages import SendAssociationRequestMessage
def get_vpn_status_metadata(node: DomainInterface) -> Dict[str, Any]:
vpn_status_msg = (
VPNStatusMessageWithReply()
.to(address=node.address, reply_to=node.address)
.sign(signing_key=node.signing_key)
)
vpn_status = node.recv_immediate_msg_with_reply(msg=vpn_status_msg)
vpn_status_message_contents = vpn_status.message
status = vpn_status_message_contents.payload.kwargs # type: ignore
network_vpn_ip = status["host"]["ip"]
node_name = status["host"]["hostname"]
metadata = {
"host_or_ip": str(network_vpn_ip),
"node_id": str(node.target_id.id.no_dash),
"node_name": str(node_name),
"type": f"{str(type(node).__name__).lower()}",
}
return metadata
def check_if_is_vpn(host_or_ip: str) -> bool:
VPN_IP_SUBNET = "100.64.0."
return host_or_ip.startswith(VPN_IP_SUBNET)
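# Overview (added for clarity, based on the handler comments below): the
# association handshake is a three-step exchange -- a domain sends a
# SendAssociationRequestMessage, the network (and later the domain) handles a
# ReceiveAssociationRequestMessage, and the network owner answers it with a
# RespondAssociationRequestMessage.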
# domain gets this message from a user and will try to send to the network
def send_association_request_msg(
msg: SendAssociationRequestMessage,
node: DomainInterface,
verify_key: VerifyKey,
) -> SuccessResponseMessage:
# Check Key permissions
info(
f"Node {node} - send_association_request_msg: got SendAssociationRequestMessage. "
f"Info: {msg.source} - {msg.target}"
)
allowed = node.users.can_manage_infrastructure(verify_key=verify_key)
if allowed:
user = node.users.get_user(verify_key=verify_key)
info(
f"Node {node} - send_association_request_msg: {node} got user performing the action. User: {user}"
)
# Build an association request to send to the target
user_priv_key = SigningKey(
node.users.get_user(verify_key).private_key.encode(), encoder=HexEncoder # type: ignore
)
metadata = dict(msg.metadata)
# get domain metadata to send to the network
try:
# TODO: refactor to not stuff our vpn_metadata into the normal metadata
# because it gets blindly **splatted into the database
vpn_metadata = get_vpn_status_metadata(node=node)
metadata.update(vpn_metadata)
except Exception as e:
error(f"failed to get vpn status. {e}")
metadata["node_name"] = (
node.name if node.name else ""
) # tell the network what our name is
grid_url = GridURL.from_url(msg.target).with_path("/api/v1")
target_client = sy.connect(url=str(grid_url))
target_msg: SignedImmediateSyftMessageWithReply = (
ReceiveAssociationRequestMessage(
address=target_client.address,
reply_to=node.address,
metadata=metadata,
source=vpn_metadata["host_or_ip"],
target=msg.target,
).sign(signing_key=user_priv_key)
)
# Send the message to the target
info(
f"Node {node} - send_association_request_msg: sending ReceiveAssociationRequestMessage."
)
try:
# we need target
target_client.send_immediate_msg_with_reply(msg=target_msg)
except Exception as e:
error(f"Failed to send ReceiveAssociationRequestMessage. {e}")
info(
f"Node {node} - send_association_request_msg: received the answer from ReceiveAssociationRequestMessage."
)
# Create a new association request object
info(
f"Node {node} - send_association_request_msg: adding requests to the Database."
)
node.association_requests.create_association_request(
node_name=target_client.name, # type: ignore
node_address=target_client.target_id.id.no_dash, # type: ignore
status=AssociationRequestResponses.PENDING,
source=msg.source,
target=msg.target,
)
else: # If not authorized
raise AuthorizationError("You're not allowed to create an Association Request!")
info(f"Node: {node} received the answer from ReceiveAssociationRequestMessage.")
return SuccessResponseMessage(
address=msg.reply_to,
resp_msg="Association request sent!",
)
# network gets the above message first and then later the domain gets this message as well
def recv_association_request_msg(
msg: ReceiveAssociationRequestMessage,
node: DomainInterface,
verify_key: VerifyKey,
) -> SuccessResponseMessage:
_previous_request = node.association_requests.contain(
source=msg.source, target=msg.target
)
info(
f"Node {node} - recv_association_request_msg: prev request exists {not _previous_request}."
)
# Create a new Association Request if the handshake value doesn't exist in the database
if not _previous_request:
# this side happens on the network
info(
f"Node {node} - recv_association_request_msg: creating a new association request."
)
if node.settings.DOMAIN_ASSOCIATION_REQUESTS_AUTOMATICALLY_ACCEPTED:
status = AssociationRequestResponses.ACCEPT
else:
status = AssociationRequestResponses.PENDING
node.association_requests.create_association_request(
node_name=msg.metadata["node_name"],
node_address=msg.reply_to.target_id.id.no_dash,
status=status,
source=msg.source,
target=msg.target,
)
else:
# this side happens on the domain
info(
f"Node {node} - recv_association_request_msg: answering an existing association request."
)
node.association_requests.set(source=msg.source, target=msg.target, response=msg.response) # type: ignore
# get or create a new node and node_route which represents the opposing node which
# is supplied in the metadata
try:
node_id = node.node.create_or_get_node( # type: ignore
node_uid=msg.metadata["node_id"], node_name=msg.metadata["node_name"]
)
is_vpn = check_if_is_vpn(host_or_ip=msg.metadata["host_or_ip"])
node.node_route.update_route_for_node( # type: ignore
node_id=node_id, host_or_ip=msg.metadata["host_or_ip"], is_vpn=is_vpn
)
except Exception as e:
error(f"Failed to save the node and node_route rows. {e}")
return SuccessResponseMessage(
address=msg.reply_to,
resp_msg="Association request received!",
)
# network owner user approves the request and sends this to the network
def respond_association_request_msg(
msg: RespondAssociationRequestMessage,
node: DomainInterface,
verify_key: VerifyKey,
) -> SuccessResponseMessage:
# Check if handshake/address/value fields are empty
missing_parameters = not msg.target or not msg.response
if missing_parameters:
raise MissingRequestKeyError(
message="Invalid request payload, empty fields (target/handshake/value)!"
)
# Check Key permissions
allowed = node.users.can_manage_infrastructure(verify_key=verify_key)
info(
f"Node {node} - respond_association_request_msg: user can approve/deny association requests."
)
if allowed:
# Set the status of the Association Request according to the "value" field received
node.association_requests.set(source=msg.source, target=msg.target, response=msg.response) # type: ignore
user_priv_key = SigningKey(
node.users.get_user(verify_key).private_key.encode(), encoder=HexEncoder # type: ignore
)
metadata = {}
# get network metadata to send back to domain
try:
metadata = get_vpn_status_metadata(node=node)
except Exception as e:
error(f"Failed to get vpn status. {e}")
# create a client to the source
grid_url = GridURL.from_url(msg.source).with_path("/api/v1")
source_client = sy.connect(url=str(grid_url))
try:
node_msg: SignedImmediateSyftMessageWithReply = (
ReceiveAssociationRequestMessage(
address=source_client.address,
response=msg.response,
reply_to=node.address,
metadata=metadata,
source=msg.source,
target=msg.target,
).sign(signing_key=user_priv_key)
)
info(
f"Node {node} - respond_association_request_msg: sending ReceiveAssociationRequestMessage."
)
source_client.send_immediate_msg_with_reply(msg=node_msg)
info(
f"Node {node} - respond_association_request_msg: ReceiveAssociationRequestMessage got back."
)
except Exception as e:
error(f"Failed to send ReceiveAssociationRequestMessage to the domain. {e}")
else: # If not allowed
raise AuthorizationError("You're not allowed to create an Association Request!")
return SuccessResponseMessage(
address=msg.reply_to,
resp_msg="Association request replied!",
)
def get_association_request_msg(
msg: GetAssociationRequestMessage,
node: DomainInterface,
verify_key: VerifyKey,
) -> GetAssociationRequestResponse:
# Check Key Permissions
allowed = node.users.can_manage_infrastructure(verify_key=verify_key)
# If allowed
if allowed:
association_request: AssociationRequest = node.association_requests.first(
id=msg.association_id
)
else: # Otherwise
raise AuthorizationError(
"You're not allowed to get Association Request information!"
)
return GetAssociationRequestResponse(
address=msg.reply_to,
content=association_request.get_metadata(),
source=association_request.source,
target=association_request.target,
)
def get_all_association_request_msg(
msg: GetAssociationRequestsMessage,
node: DomainInterface,
verify_key: VerifyKey,
) -> GetAssociationRequestsResponse:
allowed = node.users.can_manage_infrastructure(verify_key=verify_key)
# If allowed
if allowed:
association_requests = node.association_requests.all()
association_requests_json = [
association_request.get_metadata()
for association_request in association_requests
]
else: # Otherwise
raise AuthorizationError(
"You're not allowed to get Association Request information!"
)
return GetAssociationRequestsResponse(
address=msg.reply_to,
content=association_requests_json,
)
def del_association_request_msg(
msg: DeleteAssociationRequestMessage,
node: DomainInterface,
verify_key: VerifyKey,
) -> SuccessResponseMessage:
# Check Key permissions
allowed = node.users.can_manage_infrastructure(
verify_key=verify_key
) and node.association_requests.contain(id=msg.association_id)
# If allowed
if allowed:
node.association_requests.delete(id=msg.association_id)
else: # Otherwise
raise AuthorizationError(
"You're not allowed to delete this Association Request information!"
)
return SuccessResponseMessage(
address=msg.reply_to,
resp_msg="Association request deleted!",
)
class AssociationRequestService(ImmediateNodeServiceWithReply):
msg_handler_map: Dict[type, Callable] = {
SendAssociationRequestMessage: send_association_request_msg,
ReceiveAssociationRequestMessage: recv_association_request_msg,
GetAssociationRequestMessage: get_association_request_msg,
GetAssociationRequestsMessage: get_all_association_request_msg,
DeleteAssociationRequestMessage: del_association_request_msg,
RespondAssociationRequestMessage: respond_association_request_msg,
}
@staticmethod
@service_auth(guests_welcome=True)
def process(
node: DomainInterface,
msg: Union[
SendAssociationRequestMessage,
ReceiveAssociationRequestMessage,
GetAssociationRequestMessage,
DeleteAssociationRequestMessage,
],
verify_key: VerifyKey,
) -> Union[
SuccessResponseMessage,
GetAssociationRequestsResponse,
GetAssociationRequestResponse,
]:
return AssociationRequestService.msg_handler_map[type(msg)](
msg=msg, node=node, verify_key=verify_key
)
@staticmethod
def message_handler_types() -> List[Type[ImmediateSyftMessageWithReply]]:
return [
SendAssociationRequestMessage,
ReceiveAssociationRequestMessage,
GetAssociationRequestMessage,
GetAssociationRequestsMessage,
DeleteAssociationRequestMessage,
RespondAssociationRequestMessage,
]
|
|
# Copyright 2021 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
from pyparsing import *
from collections import defaultdict, namedtuple
from dimod.vartypes import Vartype
from dimod.quadratic import QuadraticModel
MINIMIZE = 1
MAXIMIZE = -1
obj_senses = {"max": MAXIMIZE, "maximum": MAXIMIZE, "maximize": MAXIMIZE, "min": MINIMIZE, "minimum": MINIMIZE,
"minimize": MINIMIZE}
GEQ = -1
EQ = 1
LEQ = 1
constraint_senses = {"<": LEQ, "<=": LEQ, "=<": LEQ, "=": EQ, ">": GEQ, ">=": GEQ, "=>": GEQ}
constraint_symbols = {"<": "<=", "<=": "<=", "=<": "<=", "=": "==", ">": ">=", ">=": ">=", "=>": ">="}
infinity = 1E30
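# Note (added for clarity): constraint_senses collapses "=" and the "<="
# variants to +1 and the ">=" variants to -1 so that bound directions can be
# compared numerically when parsing bounds; constraint_symbols maps each LP
# operator to its canonical symbol.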
def make_lp_grammar() -> And:
"""Build the grammar of LP files"""
# name char ranges for objective, constraint or variable
all_name_chars = alphanums + "!\"#$%&()/,.;?@_'`{}|~"
first_char = remove_strings(all_name_chars, nums)
name = Word(first_char, all_name_chars, max=255)
# keywords in the LP file
keywords = ["inf", "infinity",
"max", "maximum", "maximize",
"min", "minimum", "minimize",
"s.t.", "st",
"bound", "bounds",
"bin", "binaries", "binary",
"gen", "general", "generals",
"end"]
py_keyword = MatchFirst(map(CaselessKeyword, keywords))
valid_name = ~py_keyword + name
valid_name = valid_name.setResultsName("name")
# second variable for quadratic terms
second_name = ~py_keyword + name
second_name = second_name.setResultsName("second_var_name")
squared_name = ~py_keyword + name
squared_name = squared_name.setResultsName('squared_name')
colon = Suppress(oneOf(": ::"))
plus_minus = oneOf("+ -")
inf = oneOf("inf infinity", caseless=True)
number = Word(nums + ".")
sense = oneOf("< <= =< = > >= =>").setResultsName("sense")
# section tags
obj_tag_max = oneOf("max maximum maximize", caseless=True)
obj_tag_min = oneOf("min minimum minimize", caseless=True)
obj_tag = (obj_tag_max | obj_tag_min).setResultsName("obj_sense")
constraints_tag = oneOf(["subj to", "subject to", "s.t.", "st"], caseless=True)
bounds_tag = oneOf("bound bounds", caseless=True)
bin_tag = oneOf("bin binaries binary", caseless=True)
gen_tag = oneOf("gen general generals", caseless=True)
end_tag = CaselessLiteral("end")
# coefficient on a variable (includes sign)
first_var_coef = Optional(plus_minus, "+") + Optional(number, "1")
first_var_coef.setParseAction(lambda tokens: eval("".join(tokens)))
coef = plus_minus + Optional(number, "1")
coef.setParseAction(lambda tokens: eval("".join(tokens)))
# variable (coefficient and name)
first_var = Group(first_var_coef.setResultsName("coef") + valid_name)
var = Group(coef.setResultsName("coef") + valid_name)
# linear expression
lin_expr = first_var + ZeroOrMore(var)
lin_expr = lin_expr.setResultsName("lin_expr")
# bilinear expression
quad_vars = Group(first_var_coef.setResultsName("coef") + valid_name + Literal('*').suppress() + second_name)
# squared expression
square_vars = Group(
first_var_coef.setResultsName("coef") + squared_name + Literal('^').suppress() + Literal('2').suppress())
quadratic_terms = quad_vars | square_vars
quad_expr = Optional(Literal('+').suppress()) + Literal('[').suppress() + quadratic_terms + \
ZeroOrMore(quadratic_terms) + Literal(']').suppress()
quad_expr = quad_expr.setResultsName("quad_expr")
# for the objective function the standard is having [ quad expression ] / 2
quad_expr_obj = Optional(Literal('+').suppress()) + Literal('[').suppress() + quadratic_terms + \
ZeroOrMore(quadratic_terms) + Literal(']').suppress() + Literal('/').suppress() + \
Literal('2').suppress()
quad_expr_obj = quad_expr_obj.setResultsName("quad_expr")
# objective
objective = obj_tag + Optional(valid_name + colon) + Optional(lin_expr) + Optional(quad_expr_obj)
objective = objective.setResultsName("objective")
# constraint rhs
rhs = Optional(plus_minus, "+") + number
rhs = rhs.setResultsName("rhs")
rhs.setParseAction(lambda tokens: eval("".join(tokens)))
# constraints (can be quadratic)
constraint_word = Group(Optional(valid_name + colon) + Optional(lin_expr) + Optional(quad_expr) + sense + rhs)
constraints = ZeroOrMore(constraint_word)
constraints = constraints.setResultsName("constraints")
# bounds
signed_inf = (plus_minus + inf).setParseAction(lambda tokens: (tokens[0] == "+") * infinity)
signed_number = (Optional(plus_minus, "+") + number).setParseAction(lambda tokens: eval("".join(tokens)))
number_or_inf = (signed_number | signed_inf).setResultsName("numberOrInf")
# splitting the bounds in left inequality and right inequality
left_ineq = number_or_inf + sense
right_ineq = sense + number_or_inf
bounded_var = Group(Optional(left_ineq).setResultsName("leftbound") + valid_name +
Optional(right_ineq).setResultsName("rightbound"))
free_var = Group(valid_name + Literal("free"))
bounds_word = free_var | bounded_var
bounds = bounds_tag + ZeroOrMore(bounds_word).setResultsName("bounds")
# generals (integer variables)
generals = gen_tag + ZeroOrMore(valid_name).setResultsName("generals")
# binaries (binary variables)
binaries = bin_tag + ZeroOrMore(valid_name).setResultsName("binaries")
var_info = ZeroOrMore(bounds | generals | binaries)
# full LP file grammar
grammar = Optional(objective) + constraints_tag + constraints + var_info + end_tag
# commenting
comment_style = Literal("\\") + restOfLine
grammar.ignore(comment_style)
return grammar
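# Illustrative only -- a minimal LP file accepted by this grammar looks like:
#   minimize
#    obj: 2 x + 3 y + [ 2 x * y ] / 2
#   subject to
#    c1: x + y >= 1
#   bounds
#    0 <= y <= 10
#   binary
#    x
#   general
#    y
#   end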
def remove_strings(string, strings_to_remove):
"""Replace an iterable of strings in removables
if removables is a string, each character is removed """
for r in strings_to_remove:
try:
string = string.replace(r, "")
except TypeError:
raise TypeError("Strings_to_remove contains a non-string element")
return string
def get_variables_from_parsed_lp(parse_output: ParseResults,
lower_bound_default: typing.Optional[int] = None,
upper_bound_default: typing.Optional[int] = None) -> QuadraticModel:
"""Return a quadratic model containing all the variables included in the CQM
Args:
parse_output: the parse results encoding the LP file
lower_bound_default: the lower bound of the integer variables, in case they are not specified
upper_bound_default: the upper bound of the integer variables in case they are not specified
Returns:
the quadratic model with all variables
"""
obj = QuadraticModel()
all_vars = set()
# default variable type for LP file
# should be continuous, even though
# they are not supported in CQMs
Var = namedtuple('Variable', ['vartype', 'lb', 'ub'])
variables_info = defaultdict(lambda: Var(vartype="c", lb=lower_bound_default, ub=upper_bound_default))
# scan the objective
for oe in parse_output.objective:
if isinstance(oe, str):
continue
else:
if len(oe) == 2:
if oe.name != "":
all_vars.add(oe.name[0])
else:
all_vars.add(oe.squared_name[0])
elif len(oe) == 3:
all_vars.update([oe.name[0], oe.second_var_name[0]])
# scan the constraints
for c in parse_output.constraints:
# scan linear terms
if c.lin_expr:
all_vars.update([le.name[0] for le in c.lin_expr])
# scan quadratic terms of constraints
if c.quad_expr:
for qe in c.quad_expr:
if len(qe) == 3:
all_vars.update([qe.name[0] for qe in c.quad_expr])
all_vars.update([qe.second_var_name[0] for qe in c.quad_expr])
elif len(qe) == 2:
all_vars.add(qe.squared_name[0])
# scan the bounds
for b in parse_output.bounds:
n = b.name[0]
# if b.free, default is fine
if b.leftbound:
if constraint_senses[b.sense] <= 0: # NUM >= var
variables_info[n] = variables_info[n]._replace(ub=b.leftbound[0])
if constraint_senses[b.sense] >= 0: # NUM <= var
variables_info[n] = variables_info[n]._replace(lb=b.leftbound[0])
if b.rightbound:
if constraint_senses[b.sense] <= 0: # var >= NUM
variables_info[n] = variables_info[n]._replace(lb=b.rightbound[1])
if constraint_senses[b.sense] >= 0: # var <= NUM
variables_info[n] = variables_info[n]._replace(ub=b.rightbound[1])
# check the binary variables:
for n in parse_output.binaries:
variables_info[n] = variables_info[n]._replace(vartype=Vartype.BINARY)
# check for integer variables
for n in parse_output.generals:
variables_info[n] = variables_info[n]._replace(vartype=Vartype.INTEGER)
for n, var_info in variables_info.items():
if var_info.vartype is Vartype.BINARY:
obj.add_variable(Vartype.BINARY, n)
elif var_info.vartype is Vartype.INTEGER:
lb = var_info.lb
ub = var_info.ub
if lb is not None:
obj.add_variable(Vartype.INTEGER, n, lower_bound=lb, upper_bound=ub)
else:
obj.add_variable(Vartype.INTEGER, n, upper_bound=ub)
else:
raise ValueError("Unexpected Vartype: {} for variable: {}".format(var_info.vartype, n))
return obj
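# Sketch of intended usage (assumed, not part of the original module):
#   grammar = make_lp_grammar()
#   parsed = grammar.parseString(lp_file_contents)
#   qm = get_variables_from_parsed_lp(parsed,
#                                     lower_bound_default=0,
#                                     upper_bound_default=10)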
|
|
import hashlib
from . import db, login_manager
from werkzeug.security import generate_password_hash, check_password_hash
from flask import current_app, url_for
from flask.ext.login import UserMixin, current_user, AnonymousUserMixin
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from datetime import datetime
from app.exceptions import ValidationError
from flask.ext.sqlalchemy import BaseQuery
from sqlalchemy_searchable import SearchQueryMixin
from sqlalchemy_searchable import make_searchable
from sqlalchemy_utils.types import TSVectorType
make_searchable()
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
"""
Association Table to resolve M:M Relationship between
Note and Tag
"""
note_tag = db.Table(
'note_tag',
db.Column(
'note_id',
db.Integer,
db.ForeignKey('notes.id', ondelete="CASCADE")),
db.Column(
'tag_id',
db.Integer,
db.ForeignKey('tags.id', ondelete="CASCADE")))
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(254), unique=True, index=True)
username = db.Column(db.String(64), unique=True, index=True)
password_hash = db.Column(db.String(256))
confirmed = db.Column(db.Boolean, default=False)
avatar_hash = db.Column(db.String(32))
created_date = db.Column(db.DateTime(), default=datetime.utcnow)
updated_date = db.Column(db.DateTime(), default=datetime.utcnow)
notes = db.relationship('Note', backref='author', lazy='dynamic')
notebooks = db.relationship('Notebook', backref='author', lazy='dynamic')
@staticmethod
def generate_fake(count=100):
from sqlalchemy.exc import IntegrityError
from random import seed
import forgery_py as f
seed()
for i in range(count):
u = User(
email=f.internet.email_address(),
username=f.internet.user_name(True),
password=f.lorem_ipsum.word(),
confirmed=True)
db.session.add(u)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
def __init__(self, **kwargs):
super(User, self).__init__(**kwargs)
if self.email is not None and self.avatar_hash is None:
self.avatar_hash = hashlib.md5(
self.email.encode('utf-8')).hexdigest()
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(
password,
method='pbkdf2:sha512')
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def generate_confirmation_token(self, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm': self.id})
def generate_reset_token(self, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'reset': self.id})
def reset_password(self, token, new_password):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except Exception:
return False
if data.get('reset') != self.id:
return False
self.password = new_password
db.session.add(self)
db.session.commit()
return True
def generate_email_change_token(self, new_email, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'change_email': self.id, 'new_email': new_email})
def change_email(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except Exception:
return False
if data.get('change_email') != self.id:
return False
new_email = data.get('new_email')
if new_email is None:
return False
if self.query.filter_by(email=new_email).first() is not None:
return False
self.email = new_email
self.avatar_hash = hashlib.md5(
self.email.encode('utf-8')).hexdigest()
db.session.add(self)
db.session.commit()
return True
def confirm(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except Exception:
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
db.session.add(self)
db.session.commit()
return True
def gravatar(self, size=100, default='identicon', rating='g'):
url = 'https://secure.gravatar.com/avatar'
hash = self.avatar_hash or hashlib.md5(
self.email.encode('utf-8')).hexdigest()
return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(
url=url, hash=hash, size=size, default=default, rating=rating)
def generate_auth_token(self, expiration):
s = Serializer(
current_app.config['SECRET_KEY'],
expires_in=expiration)
return s.dumps({'id': self.id})
@staticmethod
def verify_auth_token(token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except Exception:
return None
return User.query.get(data['id'])
def to_json(self):
json_user = {
'url': url_for('api.get_note', id=self.id, _external=True),
'username': self.username,
'created_date': self.created_date,
'notes': url_for('api.get_user_notes', id=self.id, _external=True),
'note_count': self.notes.count()
}
return json_user
def __repr__(self):
return '<User {0}>'.format(self.username)
class AnonymousUser(AnonymousUserMixin):
def can(self):
return False
def is_administrator(self):
return False
login_manager.anonymous_user = AnonymousUser
class NoteQuery(BaseQuery, SearchQueryMixin):
pass
class Note(db.Model):
query_class = NoteQuery
__tablename__ = 'notes'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(200))
body = db.Column(db.Text)
body_html = db.Column(db.Text)
created_date = db.Column(db.DateTime, index=True, default=datetime.utcnow)
updated_date = db.Column(db.DateTime, index=True, default=datetime.utcnow)
author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
notebook_id = db.Column(db.Integer, db.ForeignKey('notebooks.id'))
is_deleted = db.Column(db.Boolean, default=False)
is_favorite = db.Column(db.Boolean, default=False)
tags = db.relationship(
"Tag", secondary=note_tag,
backref="Note", passive_deletes=True)
# Full Text Search
search_vector = db.Column(TSVectorType('title', 'body'))
def to_json(self):
json_note = {
'url': url_for('api.get_note', id=self.id, _external=True),
'body': self.body,
'body_html': self.body_html,
'created_date': self.created_date,
'author': self.author_id,
}
return json_note
def get_notebook(self, id):
notebook = Notebook.query.filter_by(
id=id).first()
return notebook
@staticmethod
def from_json(json_post):
body = json_post.get('body')
if body is None or body == '':
raise ValidationError('note does not have a body')
return Note(body=body)
def _find_or_create_tag(self, tag):
q = Tag.query.filter_by(tag=tag)
t = q.first()
if not (t):
t = Tag(tag=tag.strip())
return t
def _get_tags(self):
return [x.tag for x in self.tags]
def _set_tags(self, value):
while self.tags:
del self.tags[0]
for tag in value:
self.tags.append(self._find_or_create_tag(tag))
# simple wrapper for tags relationship
str_tags = property(_get_tags,
_set_tags)
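# For example: note.str_tags = ["flask", "sqlalchemy"] replaces the note's tags
# with matching (new or existing) Tag rows, and note.str_tags returns the
# current list of tag strings.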
class Notebook(db.Model):
__tablename__ = 'notebooks'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(200))
is_deleted = db.Column(db.Boolean, default=False)
author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
notes = db.relationship('Note', backref='notebook')
def _show_notes(self):
notes = []
for note in self.notes:
if note.is_deleted is False:
notes.append(note)
return notes
class Tag(db.Model):
__tablename__ = 'tags'
id = db.Column(db.Integer, primary_key=True)
tag = db.Column(db.String(200))
notes = db.relationship("Note", secondary=note_tag, backref="Tag")
def _get_notes(self):
notes = []
for note in self.notes:
if note.author == current_user:
notes.append(note)
return notes
|
|
import sublime
import sublime_plugin
import re
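# Maps each ColdFusion script statement/tag to its attribute completions.
# Values are lists of (trigger, snippet) tuples in Sublime Text completion
# format; statements that take no attributes map to an empty list.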
attributes = {}
attributes["exit \"$1\";"] = []
attributes["pageencoding \"$1\";"] = []
attributes["include \"$1\";"] = []
attributes["import \"$1\";"] = []
attributes["throw \"$1\";"] = []
attributes["rethrow"] = []
attributes["location"] = []
attributes["abort"] = []
attributes["trace"] = []
attributes["break"] = []
attributes["continue"] = []
attributes["try"] = []
attributes["finally"] = []
attributes["component"] = [
("extends\t@extends", "extends=\"$1\"$0"),
("initmethod\t@initmethod", "initmethod=\"$1\"$0"),
("implements\t@implements", "implements=\"$1\"$0"),
("output\t@output", "output=\"$1\"$0"),
("output=\"true\"\toutput", "output=\"${1:true}\"$0"),
("output=\"false\"\toutput", "output=\"${1:false}\"$0"),
("displayname\t@displayname", "displayname=\"$1\"$0"),
("hint\t@hint", "hint=\"$1\"$0"),
("style\t@style", "style=\"$1\"$0"),
("style=\"rpc\"\tstyle", "style=\"${1:rpc}\"$0"),
("style=\"document\"\tstyle", "style=\"${1:document}\"$0"),
("style=\"wrapped\"\tstyle", "style=\"${1:wrapped}\"$0"),
("namespace\t@namespace", "namespace=\"$1\"$0"),
("serviceportname\t@serviceportname", "serviceportname=\"$1\"$0"),
("porttypename\t@porttypename", "porttypename=\"$1\"$0"),
("bindingname\t@bindingname", "bindingname=\"$1\"$0"),
("wsdlfile\t@wsdlfile", "wsdlfile=\"$1\"$0"),
("serviceaddress\t@serviceaddress", "serviceaddress=\"$1\"$0"),
("persistent\t@persistent", "persistent=\"$1\"$0"),
("persistent=\"true\"\tpersistent", "persistent=\"${1:true}\"$0"),
("persistent=\"false\"\tpersistent", "persistent=\"${1:false}\"$0"),
("entityName\t@entityName", "entityName=\"$1\"$0"),
("table\t@table", "table=\"$1\"$0"),
("schema\t@schema", "schema=\"$1\"$0"),
("catalog\t@catalog", "catalog=\"$1\"$0"),
("dynamicinsert\t@dynamicinsert", "dynamicinsert=\"$1\"$0"),
("dynamicinsert=\"true\"\tdynamicinsert", "dynamicinsert=\"${1:true}\"$0"),
("dynamicinsert=\"false\"\tdynamicinsert", "dynamicinsert=\"${1:false}\"$0"),
("dynamicupdate\t@dynamicupdate", "dynamicupdate=\"$1\"$0"),
("dynamicupdate=\"true\"\tdynamicupdate", "dynamicupdate=\"${1:true}\"$0"),
("dynamicupdate=\"false\"\tdynamicupdate", "dynamicupdate=\"${1:false}\"$0"),
("readonly\t@readonly", "readonly=\"$1\"$0"),
("readonly=\"true\"\treadonly", "readonly=\"${1:true}\"$0"),
("readonly=\"false\"\treadonly", "readonly=\"${1:false}\"$0"),
("selectbeforeupdate\t@selectbeforeupdate", "selectbeforeupdate=\"$1\"$0"),
("selectbeforeupdate=\"true\"\tselectbeforeupdate", "selectbeforeupdate=\"${1:true}\"$0"),
("selectbeforeupdate=\"false\"\tselectbeforeupdate", "selectbeforeupdate=\"${1:false}\"$0"),
("batchsize\t@batchsize", "batchsize=\"$1\"$0"),
("optimisticlock\t@optimisticlock", "optimisticlock=\"$1\"$0"),
("optimisticlock=\"none\"\toptimisticlock", "optimisticlock=\"${1:none}\"$0"),
("optimisticlock=\"dirty\"\toptimisticlock", "optimisticlock=\"${1:dirty}\"$0"),
("optimisticlock=\"all\"\toptimisticlock", "optimisticlock=\"${1:all}\"$0"),
("optimisticlock=\"version\"\toptimisticlock", "optimisticlock=\"${1:version}\"$0"),
("lazy\t@lazy", "lazy=\"$1\"$0"),
("lazy=\"true\"\tlazy", "lazy=\"${1:true}\"$0"),
("lazy=\"false\"\tlazy", "lazy=\"${1:false}\"$0"),
("rowid\t@rowid", "rowid=\"$1\"$0"),
("discriminatorColumn\t@discriminatorColumn", "discriminatorColumn=\"$1\"$0"),
("discriminatorValue\t@discriminatorValue", "discriminatorValue=\"$1\"$0"),
("joinColumn\t@joinColumn", "joinColumn=\"$1\"$0"),
("embedded\t@embedded", "embedded=\"$1\"$0"),
("embedded=\"true\"\tembedded", "embedded=\"${1:true}\"$0"),
("embedded=\"false\"\tembedded", "embedded=\"${1:false}\"$0"),
("cacheUse\t@cacheUse", "cacheUse=\"$1\"$0"),
("cacheUse=\"read-only\"\tcacheUse", "cacheUse=\"${1:read-only}\"$0"),
("cacheUse=\"nonstrict-read-write\"\tcacheUse", "cacheUse=\"${1:nonstrict-read-write}\"$0"),
("cacheUse=\"read-write\"\tcacheUse", "cacheUse=\"${1:read-write}\"$0"),
("cacheUse=\"transactional\"\tcacheUse", "cacheUse=\"${1:transactional}\"$0"),
("cacheName\t@cacheName", "cacheName=\"$1\"$0"),
("saveMapping\t@saveMapping", "saveMapping=\"$1\"$0"),
("saveMapping=\"true\"\tsaveMapping", "saveMapping=\"${1:true}\"$0"),
("saveMapping=\"false\"\tsaveMapping", "saveMapping=\"${1:false}\"$0"),
("accessors\t@accessors", "accessors=\"$1\"$0"),
("accessors=\"true\"\taccessors", "accessors=\"${1:true}\"$0"),
("accessors=\"false\"\taccessors", "accessors=\"${1:false}\"$0"),
("serializable\t@serializable", "serializable=\"$1\"$0"),
("serializable=\"true\"\tserializable", "serializable=\"${1:true}\"$0"),
("serializable=\"false\"\tserializable", "serializable=\"${1:false}\"$0"),
("alias\t@alias", "alias=\"$1\"$0"),
("datasource\t@datasource", "datasource=\"$1\"$0"),
("mappedSuperClass\t@mappedSuperClass", "mappedSuperClass=\"$1\"$0"),
("mappedSuperClass=\"true\"\tmappedSuperClass", "mappedSuperClass=\"${1:true}\"$0"),
("mappedSuperClass=\"false\"\tmappedSuperClass", "mappedSuperClass=\"${1:false}\"$0"),
("rest\t@rest", "rest=\"$1\"$0"),
("rest=\"true\"\trest", "rest=\"${1:true}\"$0"),
("rest=\"false\"\trest", "rest=\"${1:false}\"$0"),
("restPath\t@restPath", "restPath=\"$1\"$0"),
("httpMethod\t@httpMethod", "httpMethod=\"$1\"$0"),
("httpMethod=\"GET\"\thttpMethod", "httpMethod=\"${1:GET}\"$0"),
("httpMethod=\"DELETE\"\thttpMethod", "httpMethod=\"${1:DELETE}\"$0"),
("httpMethod=\"POST\"\thttpMethod", "httpMethod=\"${1:POST}\"$0"),
("httpMethod=\"PUT\"\thttpMethod", "httpMethod=\"${1:PUT}\"$0"),
("httpMethod=\"HEAD\"\thttpMethod", "httpMethod=\"${1:HEAD}\"$0"),
("httpMethod=\"OPTIONS\"\thttpMethod", "httpMethod=\"${1:OPTIONS}\"$0"),
("produces\t@produces", "produces=\"$1\"$0"),
("consumes\t@consumes", "consumes=\"$1\"$0"),
("indexable\t@indexable", "indexable=\"$1\"$0"),
("indexable=\"true\"\tindexable", "indexable=\"${1:true}\"$0"),
("indexable=\"false\"\tindexable", "indexable=\"${1:false}\"$0"),
("indexLanguage\t@indexLanguage", "indexLanguage=\"$1\"$0"),
("autoindex\t@autoindex", "autoindex=\"$1\"$0"),
("autoindex=\"true\"\tautoindex", "autoindex=\"${1:true}\"$0"),
("autoindex=\"false\"\tautoindex", "autoindex=\"${1:false}\"$0"),
("wsversion\t@wsversion", "wsversion=\"$1\"$0"),
("wsversion=\"1\"\twsversion", "wsversion=\"${1:1}\"$0"),
("wsversion=\"2\"\twsversion", "wsversion=\"${1:2}\"$0")
]
attributes["lock"] = [
("timeout\t@timeout", "timeout=\"$1\"$0"),
("scope\t@scope", "scope=\"$1\"$0"),
("scope=\"Application\"\tscope", "scope=\"${1:Application}\"$0"),
("scope=\"request\"\tscope", "scope=\"${1:request}\"$0"),
("scope=\"Server\"\tscope", "scope=\"${1:Server}\"$0"),
("scope=\"Session\"\tscope", "scope=\"${1:Session}\"$0"),
("name\t@name", "name=\"$1\"$0"),
("throwontimeout\t@throwontimeout", "throwontimeout=\"$1\"$0"),
("throwontimeout=\"true\"\tthrowontimeout", "throwontimeout=\"${1:true}\"$0"),
("throwontimeout=\"false\"\tthrowontimeout", "throwontimeout=\"${1:false}\"$0"),
("type\t@type", "type=\"$1\"$0"),
("type=\"readonly\"\ttype", "type=\"${1:readonly}\"$0"),
("type=\"exclusive\"\ttype", "type=\"${1:exclusive}\"$0")
]
attributes["schedule"] = [
("action\t@action", "action=\"$1\"$0"),
("action=\"delete\"\taction", "action=\"${1:delete}\"$0"),
("action=\"update\"\taction", "action=\"${1:update}\"$0"),
("action=\"run\"\taction", "action=\"${1:run}\"$0"),
("action=\"pause\"\taction", "action=\"${1:pause}\"$0"),
("action=\"resume\"\taction", "action=\"${1:resume}\"$0"),
("action=\"list\"\taction", "action=\"${1:list}\"$0"),
("action=\"pauseall\"\taction", "action=\"${1:pauseall}\"$0"),
("action=\"resumeall\"\taction", "action=\"${1:resumeall}\"$0"),
("task\t@task", "task=\"$1\"$0"),
("operation\t@operation", "operation=\"$1\"$0"),
("operation=\"HTTPRequest\"\toperation", "operation=\"${1:HTTPRequest}\"$0"),
("file\t@file", "file=\"$1\"$0"),
("path\t@path", "path=\"$1\"$0"),
("startdate\t@startdate", "startdate=\"$1\"$0"),
("starttime\t@starttime", "starttime=\"$1\"$0"),
("URL\t@URL", "URL=\"$1\"$0"),
("port\t@port", "port=\"$1\"$0"),
("publish\t@publish", "publish=\"$1\"$0"),
("publish=\"true\"\tpublish", "publish=\"${1:true}\"$0"),
("publish=\"false\"\tpublish", "publish=\"${1:false}\"$0"),
("endDate\t@endDate", "endDate=\"$1\"$0"),
("endTime\t@endTime", "endTime=\"$1\"$0"),
("interval\t@interval", "interval=\"$1\"$0"),
("interval=\"once\"\tinterval", "interval=\"${1:once}\"$0"),
("interval=\"daily\"\tinterval", "interval=\"${1:daily}\"$0"),
("interval=\"weekly\"\tinterval", "interval=\"${1:weekly}\"$0"),
("interval=\"monthly\"\tinterval", "interval=\"${1:monthly}\"$0"),
("requesttimeout\t@requesttimeout", "requesttimeout=\"$1\"$0"),
("username\t@username", "username=\"$1\"$0"),
("password\t@password", "password=\"$1\"$0"),
("proxyserver\t@proxyserver", "proxyserver=\"$1\"$0"),
("proxyport\t@proxyport", "proxyport=\"$1\"$0"),
("proxyuser\t@proxyuser", "proxyuser=\"$1\"$0"),
("proxypassword\t@proxypassword", "proxypassword=\"$1\"$0"),
("resolveurl\t@resolveurl", "resolveurl=\"$1\"$0"),
("resolveurl=\"true\"\tresolveurl", "resolveurl=\"${1:true}\"$0"),
("resolveurl=\"false\"\tresolveurl", "resolveurl=\"${1:false}\"$0"),
("group\t@group", "group=\"$1\"$0"),
("onException\t@onException", "onException=\"$1\"$0"),
("onException=\"REFIRE\"\tonException", "onException=\"${1:REFIRE}\"$0"),
("onException=\"PAUSE\"\tonException", "onException=\"${1:PAUSE}\"$0"),
("onException=\"INVOKEHANDLER\"\tonException", "onException=\"${1:INVOKEHANDLER}\"$0"),
("repeat\t@repeat", "repeat=\"$1\"$0"),
("onComplete\t@onComplete", "onComplete=\"$1\"$0"),
("cronTime\t@cronTime", "cronTime=\"$1\"$0"),
("priority\t@priority", "priority=\"$1\"$0"),
("eventHandler\t@eventHandler", "eventHandler=\"$1\"$0"),
("exclude\t@exclude", "exclude=\"$1\"$0"),
("cluster\t@cluster", "cluster=\"$1\"$0"),
("cluster=\"true\"\tcluster", "cluster=\"${1:true}\"$0"),
("cluster=\"false\"\tcluster", "cluster=\"${1:false}\"$0"),
("onMisfire\t@onMisfire", "onMisfire=\"$1\"$0"),
("onMisfire=\"true\"\tonMisfire", "onMisfire=\"${1:true}\"$0"),
("onMisfire=\"false\"\tonMisfire", "onMisfire=\"${1:false}\"$0"),
("retrycount\t@retrycount", "retrycount=\"$1\"$0"),
("mode\t@mode", "mode=\"$1\"$0"),
("mode=\"SERVER\"\tmode", "mode=\"${1:SERVER}\"$0"),
("mode=\"APPLICATION\"\tmode", "mode=\"${1:APPLICATION}\"$0"),
("result\t@result", "result=\"$1\"$0"),
("overwrite\t@overwrite", "overwrite=\"$1\"$0"),
("overwrite=\"true\"\toverwrite", "overwrite=\"${1:true}\"$0"),
("overwrite=\"false\"\toverwrite", "overwrite=\"${1:false}\"$0")
]
attributes["savecontent"] = [
("variable\t@variable", "variable=\"$1\"$0")
]
attributes["interface"] = [
("displayName\t@displayName", "displayName=\"$1\"$0"),
("extends\t@extends", "extends=\"$1\"$0"),
("hint\t@hint", "hint=\"$1\"$0")
]
attributes["thread"] = [
("action\t@action", "action=\"$1\"$0"),
("action=\"join\"\taction", "action=\"${1:join}\"$0"),
("action=\"run\"\taction", "action=\"${1:run}\"$0"),
("action=\"sleep\"\taction", "action=\"${1:sleep}\"$0"),
("action=\"terminate\"\taction", "action=\"${1:terminate}\"$0"),
("duration\t@duration", "duration=\"$1\"$0"),
("name\t@name", "name=\"$1\"$0"),
("priority\t@priority", "priority=\"$1\"$0"),
("priority=\"HIGH\"\tpriority", "priority=\"${1:HIGH}\"$0"),
("priority=\"LOW\"\tpriority", "priority=\"${1:LOW}\"$0"),
("priority=\"NORMAL\"\tpriority", "priority=\"${1:NORMAL}\"$0"),
("timeout\t@timeout", "timeout=\"$1\"$0")
]
attributes["property"] = [
("name\t@name", "name=\"$1\"$0"),
("type\t@type", "type=\"$1\"$0"),
("type=\"any\"\ttype", "type=\"${1:any}\"$0"),
("type=\"array\"\ttype", "type=\"${1:array}\"$0"),
("type=\"binary\"\ttype", "type=\"${1:binary}\"$0"),
("type=\"boolean\"\ttype", "type=\"${1:boolean}\"$0"),
("type=\"date\"\ttype", "type=\"${1:date}\"$0"),
("type=\"guid\"\ttype", "type=\"${1:guid}\"$0"),
("type=\"numeric\"\ttype", "type=\"${1:numeric}\"$0"),
("type=\"query\"\ttype", "type=\"${1:query}\"$0"),
("type=\"string\"\ttype", "type=\"${1:string}\"$0"),
("type=\"struct\"\ttype", "type=\"${1:struct}\"$0"),
("type=\"uuid\"\ttype", "type=\"${1:uuid}\"$0"),
("type=\"variablename\"\ttype", "type=\"${1:variablename}\"$0"),
("required\t@required", "required=\"$1\"$0"),
("required=\"true\"\trequired", "required=\"${1:true}\"$0"),
("required=\"false\"\trequired", "required=\"${1:false}\"$0"),
("default\t@default", "default=\"$1\"$0"),
("displayname\t@displayname", "displayname=\"$1\"$0"),
("hint\t@hint", "hint=\"$1\"$0"),
("fieldtype\t@fieldtype", "fieldtype=\"$1\"$0"),
("fieldtype=\"id\"\tfieldtype", "fieldtype=\"${1:id}\"$0"),
("fieldtype=\"column\"\tfieldtype", "fieldtype=\"${1:column}\"$0"),
("fieldtype=\"one-to-one\"\tfieldtype", "fieldtype=\"${1:one-to-one}\"$0"),
("fieldtype=\"one-to-many\"\tfieldtype", "fieldtype=\"${1:one-to-many}\"$0"),
("fieldtype=\"many-to-many\"\tfieldtype", "fieldtype=\"${1:many-to-many}\"$0"),
("fieldtype=\"many-to-one\"\tfieldtype", "fieldtype=\"${1:many-to-one}\"$0"),
("fieldtype=\"collection\"\tfieldtype", "fieldtype=\"${1:collection}\"$0"),
("fieldtype=\"timestamp\"\tfieldtype", "fieldtype=\"${1:timestamp}\"$0"),
("fieldtype=\"version\"\tfieldtype", "fieldtype=\"${1:version}\"$0"),
("ormType\t@ormType", "ormType=\"$1\"$0"),
("ormType=\"string\"\tormType", "ormType=\"${1:string}\"$0"),
("ormType=\"character\"\tormType", "ormType=\"${1:character}\"$0"),
("ormType=\"char\"\tormType", "ormType=\"${1:char}\"$0"),
("ormType=\"short\"\tormType", "ormType=\"${1:short}\"$0"),
("ormType=\"integer\"\tormType", "ormType=\"${1:integer}\"$0"),
("ormType=\"int\"\tormType", "ormType=\"${1:int}\"$0"),
("ormType=\"long\"\tormType", "ormType=\"${1:long}\"$0"),
("ormType=\"big_decimal\"\tormType", "ormType=\"${1:big_decimal}\"$0"),
("ormType=\"float\"\tormType", "ormType=\"${1:float}\"$0"),
("ormType=\"double\"\tormType", "ormType=\"${1:double}\"$0"),
("ormType=\"boolean\"\tormType", "ormType=\"${1:boolean}\"$0"),
("ormType=\"yes_no\"\tormType", "ormType=\"${1:yes_no}\"$0"),
("ormType=\"true_false\"\tormType", "ormType=\"${1:true_false}\"$0"),
("ormType=\"text\"\tormType", "ormType=\"${1:text}\"$0"),
("ormType=\"date\"\tormType", "ormType=\"${1:date}\"$0"),
("ormType=\"timestamp\"\tormType", "ormType=\"${1:timestamp}\"$0"),
("ormType=\"binary\"\tormType", "ormType=\"${1:binary}\"$0"),
("ormType=\"serializable\"\tormType", "ormType=\"${1:serializable}\"$0"),
("ormType=\"blob\"\tormType", "ormType=\"${1:blob}\"$0"),
("ormType=\"clob\"\tormType", "ormType=\"${1:clob}\"$0"),
("column\t@column", "column=\"$1\"$0"),
("generator\t@generator", "generator=\"$1\"$0"),
("generator=\"increment\"\tgenerator", "generator=\"${1:increment}\"$0"),
("generator=\"identity\"\tgenerator", "generator=\"${1:identity}\"$0"),
("generator=\"sequence\"\tgenerator", "generator=\"${1:sequence}\"$0"),
("generator=\"seqhilo\"\tgenerator", "generator=\"${1:seqhilo}\"$0"),
("generator=\"uuid\"\tgenerator", "generator=\"${1:uuid}\"$0"),
("generator=\"guid\"\tgenerator", "generator=\"${1:guid}\"$0"),
("generator=\"native\"\tgenerator", "generator=\"${1:native}\"$0"),
("generator=\"assigned\"\tgenerator", "generator=\"${1:assigned}\"$0"),
("generator=\"select\"\tgenerator", "generator=\"${1:select}\"$0"),
("generator=\"foreign\"\tgenerator", "generator=\"${1:foreign}\"$0"),
("generator=\"sequence-indentity\"\tgenerator", "generator=\"${1:sequence-indentity}\"$0"),
("sequence\t@sequence", "sequence=\"$1\"$0"),
("selectkey\t@selectkey", "selectkey=\"$1\"$0"),
("params\t@params", "params=\"$1\"$0"),
("length\t@length", "length=\"$1\"$0"),
("precision\t@precision", "precision=\"$1\"$0"),
("index\t@index", "index=\"$1\"$0"),
("setter\t@setter", "setter=\"$1\"$0"),
("setter=\"true\"\tsetter", "setter=\"${1:true}\"$0"),
("setter=\"false\"\tsetter", "setter=\"${1:false}\"$0"),
("getter\t@getter", "getter=\"$1\"$0"),
("getter=\"true\"\tgetter", "getter=\"${1:true}\"$0"),
("getter=\"false\"\tgetter", "getter=\"${1:false}\"$0"),
("source\t@source", "source=\"$1\"$0"),
("source=\"vm\"\tsource", "source=\"${1:vm}\"$0"),
("source=\"db\"\tsource", "source=\"${1:db}\"$0"),
("elementcolumn\t@elementcolumn", "elementcolumn=\"$1\"$0"),
("elementtype\t@elementtype", "elementtype=\"$1\"$0"),
("elementtype=\"string\"\telementtype", "elementtype=\"${1:string}\"$0"),
("elementtype=\"character\"\telementtype", "elementtype=\"${1:character}\"$0"),
("elementtype=\"char\"\telementtype", "elementtype=\"${1:char}\"$0"),
("elementtype=\"short\"\telementtype", "elementtype=\"${1:short}\"$0"),
("elementtype=\"integer\"\telementtype", "elementtype=\"${1:integer}\"$0"),
("elementtype=\"int\"\telementtype", "elementtype=\"${1:int}\"$0"),
("elementtype=\"long\"\telementtype", "elementtype=\"${1:long}\"$0"),
("elementtype=\"big_decimal\"\telementtype", "elementtype=\"${1:big_decimal}\"$0"),
("elementtype=\"float\"\telementtype", "elementtype=\"${1:float}\"$0"),
("elementtype=\"double\"\telementtype", "elementtype=\"${1:double}\"$0"),
("elementtype=\"boolean\"\telementtype", "elementtype=\"${1:boolean}\"$0"),
("elementtype=\"yes_no\"\telementtype", "elementtype=\"${1:yes_no}\"$0"),
("elementtype=\"true_false\"\telementtype", "elementtype=\"${1:true_false}\"$0"),
("elementtype=\"text\"\telementtype", "elementtype=\"${1:text}\"$0"),
("elementtype=\"date\"\telementtype", "elementtype=\"${1:date}\"$0"),
("elementtype=\"timestamp\"\telementtype", "elementtype=\"${1:timestamp}\"$0"),
("elementtype=\"binary\"\telementtype", "elementtype=\"${1:binary}\"$0"),
("elementtype=\"serializable\"\telementtype", "elementtype=\"${1:serializable}\"$0"),
("elementtype=\"blob\"\telementtype", "elementtype=\"${1:blob}\"$0"),
("elementtype=\"clob\"\telementtype", "elementtype=\"${1:clob}\"$0"),
("structkeytype\t@structkeytype", "structkeytype=\"$1\"$0"),
("structkeytype=\"string\"\tstructkeytype", "structkeytype=\"${1:string}\"$0"),
("structkeytype=\"character\"\tstructkeytype", "structkeytype=\"${1:character}\"$0"),
("structkeytype=\"char\"\tstructkeytype", "structkeytype=\"${1:char}\"$0"),
("structkeytype=\"short\"\tstructkeytype", "structkeytype=\"${1:short}\"$0"),
("structkeytype=\"integer\"\tstructkeytype", "structkeytype=\"${1:integer}\"$0"),
("structkeytype=\"int\"\tstructkeytype", "structkeytype=\"${1:int}\"$0"),
("structkeytype=\"long\"\tstructkeytype", "structkeytype=\"${1:long}\"$0"),
("structkeytype=\"big_decimal\"\tstructkeytype", "structkeytype=\"${1:big_decimal}\"$0"),
("structkeytype=\"float\"\tstructkeytype", "structkeytype=\"${1:float}\"$0"),
("structkeytype=\"double\"\tstructkeytype", "structkeytype=\"${1:double}\"$0"),
("structkeytype=\"boolean\"\tstructkeytype", "structkeytype=\"${1:boolean}\"$0"),
("structkeytype=\"yes_no\"\tstructkeytype", "structkeytype=\"${1:yes_no}\"$0"),
("structkeytype=\"true_false\"\tstructkeytype", "structkeytype=\"${1:true_false}\"$0"),
("structkeytype=\"text\"\tstructkeytype", "structkeytype=\"${1:text}\"$0"),
("structkeytype=\"date\"\tstructkeytype", "structkeytype=\"${1:date}\"$0"),
("structkeytype=\"timestamp\"\tstructkeytype", "structkeytype=\"${1:timestamp}\"$0"),
("structkeytype=\"binary\"\tstructkeytype", "structkeytype=\"${1:binary}\"$0"),
("structkeytype=\"serializable\"\tstructkeytype", "structkeytype=\"${1:serializable}\"$0"),
("structkeytype=\"blob\"\tstructkeytype", "structkeytype=\"${1:blob}\"$0"),
("structkeytype=\"clob\"\tstructkeytype", "structkeytype=\"${1:clob}\"$0"),
("structkeycolumn\t@structkeycolumn", "structkeycolumn=\"$1\"$0"),
("inversejoincolumn\t@inversejoincolumn", "inversejoincolumn=\"$1\"$0"),
("linkschema\t@linkschema", "linkschema=\"$1\"$0"),
("linkcatalog\t@linkcatalog", "linkcatalog=\"$1\"$0"),
("linktable\t@linktable", "linktable=\"$1\"$0"),
("missingRowIgnored\t@missingRowIgnored", "missingRowIgnored=\"$1\"$0"),
("missingRowIgnored=\"true\"\tmissingRowIgnored", "missingRowIgnored=\"${1:true}\"$0"),
("missingRowIgnored=\"false\"\tmissingRowIgnored", "missingRowIgnored=\"${1:false}\"$0"),
("inverse\t@inverse", "inverse=\"$1\"$0"),
("inverse=\"true\"\tinverse", "inverse=\"${1:true}\"$0"),
("inverse=\"false\"\tinverse", "inverse=\"${1:false}\"$0"),
("orderby\t@orderby", "orderby=\"$1\"$0"),
("fkcolumn\t@fkcolumn", "fkcolumn=\"$1\"$0"),
("fetch\t@fetch", "fetch=\"$1\"$0"),
("fetch=\"join\"\tfetch", "fetch=\"${1:join}\"$0"),
("fetch=\"select\"\tfetch", "fetch=\"${1:select}\"$0"),
("cascade\t@cascade", "cascade=\"$1\"$0"),
("cascade=\"all\"\tcascade", "cascade=\"${1:all}\"$0"),
("cascade=\"none\"\tcascade", "cascade=\"${1:none}\"$0"),
("cascade=\"save-update\"\tcascade", "cascade=\"${1:save-update}\"$0"),
("cascade=\"delete\"\tcascade", "cascade=\"${1:delete}\"$0"),
("cascade=\"all-delete-orphan\"\tcascade", "cascade=\"${1:all-delete-orphan}\"$0"),
("cascade=\"delete-orphan\"\tcascade", "cascade=\"${1:delete-orphan}\"$0"),
("cascade=\"create\"\tcascade", "cascade=\"${1:create}\"$0"),
("cascade=\"merge\"\tcascade", "cascade=\"${1:merge}\"$0"),
("cascade=\"lock\"\tcascade", "cascade=\"${1:lock}\"$0"),
("cascade=\"refresh\"\tcascade", "cascade=\"${1:refresh}\"$0"),
("cascade=\"evict\"\tcascade", "cascade=\"${1:evict}\"$0"),
("cascade=\"replicate\"\tcascade", "cascade=\"${1:replicate}\"$0"),
("constrained\t@constrained", "constrained=\"$1\"$0"),
("constrained=\"true\"\tconstrained", "constrained=\"${1:true}\"$0"),
("constrained=\"false\"\tconstrained", "constrained=\"${1:false}\"$0"),
("unique\t@unique", "unique=\"$1\"$0"),
("unique=\"true\"\tunique", "unique=\"${1:true}\"$0"),
("unique=\"false\"\tunique", "unique=\"${1:false}\"$0"),
("uniquekey\t@uniquekey", "uniquekey=\"$1\"$0"),
("notnull\t@notnull", "notnull=\"$1\"$0"),
("notnull=\"true\"\tnotnull", "notnull=\"${1:true}\"$0"),
("notnull=\"false\"\tnotnull", "notnull=\"${1:false}\"$0"),
("update\t@update", "update=\"$1\"$0"),
("update=\"true\"\tupdate", "update=\"${1:true}\"$0"),
("update=\"false\"\tupdate", "update=\"${1:false}\"$0"),
("insert\t@insert", "insert=\"$1\"$0"),
("insert=\"true\"\tinsert", "insert=\"${1:true}\"$0"),
("insert=\"false\"\tinsert", "insert=\"${1:false}\"$0"),
("generated\t@generated", "generated=\"$1\"$0"),
("generated=\"never\"\tgenerated", "generated=\"${1:never}\"$0"),
("generated=\"insert\"\tgenerated", "generated=\"${1:insert}\"$0"),
("generated=\"always\"\tgenerated", "generated=\"${1:always}\"$0"),
("formula\t@formula", "formula=\"$1\"$0"),
("lazy\t@lazy", "lazy=\"$1\"$0"),
("lazy=\"true\"\tlazy", "lazy=\"${1:true}\"$0"),
("lazy=\"false\"\tlazy", "lazy=\"${1:false}\"$0"),
("lazy=\"extra\"\tlazy", "lazy=\"${1:extra}\"$0"),
("optimisticLock\t@optimisticLock", "optimisticLock=\"$1\"$0"),
("optimisticLock=\"true\"\toptimisticLock", "optimisticLock=\"${1:true}\"$0"),
("optimisticLock=\"false\"\toptimisticLock", "optimisticLock=\"${1:false}\"$0"),
("scale\t@scale", "scale=\"$1\"$0"),
("mappedby\t@mappedby", "mappedby=\"$1\"$0"),
("cfc\t@cfc", "cfc=\"$1\"$0"),
("joinColumn\t@joinColumn", "joinColumn=\"$1\"$0"),
("validate\t@validate", "validate=\"$1\"$0"),
("validate=\"string\"\tvalidate", "validate=\"${1:string}\"$0"),
("validate=\"boolean\"\tvalidate", "validate=\"${1:boolean}\"$0"),
("validate=\"integer\"\tvalidate", "validate=\"${1:integer}\"$0"),
("validate=\"numeric\"\tvalidate", "validate=\"${1:numeric}\"$0"),
("validate=\"date\"\tvalidate", "validate=\"${1:date}\"$0"),
("validate=\"time\"\tvalidate", "validate=\"${1:time}\"$0"),
("validate=\"creditcard\"\tvalidate", "validate=\"${1:creditcard}\"$0"),
("validate=\"email\"\tvalidate", "validate=\"${1:email}\"$0"),
("validate=\"eurodate\"\tvalidate", "validate=\"${1:eurodate}\"$0"),
("validate=\"regex\"\tvalidate", "validate=\"${1:regex}\"$0"),
("validate=\"ssn\"\tvalidate", "validate=\"${1:ssn}\"$0"),
("validate=\"telephone\"\tvalidate", "validate=\"${1:telephone}\"$0"),
("validate=\"UUID\"\tvalidate", "validate=\"${1:UUID}\"$0"),
("validate=\"guid\"\tvalidate", "validate=\"${1:guid}\"$0"),
("validate=\"zipcode\"\tvalidate", "validate=\"${1:zipcode}\"$0"),
("validateParams\t@validateParams", "validateParams=\"$1\"$0"),
("cacheUse\t@cacheUse", "cacheUse=\"$1\"$0"),
("cacheUse=\"read-only\"\tcacheUse", "cacheUse=\"${1:read-only}\"$0"),
("cacheUse=\"nonstrict-read-write\"\tcacheUse", "cacheUse=\"${1:nonstrict-read-write}\"$0"),
("cacheUse=\"read-write\"\tcacheUse", "cacheUse=\"${1:read-write}\"$0"),
("cacheUse=\"transactional\"\tcacheUse", "cacheUse=\"${1:transactional}\"$0"),
("sqlType\t@sqlType", "sqlType=\"$1\"$0"),
("dbDefault\t@dbDefault", "dbDefault=\"$1\"$0"),
("where\t@where", "where=\"$1\"$0"),
("persistent\t@persistent", "persistent=\"$1\"$0"),
("persistent=\"true\"\tpersistent", "persistent=\"${1:true}\"$0"),
("persistent=\"false\"\tpersistent", "persistent=\"${1:false}\"$0"),
("unSavedValue\t@unSavedValue", "unSavedValue=\"$1\"$0"),
("serializable\t@serializable", "serializable=\"$1\"$0"),
("serializable=\"true\"\tserializable", "serializable=\"${1:true}\"$0"),
("serializable=\"false\"\tserializable", "serializable=\"${1:false}\"$0"),
("singularname\t@singularname", "singularname=\"$1\"$0"),
("remotingFetch\t@remotingFetch", "remotingFetch=\"$1\"$0"),
("remotingFetch=\"true\"\tremotingFetch", "remotingFetch=\"${1:true}\"$0"),
("remotingFetch=\"false\"\tremotingFetch", "remotingFetch=\"${1:false}\"$0"),
("table\t@table", "table=\"$1\"$0"),
("indexBoost\t@indexBoost", "indexBoost=\"$1\"$0"),
("indexTokenize\t@indexTokenize", "indexTokenize=\"$1\"$0"),
("indexTokenize=\"true\"\tindexTokenize", "indexTokenize=\"${1:true}\"$0"),
("indexTokenize=\"false\"\tindexTokenize", "indexTokenize=\"${1:false}\"$0"),
("indexStore\t@indexStore", "indexStore=\"$1\"$0"),
("indexStore=\"true\"\tindexStore", "indexStore=\"${1:true}\"$0"),
("indexStore=\"false\"\tindexStore", "indexStore=\"${1:false}\"$0"),
("indexStore=\"compressed\"\tindexStore", "indexStore=\"${1:compressed}\"$0"),
("indexFieldName\t@indexFieldName", "indexFieldName=\"$1\"$0"),
("indexable\t@indexable", "indexable=\"$1\"$0"),
("indexable=\"true\"\tindexable", "indexable=\"${1:true}\"$0"),
("indexable=\"false\"\tindexable", "indexable=\"${1:false}\"$0"),
("indexLanguage\t@indexLanguage", "indexLanguage=\"$1\"$0")
]
attributes["param"] = [
("name\t@name", "name=\"$1\"$0"),
("type\t@type", "type=\"$1\"$0"),
("type=\"any\"\ttype", "type=\"${1:any}\"$0"),
("type=\"array\"\ttype", "type=\"${1:array}\"$0"),
("type=\"binary\"\ttype", "type=\"${1:binary}\"$0"),
("type=\"boolean\"\ttype", "type=\"${1:boolean}\"$0"),
("type=\"creditcard\"\ttype", "type=\"${1:creditcard}\"$0"),
("type=\"date\"\ttype", "type=\"${1:date}\"$0"),
("type=\"time\"\ttype", "type=\"${1:time}\"$0"),
("type=\"email\"\ttype", "type=\"${1:email}\"$0"),
("type=\"eurodate\"\ttype", "type=\"${1:eurodate}\"$0"),
("type=\"float\"\ttype", "type=\"${1:float}\"$0"),
("type=\"numeric\"\ttype", "type=\"${1:numeric}\"$0"),
("type=\"guid\"\ttype", "type=\"${1:guid}\"$0"),
("type=\"integer\"\ttype", "type=\"${1:integer}\"$0"),
("type=\"query\"\ttype", "type=\"${1:query}\"$0"),
("type=\"range\"\ttype", "type=\"${1:range}\"$0"),
("type=\"regex\"\ttype", "type=\"${1:regex}\"$0"),
("type=\"regular_expression\"\ttype", "type=\"${1:regular_expression}\"$0"),
("type=\"ssn\"\ttype", "type=\"${1:ssn}\"$0"),
("type=\"social_security_number\"\ttype", "type=\"${1:social_security_number}\"$0"),
("type=\"string\"\ttype", "type=\"${1:string}\"$0"),
("type=\"struct\"\ttype", "type=\"${1:struct}\"$0"),
("type=\"telephone\"\ttype", "type=\"${1:telephone}\"$0"),
("type=\"url\"\ttype", "type=\"${1:url}\"$0"),
("type=\"uuid\"\ttype", "type=\"${1:uuid}\"$0"),
("type=\"usdate\"\ttype", "type=\"${1:usdate}\"$0"),
("type=\"variablename\"\ttype", "type=\"${1:variablename}\"$0"),
("type=\"xml\"\ttype", "type=\"${1:xml}\"$0"),
("type=\"zipcode\"\ttype", "type=\"${1:zipcode}\"$0"),
("default\t@default", "default=\"$1\"$0"),
("max\t@max", "max=\"$1\"$0"),
("min\t@min", "min=\"$1\"$0"),
("pattern\t@pattern", "pattern=\"$1\"$0"),
("maxlength\t@maxlength", "maxlength=\"$1\"$0")
]
attributes["http"] = [
("url\t@url", "url=\"$1\"$0"),
("port\t@port", "port=\"$1\"$0"),
("method\t@method", "method=\"$1\"$0"),
("method=\"get\"\tmethod", "method=\"${1:get}\"$0"),
("method=\"post\"\tmethod", "method=\"${1:post}\"$0"),
("method=\"put\"\tmethod", "method=\"${1:put}\"$0"),
("method=\"delete\"\tmethod", "method=\"${1:delete}\"$0"),
("method=\"head\"\tmethod", "method=\"${1:head}\"$0"),
("method=\"trace\"\tmethod", "method=\"${1:trace}\"$0"),
("method=\"options\"\tmethod", "method=\"${1:options}\"$0"),
("proxyserver\t@proxyserver", "proxyserver=\"$1\"$0"),
("proxyport\t@proxyport", "proxyport=\"$1\"$0"),
("proxyuser\t@proxyuser", "proxyuser=\"$1\"$0"),
("proxypassword\t@proxypassword", "proxypassword=\"$1\"$0"),
("username\t@username", "username=\"$1\"$0"),
("password\t@password", "password=\"$1\"$0"),
("useragent\t@useragent", "useragent=\"$1\"$0"),
("charset\t@charset", "charset=\"$1\"$0"),
("charset=\"utf-8\"\tcharset", "charset=\"${1:utf-8}\"$0"),
("charset=\"iso-8859-1\"\tcharset", "charset=\"${1:iso-8859-1}\"$0"),
("charset=\"windows-1252\"\tcharset", "charset=\"${1:windows-1252}\"$0"),
("charset=\"us-ascii\"\tcharset", "charset=\"${1:us-ascii}\"$0"),
("charset=\"shift_jis\"\tcharset", "charset=\"${1:shift_jis}\"$0"),
("charset=\"iso-2022-jp\"\tcharset", "charset=\"${1:iso-2022-jp}\"$0"),
("charset=\"euc-jp\"\tcharset", "charset=\"${1:euc-jp}\"$0"),
("charset=\"euc-kr\"\tcharset", "charset=\"${1:euc-kr}\"$0"),
("charset=\"big5\"\tcharset", "charset=\"${1:big5}\"$0"),
("charset=\"euc-cn\"\tcharset", "charset=\"${1:euc-cn}\"$0"),
("charset=\"utf-16\"\tcharset", "charset=\"${1:utf-16}\"$0"),
("resolveurl\t@resolveurl", "resolveurl=\"$1\"$0"),
("resolveurl=\"true\"\tresolveurl", "resolveurl=\"${1:true}\"$0"),
("resolveurl=\"false\"\tresolveurl", "resolveurl=\"${1:false}\"$0"),
("throwonerror\t@throwonerror", "throwonerror=\"$1\"$0"),
("throwonerror=\"true\"\tthrowonerror", "throwonerror=\"${1:true}\"$0"),
("throwonerror=\"false\"\tthrowonerror", "throwonerror=\"${1:false}\"$0"),
("redirect\t@redirect", "redirect=\"$1\"$0"),
("redirect=\"true\"\tredirect", "redirect=\"${1:true}\"$0"),
("redirect=\"false\"\tredirect", "redirect=\"${1:false}\"$0"),
("timeout\t@timeout", "timeout=\"$1\"$0"),
("getasbinary\t@getasbinary", "getasbinary=\"$1\"$0"),
("getasbinary=\"auto\"\tgetasbinary", "getasbinary=\"${1:auto}\"$0"),
("getasbinary=\"yes\"\tgetasbinary", "getasbinary=\"${1:yes}\"$0"),
("getasbinary=\"no\"\tgetasbinary", "getasbinary=\"${1:no}\"$0"),
("getasbinary=\"never\"\tgetasbinary", "getasbinary=\"${1:never}\"$0"),
("result\t@result", "result=\"$1\"$0"),
("delimiter\t@delimiter", "delimiter=\"$1\"$0"),
("delimiter=\",\"\tdelimiter", "delimiter=\"${1:,}\"$0"),
("delimiter=\";\"\tdelimiter", "delimiter=\"${1:;}\"$0"),
("delimiter=\"|\"\tdelimiter", "delimiter=\"${1:|}\"$0"),
("delimiter=\":\"\tdelimiter", "delimiter=\"${1::}\"$0"),
("name\t@name", "name=\"$1\"$0"),
("columns\t@columns", "columns=\"$1\"$0"),
("firstrowasheaders\t@firstrowasheaders", "firstrowasheaders=\"$1\"$0"),
("firstrowasheaders=\"true\"\tfirstrowasheaders", "firstrowasheaders=\"${1:true}\"$0"),
("firstrowasheaders=\"false\"\tfirstrowasheaders", "firstrowasheaders=\"${1:false}\"$0"),
("textqualifier\t@textqualifier", "textqualifier=\"$1\"$0"),
("textqualifier=\"\"\ttextqualifier", "textqualifier=\"${1:}\"$0"),
("textqualifier=\"'\"\ttextqualifier", "textqualifier=\"${1:'}\"$0"),
("file\t@file", "file=\"$1\"$0"),
("multipart\t@multipart", "multipart=\"$1\"$0"),
("multipart=\"true\"\tmultipart", "multipart=\"${1:true}\"$0"),
("multipart=\"false\"\tmultipart", "multipart=\"${1:false}\"$0"),
("clientcertpassword\t@clientcertpassword", "clientcertpassword=\"$1\"$0"),
("path\t@path", "path=\"$1\"$0"),
("clientcert\t@clientcert", "clientcert=\"$1\"$0"),
("compression\t@compression", "compression=\"$1\"$0"),
("multiparttype\t@multiparttype", "multiparttype=\"$1\"$0")
]
attributes["httpparam"] = [
("name\t@name", "name=\"$1\"$0"),
("value\t@value", "value=\"$1\"$0"),
("file\t@file", "file=\"$1\"$0"),
("type\t@type", "type=\"$1\"$0"),
("type=\"header\"\ttype", "type=\"${1:header}\"$0"),
("type=\"body\"\ttype", "type=\"${1:body}\"$0"),
("type=\"xml\"\ttype", "type=\"${1:xml}\"$0"),
("type=\"cgi\"\ttype", "type=\"${1:cgi}\"$0"),
("type=\"file\"\ttype", "type=\"${1:file}\"$0"),
("type=\"url\"\ttype", "type=\"${1:url}\"$0"),
("type=\"formfield\"\ttype", "type=\"${1:formfield}\"$0"),
("type=\"cookie\"\ttype", "type=\"${1:cookie}\"$0"),
("encoded\t@encoded", "encoded=\"$1\"$0"),
("encoded=\"true\"\tencoded", "encoded=\"${1:true}\"$0"),
("encoded=\"false\"\tencoded", "encoded=\"${1:false}\"$0"),
("mimetype\t@mimetype", "mimetype=\"$1\"$0"),
("mimetype=\"text/plain\"\tmimetype", "mimetype=\"${1:text/plain}\"$0"),
("mimetype=\"text/html\"\tmimetype", "mimetype=\"${1:text/html}\"$0")
]
SUBLIME_SETTINGS = sublime.load_settings('Preferences.sublime-settings')
class TagOperatorComplete(sublime_plugin.EventListener):
def on_query_completions(self, view, prefix, locations):
completions = []
if not view.match_selector(locations[0], "\
source.cfscript -meta -string -text -source.sql, \
source.cfscript.embedded.cfml -meta -string"):
return []
sel = view.sel()[0]
if view.substr(sel.begin() - 1) == ".":
return []
for s in attributes.keys():
completions.extend([(s.split(" ")[0] + "\tTagOp. (cfscript)",s)])
return sorted(completions)
class TagOperatorAttributeComplete(sublime_plugin.EventListener):
valid_scopes_operators = ["meta.operator"]
def on_modified(self, view):
if not SUBLIME_SETTINGS.get("auto_complete"):
return
sel = view.sel()[0].a
if any(s in view.scope_name(sel) for s in self.valid_scopes_operators):
if view.substr(sel - 1) == " ":
t = view.settings().get("auto_complete_delay")
sublime.set_timeout(lambda:
view.run_command("auto_complete", {
'disable_auto_insert': True,
'next_completion_if_showing': False,
'api_completions_only': True}), t)
def on_query_completions(self, view, prefix, locations):
# make sure we're in a tag operator attribute scope and not in an attribute string
if not any(s in view.scope_name(locations[0]) for s in self.valid_scopes_operators) or \
"string" in view.scope_name(locations[0]):
return []
opdata = view.substr(sublime.Region(0, locations[0])).split("\n")
opdata = opdata.pop().split(" ")
opdata = [a for a in opdata if a != ""]  # keep a real list so indexing below works on both ST2 (Py2) and ST3 (Py3)
if opdata and opdata[0] in attributes:
return attributes[opdata[0]]
return []
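# How the completion tuples above are used: the first element of each pair is the
# trigger shown in the completion popup ("name\thint"), the second is the snippet
# that gets inserted. For example, picking the "url\t@url" entry inside an `http`
# tag operator inserts url="" with the caret placed between the quotes ($1) and a
# final tab stop after the closing quote ($0).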
|
|
#!/Library/Frameworks/Python.framework/Versions/Current/bin/python
# This file isn't in use yet; we have to fix it up to do uploads later.
import os
import sys
import random
import optparse
import httplib
import getpass
import base64
def upload(file, project_name, user_name, password, summary, labels=None):
"""Upload a file to a Google Code project's file server.
Args:
file: The local path to the file.
project_name: The name of your project on Google Code.
user_name: Your Google account name.
password: The googlecode.com password for your account.
Note that this is NOT your global Google Account password!
summary: A small description for the file.
labels: an optional list of label strings with which to tag the file.
Returns: a tuple:
http_status: 201 if the upload succeeded, something else if an
error occurred.
http_reason: The human-readable string associated with http_status
file_url: If the upload succeeded, the URL of the file on Google
Code, None otherwise.
"""
# The login is the user part of user@gmail.com. If the login provided
# is in the full user@domain form, strip it down.
#Btanner: commenting this out because they now allow me@mydomain.com
# if '@' in user_name:
# user_name = user_name[:user_name.index('@')]
form_fields = [('summary', summary)]
if labels is not None:
form_fields.extend([('label', l.strip()) for l in labels])
content_type, body = encode_upload_request(form_fields, file)
upload_host = '%s.googlecode.com' % project_name
upload_uri = '/files'
auth_token = base64.b64encode('%s:%s'% (user_name, password))
headers = {
'Authorization': 'Basic %s' % auth_token,
'User-Agent': 'Googlecode.com uploader v0.9.4',
'Content-Type': content_type,
}
server = httplib.HTTPSConnection(upload_host)
server.request('POST', upload_uri, body, headers)
resp = server.getresponse()
server.close()
if resp.status == 201:
location = resp.getheader('Location', None)
else:
location = None
return resp.status, resp.reason, location
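# A minimal usage sketch for upload() (placeholder project name, credentials and
# file path; this helper is illustrative only and is never called):
def _example_upload():
    status, reason, url = upload('example.tar.gz', 'example-project',
                                 'someone@example.com', 'upload-password',
                                 'Example release archive',
                                 labels=['Type-Archive', 'OpSys-All'])
    # A 201 status means the upload succeeded and `url` points at the file.
    return status, reason, url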
def encode_upload_request(fields, file_path):
"""Encode the given fields and file into a multipart form body.
fields is a sequence of (name, value) pairs. file is the path of
the file to upload. The file will be uploaded to Google Code with
the same file name.
Returns: (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla'
CRLF = '\r\n'
body = []
# Add the metadata about the upload first
for key, value in fields:
body.extend(
['--' + BOUNDARY,
'Content-Disposition: form-data; name="%s"' % key,
'',
value,
])
# Now add the file itself
file_name = os.path.basename(file_path)
f = open(file_path, 'rb')  # read as binary so archives are not altered
file_content = f.read()
f.close()
body.extend(
['--' + BOUNDARY,
'Content-Disposition: form-data; name="filename"; filename="%s"'
% file_name,
# Send a generic binary content type; the upload server determines the real mime-type.
'Content-Type: application/octet-stream',
'',
file_content,
])
# Finalize the form body
body.extend(['--' + BOUNDARY + '--', ''])
return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(body)
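# For reference, the body assembled above is CRLF-joined and contains one part per
# (name, value) field followed by the file part:
#   --<BOUNDARY>
#   Content-Disposition: form-data; name="summary"
#
#   <summary text>
#   ...
#   --<BOUNDARY>
#   Content-Disposition: form-data; name="filename"; filename="<file name>"
#   Content-Type: application/octet-stream
#
#   <file contents>
#   --<BOUNDARY>--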
def releaseFile(projectName, baseRepoURL,releaseNumber,releaseType):
trunkDir="trunk/";
tagsDir="tags/versions/";
branchDir="branches/";
releaseDescription=releaseNumber+"-"+releaseType+"-"+projectName;
tagURL=baseRepoURL+tagsDir+releaseDescription;
branchURL=baseRepoURL+branchDir+releaseDescription;
trunkURL=baseRepoURL+trunkDir;
randFileSuffix=random.randint(0,5000);
tmpDirName="tmpExportDir_"+str(randFileSuffix);
# Delete the branch/tag first if they already exist, in case a previous release attempt failed while uploading.
rmExistingBranchCommand="svn rm "+branchURL+" -m 'removing dupe branch if exists'";
rmExistingTagCommand="svn rm "+tagURL+" -m 'removing dupe tag if exists'";
branchCommand="svn cp "+trunkURL+" "+branchURL+" -m 'Creating a release branch "+projectName+" "+releaseDescription+"'";
tagCommand="svn cp "+branchURL+" "+tagURL+" -m 'Creating a tag for "+projectName+" "+releaseDescription+"'";
mkDirCommand="mkdir "+tmpDirName;
exportCommand="svn export "+tagURL+" "+tmpDirName+"/"+projectName;
tarFileName=projectName+"_"+releaseType+"-"+releaseNumber+".tar";
gzipFileName=tarFileName+".gz";
archiveTarCommand="cd "+tmpDirName+";tar -cf "+tarFileName+" "+projectName+";cd ..";
archiveGZIPCommand="gzip "+tmpDirName+"/"+tarFileName;
gzipFile=tmpDirName+"/"+gzipFileName;
cleanUpCommand="rm -Rf "+tmpDirName;
Commands=[rmExistingBranchCommand, rmExistingTagCommand, branchCommand, tagCommand,mkDirCommand,exportCommand,archiveTarCommand,archiveGZIPCommand];
print("\n-------------------------------\Executing the following :\n");
for c in Commands:
status=os.system(c);
print "Status: "+str(status)+" : "+c;
#256 is what subversion gives if we try to delete something not there, not worth dying over
if(status and status !=256):
print("Something bad happened, aborting!");
sys.exit();
return gzipFile,cleanUpCommand;
def main():
projectName="rl-library";
baseRepoURL="https://rl-library.googlecode.com/svn/";
parser = optparse.OptionParser(usage='makeRelease.py -n NAME -t TYPE -u USERNAME -p PASSWORD');
parser.add_option('-n', '--name', dest='releaseNumber', help='Number of release, something like .1 or 5 for 1.0');
parser.add_option('-t', '--type', dest='releaseType', help='Type of release, e.g. ALPHA, BETA, or Official');
parser.add_option('-u', '--username', dest='username', help='Your GoogleCode username that is allowed to upload files');
parser.add_option('-p', '--password', dest='password', help='Your GoogleCode password');
options, args = parser.parse_args()
if not options.releaseNumber:
parser.error('No release number provided. Use the -n option.')
else:
options.releaseNumber=str(options.releaseNumber);
if not options.username:
parser.error('No Username provided. Use the -u option.')
if not options.password:
parser.error('No Password provided. Use the -p option.')
labels=["Type-Archive", "Featured", "OpSys-All"];
gzipFile, cleanUpCommand=releaseFile(projectName,baseRepoURL,options.releaseNumber,options.releaseType);
summary="This is the archived version of "+projectName+" release "+options.releaseType+" "+options.releaseNumber;
status, reason, url = upload(gzipFile, projectName,options.username, options.password,summary, labels);
if url:
print 'The file was uploaded successfully.'
print 'URL: %s' % url
status=os.system(cleanUpCommand);
print "Status: "+str(status)+" : "+cleanUpCommand;
if status == 0:
print("Temporary files all cleaned up... \n\n Code Released Successfully.");
else:
print("Problem cleaning up the file.")
else:
print 'An error occurred. Your file was not uploaded.'
print 'Google Code upload server said: %s (%s)' % (reason, status)
if __name__ == '__main__':
main()
|
|
from __future__ import division
import abc
import warnings
import numpy as np
import six
np.seterr('warn')
from scipy.special import gamma as scipy_gamma
from scipy.special import gammaln as scipy_gammaln
from astropy.modeling.fitting import _fitter_to_model_params
from astropy.modeling import models
from stingray import Lightcurve, Powerspectrum
# TODO: Add checks and balances to code
#from stingray.modeling.parametricmodels import logmin
__all__ = ["set_logprior", "Posterior", "PSDPosterior", "LogLikelihood",
"PSDLogLikelihood", "GaussianLogLikelihood", "LaplaceLogLikelihood",
"PoissonPosterior", "GaussianPosterior", "LaplacePosterior",
"PriorUndefinedError", "LikelihoodUndefinedError"]
logmin = -10000000000000000.0
class PriorUndefinedError(Exception):
pass
class LikelihoodUndefinedError(Exception):
pass
class IncorrectParameterError(Exception):
pass
def set_logprior(lpost, priors):
"""
This function constructs the `logprior` method required to successfully
use a `Posterior` object.
All instances of class `Posterior` and its subclasses are required to implement
a `logprior` method. However, priors are strongly problem-dependent and
therefore usually user-defined.
This function allows for setting the `logprior` method on any instance
of class `Posterior` efficiently by allowing the user to pass a
dictionary of priors and an instance of class `Posterior`.
Parameters
----------
lpost : Posterior object
An instance of class Posterior or any of its subclasses
priors : dictionary
A dictionary containing the prior definitions. Keys are parameter
names as defined by the model used in `lpost`. Items are functions
that take a parameter as input and return the log-prior probability
of that parameter.
Returns
-------
logprior : function
The function definition for the prior
Example
-------
Make a light curve and power spectrum
>>> photon_arrivals = np.sort(np.random.uniform(0,1000, size=10000))
>>> lc = Lightcurve.make_lightcurve(photon_arrivals, dt=1.0)
>>> ps = Powerspectrum(lc, norm="frac")
Define the model
>>> pl = models.PowerLaw1D()
>>> pl.x_0.fixed = True
Instantiate the posterior:
>>> lpost = PSDPosterior(ps.freq, ps.power, pl, m=ps.m)
Define the priors:
>>> p_alpha = lambda alpha: ((-1. <= alpha) & (alpha <= 5.))
>>> p_amplitude = lambda amplitude: ((-10 <= np.log(amplitude)) &
... ((np.log(amplitude) <= 10.0)))
>>> priors = {"alpha":p_alpha, "amplitude":p_amplitude}
Set the logprior method in the lpost object:
>>> lpost.logprior = set_logprior(lpost, priors)
"""
# get the number of free parameters in the model
#free_params = [p for p in lpost.model.param_names if not
# getattr(lpost.model, p).fixed]
free_params = [key for key, l in lpost.model.fixed.items() if not l]
# define the logprior
def logprior(t0, neg=False):
"""
The logarithm of the prior distribution for the
model defined in self.model.
Parameters:
------------
t0: {list | numpy.ndarray}
The list with parameters for the model
Returns:
--------
logp: float
The logarithm of the prior distribution for the model and
parameters given.
"""
if len(t0) != len(free_params):
raise IncorrectParameterError("The number of parameters passed into "
"the prior does not match the number "
"of parameters in the model.")
logp = 0.0 # initialize log-prior
ii = 0 # counter for the variable parameter
# loop through all parameter names, but only compute
# prior for those that are not fixed
# Note: need to do it this way to preserve order of parameters
# correctly!
for pname in lpost.model.param_names:
if not lpost.model.fixed[pname]:
with warnings.catch_warnings(record=True) as out:
logp += np.log(priors[pname](t0[ii]))
if len(out) > 0:
if isinstance(out[0].message, RuntimeWarning):
logp = np.nan
ii += 1
if not np.isfinite(logp):
logp = logmin
if neg:
return -logp
else:
return logp
return logprior
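# Once attached (lpost.logprior = set_logprior(lpost, priors)), the returned
# function is evaluated on a vector of the *free* parameters, in the order they
# appear in model.param_names; parameter values outside the prior support yield
# `logmin` rather than -inf. Illustrative call (assumed values): lpost.logprior([2.0, 1.0]).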
@six.add_metaclass(abc.ABCMeta)
class LogLikelihood(object):
def __init__(self, x, y, model, **kwargs):
"""
x : iterable
x-coordinate of the data. Could be multi-dimensional.
y : iterable
y-coordinate of the data. Could be multi-dimensional.
model : probably astropy.modeling.FittableModel instance
Your model
kwargs :
keyword arguments specific to the individual sub-classes. For
details, see the respective docstrings for each subclass
"""
self.x = x
self.y = y
self.model = model
@abc.abstractmethod
def evaluate(self, parameters, neg=False):
"""
This is where you define your log-likelihood. Do this!
"""
pass
def __call__(self, parameters, neg=False):
return self.evaluate(parameters, neg)
class GaussianLogLikelihood(LogLikelihood):
def __init__(self, x, y, yerr, model):
"""
A Gaussian likelihood.
Parameters
----------
x : iterable
x-coordinate of the data
y : iterable
y-coordinate of the data
yerr: iterable
the uncertainty on the data, as standard deviation
model: an Astropy Model instance
The model to use in the likelihood.
"""
self.x = x
self.y = y
self.yerr = yerr
self.model = model
self.npar = 0
for pname in self.model.param_names:
if not self.model.fixed[pname]:
self.npar += 1
def evaluate(self, pars, neg=False):
if np.size(pars) != self.npar:
raise IncorrectParameterError("Input parameters must" +
" match model parameters!")
_fitter_to_model_params(self.model, pars)
mean_model = self.model(self.x)
loglike = np.sum(-0.5*np.log(2.*np.pi) - np.log(self.yerr) -
(self.y-mean_model)**2/(2.*self.yerr**2))
if not np.isfinite(loglike):
loglike = logmin
if neg:
return -loglike
else:
return loglike
class PoissonLogLikelihood(LogLikelihood):
def __init__(self, x, y, model):
"""
A Poisson likelihood.
Parameters
----------
x : iterable
x-coordinate of the data
y : iterable
y-coordinate of the data
model: an Astropy Model instance
The model to use in the likelihood.
"""
self.x = x
self.y = y
self.model = model
self.npar = 0
for pname in self.model.param_names:
if not self.model.fixed[pname]:
self.npar += 1
def evaluate(self, pars, neg=False):
if np.size(pars) != self.npar:
raise IncorrectParameterError("Input parameters must" +
" match model parameters!")
_fitter_to_model_params(self.model, pars)
mean_model = self.model(self.x)
loglike = np.sum(-mean_model + self.y*np.log(mean_model) \
- scipy_gammaln(self.y + 1.))
if not np.isfinite(loglike):
loglike = logmin
if neg:
return -loglike
else:
return loglike
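# Note on PoissonLogLikelihood.evaluate: scipy_gammaln(y + 1) equals log(y!), so
# the sum above is the exact Poisson log-probability
#   sum_i [ -mu_i + y_i * log(mu_i) - log(y_i!) ]
# with mu_i the model prediction ("mean_model") in bin i.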
class PSDLogLikelihood(LogLikelihood):
def __init__(self, freq, power, model, m=1):
"""
A likelihood for periodogram (power spectral density) data.
Parameters
----------
freq: iterable
Array with frequencies
power: iterable
Array with (averaged/singular) powers corresponding to the
frequencies in `freq`
model: an Astropy Model instance
The model to use in the likelihood.
m : int
The number of averaged periodograms or frequency bins, i.e. 1/2 of the degrees of freedom
"""
LogLikelihood.__init__(self, freq, power, model)
self.m = m
self.npar = 0
for pname in self.model.param_names:
if not self.model.fixed[pname] and not self.model.tied[pname]:
self.npar += 1
def evaluate(self, pars, neg=False):
if np.size(pars) != self.npar:
raise IncorrectParameterError("Input parameters must" +
" match model parameters!")
_fitter_to_model_params(self.model, pars)
mean_model = self.model(self.x)
with warnings.catch_warnings(record=True) as out:
if self.m == 1:
loglike = -np.sum(np.log(mean_model)) - \
np.sum(self.y/mean_model)
else:
loglike = -2.0*self.m*(np.sum(np.log(mean_model)) +
np.sum(self.y/mean_model) +
np.sum((2.0 / (2. * self.m) - 1.0) *
np.log(self.y)))
if not np.isfinite(loglike):
loglike = logmin
if neg:
return -loglike
else:
return loglike
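# Note on PSDLogLikelihood.evaluate: for m == 1 the periodogram powers are
# exponentially distributed about the model, and the first branch is the Whittle
# log-likelihood, -sum(log S_j) - sum(P_j / S_j). For m > 1 (averaged powers) the
# second branch follows the statistics of a chi-squared distribution with 2m
# degrees of freedom, which is why the number of averaged segments/bins `m`
# changes the likelihood function.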
class LaplaceLogLikelihood(LogLikelihood):
def __init__(self, x, y, yerr, model):
"""
A Laplace likelihood.
Parameters
----------
x : iterable
Array with independent variable
y : iterable
Array with dependent variable
model : an Astropy Model instance
The model to use in the likelihood.
yerr : iterable
Array with the uncertainties on `y`, in standard deviation
"""
LogLikelihood.__init__(self, x, y, model)
self.yerr = yerr
self.npar = 0
for pname in self.model.param_names:
if not self.model.fixed[pname] and not self.model.tied[pname]:
self.npar += 1
def evaluate(self, pars, neg=False):
if np.size(pars) != self.npar:
raise IncorrectParameterError("Input parameters must" +
" match model parameters!")
_fitter_to_model_params(self.model, pars)
mean_model = self.model(self.x)
with warnings.catch_warnings(record=True) as out:
loglike = np.sum(-np.log(2.*self.yerr) - \
(np.abs(self.y - mean_model)/self.yerr))
if not np.isfinite(loglike):
loglike = logmin
if neg:
return -loglike
else:
return loglike
class Posterior(object):
def __init__(self, x, y, model, **kwargs):
"""
Define a posterior object.
The posterior describes the Bayesian probability distribution of
a set of parameters $\theta$ given some observed data $D$ and
some prior assumptions $I$.
It is defined as
$p(\theta | D, I) = p(D | \theta, I) p(\theta | I) / p(D | I)$
where $p(D | \theta, I)$ describes the likelihood, i.e. the
sampling distribution of the data and the (parametric) model, and
$p(\theta | I)$ describes the prior distribution, i.e. our information
about the parameters $\theta$ before we gathered the data.
The marginal likelihood $p(D| I)$ describes the probability of
observing the data given the model assumptions, integrated over the
space of all parameters.
Parameters
----------
x : iterable
The abscissa or independent variable of the data. This could
in principle be a multi-dimensional array.
y : iterable
The ordinate or dependent variable of the data.
model: astropy.modeling.models class instance
The parametric model supposed to represent the data. For details
see the astropy.modeling documentation
kwargs :
keyword arguments related to the subclases of `Posterior`. For
details, see the documentation of the individual subclasses
References
----------
* Sivia, D. S., and J. Skilling. "Data Analysis:
A Bayesian Tutorial." Oxford University Press, 2006.
* Gelman, Andrew, et al. Bayesian data analysis. Vol. 2. Boca Raton,
FL, USA: Chapman & Hall/CRC, 2014.
* von Toussaint, Udo. "Bayesian inference in physics."
Reviews of Modern Physics 83.3 (2011): 943.
* Hogg, David W. "Data Analysis Recipes: Probability Calculus for Inference."
arXiv:1205.4446 (2012)
"""
self.x = x
self.y = y
self.model = model
self.npar = 0
for pname in self.model.param_names:
if not self.model.fixed[pname]:
self.npar += 1
def logposterior(self, t0, neg=False):
if not hasattr(self, "logprior"):
raise PriorUndefinedError("There is no prior implemented. " +
"Cannot calculate posterior!")
if not hasattr(self, "loglikelihood"):
raise LikelihoodUndefinedError("There is no likelihood implemented. " +
"Cannot calculate posterior!")
lpost = self.loglikelihood(t0) + self.logprior(t0)
if neg is True:
return -lpost
else:
return lpost
def __call__(self, t0, neg=False):
return self.logposterior(t0, neg=neg)
class PSDPosterior(Posterior):
def __init__(self, freq, power, model, priors=None, m=1):
"""
Posterior distribution for power spectra.
Uses an exponential distribution for the errors in the likelihood,
or a $\chi^2$ distribution with $2M$ degrees of freedom, where $M$ is
the number of frequency bins or power spectra averaged in each bin.
Parameters
----------
ps: {Powerspectrum | AveragedPowerspectrum} instance
the Powerspectrum object containing the data
model: instance of any subclass of parameterclass.ParametricModel
The model for the power spectrum. Note that in order to define
the posterior properly, the ParametricModel subclass must be
instantiated with the hyperpars parameter set, or there won't
be a prior to be calculated! If this object is only used
for a maximum likelihood-style analysis, no prior is required.
priors : dict of form {"parameter name": function}, optional
A dictionary with the definitions for the prior probabilities.
For each parameter in `model`, there must be a prior defined with
a key of the exact same name as stored in `model.param_names`.
The item for each key is a function definition defining the prior
(e.g. a lambda function or the `pdf` method of a `scipy.stats` distribution).
If `priors = None`, then no prior is set. This means priors need
to be added by hand using the `set_logprior` function defined in
this module. Note that it is impossible to call the posterior object
itself or the `self.logposterior` method without defining a prior.
m: int, default 1
The number of averaged periodograms or frequency bins in ps.
Useful for binned/averaged periodograms, since the value of
m will change the likelihood function!
Attributes
----------
ps: {Powerspectrum | AveragedPowerspectrum} instance
the Powerspectrum object containing the data
x: numpy.ndarray
The independent variable (list of frequencies) stored in ps.freq
y: numpy.ndarray
The dependent variable (list of powers) stored in ps.power
model: instance of any subclass of parameterclass.ParametricModel
The model for the power spectrum. Note that in order to define
the posterior properly, the ParametricModel subclass must be
instantiated with the hyperpars parameter set, or there won't
be a prior to be calculated! If this object is only used
for a maximum likelihood-style analysis, no prior is required.
"""
self.loglikelihood = PSDLogLikelihood(freq, power,
model, m=m)
self.m = m
Posterior.__init__(self, freq, power, model)
if not priors is None:
self.logprior = set_logprior(self, priors)
class PoissonPosterior(Posterior):
def __init__(self, x, y, model, priors=None):
"""
Posterior for Poisson lightcurve data. Primary intended use is for
modelling X-ray light curves, but alternative uses are conceivable.
TODO: Include astropy.modeling models
Parameters
----------
x : numpy.ndarray
The independent variable (e.g. time stamps of a light curve)
y : numpy.ndarray
The dependent variable (e.g. counts per bin of a light curve)
model: instance of any subclass of parameterclass.ParametricModel
The model for the power spectrum. Note that in order to define
the posterior properly, the ParametricModel subclass must be
instantiated with the hyperpars parameter set, or there won't
be a prior to be calculated! If this object is only used
for a maximum likelihood-style analysis, no prior is required.
priors : dict of form {"parameter name": function}, optional
A dictionary with the definitions for the prior probabilities.
For each parameter in `model`, there must be a prior defined with
a key of the exact same name as stored in `model.param_names`.
The item for each key is a function definition defining the prior
(e.g. a lambda function or the `pdf` method of a `scipy.stats` distribution).
If `priors = None`, then no prior is set. This means priors need
to be added by hand using the `set_logprior` function defined in
this module. Note that it is impossible to call the posterior object
itself or the `self.logposterior` method without defining a prior.
Attributes
----------
x: numpy.ndarray
The independent variable (list of frequencies) stored in ps.freq
y: numpy.ndarray
The dependent variable (list of powers) stored in ps.power
model: instance of any subclass of parameterclass.ParametricModel
The model for the power spectrum. Note that in order to define
the posterior properly, the ParametricModel subclass must be
instantiated with the hyperpars parameter set, or there won't
be a prior to be calculated! If this object is only used
for a maximum likelihood-style analysis, no prior is required.
"""
self.x = x
self.y = y
self.loglikelihood = PoissonLogLikelihood(self.x, self.y, model)
Posterior.__init__(self, self.x, self.y, model)
if not priors is None:
self.logprior = set_logprior(self, priors)
class GaussianPosterior(Posterior):
def __init__(self, x, y, yerr, model, priors=None):
"""
A general class for two-dimensional data following a Gaussian
sampling distribution.
Parameters
----------
x: numpy.ndarray
independent variable
y: numpy.ndarray
dependent variable
yerr: numpy.ndarray
measurement uncertainties for y
model: instance of any subclass of parameterclass.ParametricModel
The model for the power spectrum. Note that in order to define
the posterior properly, the ParametricModel subclass must be
instantiated with the hyperpars parameter set, or there won't
be a prior to be calculated! If this object is only used
for a maximum likelihood-style analysis, no prior is required.
"""
self.loglikelihood = GaussianLogLikelihood(x, y, yerr, model)
Posterior.__init__(self, x, y, model)
self.yerr = yerr
if not priors is None:
self.logprior = set_logprior(self, priors)
class LaplacePosterior(Posterior):
def __init__(self, x, y, yerr, model, priors=None):
"""
A general class for two-dimensional data following a Laplace
sampling distribution.
Parameters
----------
x: numpy.ndarray
independent variable
y: numpy.ndarray
dependent variable
yerr: numpy.ndarray
measurement uncertainties for y, in standard deviation
model: instance of any subclass of parameterclass.ParametricModel
The model for the power spectrum. Note that in order to define
the posterior properly, the ParametricModel subclass must be
instantiated with the hyperpars parameter set, or there won't
be a prior to be calculated! If this object is only used
for a maximum likelihood-style analysis, no prior is required.
"""
self.loglikelihood = LaplaceLogLikelihood(x, y, yerr, model)
Posterior.__init__(self, x, y, model)
self.yerr = yerr
if not priors is None:
self.logprior = set_logprior(self, priors)
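# A minimal end-to-end sketch mirroring the example in set_logprior's docstring
# (illustrative values only; this helper is never called by the module):
def _example_psd_fit():
    photon_arrivals = np.sort(np.random.uniform(0, 1000, size=10000))
    lc = Lightcurve.make_lightcurve(photon_arrivals, dt=1.0)
    ps = Powerspectrum(lc, norm="frac")
    pl = models.PowerLaw1D()
    pl.x_0.fixed = True
    lpost = PSDPosterior(ps.freq, ps.power, pl, m=ps.m)
    priors = {"alpha": lambda alpha: (-1. <= alpha) & (alpha <= 5.),
              "amplitude": lambda amplitude: ((-10 <= np.log(amplitude)) &
                                              (np.log(amplitude) <= 10.0))}
    lpost.logprior = set_logprior(lpost, priors)
    # Evaluate the negative log-posterior for a trial parameter vector
    # [amplitude, alpha] (the free parameters of PowerLaw1D with x_0 fixed).
    return lpost([1.0, 2.0], neg=True)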
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
#
################################################################################
import copy
import functools
import numpy as np
from federatedml.feature.instance import Instance
from federatedml.feature.sparse_vector import SparseVector
from federatedml.model_base import ModelBase
from federatedml.protobuf.generated.data_io_meta_pb2 import DataIOMeta
from federatedml.protobuf.generated.data_io_meta_pb2 import ImputerMeta
from federatedml.protobuf.generated.data_io_meta_pb2 import OutlierMeta
from federatedml.protobuf.generated.data_io_param_pb2 import DataIOParam
from federatedml.protobuf.generated.data_io_param_pb2 import ImputerParam
from federatedml.protobuf.generated.data_io_param_pb2 import OutlierParam
from federatedml.statistic import data_overview
from federatedml.util import abnormal_detection
from federatedml.util import consts
from federatedml.util import LOGGER
from federatedml.util.io_check import assert_io_num_rows_equal
# =============================================================================
# DenseFeatureReader
# =============================================================================
class DenseFeatureReader(object):
def __init__(self, data_io_param):
self.delimitor = data_io_param.delimitor
self.data_type = data_io_param.data_type
self.exclusive_data_type = data_io_param.exclusive_data_type
self.missing_fill = data_io_param.missing_fill
self.default_value = data_io_param.default_value
self.missing_fill_method = data_io_param.missing_fill_method
self.missing_impute = data_io_param.missing_impute
self.outlier_replace = data_io_param.outlier_replace
self.outlier_replace_method = data_io_param.outlier_replace_method
self.outlier_impute = data_io_param.outlier_impute
self.outlier_replace_value = data_io_param.outlier_replace_value
self.with_label = data_io_param.with_label
self.label_name = data_io_param.label_name if self.with_label else None
self.label_type = data_io_param.label_type if self.with_label else None
self.output_format = data_io_param.output_format
self.missing_impute_rate = None
self.outlier_replace_rate = None
self.label_idx = None
self.header = None
self.sid_name = None
self.exclusive_data_type_fid_map = {}
def generate_header(self, input_data, mode="fit"):
header = input_data.schema["header"]
sid_name = input_data.schema["sid"]
LOGGER.debug("header is {}".format(header))
LOGGER.debug("sid_name is {}".format(sid_name))
if not header and not sid_name:
raise ValueError("dense input-format should have header schema")
header_gen = None
if self.with_label:
if mode == "fit":
if not header:
raise ValueError("dense input-format for fit stage should not be None if with_label is true")
self.label_idx = header.split(self.delimitor, -1).index(self.label_name)
header_gen = header.split(self.delimitor, -1)[: self.label_idx] + \
header.split(self.delimitor, -1)[self.label_idx + 1:] or None
elif header:
header_list = header.split(self.delimitor, -1)
if self.label_name in header_list:
self.label_idx = header_list.index(self.label_name)
header_gen = header.split(self.delimitor, -1)[: self.label_idx] + \
header.split(self.delimitor, -1)[self.label_idx + 1:] or None
else:
self.label_idx = None
header_gen = header.split(self.delimitor, -1)
elif header:
header_gen = header.split(self.delimitor, -1)
self.header = header_gen
self.sid_name = sid_name
if header_gen:
for i in range(len(header_gen)):
col_name = header_gen[i]
if self.exclusive_data_type is not None and col_name in self.exclusive_data_type:
self.exclusive_data_type_fid_map[i] = self.exclusive_data_type[col_name]
def get_schema(self):
schema = make_schema(self.header, self.sid_name, self.label_name)
return schema
def read_data(self, input_data, mode="fit"):
LOGGER.info("start to read dense data and change data to instance")
abnormal_detection.empty_table_detection(input_data)
input_data_labels = None
fit_header = None
if mode == "transform":
fit_header = self.header
self.generate_header(input_data, mode=mode)
if self.label_idx is not None:
data_shape = data_overview.get_data_shape(input_data)
if not data_shape or self.label_idx >= data_shape:
raise ValueError("input data's value is empty, it does not contain a label")
input_data_features = input_data.mapValues(
lambda value: [] if data_shape == 1 else value.split(self.delimitor, -1)[:self.label_idx] + value.split(
self.delimitor, -1)[self.label_idx + 1:])
input_data_labels = input_data.mapValues(lambda value: value.split(self.delimitor, -1)[self.label_idx])
else:
input_data_features = input_data.mapValues(
lambda value: [] if not self.header else value.split(self.delimitor, -1))
if mode == "fit":
data_instance = self.fit(input_data, input_data_features, input_data_labels)
else:
data_instance = self.transform(input_data_features, input_data_labels)
# data_instance = ModelBase.align_data_header(data_instance, fit_header)
data_instance = data_overview.header_alignment(data_instance, fit_header)
return data_instance
def fit(self, input_data, input_data_features, input_data_labels):
schema = self.get_schema()
set_schema(input_data_features, schema)
input_data_features = self.fill_missing_value(input_data_features, "fit")
input_data_features = self.replace_outlier_value(input_data_features, "fit")
data_instance = self.gen_data_instance(input_data_features, input_data_labels)
set_schema(data_instance, schema)
return data_instance
@assert_io_num_rows_equal
def transform(self, input_data_features, input_data_labels):
schema = make_schema(self.header, self.sid_name, self.label_name)
set_schema(input_data_features, schema)
input_data_features = self.fill_missing_value(input_data_features, "transform")
input_data_features = self.replace_outlier_value(input_data_features, "transform")
data_instance = self.gen_data_instance(input_data_features, input_data_labels)
set_schema(data_instance, schema)
return data_instance
def fill_missing_value(self, input_data_features, mode="fit"):
if self.missing_fill:
from federatedml.feature.imputer import Imputer
imputer_processor = Imputer(self.missing_impute)
if mode == "fit":
input_data_features, self.default_value = imputer_processor.fit(input_data_features,
replace_method=self.missing_fill_method,
replace_value=self.default_value)
if self.missing_impute is None:
self.missing_impute = imputer_processor.get_missing_value_list()
else:
input_data_features = imputer_processor.transform(input_data_features,
transform_value=self.default_value)
if self.missing_impute is None:
self.missing_impute = imputer_processor.get_missing_value_list()
self.missing_impute_rate = imputer_processor.get_impute_rate(mode)
return input_data_features
def replace_outlier_value(self, input_data_features, mode="fit"):
if self.outlier_replace:
from federatedml.feature.imputer import Imputer
imputer_processor = Imputer(self.outlier_impute)
if mode == "fit":
input_data_features, self.outlier_replace_value = \
imputer_processor.fit(input_data_features,
replace_method=self.outlier_replace_method,
replace_value=self.outlier_replace_value)
if self.outlier_impute is None:
self.outlier_impute = imputer_processor.get_missing_value_list()
else:
input_data_features = imputer_processor.transform(input_data_features,
transform_value=self.outlier_replace_value)
self.outlier_replace_rate = imputer_processor.get_impute_rate(mode)
return input_data_features
def gen_data_instance(self, input_data_features, input_data_labels):
if self.label_idx is not None:
data_instance = input_data_features.join(input_data_labels,
lambda features, label:
self.to_instance(features, label))
else:
data_instance = input_data_features.mapValues(lambda features: self.to_instance(features))
return data_instance
def to_instance(self, features, label=None):
if self.header is None and len(features) != 0:
raise ValueError("features shape {} not equal to header shape 0".format(len(features)))
elif self.header is not None and len(self.header) != len(features):
raise ValueError("features shape {} not equal to header shape {}".format(len(features), len(self.header)))
if self.label_idx is not None:
if self.label_type == 'int':
label = int(label)
elif self.label_type in ["float", "float64"]:
label = float(label)
format_features = DenseFeatureReader.gen_output_format(features, self.data_type, self.exclusive_data_type_fid_map,
self.output_format,
missing_impute=self.missing_impute)
else:
format_features = DenseFeatureReader.gen_output_format(features, self.data_type, self.exclusive_data_type_fid_map,
self.output_format,
missing_impute=self.missing_impute)
return Instance(inst_id=None,
features=format_features,
label=label)
@staticmethod
def gen_output_format(features, data_type='float', exclusive_data_type_fid_map=None,
output_format='dense', missing_impute=None):
if output_format not in ["dense", "sparse"]:
raise ValueError("output format {} is not define".format(output_format))
if output_format == "dense":
format_features = copy.deepcopy(features)
if data_type in ["int", "int64", "long", "float", "float64", "double"]:
for i in range(len(features)):
if (missing_impute is not None and features[i] in missing_impute) or \
(missing_impute is None and features[i] in ['', 'NULL', 'null', "NA"]):
format_features[i] = np.nan
if exclusive_data_type_fid_map:
for fid in range(len(features)):
if fid in exclusive_data_type_fid_map:
dtype = exclusive_data_type_fid_map[fid]
else:
dtype = data_type
format_features[fid] = getattr(np, dtype)(features[fid])
return np.asarray(format_features, dtype=object)
else:
return np.asarray(format_features, dtype=data_type)
indices = []
data = []
column_shape = len(features)
non_zero = 0
for i in range(column_shape):
if (missing_impute is not None and features[i] in missing_impute) or \
(missing_impute is None and features[i] in ['', 'NULL', 'null', "NA"]):
indices.append(i)
data.append(np.nan)
non_zero += 1
elif data_type in ['float', 'float64', "double"]:
if np.fabs(float(features[i])) < consts.FLOAT_ZERO:
continue
indices.append(i)
data.append(float(features[i]))
non_zero += 1
elif data_type in ['int', "int64", "long"]:
if int(features[i]) == 0:
continue
indices.append(i)
data.append(int(features[i]))
else:
indices.append(i)
data.append(features[i])
return SparseVector(indices, data, column_shape)
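# Illustrative example for gen_output_format (assumed inputs): with
# data_type="float" and output_format="dense", the row ["1.0", "", "2.5"]
# becomes array([1.0, nan, 2.5]); with output_format="sparse" the same row
# becomes SparseVector(indices=[0, 1, 2], data=[1.0, nan, 2.5], shape=3),
# while exact zeros are simply omitted from the sparse representation.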
def get_summary(self):
if not self.missing_fill and not self.outlier_replace:
return {}
summary_buf = {}
if self.missing_fill:
missing_summary = dict()
missing_summary["missing_value"] = list(self.missing_impute)
missing_summary["missing_impute_value"] = dict(zip(self.header, self.default_value))
missing_summary["missing_impute_rate"] = dict(zip(self.header, self.missing_impute_rate))
summary_buf["missing_fill_info"] = missing_summary
if self.outlier_replace:
outlier_replace_summary = dict()
outlier_replace_summary["outlier_value"] = list(self.outlier_impute)
outlier_replace_summary["outlier_replace_value"] = dict(zip(self.header, self.outlier_replace_value))
outlier_replace_summary["outlier_replace_rate"] = dict(zip(self.header, self.outlier_replace_rate))
summary_buf["outlier_replace_rate"] = outlier_replace_summary
return summary_buf
def save_model(self):
dataio_meta, dataio_param = save_data_io_model(input_format="dense",
delimitor=self.delimitor,
data_type=self.data_type,
exclusive_data_type=self.exclusive_data_type,
with_label=self.with_label,
label_type=self.label_type,
output_format=self.output_format,
header=self.header,
sid_name=self.sid_name,
label_name=self.label_name,
model_name="DenseFeatureReader")
missing_imputer_meta, missing_imputer_param = save_missing_imputer_model(self.missing_fill,
self.missing_fill_method,
self.missing_impute,
self.default_value,
self.missing_impute_rate,
self.header,
"Imputer")
dataio_meta.imputer_meta.CopyFrom(missing_imputer_meta)
dataio_param.imputer_param.CopyFrom(missing_imputer_param)
outlier_meta, outlier_param = save_outlier_model(self.outlier_replace,
self.outlier_replace_method,
self.outlier_impute,
self.outlier_replace_value,
self.outlier_replace_rate,
self.header,
"Outlier")
dataio_meta.outlier_meta.CopyFrom(outlier_meta)
dataio_param.outlier_param.CopyFrom(outlier_param)
return {"DataIOMeta": dataio_meta,
"DataIOParam": dataio_param
}
def load_model(self, model_meta, model_param):
self.delimitor, self.data_type, self.exclusive_data_type, _1, _2, self.with_label, \
self.label_type, self.output_format, self.header, self.sid_name, self.label_name = \
load_data_io_model("DenseFeatureReader", model_meta, model_param)
self.missing_fill, self.missing_fill_method, \
self.missing_impute, self.default_value = load_missing_imputer_model(self.header,
"Imputer",
model_meta.imputer_meta,
model_param.imputer_param)
self.outlier_replace, self.outlier_replace_method, \
self.outlier_impute, self.outlier_replace_value = load_outlier_model(self.header,
"Outlier",
model_meta.outlier_meta,
model_param.outlier_param)
# =============================================================================
# SparseFeatureReader: mainly for libsvm input format
# =============================================================================
class SparseFeatureReader(object):
def __init__(self, data_io_param):
self.delimitor = data_io_param.delimitor
self.data_type = data_io_param.data_type
self.label_type = data_io_param.label_type
self.output_format = data_io_param.output_format
self.header = None
self.sid_name = "sid"
self.label_name = data_io_param.label_name
def get_max_feature_index(self, line, delimitor=' '):
if line.strip() == '':
raise ValueError("find an empty line, please check!!!")
cols = line.split(delimitor, -1)
if len(cols) <= 1:
return -1
return max([int(fid_value.split(":", -1)[0]) for fid_value in cols[1:]])
def generate_header(self, max_feature):
self.header = [str(i) for i in range(max_feature + 1)]
def read_data(self, input_data, mode="fit"):
LOGGER.info("start to read sparse data and change data to instance")
abnormal_detection.empty_table_detection(input_data)
if not data_overview.get_data_shape(input_data):
raise ValueError("input data's value is empty, it does not contain a label")
if mode == "fit":
data_instance = self.fit(input_data)
else:
data_instance = self.transform(input_data)
schema = make_schema(self.header, self.sid_name, self.label_name)
set_schema(data_instance, schema)
return data_instance
def fit(self, input_data):
get_max_fid = functools.partial(self.get_max_feature_index, delimitor=self.delimitor)
max_feature = input_data.mapValues(get_max_fid).reduce(lambda max_fid1, max_fid2: max(max_fid1, max_fid2))
if max_feature == -1:
raise ValueError("no feature value in input data, please check!")
self.generate_header(max_feature)
data_instance = self.gen_data_instance(input_data, max_feature)
return data_instance
def transform(self, input_data):
max_feature = len(self.header)
data_instance = self.gen_data_instance(input_data, max_feature)
return data_instance
def gen_data_instance(self, input_data, max_feature):
params = [self.delimitor, self.data_type,
self.label_type,
self.output_format, max_feature]
to_instance_with_param = functools.partial(self.to_instance, params)
data_instance = input_data.mapValues(to_instance_with_param)
return data_instance
@staticmethod
def to_instance(param_list, value):
delimitor = param_list[0]
data_type = param_list[1]
label_type = param_list[2]
output_format = param_list[3]
max_fid = param_list[4]
if output_format not in ["dense", "sparse"]:
raise ValueError("output format {} is not define".format(output_format))
cols = value.split(delimitor, -1)
label = cols[0]
if label_type == 'int':
label = int(label)
elif label_type in ["float", "float64"]:
label = float(label)
fid_value = []
for i in range(1, len(cols)):
fid, val = cols[i].split(":", -1)
fid = int(fid)
if data_type in ["float", "float64"]:
val = float(val)
elif data_type in ["int", "int64"]:
val = int(val)
fid_value.append((fid, val))
if output_format == "dense":
features = [0 for i in range(max_fid + 1)]
for fid, val in fid_value:
features[fid] = val
features = np.asarray(features, dtype=data_type)
else:
indices = []
data = []
for fid, val in fid_value:
indices.append(fid)
data.append(val)
features = SparseVector(indices, data, max_fid + 1)
return Instance(inst_id=None,
features=features,
label=label)
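    # Illustrative sketch (not part of the original module): for a libsvm-style
    # line "1 0:1.5 3:2.0" with delimitor=' ', data_type='float',
    # label_type='int', output_format='dense' and max_fid=4, to_instance yields
    # an Instance with label=1 and features=[1.5, 0.0, 0.0, 2.0, 0.0]; with
    # output_format='sparse' the features become SparseVector([0, 3], [1.5, 2.0], 5).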
def save_model(self):
dataio_meta, dataio_param = save_data_io_model(input_format="sparse",
delimitor=self.delimitor,
data_type=self.data_type,
label_type=self.label_type,
output_format=self.output_format,
header=self.header,
sid_name=self.sid_name,
label_name=self.label_name,
model_name="SparseFeatureReader")
missing_imputer_meta, missing_imputer_param = save_missing_imputer_model(missing_fill=False,
model_name="Imputer")
dataio_meta.imputer_meta.CopyFrom(missing_imputer_meta)
dataio_param.imputer_param.CopyFrom(missing_imputer_param)
outlier_meta, outlier_param = save_outlier_model(outlier_replace=False,
model_name="Outlier")
dataio_meta.outlier_meta.CopyFrom(outlier_meta)
dataio_param.outlier_param.CopyFrom(outlier_param)
return {"DataIOMeta": dataio_meta,
"DataIOParam": dataio_param
}
def load_model(self, model_meta, model_param):
self.delimitor, self.data_type, _0, _1, _2, _3, \
self.label_type, self.output_format, self.header, self.sid_name, self.label_name = load_data_io_model(
"SparseFeatureReader",
model_meta,
model_param)
# =============================================================================
# SparseTagReader: mainly for tag data
# =============================================================================
class SparseTagReader(object):
def __init__(self, data_io_param):
self.delimitor = data_io_param.delimitor
self.data_type = data_io_param.data_type
self.tag_with_value = data_io_param.tag_with_value
self.tag_value_delimitor = data_io_param.tag_value_delimitor
self.with_label = data_io_param.with_label
self.label_type = data_io_param.label_type if self.with_label else None
self.output_format = data_io_param.output_format
self.header = None
self.sid_name = "sid"
self.label_name = data_io_param.label_name if self.with_label else None
self.missing_fill = data_io_param.missing_fill
self.missing_fill_method = data_io_param.missing_fill_method
self.default_value = data_io_param.default_value
self.missing_impute_rate = None
self.missing_impute = None
@staticmethod
def agg_tag(kvs, delimitor=' ', with_label=True, tag_with_value=False, tag_value_delimitor=":"):
tags_set = set()
for key, value in kvs:
if with_label:
cols = value.split(delimitor, -1)[1:]
else:
cols = value.split(delimitor, -1)[0:]
if tag_with_value is False:
tags = cols
else:
tags = [fea_value.split(tag_value_delimitor, -1)[0] for fea_value in cols]
tags_set |= set(tags)
return tags_set
def generate_header(self, tags):
self.header = tags
def read_data(self, input_data, mode="fit"):
LOGGER.info("start to read sparse data and change data to instance")
abnormal_detection.empty_table_detection(input_data)
if mode == "fit":
data_instance = self.fit(input_data)
if self.with_label:
self.label_name = "label"
else:
data_instance = self.transform(input_data)
schema = make_schema(self.header, self.sid_name, self.label_name)
set_schema(data_instance, schema)
return data_instance
@staticmethod
def change_tag_to_str(value, tags_dict=None, delimitor=",", with_label=False, tag_value_delimitor=":"):
vals = value.split(delimitor, -1)
ret = [''] * len(tags_dict)
if with_label:
vals = vals[1:]
for i in range(len(vals)):
tag, value = vals[i].split(tag_value_delimitor, -1)
idx = tags_dict.get(tag, None)
if idx is not None:
ret[idx] = value
return ret
@staticmethod
def change_str_to_tag(value, tags_dict=None, delimitor=",", tag_value_delimitor=":"):
ret = [None] * len(tags_dict)
tags = sorted(list(tags_dict.keys()))
for i in range(len(value)):
tag, val = tags[i], value[i]
ret[i] = tag_value_delimitor.join([tag, val])
return delimitor.join(ret)
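    # Illustrative sketch (not part of the original module), assuming
    # tags_dict={'a': 0, 'b': 1, 'c': 2}, delimitor=' ',
    # tag_value_delimitor=':' and no label column:
    #   change_tag_to_str("b:2 c:7", ...)       -> ['', '2', '7']
    #   change_str_to_tag(['', '2', '7'], ...)  -> "a: b:2 c:7"
    # i.e. tags are laid out in sorted order so the Imputer can treat the
    # missing 'a' slot as an empty value before converting back to tag form.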
def fill_missing_value(self, input_data, tags_dict, mode="fit"):
str_trans_method = functools.partial(self.change_tag_to_str,
tags_dict=tags_dict,
delimitor=self.delimitor,
with_label=self.with_label,
tag_value_delimitor=self.tag_value_delimitor)
input_data = input_data.mapValues(str_trans_method)
schema = make_schema(self.header, self.sid_name, self.label_name)
set_schema(input_data, schema)
from federatedml.feature.imputer import Imputer
imputer_processor = Imputer()
if mode == "fit":
data, self.default_value = imputer_processor.fit(input_data,
replace_method=self.missing_fill_method,
replace_value=self.default_value)
LOGGER.debug("self.default_value is {}".format(self.default_value))
else:
data = imputer_processor.transform(input_data,
transform_value=self.default_value)
if self.missing_impute is None:
self.missing_impute = imputer_processor.get_missing_value_list()
LOGGER.debug("self.missing_impute is {}".format(self.missing_impute))
self.missing_impute_rate = imputer_processor.get_impute_rate(mode)
str_trans_tag_method = functools.partial(self.change_str_to_tag,
tags_dict=tags_dict,
delimitor=self.delimitor,
tag_value_delimitor=self.tag_value_delimitor)
data = data.mapValues(str_trans_tag_method)
return data
def fit(self, input_data):
tag_aggregator = functools.partial(SparseTagReader.agg_tag,
delimitor=self.delimitor,
with_label=self.with_label,
tag_with_value=self.tag_with_value,
tag_value_delimitor=self.tag_value_delimitor)
tags_set_list = list(input_data.applyPartitions(tag_aggregator).collect())
tags_set = set()
for _, _tags_set in tags_set_list:
tags_set |= _tags_set
tags = list(tags_set)
tags = sorted(tags)
tags_dict = dict(zip(tags, range(len(tags))))
self.generate_header(tags)
if self.tag_with_value and self.missing_fill:
input_data = self.fill_missing_value(input_data, tags_dict, mode="fit")
data_instance = self.gen_data_instance(input_data, tags_dict)
return data_instance
def transform(self, input_data):
tags_dict = dict(zip(self.header, range(len(self.header))))
if self.tag_with_value and self.missing_fill:
input_data = self.fill_missing_value(input_data, tags_dict, mode="transform")
data_instance = self.gen_data_instance(input_data, tags_dict)
return data_instance
def gen_data_instance(self, input_data, tags_dict):
params = [self.delimitor,
self.data_type,
self.tag_with_value,
self.tag_value_delimitor,
self.with_label,
self.label_type,
self.output_format,
tags_dict]
to_instance_with_param = functools.partial(self.to_instance, params)
data_instance = input_data.mapValues(to_instance_with_param)
return data_instance
def get_summary(self):
if not self.missing_fill:
return {}
missing_summary = dict()
missing_summary["missing_value"] = list(self.missing_impute)
missing_summary["missing_impute_value"] = dict(zip(self.header, self.default_value))
missing_summary["missing_impute_rate"] = dict(zip(self.header, self.missing_impute_rate))
summary_buf = {"missing_fill_info": missing_summary}
return summary_buf
@staticmethod
def to_instance(param_list, value):
delimitor = param_list[0]
data_type = param_list[1]
tag_with_value = param_list[2]
tag_value_delimitor = param_list[3]
with_label = param_list[4]
label_type = param_list[5]
output_format = param_list[6]
tags_dict = param_list[7]
if output_format not in ["dense", "sparse"]:
raise ValueError("output format {} is not define".format(output_format))
cols = value.split(delimitor, -1)
start_pos = 0
label = None
if with_label:
start_pos = 1
label = cols[0]
if label_type == 'int':
label = int(label)
elif label_type in ["float", "float64"]:
label = float(label)
if output_format == "dense":
features = [0 for i in range(len(tags_dict))]
for fea in cols[start_pos:]:
if tag_with_value:
_tag, _val = fea.split(tag_value_delimitor, -1)
if _tag in tags_dict:
features[tags_dict.get(_tag)] = _val
else:
if fea in tags_dict:
features[tags_dict.get(fea)] = 1
features = np.asarray(features, dtype=data_type)
else:
indices = []
data = []
for fea in cols[start_pos:]:
if tag_with_value:
_tag, _val = fea.split(tag_value_delimitor, -1)
else:
_tag = fea
_val = 1
if _tag not in tags_dict:
continue
indices.append(tags_dict.get(_tag))
if data_type in ["float", "float64"]:
_val = float(_val)
elif data_type in ["int", "int64", "long"]:
_val = int(_val)
elif data_type == "str":
_val = str(_val)
data.append(_val)
features = SparseVector(indices, data, len(tags_dict))
return Instance(inst_id=None,
features=features,
label=label)
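    # Illustrative sketch (not part of the original module), assuming
    # tags_dict={'x': 0, 'y': 1, 'z': 2}, tag_with_value=True, with_label=True,
    # label_type='int', data_type='float' and output_format='dense': the line
    # "1 y:0.5 z:2" becomes an Instance with label=1 and
    # features=np.asarray([0, 0.5, 2.0], dtype='float'); tags not present in
    # tags_dict are silently ignored.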
def save_model(self):
dataio_meta, dataio_param = save_data_io_model(input_format="tag",
delimitor=self.delimitor,
data_type=self.data_type,
tag_with_value=self.tag_with_value,
tag_value_delimitor=self.tag_value_delimitor,
with_label=self.with_label,
label_type=self.label_type,
output_format=self.output_format,
header=self.header,
sid_name=self.sid_name,
label_name=self.label_name,
model_name="Reader")
missing_imputer_meta, missing_imputer_param = save_missing_imputer_model(self.missing_fill,
self.missing_fill_method,
self.missing_impute,
self.default_value,
self.missing_impute_rate,
self.header,
"Imputer")
dataio_meta.imputer_meta.CopyFrom(missing_imputer_meta)
dataio_param.imputer_param.CopyFrom(missing_imputer_param)
outlier_meta, outlier_param = save_outlier_model(outlier_replace=False,
model_name="Outlier")
dataio_meta.outlier_meta.CopyFrom(outlier_meta)
dataio_param.outlier_param.CopyFrom(outlier_param)
return {"DataIOMeta": dataio_meta,
"DataIOParam": dataio_param
}
def load_model(self, model_meta, model_param):
self.delimitor, self.data_type, _0, self.tag_with_value, self.tag_value_delimitor, self.with_label, \
self.label_type, self.output_format, self.header, self.sid_name, self.label_name = load_data_io_model(
"SparseTagReader",
model_meta,
model_param)
self.missing_fill, self.missing_fill_method, \
self.missing_impute, self.default_value = load_missing_imputer_model(self.header,
"Imputer",
model_meta.imputer_meta,
model_param.imputer_param)
class DataIO(ModelBase):
def __init__(self):
super(DataIO, self).__init__()
self.reader = None
from federatedml.param.dataio_param import DataIOParam
self.model_param = DataIOParam()
def _init_model(self, model_param):
LOGGER.warning('DataIO is deprecated, and will be removed in 1.7, use DataTransform module instead')
if model_param.input_format == "dense":
self.reader = DenseFeatureReader(self.model_param)
elif model_param.input_format == "sparse":
self.reader = SparseFeatureReader(self.model_param)
elif model_param.input_format == "tag":
self.reader = SparseTagReader(self.model_param)
self.model_param = model_param
def load_model(self, model_dict):
input_model_param = None
input_model_meta = None
for _, value in model_dict["model"].items():
for model in value:
if model.endswith("Meta"):
input_model_meta = value[model]
if model.endswith("Param"):
input_model_param = value[model]
if input_model_meta.input_format == "dense":
self.reader = DenseFeatureReader(self.model_param)
elif input_model_meta.input_format == "sparse":
self.reader = SparseFeatureReader(self.model_param)
elif input_model_meta.input_format == "tag":
self.reader = SparseTagReader(self.model_param)
self.reader.load_model(input_model_meta, input_model_param)
def fit(self, data_inst):
data_inst = self.reader.read_data(data_inst, "fit")
if isinstance(self.reader, (DenseFeatureReader, SparseTagReader)):
summary_buf = self.reader.get_summary()
if summary_buf:
self.set_summary(summary_buf)
return data_inst
def transform(self, data_inst):
return self.reader.read_data(data_inst, "transform")
def export_model(self):
model_dict = self.reader.save_model()
model_dict["DataIOMeta"].need_run = self.need_run
return model_dict
def make_schema(header=None, sid_name=None, label_name=None):
schema = {}
if header:
schema["header"] = header
if sid_name:
schema["sid_name"] = sid_name
if label_name:
schema["label_name"] = label_name
ModelBase.check_schema_content(schema)
return schema
def set_schema(data_instance, schema):
data_instance.schema = schema
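# Illustrative sketch (not part of the original functions): make_schema only
# records the pieces that are present, e.g.
#   make_schema(['x0', 'x1'], 'sid', 'y')
#   -> {'header': ['x0', 'x1'], 'sid_name': 'sid', 'label_name': 'y'}
# while a None label_name (e.g. unlabeled data) is simply left out of the dict.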
def save_data_io_model(input_format="dense",
delimitor=",",
data_type="str",
exclusive_data_type=None,
tag_with_value=False,
tag_value_delimitor=":",
with_label=False,
label_name='',
label_type="int",
output_format="dense",
header=None,
sid_name=None,
model_name="DataIO"):
model_meta = DataIOMeta()
model_param = DataIOParam()
model_meta.input_format = input_format
model_meta.delimitor = delimitor
model_meta.data_type = data_type
model_meta.tag_with_value = tag_with_value
model_meta.tag_value_delimitor = tag_value_delimitor
model_meta.with_label = with_label
if with_label:
model_meta.label_name = label_name
model_meta.label_type = label_type
model_meta.output_format = output_format
if header is not None:
model_param.header.extend(header)
if sid_name:
model_param.sid_name = sid_name
if label_name:
model_param.label_name = label_name
if exclusive_data_type is not None:
model_meta.exclusive_data_type.update(exclusive_data_type)
return model_meta, model_param
def load_data_io_model(model_name="DataIO",
model_meta=None,
model_param=None):
delimitor = model_meta.delimitor
data_type = model_meta.data_type
tag_with_value = model_meta.tag_with_value
tag_value_delimitor = model_meta.tag_value_delimitor
with_label = model_meta.with_label
label_name = model_meta.label_name if with_label else None
label_type = model_meta.label_type if with_label else None
output_format = model_meta.output_format
header = list(model_param.header) or None
sid_name = None
if model_param.sid_name:
sid_name = model_param.sid_name
exclusive_data_type = None
if model_meta.exclusive_data_type:
exclusive_data_type = {}
for col_name in model_meta.exclusive_data_type:
exclusive_data_type[col_name] = model_meta.exclusive_data_type.get(col_name)
return delimitor, data_type, exclusive_data_type, tag_with_value, tag_value_delimitor, with_label, \
label_type, output_format, header, sid_name, label_name
def save_missing_imputer_model(missing_fill=False,
missing_replace_method=None,
missing_impute=None,
missing_fill_value=None,
missing_replace_rate=None,
header=None,
model_name="Imputer"):
model_meta = ImputerMeta()
model_param = ImputerParam()
model_meta.is_imputer = missing_fill
if missing_fill:
if missing_replace_method:
model_meta.strategy = str(missing_replace_method)
if missing_impute is not None:
model_meta.missing_value.extend(map(str, missing_impute))
if missing_fill_value is not None:
feature_value_dict = dict(zip(header, map(str, missing_fill_value)))
model_param.missing_replace_value.update(feature_value_dict)
if missing_replace_rate is not None:
missing_replace_rate_dict = dict(zip(header, missing_replace_rate))
model_param.missing_value_ratio.update(missing_replace_rate_dict)
return model_meta, model_param
def load_missing_imputer_model(header=None,
model_name="Imputer",
model_meta=None,
model_param=None):
missing_fill = model_meta.is_imputer
missing_replace_method = model_meta.strategy
missing_value = model_meta.missing_value
missing_fill_value = model_param.missing_replace_value
if missing_fill:
if not missing_replace_method:
missing_replace_method = None
if not missing_value:
missing_value = None
else:
missing_value = list(missing_value)
if missing_fill_value:
missing_fill_value = [missing_fill_value.get(head) for head in header]
else:
missing_fill_value = None
else:
missing_replace_method = None
missing_value = None
missing_fill_value = None
return missing_fill, missing_replace_method, missing_value, missing_fill_value
def save_outlier_model(outlier_replace=False,
outlier_replace_method=None,
outlier_impute=None,
outlier_replace_value=None,
outlier_replace_rate=None,
header=None,
model_name="Outlier"):
model_meta = OutlierMeta()
model_param = OutlierParam()
model_meta.is_outlier = outlier_replace
if outlier_replace:
if outlier_replace_method:
model_meta.strategy = str(outlier_replace_method)
if outlier_impute:
model_meta.outlier_value.extend(map(str, outlier_impute))
if outlier_replace_value:
outlier_value_dict = dict(zip(header, map(str, outlier_replace_value)))
model_param.outlier_replace_value.update(outlier_value_dict)
if outlier_replace_rate:
outlier_value_ratio_dict = dict(zip(header, outlier_replace_rate))
model_param.outlier_value_ratio.update(outlier_value_ratio_dict)
return model_meta, model_param
def load_outlier_model(header=None,
model_name="Outlier",
model_meta=None,
model_param=None):
outlier_replace = model_meta.is_outlier
outlier_replace_method = model_meta.strategy
outlier_value = model_meta.outlier_value
outlier_replace_value = model_param.outlier_replace_value
if outlier_replace:
if not outlier_replace_method:
outlier_replace_method = None
if not outlier_value:
outlier_value = None
else:
outlier_value = list(outlier_value)
if outlier_replace_value:
outlier_replace_value = [outlier_replace_value.get(head) for head in header]
else:
outlier_replace_value = None
else:
outlier_replace_method = None
outlier_value = None
outlier_replace_value = None
return outlier_replace, outlier_replace_method, outlier_value, outlier_replace_value
|
|
#!/usr/bin/env python
import time
import roslib; roslib.load_manifest('ur_driver')
import rospy
import actionlib
import threading
import pygame
import sys
import os
sys.path.insert(0, '/home/ubuntu/catkin_ws/src/keyboard/scripts/')
import keyboard_talker
import socket
sys.path.insert(0, '/home/ubuntu/catkin_ws/src/joystick/scripts/')
import joystick_talker
sys.path.insert(0, '/home/ubuntu/catkin_ws/src/leap_motion/scripts/')
import leap_talker
sys.path.insert(0, '/home/ubuntu/catkin_ws/src/GUI/')
import display
from pygame.locals import *
from control_msgs.msg import *
from trajectory_msgs.msg import *
from sensor_msgs.msg import JointState
from ur_msgs.msg import *
from leap_motion.msg import LeapFrame
from joystick.msg import JoystickFrame
from keyboard.msg import KeyboardFrame
from ur_driver.io_interface import *
JOINT_NAMES = ['shoulder_pan_joint', 'shoulder_lift_joint', 'elbow_joint',
'wrist_1_joint', 'wrist_2_joint', 'wrist_3_joint']
MODE_TOOL = 1
MODE_JOINTS = 2
HOST = '192.168.1.100'
PORT = 30002
PI = 3.14159265359
d1 = 0.017453293 #1 degree in rad
#Robot joints position
J1 = 0
J2 = 0
J3 = 0
J4 = 0
J5 = 0
J6 = 0
#Hand palm position and status
palmX = 0
palmY = 0
palmZ = 0
palmYaw = 0
palmPitch = 0
palmRoll = 0
hands = False
grip = False
lm = 0
jy = 0
kb = 0
last = 0
host = 0
#last move sent to the robot
last_move = "null"
gripped = False
client = None
changing = False
mode = MODE_JOINTS
#Method to send the command to move the tool
def move_tool(position):
global last_move
if position == [0,0,0,0,0,0]:
if last_move != "stopl":
stop()
else:
command = "speedl(%s,0.5,1)"%position
last_move = "speedl"
print command
s.send(command+"\n")
# print command
#Method to send the command to move the joints
def move_joints(position):
global last_move
if position == [0,0,0,0,0,0]:
if last_move != "stopl":
stop()
else:
command = "speedj(%s,0.5,1)"%position
last_move = "speedj"
print command
s.send(command+"\n")
def stop():
global last_move
#time.sleep(0.05)
if last_move != "stopl":
command = "stopl(0.5)"
s.send(command+"\n")
last_move = "stopl"
def grab_action():
global changing
grab = grip
string_bool = str(grab)
command = "set_digital_out(8,"+string_bool+")"
print command
s.send(command+"\n")
time.sleep(2)
changing = False
#Callback that completes the subscription to a topic; each time
# something is published to the topic this callback method is called
def callback_ur(data):
    #store each joint position (in radians)
global J1,J2,J3,J4,J5,J6
J1 = data.position[0]
J2 = data.position[1]
J3 = data.position[2]
J4 = data.position[3]
J5 = data.position[4]
J6 = data.position[5]
#rospy.loginfo("\nShoulder pan %s\nShoulder lift %s\nElbow %s\nWrist1 %s\nWrist2 %s\nwrist3 %s\n" % (J1,J2,J3,J4,J5,J6))
#Callback that completes the subscription to a topic; each time
# something is published to the topic this callback method is called
def callback(data):
global palmY, palmX, palmZ, palmYaw, palmPitch, palmRoll, hands, grip, changing
palmX = data.palm_position.x
palmY = data.palm_position.y
palmZ = data.palm_position.z
palmYaw = data.ypr.x
palmPitch = data.ypr.y
palmRoll = data.ypr.z
hands = data.hand_available
if not changing:
grip = data.grab_action
#rospy.loginfo("\nx: %s\ny: %s\nz: %s" % (data.palm_position.x,data.palm_position.y,data.palm_position.z))
#Depending on the position of the user's hand, send different movements to
# the robot, making it move according to the hand
def send_movement():
global J1,J2,J3,J4,J5,J6
global grip,gripped,changing
if hands:
if palmX > 70:
x = float(round(0.000476 * palmX - 0.0333,2))
elif palmX < -70:
x = float(round(0.000476 * palmX + 0.0333,2))
else:
x = 0.00
if palmY > 220:
z = float(round(0.001333 * palmY - 0.28,2))
elif palmY < 110:
z = float(round(0.00125 * palmY - 0.15,2))
else:
z = 0.00
if palmZ > 50:
y = float(round(-0.000666 * palmZ + 0.0333,2))
elif palmZ < -50:
y = float(round(-0.000666 * palmZ - 0.0333,2))
else:
y = 0.00
if palmRoll > 25:
ry = float(round(palmRoll*0.002,2))
elif palmRoll < -25:
ry = float(round(palmRoll*0.002,2))
else:
ry = 0.00
if palmPitch > 25:
rx = float(round(palmPitch*0.002,2))
elif palmPitch < -25:
rx = float(round(palmPitch*0.002,2))
else:
rx = 0.00
if palmYaw > 25:
rz = float(round(-palmYaw*0.002,2))
elif palmYaw < -25:
rz = float(round(-palmYaw*0.002,2))
else:
rz = 0.00
mode = keyboard_talker.mode_state()
if mode == MODE_TOOL:
move_tool([x,y,z,rx,ry,rz])
elif mode == MODE_JOINTS:
move_joints([rz,rx,ry,0,0,0])
if grip and not gripped:
if not changing:
changing = True
t = threading.Thread(target=grab_action)
t.start()
gripped = True
if not grip and gripped:
if not changing:
changing = True
t = threading.Thread(target=grab_action)
t.start()
gripped = False
else:
stop()
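# Illustrative sketch (not part of the original script): the palm coordinates
# are mapped piecewise-linearly onto speed commands, with a dead zone around
# the rest position. For example palmX=140 gives
#   x = round(0.000476*140 - 0.0333, 2) = 0.03
# while any palmX between -70 and 70 yields x = 0.0, so the tool only moves
# once the hand leaves the dead zone. The same idea applies to y, z and the
# rotations (e.g. palmRoll=50 -> ry = 0.1).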
def check_input():
global lm, jy, kb, last
try:
a = keyboard_talker.driver_state()
if(a < 4):
if(a == 1):
jy.unregister()
kb.unregister()
if (last != 1):
print last
lm = rospy.Subscriber("leapmotion/data", LeapFrame, callback)
print "You are now using <LeapMotion>"
last = 1
return True
elif(a == 2):
lm.unregister()
kb.unregister()
if (last != 2):
print last
jy = rospy.Subscriber("joystick/data",JoystickFrame, callback)
print "You are now using a <Joystick>"
last = 2
return True
elif(a == 3):
jy.unregister()
lm.unregister()
if (last != 3):
print last
last = 3
kb = rospy.Subscriber("keyboard/data", KeyboardFrame, callback)
print "You are now using <Keyboard>"
return True
else:
print "[WARN] Number incorrect"
return False
except ValueError:
print "[EXCEPTION] Introduce a correct number"
return False
def select_hardware():
while(True):
check_input()
def init_leap():
os.system("LeapControlPanel")
def init_screen(screen):
counter = 0
while counter<60:
for event in pygame.event.get():
pressed = pygame.key.get_pressed()
if event.type == pygame.QUIT:
os.system("pkill LeapControlPane")
os.system("pkill roscore")
                keyboard_talker.leapMotion_stop()
rospy.signal_shutdown("KeyboardInterrupt")
pygame.quit()
end = True
if counter % 20 == 0:
display.start_screen(screen,3-counter/20)
time.sleep(0.08)
counter += 1
def init_threads(screen,clock):
t = threading.Thread(target=select_hardware, args = ())
t.daemon = True
t.start()
t = threading.Thread(target=joystick_talker.talker, args = ())
t.daemon = True
t.start()
t = threading.Thread(target=keyboard_talker.keypress, args = (screen, clock))
t.daemon = True
t.start()
t = threading.Thread(target=leap_talker.main, args = ())
t.daemon = True
t.start()
def info_connection(screen):
display.trying_to_connect(screen)
def init_server(screen):
global s,host
connected = False
state = 0
while not connected:
host = display.server_screen(screen, state)
try:
display.server_screen(screen, 1)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, PORT))
connected = True
except socket.error as msg:
print "Couldn't establish connection with the robot"
print msg
state = 2
pass
def init_subscriber():
global lm,jy,kb
jy = rospy.Subscriber("joystick/data",JoystickFrame, callback)
lm = rospy.Subscriber("leapmotion/data", LeapFrame, callback)
kb = rospy.Subscriber("keyboard/data", KeyboardFrame, callback)
jy.unregister()
lm.unregister()
kb.unregister()
creating = False
def socket_clean():
global creating,s,host
while True:
time.sleep(30)
creating = True
s.close()
s = socket.create_connection((host, PORT))
creating = False
def init_move():
global creating
while(True):
if not creating:
send_movement()
        time.sleep(0.08) #roughly 12.5 Hz loop rate
if (keyboard_talker.end):
client.cancel_goal()
rospy.signal_shutdown("KeyboardInterrupt")
keyboard_talker.leapMotion_stop()
pygame.quit()
def main():
global client,s
try:
os.system("roscore &")
pygame.init()
screen = pygame.display.set_mode((650,370),0,32)
clock = pygame.time.Clock()
init_subscriber()
init_screen(screen)
init_server(screen)
rospy.init_node("test_move", anonymous=True, disable_signals=True)
init_threads(screen,clock)
t = threading.Thread(target=socket_clean)
t.daemon = True
t.start()
init_move()
except KeyboardInterrupt:
        s.close()
rospy.signal_shutdown("KeyboardInterrupt")
keyboard_talker.leapMotion_stop()
pygame.quit()
raise
if __name__ == '__main__': main()
|
|
r"""*Root conftest for* ``sphobjinv``.
``sphobjinv`` is a toolkit for manipulation and inspection of
Sphinx |objects.inv| files.
**Author**
Brian Skinn (bskinn@alum.mit.edu)
**File Created**
20 Mar 2019
**Copyright**
\(c) Brian Skinn 2016-2022
**Source Repository**
http://www.github.com/bskinn/sphobjinv
**Documentation**
http://sphobjinv.readthedocs.io
**License**
The MIT License; see |license_txt|_ for full license terms
**Members**
"""
import os.path as osp
import platform
import re
import shutil
import sys
from enum import Enum
from filecmp import cmp
from functools import partial
from io import BytesIO
from pathlib import Path
import jsonschema
import pytest
from sphinx import __version__ as sphinx_version_str
from sphinx.util.inventory import InventoryFile as IFile
import sphobjinv as soi
def pytest_addoption(parser):
"""Add custom CLI options to pytest."""
parser.addoption(
"--testall",
action="store_true",
help=(
"Where relevant, test *all* inventories stored in "
"testing resource folder, not just objects_attrs.inv"
),
)
parser.addoption("--nonloc", action="store_true", help="Include nonlocal tests")
parser.addoption(
"--flake8_ext", action="store_true", help="Include flake8 extensions test"
)
@pytest.fixture(scope="session")
def res_path():
"""Provide Path object to the test resource directory."""
return Path("tests", "resource")
@pytest.fixture(scope="session")
def res_cmp(res_path, misc_info):
"""Provide Path object to the compressed attrs inventory in resource."""
return res_path / (misc_info.FNames.RES.value + misc_info.Extensions.CMP.value)
@pytest.fixture(scope="session")
def res_dec(res_path, misc_info):
"""Provide Path object to the decompressed attrs inventory in resource."""
return res_path / (misc_info.FNames.RES.value + misc_info.Extensions.DEC.value)
@pytest.fixture(scope="session")
def misc_info(res_path):
"""Supply Info object with various test-relevant content."""
class Info:
"""Monolithic test-information class."""
class FNames(str, Enum):
"""Enum of test-relevant file names."""
RES = "objects_attrs"
INIT = "objects"
MOD = "objects_mod"
class Extensions(str, Enum):
"""Enum of test-relevant file extensions."""
CMP = ".inv"
DEC = ".txt"
JSON = ".json"
invalid_filename = "*?*?.txt" if sys.platform == "win32" else "/"
IN_PYPY = "pypy" in sys.implementation.name
        # Sample object lines from an inventory, as bytes
# False --> contracted abbreviations
# True --> expanded abbreviations
byte_lines = {
False: b"attr.Attribute py:class 1 api.html#$ -",
True: b"attr.Attribute py:class 1 api.html#attr.Attribute attr.Attribute",
}
# For the URL mode of Inventory instantiation
remote_url = (
"https://github.com/bskinn/sphobjinv/raw/main/"
"tests/resource/objects_{0}.inv"
)
# Regex pattern for objects_xyz.inv files
p_inv = re.compile(r"objects_([^.]+)\.inv", re.I)
# Standard location for the already-decompressed object in resource folder,
# for comparison to a freshly generated decompressed file
Info.res_decomp_path = res_path / (
Info.FNames.RES.value + Info.Extensions.DEC.value
)
# String version of the sample object lines
Info.str_lines = {_: Info.byte_lines[_].decode("utf-8") for _ in Info.byte_lines}
return Info()
@pytest.fixture()
def scratch_path(tmp_path, res_path, misc_info):
"""Provision pre-populated scratch directory, returned as Path."""
res_base = misc_info.FNames.RES.value
scr_base = misc_info.FNames.INIT.value
for ext in [_.value for _ in misc_info.Extensions]:
# The str() calls here are for Python 3.5 compat
shutil.copy(
str(res_path / f"{res_base}{ext}"),
str(tmp_path / f"{scr_base}{ext}"),
)
yield tmp_path
@pytest.fixture(scope="session")
def ensure_doc_scratch():
"""Ensure doc/scratch dir exists, for README shell examples."""
Path("doc", "scratch").mkdir(parents=True, exist_ok=True)
@pytest.fixture(scope="session")
def bytes_txt(misc_info, res_path):
"""Load and return the contents of the example objects_attrs.txt as bytes."""
return soi.fileops.readbytes(
res_path / (misc_info.FNames.RES.value + misc_info.Extensions.DEC.value)
)
def sphinx_ifile_load(path):
"""Carry out inventory load via Sphinx InventoryFile.
Defined as a standalone function to allow importing
during debugging.
"""
return IFile.load(BytesIO(path.read_bytes()), "", osp.join)
@pytest.fixture(scope="session", name="sphinx_ifile_load")
def fixture_sphinx_ifile_load():
"""Return helper function to load inventory via Sphinx InventoryFile."""
return sphinx_ifile_load
def sphinx_ifile_data_count(ifile_data):
"""Report the total number of items in the InventoryFile data.
Defined standalone to allow import during debugging.
"""
return sum(len(ifile_data[k]) for k in ifile_data)
@pytest.fixture(scope="session", name="sphinx_ifile_data_count")
def fixture_sphinx_ifile_data_count():
"""Return helper function to report total number of objects."""
return sphinx_ifile_data_count
@pytest.fixture(scope="session")
def sphinx_load_test(sphinx_ifile_load):
"""Return function to perform 'live' Sphinx inventory load test."""
def func(path):
"""Perform the 'live' inventory load test."""
try:
sphinx_ifile_load(path)
except Exception as e: # noqa: PIE786
# An exception here is a failing test, not a test error.
pytest.fail(e)
return func
@pytest.fixture(scope="session")
def sphinx_version():
"""Provide the installed Sphinx version as a tuple.
Returns (major, minor, patch).
"""
p_version = re.compile(r"(\d+)[.]?(\d+)?[.]?(\d+)?")
mch = p_version.match(sphinx_version_str)
return tuple(map((lambda x: int(x) if x else 0), mch.groups()))
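# Illustrative sketch (not part of the original fixture): the lenient regex
# pads missing components with zeros, e.g.
#   "4.5.0" -> (4, 5, 0)
#   "5.0"   -> (5, 0, 0)
# so version comparisons can always be done on a 3-tuple.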
@pytest.fixture() # Must be function scope since uses monkeypatch
def run_cmdline_test(monkeypatch):
"""Return function to perform command line exit code test."""
from sphobjinv.cli.core import main
def func(arglist, *, expect=0): # , suffix=None):
"""Perform the CLI exit-code test."""
# Assemble execution arguments
runargs = ["sphobjinv"]
runargs.extend(str(a) for a in arglist)
# Mock sys.argv, run main, and restore sys.argv
with monkeypatch.context() as m:
m.setattr(sys, "argv", runargs)
try:
main()
except SystemExit as e:
retcode = e.args[0]
ok = True
else:
ok = False
# Do all pytesty stuff outside monkeypatch context
assert ok, "SystemExit not raised on termination."
# Test that execution completed w/indicated exit code
assert retcode == expect, runargs
return func
@pytest.fixture(scope="session")
def decomp_cmp_test(misc_info):
"""Return function to confirm a decompressed file is identical to resource."""
def func(path):
"""Perform the round-trip compress/decompress comparison test."""
# The str() calls here are for Python 3.5 compat
assert cmp(str(misc_info.res_decomp_path), str(path), shallow=False)
return func
@pytest.fixture(scope="session")
def attrs_inventory_test():
"""Provide function for high-level attrs Inventory consistency tests."""
def func(inv, source_type):
"""Perform high-level attrs Inventory consistency test.
`inv` is an Inventory instance.
`source_type` is a member of the `soi.inventory.SourceTypes` enum.
"""
assert inv.project == "attrs"
assert inv.version == "17.2"
assert inv.count == 56
assert inv.source_type
return func
testall_inv_paths = [
p
for p in (Path(__file__).parent / "tests" / "resource").iterdir()
if p.name.startswith("objects_") and p.name.endswith(".inv")
]
testall_inv_ids = [p.name[8:-4] for p in testall_inv_paths]
@pytest.fixture(params=testall_inv_paths, ids=testall_inv_ids)
def testall_inv_path(request):
"""Provide parametrized --testall inventory paths."""
return request.param
@pytest.fixture(scope="session")
def is_win():
"""Report boolean of whether the current system is Windows."""
return platform.system().lower() == "windows"
@pytest.fixture(scope="session")
def unix2dos():
"""Provide function for converting POSIX to Windows EOLs."""
return partial(re.sub, rb"(?<!\r)\n", b"\r\n")
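# Illustrative sketch (not part of the original fixture): the negative
# lookbehind only rewrites bare LF line endings, e.g.
#   unix2dos(b"spam\neggs\r\n") == b"spam\r\neggs\r\n"
# so text that is already CRLF-terminated is left untouched.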
@pytest.fixture(scope="session")
def jsonschema_validator():
"""Provide the standard JSON schema validator."""
return jsonschema.Draft4Validator
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import yaml
import netifaces
from libcloud.compute.deployment import MultiStepDeployment
from libcloud.compute.deployment import ScriptDeployment
from libcloud.compute.deployment import SSHKeyDeployment
from libcloud.compute.ssh import SSHClient
from plumbery.exceptions import PlumberyException
from plumbery.polisher import PlumberyPolisher
class RubPolisher(PlumberyPolisher):
"""
Bootstraps nodes via ssh
This polisher looks at each node in sequence, and contact selected nodes
via ssh to rub them. The goal here is to accelerate post-creation tasks as
much as possible.
Bootstrapping steps can consist of multiple tasks:
* push a SSH public key to allow for automated secured communications
* ask for package update
* install docker
    * install any Python scripts
* install Stackstorm
* configure a Chef client
* register a node to a monitoring dashboard
* ...
To activate this polisher you have to mention it in the fittings plan,
like in the following example::
---
safeMode: False
polishers:
- rub:
key: ~/.ssh/id_rsa.pub
---
# Frankfurt in Europe
locationId: EU6
regionId: dd-eu
...
Plumbery will only rub nodes that have been configured for it. The example
below demonstrates how this can be done for multiple docker containers::
# some docker resources
- docker:
domain: *vdc1
ethernet: *containers
nodes:
- docker1:
rub: &docker
- rub.update.sh
- rub.docker.sh
- docker2:
rub: *docker
- docker3:
rub: *docker
    In real life, when you have to rub an appliance you need to be close
    enough to touch it. The same holds for virtual fittings: this polisher
    needs to communicate directly with target nodes over the network.
    This connectivity can become quite complicated because of the potential mix
    of private and public networks, firewalls, etc. To stay safe, plumbery
    enforces a simple beachheading model, where network connectivity with end
    nodes is a no-brainer.
This model is based on predefined network addresses for plumbery itself,
as in the snippet below::
---
# Frankfurt in Europe
locationId: EU6
regionId: dd-eu
# network subnets are 10.1.x.y
rub:
- beachhead: 10.1.3.4
Here nodes at EU6 will be rubbed only if the machine that is
    executing plumbery has the address 10.1.3.4. In other cases, plumbery will
state that the location is out of reach.
"""
def _apply_rubs(self, node, steps):
"""
Does the actual job over SSH
:param node: the node to be polished
:type node: :class:`libcloud.compute.base.Node`
:param steps: the various steps of the rubbing
:type steps: :class:`libcloud.compute.deployment.MultiStepDeployment`
:returns: ``bool``
- ``True`` if everything went fine, ``False`` otherwise
"""
# use libcloud to communicate with remote nodes
session = SSHClient(hostname=node.private_ips[0],
port=22,
username='root',
password=self.secret,
key_files=None,
timeout=15)
try:
session.connect()
node = steps.run(node, session)
except Exception as feedback:
logging.info("Error: unable to rub '{}' at '{}'!".format(node.name,
node.private_ips[0]))
logging.info(str(feedback))
result = False
else:
result = True
try:
session.close()
except:
pass
return result
def _get_rubs(self, node, settings):
"""
Defines the set of actions to be done on a node
:param node: the node to be polished
:type node: :class:`libcloud.compute.base.Node`
:param settings: the fittings plan for this node
:type settings: ``dict``
:returns: a list of { 'description': ..., 'genius': ... }
"""
if not isinstance(settings, dict) or 'rub' not in settings:
return []
rubs = []
if self.key is not None:
rubs.append({
'description': 'deploy SSH public key',
'genius': SSHKeyDeployment(self.key)})
for script in settings['rub']:
try:
with open(os.path.dirname(__file__)+'/'+script) as stream:
text = stream.read()
if text:
rubs.append({
'description': 'run '+script,
'genius': ScriptDeployment(text)})
except IOError:
raise PlumberyException("Error: cannot read '{}'".format(script))
return rubs
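    # Illustrative sketch (not part of the original class), assuming a node
    # defined with `rub: [rub.update.sh, rub.docker.sh]` and a public key
    # loaded in self.key: _get_rubs() would return three entries, an
    # SSHKeyDeployment for the key followed by one ScriptDeployment per
    # script, each script being read from the directory of this module.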
def go(self, engine):
"""
Starts the rubbing process
:param engine: access to global parameters and functions
:type engine: :class:`plumbery.PlumberyEngine`
"""
self.engine = engine
self.report = []
self.secret = engine.get_shared_secret()
self.key = None
if 'key' in self.settings:
try:
path = os.path.expanduser(self.settings['key'])
with open(path) as stream:
self.key = stream.read()
stream.close()
except IOError:
pass
def move_to(self, facility):
"""
Checks if we can beachhead at this facility
:param facility: access to local parameters and functions
:type facility: :class:`plumbery.PlumberyFacility`
"""
self.facility = facility
self.beachheading = False
try:
self.addresses = []
for interface in netifaces.interfaces():
addresses = netifaces.ifaddresses(interface)
if netifaces.AF_INET in addresses.keys():
for address in addresses[netifaces.AF_INET]:
self.addresses.append(address['addr'])
if netifaces.AF_INET6 in addresses.keys():
for address in addresses[netifaces.AF_INET6]:
self.addresses.append(address['addr'])
except Exception as feedback:
pass
for item in self.facility.fittings.rub:
if not isinstance(item, dict):
continue
if 'beachhead' not in item.keys():
continue
if item['beachhead'] in self.addresses:
self.beachheading = True
break
if self.beachheading:
logging.info("- beachheading at '{}'".format(self.facility.fittings.locationId))
else:
logging.info("- '{}' is unreachable".format(self.facility.fittings.locationId))
def shine_node(self, node, settings):
"""
Rubs a node
:param node: the node to be polished
:type node: :class:`libcloud.compute.base.Node`
:param settings: the fittings plan for this node
:type settings: ``dict``
"""
if not self.beachheading:
self.report.append({node.name: {
'status': 'unreachable'
}})
return
rubs = self._get_rubs(node, settings)
if len(rubs) < 1:
self.report.append({node.name: {
'status': 'skipped - nothing to do'
}})
return
descriptions = []
steps = []
for item in rubs:
descriptions.append(item['description'])
steps.append(item['genius'])
if self._apply_rubs(node, MultiStepDeployment(steps)):
logging.info('- done')
self.report.append({node.name: {
'status': 'completed',
'rubs': descriptions
}})
else:
logging.info('- failed')
self.report.append({node.name: {
'status': 'failed',
'rubs': descriptions
}})
def reap(self):
"""
Reports on rubbing
"""
if 'reap' in self.settings:
fileName = self.settings['reap']
else:
fileName = 'rub.yaml'
logging.info("Reporting in '{}'".format(fileName))
with open(fileName, 'w') as stream:
stream.write(yaml.dump(self.report, default_flow_style=False))
stream.close()
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the key functions in pruning library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.model_pruning.python import pruning
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import training_util
class PruningHParamsTest(test.TestCase):
PARAM_LIST = [
"name=test", "threshold_decay=0.9", "pruning_frequency=10",
"do_not_prune=[conv1,conv2]", "sparsity_function_end_step=100",
"target_sparsity=0.9"
]
TEST_HPARAMS = ",".join(PARAM_LIST)
def setUp(self):
super(PruningHParamsTest, self).setUp()
# Add global step variable to the graph
self.global_step = training_util.get_or_create_global_step()
# Add sparsity
self.sparsity = variables.Variable(0.5, name="sparsity")
# Parse hparams
self.pruning_hparams = pruning.get_pruning_hparams().parse(
self.TEST_HPARAMS)
def testInit(self):
p = pruning.Pruning(self.pruning_hparams)
self.assertEqual(p._spec.name, "test")
self.assertAlmostEqual(p._spec.threshold_decay, 0.9)
self.assertEqual(p._spec.pruning_frequency, 10)
self.assertAllEqual(p._spec.do_not_prune, ["conv1", "conv2"])
self.assertEqual(p._spec.sparsity_function_end_step, 100)
self.assertAlmostEqual(p._spec.target_sparsity, 0.9)
def testInitWithExternalSparsity(self):
with self.test_session():
p = pruning.Pruning(spec=self.pruning_hparams, sparsity=self.sparsity)
variables.global_variables_initializer().run()
sparsity = p._sparsity.eval()
self.assertAlmostEqual(sparsity, 0.5)
def testInitWithVariableReuse(self):
with self.test_session():
p = pruning.Pruning(spec=self.pruning_hparams, sparsity=self.sparsity)
p_copy = pruning.Pruning(
spec=self.pruning_hparams, sparsity=self.sparsity)
variables.global_variables_initializer().run()
sparsity = p._sparsity.eval()
self.assertAlmostEqual(sparsity, 0.5)
self.assertEqual(p._sparsity.eval(), p_copy._sparsity.eval())
class PruningTest(test.TestCase):
def setUp(self):
super(PruningTest, self).setUp()
self.global_step = training_util.get_or_create_global_step()
def testCreateMask2D(self):
width = 10
height = 20
with self.test_session():
weights = variables.Variable(
random_ops.random_normal([width, height], stddev=1), name="weights")
masked_weights = pruning.apply_mask(weights,
variable_scope.get_variable_scope())
variables.global_variables_initializer().run()
weights_val = weights.eval()
masked_weights_val = masked_weights.eval()
self.assertAllEqual(weights_val, masked_weights_val)
def testUpdateSingleMask(self):
with self.test_session() as session:
weights = variables.Variable(
math_ops.linspace(1.0, 100.0, 100), name="weights")
masked_weights = pruning.apply_mask(weights)
sparsity = variables.Variable(0.5, name="sparsity")
p = pruning.Pruning(sparsity=sparsity)
p._spec.threshold_decay = 0.0
mask_update_op = p.mask_update_op()
variables.global_variables_initializer().run()
masked_weights_val = masked_weights.eval()
self.assertAllEqual(np.count_nonzero(masked_weights_val), 100)
session.run(mask_update_op)
masked_weights_val = masked_weights.eval()
self.assertAllEqual(np.count_nonzero(masked_weights_val), 51)
def _blockMasking(self, hparams, weights, expected_mask):
threshold = variables.Variable(0.0, name="threshold")
sparsity = variables.Variable(0.51, name="sparsity")
test_spec = ",".join(hparams)
pruning_hparams = pruning.get_pruning_hparams().parse(test_spec)
# Set up pruning
p = pruning.Pruning(pruning_hparams, sparsity=sparsity)
with self.test_session():
variables.global_variables_initializer().run()
_, new_mask = p._maybe_update_block_mask(weights, threshold)
# Check if the mask is the same size as the weights
self.assertAllEqual(new_mask.get_shape(), weights.get_shape())
mask_val = new_mask.eval()
self.assertAllEqual(mask_val, expected_mask)
def testBlockMasking(self):
param_list = ["block_height=2", "block_width=2", "threshold_decay=0"]
weights_avg = constant_op.constant(
[[0.1, 0.1, 0.2, 0.2], [0.1, 0.1, 0.2, 0.2], [0.3, 0.3, 0.4, 0.4],
[0.3, 0.3, 0.4, 0.4]])
weights_max = constant_op.constant(
[[0.1, 0.0, 0.2, 0.0], [0.0, -0.1, 0.0, -0.2], [0.3, 0.0, 0.4, 0.0],
[0.0, -0.3, 0.0, -0.4]])
expected_mask = [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1]]
self._blockMasking(param_list + ["block_pooling_function=MAX"], weights_max,
expected_mask)
self._blockMasking(param_list + ["block_pooling_function=AVG"], weights_avg,
expected_mask)
def testBlockMaskingWithHigherDimensions(self):
param_list = ["block_height=2", "block_width=2", "threshold_decay=0"]
# Weights as in testBlockMasking, but with one extra dimension.
weights_avg = constant_op.constant(
[[[0.1, 0.1, 0.2, 0.2], [0.1, 0.1, 0.2, 0.2], [0.3, 0.3, 0.4, 0.4],
[0.3, 0.3, 0.4, 0.4]]])
weights_max = constant_op.constant(
[[[0.1, 0.0, 0.2, 0.0], [0.0, -0.1, 0.0, -0.2], [0.3, 0.0, 0.4, 0.0],
[0.0, -0.3, 0.0, -0.4]]])
expected_mask = [[[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1]]]
self._blockMasking(param_list + ["block_pooling_function=MAX"], weights_max,
expected_mask)
self._blockMasking(param_list + ["block_pooling_function=AVG"],
weights_avg, expected_mask)
def testPartitionedVariableMasking(self):
partitioner = partitioned_variables.variable_axis_size_partitioner(40)
with self.test_session() as session:
with variable_scope.variable_scope("", partitioner=partitioner):
sparsity = variables.Variable(0.5, name="Sparsity")
weights = variable_scope.get_variable(
"weights", initializer=math_ops.linspace(1.0, 100.0, 100))
masked_weights = pruning.apply_mask(
weights, scope=variable_scope.get_variable_scope())
p = pruning.Pruning(sparsity=sparsity)
p._spec.threshold_decay = 0.0
mask_update_op = p.mask_update_op()
variables.global_variables_initializer().run()
masked_weights_val = masked_weights.eval()
session.run(mask_update_op)
masked_weights_val = masked_weights.eval()
self.assertAllEqual(np.count_nonzero(masked_weights_val), 51)
def testConditionalMaskUpdate(self):
param_list = [
"pruning_frequency=2", "begin_pruning_step=1", "end_pruning_step=6"
]
test_spec = ",".join(param_list)
pruning_hparams = pruning.get_pruning_hparams().parse(test_spec)
weights = variables.Variable(
math_ops.linspace(1.0, 100.0, 100), name="weights")
masked_weights = pruning.apply_mask(weights)
sparsity = variables.Variable(0.00, name="sparsity")
# Set up pruning
p = pruning.Pruning(pruning_hparams, sparsity=sparsity)
p._spec.threshold_decay = 0.0
mask_update_op = p.conditional_mask_update_op()
sparsity_val = math_ops.linspace(0.0, 0.9, 10)
increment_global_step = state_ops.assign_add(self.global_step, 1)
non_zero_count = []
with self.test_session() as session:
variables.global_variables_initializer().run()
for i in range(10):
session.run(state_ops.assign(sparsity, sparsity_val[i]))
session.run(mask_update_op)
session.run(increment_global_step)
non_zero_count.append(np.count_nonzero(masked_weights.eval()))
            # Weights pruned at steps 0, 2, 4, and 6
expected_non_zero_count = [100, 100, 80, 80, 60, 60, 40, 40, 40, 40]
self.assertAllEqual(expected_non_zero_count, non_zero_count)
if __name__ == "__main__":
test.main()
|
|
#!/usr/bin/env python
import json
import os.path
from time import time
from pysnap.utils import (encrypt, decrypt, decrypt_story,
make_media_id, request)
MEDIA_IMAGE = 0
MEDIA_VIDEO = 1
MEDIA_VIDEO_NOAUDIO = 2
FRIEND_CONFIRMED = 0
FRIEND_UNCONFIRMED = 1
FRIEND_BLOCKED = 2
PRIVACY_EVERYONE = 0
PRIVACY_FRIENDS = 1
def is_video(data):
return len(data) > 1 and data[0:2] == b'\x00\x00'
def is_image(data):
return len(data) > 1 and data[0:2] == b'\xFF\xD8'
def is_zip(data):
    return len(data) > 1 and data[0:2] == b'PK'
def get_file_extension(media_type):
if media_type in (MEDIA_VIDEO, MEDIA_VIDEO_NOAUDIO):
return 'mp4'
if media_type == MEDIA_IMAGE:
return 'jpg'
return ''
def get_media_type(data):
if is_video(data):
return MEDIA_VIDEO
if is_image(data):
return MEDIA_IMAGE
return None
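# Illustrative sketch (not part of the original module): media detection is
# based purely on the first two bytes of the payload, e.g.
#   get_media_type(b'\xff\xd8<jpeg bytes>') -> MEDIA_IMAGE  (extension 'jpg')
#   get_media_type(b'\x00\x00<mp4 bytes>')  -> MEDIA_VIDEO  (extension 'mp4')
# and anything unrecognised falls through to None / an empty extension.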
def _map_keys(snap):
return {
u'id': snap.get('id', None),
u'media_id': snap.get('c_id', None),
u'media_type': snap.get('m', None),
u'time': snap.get('t', None),
u'sender': snap.get('sn', None),
u'recipient': snap.get('rp', None),
u'status': snap.get('st', None),
u'screenshot_count': snap.get('c', None),
u'sent': snap.get('sts', None),
u'opened': snap.get('ts', None)
}
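# Illustrative sketch (not part of the original module): _map_keys renames the
# terse server-side keys to readable ones, e.g.
#   _map_keys({'id': '123abc', 'sn': 'alice', 'm': 0, 't': 5})
#   -> {'id': '123abc', 'sender': 'alice', 'media_type': 0, 'time': 5, ...}
# with every key that is absent from the raw snap defaulting to None.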
class Snapchat(object):
"""Construct a :class:`Snapchat` object used for communicating
with the Snapchat API.
Usage:
from pysnap import Snapchat
snapchat = Snapchat()
snapchat.login('username', 'password')
...
"""
def __init__(self):
self.username = None
self.auth_token = None
def _request(self, endpoint, data=None, files=None,
raise_for_status=True, req_type='post'):
return request(endpoint, self.auth_token, data, files,
raise_for_status, req_type)
def _unset_auth(self):
self.username = None
self.auth_token = None
def login(self, username, password):
"""Login to Snapchat account
Returns a dict containing user information on successful login, the
data returned is similar to get_updates.
        :param username: Snapchat username
        :param password: Snapchat password
"""
self._unset_auth()
r = self._request('login', {
'username': username,
'password': password
})
result = r.json()
if 'auth_token' in result:
self.auth_token = result['auth_token']
if 'username' in result:
self.username = username
return result
def logout(self):
"""Logout of Snapchat account
Returns true if logout was successful.
"""
r = self._request('logout', {'username': self.username})
return len(r.content) == 0
def get_updates(self, update_timestamp=0):
"""Get user, friend and snap updates
Returns a dict containing user, friends and snap information.
:param update_timestamp: Optional timestamp (epoch in seconds) to limit
updates
"""
r = self._request('updates', {
'username': self.username,
'update_timestamp': update_timestamp
})
result = r.json()
if 'auth_token' in result:
self.auth_token = result['auth_token']
return result
def get_snaps(self, update_timestamp=0):
"""Get snaps
Returns a dict containing metadata for snaps
:param update_timestamp: Optional timestamp (epoch in seconds) to limit
updates
"""
updates = self.get_updates(update_timestamp)
# Filter out snaps containing c_id as these are sent snaps
return [_map_keys(snap) for snap in updates['snaps']
if 'c_id' not in snap]
def get_friend_stories(self, update_timestamp=0):
"""Get stories
Returns a dict containing metadata for stories
:param update_timestamp: Optional timestamp (epoch in seconds) to limit
updates
"""
r = self._request("all_updates", {
'username': self.username,
'update_timestamp': update_timestamp
})
result = r.json()
if 'auth_token' in result:
self.auth_token = result['auth_token']
stories = []
story_groups = result['stories_response']['friend_stories']
for group in story_groups:
sender = group['username']
for story in group['stories']:
obj = story['story']
obj['sender'] = sender
stories.append(obj)
return stories
def get_story_blob(self, story_id, story_key, story_iv):
"""Get the image or video of a given snap
Returns the decrypted image or a video of the given snap or None if
data is invalid.
:param story_id: Media id to fetch
:param story_key: Encryption key of the story
:param story_iv: Encryption IV of the story
"""
r = self._request('story_blob', {'story_id': story_id},
raise_for_status=False, req_type='get')
data = decrypt_story(r.content, story_key.decode('base64'),
story_iv.decode('base64'))
if any((is_image(data), is_video(data), is_zip(data))):
return data
return None
def get_blob(self, snap_id):
"""Get the image or video of a given snap
Returns the decrypted image or a video of the given snap or None if
data is invalid.
:param snap_id: Snap id to fetch
"""
r = self._request('blob', {'username': self.username, 'id': snap_id},
raise_for_status=False)
data = decrypt(r.content)
if any((is_image(data), is_video(data), is_zip(data))):
return data
return None
def send_events(self, events, data=None):
"""Send event data
Returns true on success.
:param events: List of events to send
:param data: Additional data to send
"""
if data is None:
data = {}
r = self._request('update_snaps', {
'username': self.username,
'events': json.dumps(events),
'json': json.dumps(data)
})
return len(r.content) == 0
def mark_viewed(self, snap_id, view_duration=1):
"""Mark a snap as viewed
Returns true on success.
:param snap_id: Snap id to mark as viewed
:param view_duration: Number of seconds snap was viewed
"""
now = time()
data = {snap_id: {u't': now, u'sv': view_duration}}
events = [
{
u'eventName': u'SNAP_VIEW', u'params': {u'id': snap_id},
u'ts': int(round(now)) - view_duration
},
{
u'eventName': u'SNAP_EXPIRED', u'params': {u'id': snap_id},
u'ts': int(round(now))
}
]
return self.send_events(events, data)
def mark_screenshot(self, snap_id, view_duration=1):
"""Mark a snap as screenshotted
Returns true on success.
:param snap_id: Snap id to mark as viewed
:param view_duration: Number of seconds snap was viewed
"""
now = time()
data = {snap_id: {u't': now, u'sv': view_duration, u'c': 3}}
events = [
{
u'eventName': u'SNAP_SCREENSHOT', u'params': {u'id': snap_id},
u'ts': int(round(now)) - view_duration
}
]
return self.send_events(events, data)
def update_privacy(self, friends_only):
"""Set privacy settings
Returns true on success.
:param friends_only: True to allow snaps from friends only
"""
setting = lambda f: PRIVACY_FRIENDS if f else PRIVACY_EVERYONE
r = self._request('settings', {
'username': self.username,
'action': 'updatePrivacy',
'privacySetting': setting(friends_only)
})
return r.json().get('param') == str(setting(friends_only))
def get_friends(self):
"""Get friends
Returns a list of friends.
"""
return self.get_updates().get('friends', [])
def get_best_friends(self):
"""Get best friends
Returns a list of best friends.
"""
return self.get_updates().get('bests', [])
def add_friend(self, username):
"""Add user as friend
Returns JSON response.
Expected messages:
Success: '{username} is now your friend!'
Pending: '{username} is private. Friend request sent.'
Failure: 'Sorry! Couldn't find {username}'
:param username: Username to add as a friend
"""
r = self._request('friend', {
'action': 'add',
'friend': username,
'username': self.username
})
return r.json()
def delete_friend(self, username):
"""Remove user from friends
Returns true on success.
:param username: Username to remove from friends
"""
r = self._request('friend', {
'action': 'delete',
'friend': username,
'username': self.username
})
return r.json().get('logged')
def block(self, username):
"""Block a user
Returns true on success.
:param username: Username to block
"""
r = self._request('friend', {
'action': 'block',
'friend': username,
'username': self.username
})
return r.json().get('message') == '{0} was blocked'.format(username)
def unblock(self, username):
"""Unblock a user
Returns true on success.
:param username: Username to unblock
"""
r = self._request('friend', {
'action': 'unblock',
'friend': username,
'username': self.username
})
return r.json().get('message') == '{0} was unblocked'.format(username)
def get_blocked(self):
"""Find blocked users
Returns a list of currently blocked users.
"""
return [f for f in self.get_friends() if f['type'] == FRIEND_BLOCKED]
def upload(self, path):
"""Upload media
Returns the media ID on success. The media ID is used when sending
the snap.
"""
if not os.path.exists(path):
raise ValueError('No such file: {0}'.format(path))
with open(path, 'rb') as f:
data = f.read()
media_type = get_media_type(data)
if media_type is None:
raise ValueError('Could not determine media type for given data')
media_id = make_media_id(self.username)
r = self._request('upload', {
'username': self.username,
'media_id': media_id,
'type': media_type
}, files={'data': encrypt(data)})
return media_id if len(r.content) == 0 else None
def send(self, media_id, recipients, time=5):
"""Send a snap. Requires a media_id returned by the upload method
Returns true if the snap was sent successfully
"""
r = self._request('send', {
'username': self.username,
'media_id': media_id,
'recipient': recipients,
'time': time,
'zipped': '0'
})
return len(r.content) == 0
def send_to_story(self, media_id, time=5, media_type=0):
"""Send a snap to your story. Requires a media_id returned by the upload method
Returns true if the snap was sent successfully.
"""
r = self._request('post_story', {
'username': self.username,
'media_id': media_id,
'client_id': media_id,
'time': time,
'type': media_type,
'zipped': '0'
})
return r.json()
def clear_feed(self):
"""Clear the user's feed
Returns true if feed was successfully cleared.
"""
r = self._request('clear', {
'username': self.username
})
return len(r.content) == 0
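# Illustrative usage sketch (not part of the original module; the credentials
# and output filenames below are placeholders): download every pending snap
# using only the methods defined above.
if __name__ == '__main__':
    s = Snapchat()
    s.login('my_username', 'my_password')
    for received in s.get_snaps():
        blob = s.get_blob(received['id'])
        if blob is None:
            continue
        ext = get_file_extension(received['media_type'])
        with open('{0}.{1}'.format(received['id'], ext), 'wb') as f:
            f.write(blob)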
|
|
# -*- coding: utf-8 -*-
"""
Django settings for tecnoservicio project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
from celery.schedules import crontab
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('tecnoservicio')
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
'django.contrib.humanize',
# Admin
'suit',
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
#'kombu.transport.django',
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
'reversion',
'djcelery',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'tecnoservicio.users', # custom users app
'tecnoservicio.ordenes',
'tecnoservicio.tareas',
# Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'reversion.middleware.RevisionMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'tecnoservicio.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""Gustavo Castellanos""", 'g@gustavo-castellanos.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'tecnoservicio_test',
'USER': 'postgres',
'PASSWORD': 'postgresql',
'HOST': 'localhost',
'PORT': '5433',
}
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Mexico_City'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'es-MX'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'allauth.account.context_processors.account',
'allauth.socialaccount.context_processors.socialaccount',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = 'optional'
ACCOUNT_SESSION_REMEMBER = False
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'inicio'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Django Suit configuration example
SUIT_CONFIG = {
# header
'ADMIN_NAME': 'Tecnoservicio',
'HEADER_DATE_FORMAT': 'l, j F Y',
'HEADER_TIME_FORMAT': 'H:i',
# forms
'SHOW_REQUIRED_ASTERISK': True, # Default True
'CONFIRM_UNSAVED_CHANGES': True, # Default True
# misc
'LIST_PER_PAGE': 30
}
CELERY_IMPORTS = ("tecnoservicio.tareas",)
CELERYBEAT_SCHEDULE = {
'actualizar_ordenes': {
'task': 'tecnoservicio.tareas.tasks.actualizar_ordenes',
'schedule': crontab(minute=0, hour='*/3,9-21'),
#'schedule': crontab(),
'args': (),
},
}
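# Illustrative sketch (hypothetical, not part of this settings file): the task
# referenced above would live in tecnoservicio/tareas/tasks.py and, with
# djcelery/Celery 3.x, could look roughly like:
#
#     from celery import shared_task
#
#     @shared_task
#     def actualizar_ordenes():
#         # Refresh order data; Celery beat triggers this per the schedule above.
#         ...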
# Your common stuff: Below this line define 3rd party library settings
DOMINIO = 'http://mexicof.com'
CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'
DEFAULT_FROM_EMAIL = 'MexicoF <g@zaresdelweb.com>'
EMAIL_SUBJECT_PREFIX = '[MexicoF] '
|
|
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from unittest import mock
import fixtures
from oslotest import base as test_base
from oslo_utils import excutils
from oslo_utils import timeutils
class Fail1(excutils.CausedByException):
pass
class Fail2(excutils.CausedByException):
pass
class CausedByTest(test_base.BaseTestCase):
def test_caused_by_explicit(self):
e = self.assertRaises(Fail1,
excutils.raise_with_cause,
Fail1, "I was broken",
cause=Fail2("I have been broken"))
self.assertIsInstance(e.cause, Fail2)
e_p = e.pformat()
self.assertIn("I have been broken", e_p)
self.assertIn("Fail2", e_p)
def test_caused_by_implicit(self):
def raises_chained():
try:
raise Fail2("I have been broken")
except Fail2:
excutils.raise_with_cause(Fail1, "I was broken")
e = self.assertRaises(Fail1, raises_chained)
self.assertIsInstance(e.cause, Fail2)
e_p = e.pformat()
self.assertIn("I have been broken", e_p)
self.assertIn("Fail2", e_p)
class SaveAndReraiseTest(test_base.BaseTestCase):
def test_save_and_reraise_exception_forced(self):
def _force_reraise():
try:
raise IOError("I broke")
except Exception:
with excutils.save_and_reraise_exception() as e:
e.reraise = False
e.force_reraise()
self.assertRaises(IOError, _force_reraise)
def test_save_and_reraise_exception_capture_reraise(self):
def _force_reraise():
try:
raise IOError("I broke")
except Exception:
excutils.save_and_reraise_exception().capture().force_reraise()
self.assertRaises(IOError, _force_reraise)
def test_save_and_reraise_exception_capture_not_active(self):
e = excutils.save_and_reraise_exception()
self.assertRaises(RuntimeError, e.capture, check=True)
def test_save_and_reraise_exception_forced_not_active(self):
e = excutils.save_and_reraise_exception()
self.assertRaises(RuntimeError, e.force_reraise)
e = excutils.save_and_reraise_exception()
e.capture(check=False)
self.assertRaises(RuntimeError, e.force_reraise)
def test_save_and_reraise_exception(self):
e = None
msg = 'foo'
try:
try:
raise Exception(msg)
except Exception:
with excutils.save_and_reraise_exception():
pass
except Exception as _e:
e = _e
self.assertEqual(str(e), msg)
@mock.patch('logging.getLogger')
def test_save_and_reraise_exception_dropped(self, get_logger_mock):
logger = get_logger_mock()
e = None
msg = 'second exception'
try:
try:
raise Exception('dropped')
except Exception:
with excutils.save_and_reraise_exception():
raise Exception(msg)
except Exception as _e:
e = _e
self.assertEqual(str(e), msg)
self.assertTrue(logger.error.called)
def test_save_and_reraise_exception_no_reraise(self):
"""Test that suppressing the reraise works."""
try:
raise Exception('foo')
except Exception:
with excutils.save_and_reraise_exception() as ctxt:
ctxt.reraise = False
@mock.patch('logging.getLogger')
def test_save_and_reraise_exception_dropped_no_reraise(self,
get_logger_mock):
logger = get_logger_mock()
e = None
msg = 'second exception'
try:
try:
raise Exception('dropped')
except Exception:
with excutils.save_and_reraise_exception(reraise=False):
raise Exception(msg)
except Exception as _e:
e = _e
self.assertEqual(str(e), msg)
self.assertFalse(logger.error.called)
def test_save_and_reraise_exception_provided_logger(self):
fake_logger = mock.MagicMock()
try:
try:
raise Exception('foo')
except Exception:
with excutils.save_and_reraise_exception(logger=fake_logger):
raise Exception('second exception')
except Exception:
pass
self.assertTrue(fake_logger.error.called)
class ForeverRetryUncaughtExceptionsTest(test_base.BaseTestCase):
def setUp(self):
super(ForeverRetryUncaughtExceptionsTest, self).setUp()
self._exceptions = []
self.useFixture(fixtures.MockPatch('time.sleep', return_value=None))
@excutils.forever_retry_uncaught_exceptions
def exception_generator(self):
while self._exceptions:
raise self._exceptions.pop(0)
@mock.patch.object(logging, 'exception')
@mock.patch.object(timeutils, 'now')
def test_exc_retrier_1exc_gives_1log(self, mock_now, mock_log):
self._exceptions = [
Exception('unexpected %d' % 1),
]
mock_now.side_effect = [0]
self.exception_generator()
self.assertEqual([], self._exceptions)
# log should only be called once
mock_log.assert_called_once_with(
'Unexpected exception occurred %d time(s)... retrying.' % 1
)
mock_now.assert_has_calls([
mock.call(),
])
@mock.patch.object(logging, 'exception')
@mock.patch.object(timeutils, 'now')
def test_exc_retrier_same_10exc_1min_gives_1log(self, mock_now, mock_log):
self._exceptions = [
Exception('unexpected 1'),
]
# Timestamp calls that happen after the logging is possibly triggered.
mock_now_side_effect = [0]
# By design, the following exceptions won't get logged because they
# are within the same minute.
for i in range(2, 11):
self._exceptions.append(Exception('unexpected 1'))
# Timestamp calls that happen before the logging is possibly
# triggered.
mock_now_side_effect.append(i)
mock_now.side_effect = mock_now_side_effect
self.exception_generator()
self.assertEqual([], self._exceptions)
self.assertEqual(10, len(mock_now.mock_calls))
self.assertEqual(1, len(mock_log.mock_calls))
mock_log.assert_has_calls([
mock.call('Unexpected exception occurred 1 time(s)... retrying.'),
])
@mock.patch.object(logging, 'exception')
@mock.patch.object(timeutils, 'now')
def test_exc_retrier_same_2exc_2min_gives_2logs(self, mock_now, mock_log):
self._exceptions = [
Exception('unexpected 1'),
Exception('unexpected 1'),
]
mock_now.side_effect = [
# Timestamp calls that happen after the logging is possibly
# triggered
0,
# Timestamp calls that happen before the logging is possibly
# triggered
65,
# Timestamp calls that happen after the logging is possibly
# triggered.
65,
66,
]
self.exception_generator()
self.assertEqual([], self._exceptions)
self.assertEqual(4, len(mock_now.mock_calls))
self.assertEqual(2, len(mock_log.mock_calls))
mock_log.assert_has_calls([
mock.call('Unexpected exception occurred 1 time(s)... retrying.'),
mock.call('Unexpected exception occurred 1 time(s)... retrying.'),
])
@mock.patch.object(logging, 'exception')
@mock.patch.object(timeutils, 'now')
def test_exc_retrier_same_10exc_2min_gives_2logs(self, mock_now, mock_log):
self._exceptions = [
Exception('unexpected 1'),
]
# Timestamp calls that happen after the logging is possibly triggered.
mock_now_side_effect = [
0,
]
for ts in [12, 23, 34, 45]:
self._exceptions.append(Exception('unexpected 1'))
# Timestamp calls that happen before the logging is possibly
# triggered.
mock_now_side_effect.append(ts)
# The previous 4 exceptions are counted here
self._exceptions.append(Exception('unexpected 1'))
# Timestamp calls that happen before the logging is possibly triggered.
mock_now_side_effect.append(106)
for ts in [106, 107]:
# Timestamp calls that happen after the logging is possibly
# triggered.
mock_now_side_effect.append(ts)
# Again, the following are not logged due to being within
# the same minute
for ts in [117, 128, 139, 150]:
self._exceptions.append(Exception('unexpected 1'))
# Timestamp calls that happen before the logging is possibly
# triggered.
mock_now_side_effect.append(ts)
mock_now.side_effect = mock_now_side_effect
self.exception_generator()
self.assertEqual([], self._exceptions)
self.assertEqual(12, len(mock_now.mock_calls))
self.assertEqual(2, len(mock_log.mock_calls))
mock_log.assert_has_calls([
mock.call('Unexpected exception occurred 1 time(s)... retrying.'),
mock.call('Unexpected exception occurred 5 time(s)... retrying.'),
])
@mock.patch.object(logging, 'exception')
@mock.patch.object(timeutils, 'now')
def test_exc_retrier_mixed_4exc_1min_gives_2logs(self, mock_now, mock_log):
# The stop watch will be started, which will consume one timestamp
# call.
self._exceptions = [
Exception('unexpected 1'),
]
# Timestamp calls that happen after the logging is possibly
# triggered.
mock_now_side_effect = [0]
# By design, this second 'unexpected 1' exception is not counted. This
# is likely a rare thing and is a sacrifice for code simplicity.
self._exceptions.append(Exception('unexpected 1'))
# Timestamp calls that happen before the logging is possibly triggered.
# Since the exception will be the same the expiry method will be
# called, which uses up a timestamp call.
mock_now_side_effect.append(5)
self._exceptions.append(Exception('unexpected 2'))
# Timestamp calls that happen after the logging is possibly triggered.
# The watch should get reset, which uses up two timestamp calls.
mock_now_side_effect.extend([10, 20])
self._exceptions.append(Exception('unexpected 2'))
# Timestamp calls that happen before the logging is possibly triggered.
# Since the exception will be the same the expiry method will be
# called, which uses up a timestamp call.
mock_now_side_effect.append(25)
mock_now.side_effect = mock_now_side_effect
self.exception_generator()
self.assertEqual([], self._exceptions)
self.assertEqual(5, len(mock_now.mock_calls))
self.assertEqual(2, len(mock_log.mock_calls))
mock_log.assert_has_calls([
mock.call('Unexpected exception occurred 1 time(s)... retrying.'),
mock.call('Unexpected exception occurred 1 time(s)... retrying.'),
])
@mock.patch.object(logging, 'exception')
@mock.patch.object(timeutils, 'now')
def test_exc_retrier_mixed_4exc_2min_gives_2logs(self, mock_now, mock_log):
self._exceptions = [
Exception('unexpected 1'),
]
# Timestamp calls that happen after the logging is possibly triggered.
mock_now_side_effect = [0]
# Again, this second exception of the same type is not counted
# for the sake of code simplicity.
self._exceptions.append(Exception('unexpected 1'))
# Timestamp calls that happen before the logging is possibly triggered.
mock_now_side_effect.append(10)
        # The difference between this and the previous case is that the log
# is also triggered by more than a minute expiring.
self._exceptions.append(Exception('unexpected 2'))
# Timestamp calls that happen after the logging is possibly triggered.
mock_now_side_effect.extend([100, 105])
self._exceptions.append(Exception('unexpected 2'))
# Timestamp calls that happen before the logging is possibly triggered.
mock_now_side_effect.append(110)
mock_now.side_effect = mock_now_side_effect
self.exception_generator()
self.assertEqual([], self._exceptions)
self.assertEqual(5, len(mock_now.mock_calls))
self.assertEqual(2, len(mock_log.mock_calls))
mock_log.assert_has_calls([
mock.call('Unexpected exception occurred 1 time(s)... retrying.'),
mock.call('Unexpected exception occurred 1 time(s)... retrying.'),
])
@mock.patch.object(logging, 'exception')
@mock.patch.object(timeutils, 'now')
def test_exc_retrier_mixed_4exc_2min_gives_3logs(self, mock_now, mock_log):
self._exceptions = [
Exception('unexpected 1'),
]
# Timestamp calls that happen after the logging is possibly triggered.
mock_now_side_effect = [0]
        # This time the second 'unexpected 1' exception is counted because
        # the same exception occurs just as the minute expires.
self._exceptions.append(Exception('unexpected 1'))
# Timestamp calls that happen before the logging is possibly triggered.
mock_now_side_effect.append(10)
self._exceptions.append(Exception('unexpected 1'))
# Timestamp calls that happen before the logging is possibly triggered.
mock_now_side_effect.extend([100, 100, 105])
self._exceptions.append(Exception('unexpected 2'))
# Timestamp calls that happen after the logging is possibly triggered.
mock_now_side_effect.extend([110, 111])
mock_now.side_effect = mock_now_side_effect
self.exception_generator()
self.assertEqual([], self._exceptions)
self.assertEqual(7, len(mock_now.mock_calls))
self.assertEqual(3, len(mock_log.mock_calls))
mock_log.assert_has_calls([
mock.call('Unexpected exception occurred 1 time(s)... retrying.'),
mock.call('Unexpected exception occurred 2 time(s)... retrying.'),
mock.call('Unexpected exception occurred 1 time(s)... retrying.'),
])
class ExceptionFilterTest(test_base.BaseTestCase):
def _make_filter_func(self, ignore_classes=AssertionError):
@excutils.exception_filter
def ignore_exceptions(ex):
'''Ignore some exceptions F.'''
return isinstance(ex, ignore_classes)
return ignore_exceptions
def _make_filter_method(self, ignore_classes=AssertionError):
class ExceptionIgnorer(object):
def __init__(self, ignore):
self.ignore = ignore
@excutils.exception_filter
def ignore_exceptions(self, ex):
'''Ignore some exceptions M.'''
return isinstance(ex, self.ignore)
return ExceptionIgnorer(ignore_classes).ignore_exceptions
def _make_filter_classmethod(self, ignore_classes=AssertionError):
class ExceptionIgnorer(object):
ignore = ignore_classes
@excutils.exception_filter
@classmethod
def ignore_exceptions(cls, ex):
'''Ignore some exceptions C.'''
return isinstance(ex, cls.ignore)
return ExceptionIgnorer.ignore_exceptions
def _make_filter_staticmethod(self, ignore_classes=AssertionError):
class ExceptionIgnorer(object):
@excutils.exception_filter
@staticmethod
def ignore_exceptions(ex):
'''Ignore some exceptions S.'''
return isinstance(ex, ignore_classes)
return ExceptionIgnorer.ignore_exceptions
def test_filter_func_call(self):
ignore_assertion_error = self._make_filter_func()
try:
assert False, "This is a test"
except Exception as exc:
ignore_assertion_error(exc)
def test_raise_func_call(self):
ignore_assertion_error = self._make_filter_func()
try:
raise RuntimeError
except Exception as exc:
self.assertRaises(RuntimeError, ignore_assertion_error, exc)
def test_raise_previous_func_call(self):
ignore_assertion_error = self._make_filter_func()
try:
raise RuntimeError
except Exception as exc1:
try:
raise RuntimeError
except Exception as exc2:
self.assertIsNot(exc1, exc2)
raised = self.assertRaises(RuntimeError,
ignore_assertion_error,
exc1)
self.assertIs(exc1, raised)
def test_raise_previous_after_filtered_func_call(self):
ignore_assertion_error = self._make_filter_func()
try:
raise RuntimeError
except Exception as exc1:
try:
assert False, "This is a test"
except Exception:
pass
self.assertRaises(RuntimeError, ignore_assertion_error, exc1)
def test_raise_other_func_call(self):
@excutils.exception_filter
def translate_exceptions(ex):
raise RuntimeError
try:
assert False, "This is a test"
except Exception as exc:
self.assertRaises(RuntimeError, translate_exceptions, exc)
def test_filter_func_context_manager(self):
ignore_assertion_error = self._make_filter_func()
with ignore_assertion_error:
assert False, "This is a test"
def test_raise_func_context_manager(self):
ignore_assertion_error = self._make_filter_func()
def try_runtime_err():
with ignore_assertion_error:
raise RuntimeError
self.assertRaises(RuntimeError, try_runtime_err)
def test_raise_other_func_context_manager(self):
@excutils.exception_filter
def translate_exceptions(ex):
raise RuntimeError
def try_assertion():
with translate_exceptions:
assert False, "This is a test"
self.assertRaises(RuntimeError, try_assertion)
def test_noexc_func_context_manager(self):
ignore_assertion_error = self._make_filter_func()
with ignore_assertion_error:
pass
def test_noexc_nocall_func_context_manager(self):
@excutils.exception_filter
def translate_exceptions(ex):
raise RuntimeError
with translate_exceptions:
pass
def test_func_docstring(self):
ignore_func = self._make_filter_func()
self.assertEqual('Ignore some exceptions F.', ignore_func.__doc__)
def test_filter_method_call(self):
ignore_assertion_error = self._make_filter_method()
try:
assert False, "This is a test"
except Exception as exc:
ignore_assertion_error(exc)
def test_raise_method_call(self):
ignore_assertion_error = self._make_filter_method()
try:
raise RuntimeError
except Exception as exc:
self.assertRaises(RuntimeError, ignore_assertion_error, exc)
def test_filter_method_context_manager(self):
ignore_assertion_error = self._make_filter_method()
with ignore_assertion_error:
assert False, "This is a test"
def test_raise_method_context_manager(self):
ignore_assertion_error = self._make_filter_method()
def try_runtime_err():
with ignore_assertion_error:
raise RuntimeError
self.assertRaises(RuntimeError, try_runtime_err)
def test_method_docstring(self):
ignore_func = self._make_filter_method()
self.assertEqual('Ignore some exceptions M.', ignore_func.__doc__)
def test_filter_classmethod_call(self):
ignore_assertion_error = self._make_filter_classmethod()
try:
assert False, "This is a test"
except Exception as exc:
ignore_assertion_error(exc)
def test_raise_classmethod_call(self):
ignore_assertion_error = self._make_filter_classmethod()
try:
raise RuntimeError
except Exception as exc:
self.assertRaises(RuntimeError, ignore_assertion_error, exc)
def test_filter_classmethod_context_manager(self):
ignore_assertion_error = self._make_filter_classmethod()
with ignore_assertion_error:
assert False, "This is a test"
def test_raise_classmethod_context_manager(self):
ignore_assertion_error = self._make_filter_classmethod()
def try_runtime_err():
with ignore_assertion_error:
raise RuntimeError
self.assertRaises(RuntimeError, try_runtime_err)
def test_classmethod_docstring(self):
ignore_func = self._make_filter_classmethod()
self.assertEqual('Ignore some exceptions C.', ignore_func.__doc__)
def test_filter_staticmethod_call(self):
ignore_assertion_error = self._make_filter_staticmethod()
try:
assert False, "This is a test"
except Exception as exc:
ignore_assertion_error(exc)
def test_raise_staticmethod_call(self):
ignore_assertion_error = self._make_filter_staticmethod()
try:
raise RuntimeError
except Exception as exc:
self.assertRaises(RuntimeError, ignore_assertion_error, exc)
def test_filter_staticmethod_context_manager(self):
ignore_assertion_error = self._make_filter_staticmethod()
with ignore_assertion_error:
assert False, "This is a test"
def test_raise_staticmethod_context_manager(self):
ignore_assertion_error = self._make_filter_staticmethod()
def try_runtime_err():
with ignore_assertion_error:
raise RuntimeError
self.assertRaises(RuntimeError, try_runtime_err)
def test_staticmethod_docstring(self):
ignore_func = self._make_filter_staticmethod()
self.assertEqual('Ignore some exceptions S.', ignore_func.__doc__)
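# Illustrative usage sketches (not part of the original test module): how the
# helpers exercised above are typically used in application code. The callables
# allocate/configure/rollback and the KeyError choice below are hypothetical.
def _example_cleanup_on_failure(allocate, configure, rollback):
    """Allocate a resource and roll it back if configuration fails."""
    resource = allocate()
    try:
        configure(resource)
    except Exception:
        with excutils.save_and_reraise_exception():
            # Best-effort cleanup; the original exception is re-raised when
            # the context manager exits.
            rollback(resource)
    return resource
@excutils.exception_filter
def _example_ignore_missing(ex):
    """Usable as a decorator or a context manager; swallows only KeyError."""
    return isinstance(ex, KeyError)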
|
|
"""
Wrapper for rsync
.. versionadded:: 2014.1.0
This data can also be passed into :ref:`pillar <pillar-walk-through>`.
Options passed into opts will overwrite options passed into pillar.
"""
import errno
import logging
import re
import tempfile
import salt.utils.files
import salt.utils.path
from salt.exceptions import CommandExecutionError, SaltInvocationError
log = logging.getLogger(__name__)
__virtualname__ = "rsync"
def __virtual__():
"""
Only load module if rsync binary is present
"""
if salt.utils.path.which("rsync"):
return __virtualname__
return (
False,
"The rsync execution module cannot be loaded: "
"the rsync binary is not in the path.",
)
def _check(delete, force, update, passwordfile, exclude, excludefrom, dryrun, rsh):
"""
Generate rsync options
"""
options = ["-avz"]
if delete:
options.append("--delete")
if force:
options.append("--force")
if update:
options.append("--update")
if rsh:
options.append("--rsh={}".format(rsh))
if passwordfile:
options.extend(["--password-file", passwordfile])
    if excludefrom:
        options.extend(["--exclude-from", excludefrom])
        # --exclude-from supersedes --exclude, so drop any exclude value.
        if exclude:
            exclude = False
    if exclude:
if isinstance(exclude, list):
for ex_ in exclude:
options.extend(["--exclude", ex_])
else:
options.extend(["--exclude", exclude])
if dryrun:
options.append("--dry-run")
return options
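# For example, with the option handling above:
#   _check(delete=True, force=False, update=False, passwordfile=None,
#          exclude="tmp/", excludefrom=None, dryrun=True, rsh=None)
# returns ["-avz", "--delete", "--exclude", "tmp/", "--dry-run"]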
def rsync(
src,
dst,
delete=False,
force=False,
update=False,
passwordfile=None,
exclude=None,
excludefrom=None,
dryrun=False,
rsh=None,
additional_opts=None,
saltenv="base",
):
"""
.. versionchanged:: 2016.3.0
Return data now contains just the output of the rsync command, instead
of a dictionary as returned from :py:func:`cmd.run_all
<salt.modules.cmdmod.run_all>`.
Rsync files from src to dst
src
The source location where files will be rsynced from.
dst
The destination location where files will be rsynced to.
delete : False
Whether to enable the rsync `--delete` flag, which
will delete extraneous files from dest dirs
force : False
Whether to enable the rsync `--force` flag, which
will force deletion of dirs even if not empty.
update : False
Whether to enable the rsync `--update` flag, which
forces rsync to skip any files which exist on the
destination and have a modified time that is newer
than the source file.
passwordfile
A file that contains a password for accessing an
rsync daemon. The file should contain just the
password.
exclude
Whether to enable the rsync `--exclude` flag, which
will exclude files matching a PATTERN.
excludefrom
        Whether to enable the rsync `--exclude-from` flag, which
will read exclude patterns from a file.
dryrun : False
Whether to enable the rsync `--dry-run` flag, which
will perform a trial run with no changes made.
rsh
Whether to enable the rsync `--rsh` flag, to
specify the remote shell to use.
additional_opts
Any additional rsync options, should be specified as a list.
saltenv
Specify a salt fileserver environment to be used.
CLI Example:
.. code-block:: bash
salt '*' rsync.rsync /path/to/src /path/to/dest delete=True update=True passwordfile=/etc/pass.crt exclude=exclude/dir
salt '*' rsync.rsync /path/to/src delete=True excludefrom=/xx.ini
salt '*' rsync.rsync /path/to/src delete=True exclude='[exclude1/dir,exclude2/dir]' additional_opts='["--partial", "--bwlimit=5000"]'
"""
if not src:
src = __salt__["config.option"]("rsync.src")
if not dst:
dst = __salt__["config.option"]("rsync.dst")
if not delete:
delete = __salt__["config.option"]("rsync.delete")
if not force:
force = __salt__["config.option"]("rsync.force")
if not update:
update = __salt__["config.option"]("rsync.update")
if not passwordfile:
passwordfile = __salt__["config.option"]("rsync.passwordfile")
if not exclude:
exclude = __salt__["config.option"]("rsync.exclude")
if not excludefrom:
excludefrom = __salt__["config.option"]("rsync.excludefrom")
if not dryrun:
dryrun = __salt__["config.option"]("rsync.dryrun")
if not rsh:
rsh = __salt__["config.option"]("rsync.rsh")
if not src or not dst:
raise SaltInvocationError("src and dst cannot be empty")
tmp_src = None
if src.startswith("salt://"):
_src = src
_path = re.sub("salt://", "", _src)
src_is_dir = False
if _path in __salt__["cp.list_master_dirs"](saltenv=saltenv):
src_is_dir = True
if src_is_dir:
tmp_src = tempfile.mkdtemp()
dir_src = __salt__["cp.get_dir"](_src, tmp_src, saltenv)
if dir_src:
src = tmp_src
# Ensure src ends in / so we
# get the contents not the tmpdir
# itself.
if not src.endswith("/"):
src = "{}/".format(src)
else:
raise CommandExecutionError("{} does not exist".format(src))
else:
tmp_src = salt.utils.files.mkstemp()
file_src = __salt__["cp.get_file"](_src, tmp_src, saltenv)
if file_src:
src = tmp_src
else:
raise CommandExecutionError("{} does not exist".format(src))
option = _check(
delete, force, update, passwordfile, exclude, excludefrom, dryrun, rsh
)
if additional_opts and isinstance(additional_opts, list):
option = option + additional_opts
cmd = ["rsync"] + option + [src, dst]
log.debug("Running rsync command: %s", cmd)
try:
return __salt__["cmd.run_all"](cmd, python_shell=False)
except OSError as exc:
raise CommandExecutionError(exc.strerror)
finally:
if tmp_src:
__salt__["file.remove"](tmp_src)
def version():
"""
.. versionchanged:: 2016.3.0
Return data now contains just the version number as a string, instead
of a dictionary as returned from :py:func:`cmd.run_all
<salt.modules.cmdmod.run_all>`.
Returns rsync version
CLI Example:
.. code-block:: bash
salt '*' rsync.version
"""
try:
out = __salt__["cmd.run_stdout"](["rsync", "--version"], python_shell=False)
except OSError as exc:
raise CommandExecutionError(exc.strerror)
try:
return out.split("\n")[0].split()[2]
except IndexError:
raise CommandExecutionError("Unable to determine rsync version")
def config(conf_path="/etc/rsyncd.conf"):
"""
.. versionchanged:: 2016.3.0
Return data now contains just the contents of the rsyncd.conf as a
string, instead of a dictionary as returned from :py:func:`cmd.run_all
<salt.modules.cmdmod.run_all>`.
Returns the contents of the rsync config file
conf_path : /etc/rsyncd.conf
Path to the config file
CLI Example:
.. code-block:: bash
salt '*' rsync.config
"""
ret = ""
try:
with salt.utils.files.fopen(conf_path, "r") as fp_:
for line in fp_:
ret += salt.utils.stringutils.to_unicode(line)
except OSError as exc:
if exc.errno == errno.ENOENT:
raise CommandExecutionError("{} does not exist".format(conf_path))
elif exc.errno == errno.EACCES:
raise CommandExecutionError(
"Unable to read {}, access denied".format(conf_path)
)
elif exc.errno == errno.EISDIR:
raise CommandExecutionError(
"Unable to read {}, path is a directory".format(conf_path)
)
else:
raise CommandExecutionError("Error {}: {}".format(exc.errno, exc.strerror))
else:
return ret
|
|
import logging
import os
import sys
PROJECT_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
DATA_ROOT = os.path.join(PROJECT_DIR, '.gaedata')
# Overrides for os.environ.
env_ext = {'DJANGO_SETTINGS_MODULE': 'settings'}
def setup_env():
"""Configures GAE environment for command-line apps."""
# Try to import the appengine code from the system path.
try:
from google.appengine.api import apiproxy_stub_map
except ImportError:
for k in [k for k in sys.modules if k.startswith('google')]:
del sys.modules[k]
# Not on the system path. Build a list of alternative paths
# where it may be. First look within the project for a local
# copy, then look for where the Mac OS SDK installs it.
paths = [os.path.join(PROJECT_DIR, '.google_appengine'),
os.environ.get('APP_ENGINE_SDK'),
'/usr/local/google_appengine',
'/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine']
for path in os.environ.get('PATH', '').split(os.pathsep):
path = path.rstrip(os.sep)
if path.endswith('google_appengine'):
paths.append(path)
if os.name in ('nt', 'dos'):
path = r'%(PROGRAMFILES)s\Google\google_appengine' % os.environ
paths.append(path)
# Loop through all possible paths and look for the SDK dir.
sdk_path = None
for path in paths:
if not path:
continue
path = os.path.expanduser(path)
path = os.path.realpath(path)
if os.path.exists(path):
sdk_path = path
break
# The SDK could not be found in any known location.
if sdk_path is None:
sys.stderr.write("The Google App Engine SDK could not be found!\n"
"Make sure it's accessible via your PATH "
"environment and called google_appengine.\n")
sys.exit(1)
# Add the SDK and the libraries within it to the system path.
extra_paths = [sdk_path]
lib = os.path.join(sdk_path, 'lib')
# Automatically add all packages in the SDK's lib folder:
for name in os.listdir(lib):
root = os.path.join(lib, name)
subdir = name
# Package can be under 'lib/<pkg>/<pkg>/' or
# 'lib/<pkg>/lib/<pkg>/'.
detect = (os.path.join(root, subdir),
os.path.join(root, 'lib', subdir))
for path in detect:
if os.path.isdir(path):
extra_paths.append(os.path.dirname(path))
break
else:
if name == 'webapp2':
extra_paths.append(root)
elif name == 'webob_0_9':
extra_paths.append(root)
sys.path = extra_paths + sys.path
from google.appengine.api import apiproxy_stub_map
setup_project()
from .utils import have_appserver
if have_appserver:
# App Engine's threading.local is broken.
setup_threading()
elif not os.path.exists(DATA_ROOT):
os.mkdir(DATA_ROOT)
setup_logging()
if not have_appserver:
# Patch Django to support loading management commands from zip
# files.
from django.core import management
management.find_commands = find_commands
def find_commands(management_dir):
"""
Given a path to a management directory, returns a list of all the
command names that are available.
This version works for django deployments which are file based or
contained in a ZIP (in sys.path).
Returns an empty list if no commands are defined.
"""
import pkgutil
return [modname for importer, modname, ispkg in pkgutil.iter_modules(
[os.path.join(management_dir, 'commands')]) if not ispkg]
def setup_threading():
if sys.version_info >= (2, 7):
return
# XXX: On Python 2.5 GAE's threading.local doesn't work correctly
# with subclassing.
try:
from django.utils._threading_local import local
import threading
threading.local = local
except ImportError:
pass
def setup_logging():
# Fix Python 2.6 logging module.
logging.logMultiprocessing = 0
# Enable logging.
level = logging.DEBUG
from .utils import have_appserver
if have_appserver:
# We can't import settings at this point when running a normal
# manage.py command because this module gets imported from
# settings.py.
from django.conf import settings
if not settings.DEBUG:
level = logging.INFO
logging.getLogger().setLevel(level)
def setup_project():
from .utils import have_appserver, on_production_server
if have_appserver:
# This fixes a pwd import bug for os.path.expanduser().
env_ext['HOME'] = PROJECT_DIR
# The dev_appserver creates a sandbox which restricts access to
# certain modules and builtins in order to emulate the production
# environment. Here we get the subprocess module back into the
    # dev_appserver sandbox. This module is just too important for
    # development. Also we add the compiler/parser module back and
    # enable https connections (they seem to be broken on Windows because
# the _ssl module is disallowed).
if not have_appserver:
from google.appengine.tools import dev_appserver
try:
# Backup os.environ. It gets overwritten by the
# dev_appserver, but it's needed by the subprocess module.
env = dev_appserver.DEFAULT_ENV
dev_appserver.DEFAULT_ENV = os.environ.copy()
dev_appserver.DEFAULT_ENV.update(env)
# Backup the buffer() builtin. The subprocess in Python 2.5
            # on Linux and OS X needs it, but the dev_appserver
# removes it.
dev_appserver.buffer = buffer
except AttributeError:
logging.warn("Could not patch the default environment. "
"The subprocess module will not work correctly.")
try:
# Allow importing compiler/parser, _ssl (for https),
# _io for Python 2.7 io support on OS X
dev_appserver.HardenedModulesHook._WHITE_LIST_C_MODULES.extend(
('parser', '_ssl', '_io'))
except AttributeError:
logging.warn("Could not patch modules whitelist. the compiler "
"and parser modules will not work and SSL support "
"is disabled.")
elif not on_production_server:
try:
# Restore the real subprocess module.
from google.appengine.api.mail_stub import subprocess
sys.modules['subprocess'] = subprocess
# Re-inject the buffer() builtin into the subprocess module.
from google.appengine.tools import dev_appserver
subprocess.buffer = dev_appserver.buffer
except Exception, e:
logging.warn("Could not add the subprocess module to the "
"sandbox: %s" % e)
os.environ.update(env_ext)
extra_paths = [PROJECT_DIR, os.path.join(os.path.dirname(__file__), 'lib')]
zip_packages_dir = os.path.join(PROJECT_DIR, 'zip-packages')
# We support zipped packages in the common and project folders.
if os.path.isdir(zip_packages_dir):
for zip_package in os.listdir(zip_packages_dir):
extra_paths.append(os.path.join(zip_packages_dir, zip_package))
# App Engine causes main.py to be reloaded if an exception gets
# raised on the first request of a main.py instance, so don't call
# setup_project() multiple times. We ensure this indirectly by
    # checking whether we've already modified sys.path.
if len(sys.path) < len(extra_paths) or \
sys.path[:len(extra_paths)] != extra_paths:
for path in extra_paths:
while path in sys.path:
sys.path.remove(path)
sys.path = extra_paths + sys.path
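# Illustrative sketch (module path and arguments are assumptions): a project's
# manage.py would typically call setup_env() before delegating to Django, e.g.:
#
#     import sys
#     from djangoappengine.boot import setup_env
#     setup_env()
#     from django.core.management import execute_from_command_line
#     execute_from_command_line(sys.argv)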
|
|
# -*- coding: utf-8 -*-
import furl
import lxml
import time
import logging
import requests
from django.db.models import QuerySet
from framework.auth.utils import impute_names
from website.identifiers.metadata import remove_control_characters
from website.identifiers.clients.base import AbstractIdentifierClient
from website import settings
logger = logging.getLogger(__name__)
CROSSREF_NAMESPACE = 'http://www.crossref.org/schema/4.4.1'
CROSSREF_SCHEMA_LOCATION = 'http://www.crossref.org/schema/4.4.1 http://www.crossref.org/schemas/crossref4.4.1.xsd'
CROSSREF_ACCESS_INDICATORS = 'http://www.crossref.org/AccessIndicators.xsd'
CROSSREF_RELATIONS = 'http://www.crossref.org/relations.xsd'
CROSSREF_SCHEMA_VERSION = '4.4.1'
JATS_NAMESPACE = 'http://www.ncbi.nlm.nih.gov/JATS1'
XSI = 'http://www.w3.org/2001/XMLSchema-instance'
CROSSREF_DEPOSITOR_NAME = 'Open Science Framework'
CROSSREF_SUFFIX_LIMIT = 10
CROSSREF_SURNAME_LIMIT = 60
CROSSREF_GIVEN_NAME_LIMIT = 60
class CrossRefClient(AbstractIdentifierClient):
def __init__(self, base_url):
self.base_url = base_url
def get_credentials(self):
return (settings.CROSSREF_USERNAME, settings.CROSSREF_PASSWORD)
def build_doi(self, preprint):
from osf.models import PreprintProvider
prefix = preprint.provider.doi_prefix or PreprintProvider.objects.get(_id='osf').doi_prefix
return settings.DOI_FORMAT.format(prefix=prefix, guid=preprint._id)
def build_metadata(self, preprint, status='public', include_relation=True, **kwargs):
"""Return the crossref metadata XML document for a given preprint as a string for DOI minting purposes
:param preprint: the preprint, or list of preprints to build metadata for
"""
is_batch = False
if isinstance(preprint, (list, QuerySet)):
is_batch = True
preprints = preprint
else:
preprints = [preprint]
element = lxml.builder.ElementMaker(nsmap={
None: CROSSREF_NAMESPACE,
'xsi': XSI},
)
# batch_id is used to get the guid of preprints for error messages down the line
# but there is a size limit -- for bulk requests, include only the first 5 guids
batch_id = ','.join([prep._id for prep in preprints[:5]])
head = element.head(
element.doi_batch_id(batch_id),
element.timestamp(str(int(time.time()))),
element.depositor(
element.depositor_name(CROSSREF_DEPOSITOR_NAME),
element.email_address(settings.CROSSREF_DEPOSITOR_EMAIL)
),
element.registrant('Center for Open Science')
)
# if this is a batch update, let build_posted_content determine status for each preprint
status = status if not is_batch else None
body = element.body()
for preprint in preprints:
body.append(self.build_posted_content(preprint, element, status, include_relation))
root = element.doi_batch(
head,
body,
version=CROSSREF_SCHEMA_VERSION
)
root.attrib['{%s}schemaLocation' % XSI] = CROSSREF_SCHEMA_LOCATION
return lxml.etree.tostring(root, pretty_print=kwargs.get('pretty_print', True))
def build_posted_content(self, preprint, element, status, include_relation):
"""Build the <posted_content> element for a single preprint
preprint - preprint to build posted_content for
element - namespace element to use when building parts of the XML structure
"""
status = status or self.get_status(preprint)
posted_content = element.posted_content(
element.group_title(preprint.provider.name),
type='preprint'
)
if status == 'public':
posted_content.append(element.contributors(*self._crossref_format_contributors(element, preprint)))
title = element.title(remove_control_characters(preprint.node.title)) if status == 'public' else element.title('')
posted_content.append(element.titles(title))
posted_content.append(element.posted_date(*self._crossref_format_date(element, preprint.date_published)))
if status == 'public':
posted_content.append(element.item_number('osf.io/{}'.format(preprint._id)))
if preprint.node.description:
posted_content.append(
element.abstract(element.p(remove_control_characters(preprint.node.description)), xmlns=JATS_NAMESPACE))
if preprint.license and preprint.license.node_license.url:
posted_content.append(
element.program(
element.license_ref(preprint.license.node_license.url,
start_date=preprint.date_published.strftime('%Y-%m-%d')),
xmlns=CROSSREF_ACCESS_INDICATORS
)
)
else:
posted_content.append(
element.program(xmlns=CROSSREF_ACCESS_INDICATORS)
)
if preprint.node.preprint_article_doi and include_relation:
posted_content.append(
element.program(
element.related_item(
element.intra_work_relation(
preprint.node.preprint_article_doi,
**{'relationship-type': 'isPreprintOf', 'identifier-type': 'doi'}
)
), xmlns=CROSSREF_RELATIONS
)
)
doi = self.build_doi(preprint)
doi_data = [
element.doi(doi),
element.resource(settings.DOMAIN + preprint._id)
]
posted_content.append(element.doi_data(*doi_data))
return posted_content
def _process_crossref_name(self, contributor):
# Adapted from logic used in `api/citations/utils.py`
# If the user has a family and given name, use those
if contributor.family_name and contributor.given_name:
given = contributor.given_name
middle = contributor.middle_names
family = contributor.family_name
suffix = contributor.suffix
else:
names = impute_names(contributor.fullname)
given = names.get('given')
middle = names.get('middle')
family = names.get('family')
suffix = names.get('suffix')
given_name = ' '.join([given, middle]).strip()
given_stripped = remove_control_characters(given_name)
# For crossref, given_name is not allowed to have numbers or question marks
given_processed = ''.join(
[char for char in given_stripped if (not char.isdigit() and char != '?')]
)
surname_processed = remove_control_characters(family)
surname = surname_processed or given_processed or contributor.fullname
processed_names = {'surname': surname[:CROSSREF_SURNAME_LIMIT].strip()}
if given_processed and surname_processed:
processed_names['given_name'] = given_processed[:CROSSREF_GIVEN_NAME_LIMIT].strip()
if suffix and (surname_processed or given_processed):
processed_names['suffix'] = suffix[:CROSSREF_SUFFIX_LIMIT].strip()
return processed_names
def _crossref_format_contributors(self, element, preprint):
contributors = []
for index, contributor in enumerate(preprint.node.visible_contributors):
if index == 0:
sequence = 'first'
else:
sequence = 'additional'
name_parts = self._process_crossref_name(contributor)
person = element.person_name(sequence=sequence, contributor_role='author')
if name_parts.get('given_name'):
person.append(element.given_name(name_parts['given_name']))
person.append(element.surname(name_parts['surname']))
if name_parts.get('suffix'):
person.append(element.suffix(remove_control_characters(name_parts['suffix'])))
if contributor.external_identity.get('ORCID'):
orcid = contributor.external_identity['ORCID'].keys()[0]
verified = contributor.external_identity['ORCID'].values()[0] == 'VERIFIED'
if orcid and verified:
person.append(
element.ORCID('https://orcid.org/{}'.format(orcid), authenticated='true')
)
contributors.append(person)
return contributors
def _crossref_format_date(self, element, date):
elements = [
element.month(date.strftime('%m')),
element.day(date.strftime('%d')),
element.year(date.strftime('%Y'))
]
return elements
def _build_url(self, **query):
url = furl.furl(self.base_url)
url.args.update(query)
return url.url
def create_identifier(self, preprint, category, status=None, include_relation=True):
if status is None:
status = self.get_status(preprint)
if category == 'doi':
metadata = self.build_metadata(preprint, status, include_relation)
doi = self.build_doi(preprint)
filename = doi.split('/')[-1]
username, password = self.get_credentials()
logger.info('Sending metadata for DOI {}:\n{}'.format(doi, metadata))
# Crossref sends an email to CROSSREF_DEPOSITOR_EMAIL to confirm
requests.request(
'POST',
self._build_url(
operation='doMDUpload',
login_id=username,
login_passwd=password,
fname='{}.xml'.format(filename)
),
files={'file': ('{}.xml'.format(filename), metadata)},
)
# Don't wait for response to confirm doi because it arrives via email.
return {'doi': doi}
else:
raise NotImplementedError()
def update_identifier(self, preprint, category, status=None):
return self.create_identifier(preprint, category, status)
def get_status(self, preprint):
return 'public' if preprint.verified_publishable else 'unavailable'
def bulk_create(self, metadata, filename):
# Crossref sends an email to CROSSREF_DEPOSITOR_EMAIL to confirm
username, password = self.get_credentials()
requests.request(
'POST',
self._build_url(
operation='doMDUpload',
login_id=username,
login_passwd=password,
fname='{}.xml'.format(filename)
),
files={'file': ('{}.xml'.format(filename), metadata)},
)
logger.info('Sent a bulk update of metadata to CrossRef')
class ECSArXivCrossRefClient(CrossRefClient):
def get_credentials(self):
return (settings.ECSARXIV_CROSSREF_USERNAME, settings.ECSARXIV_CROSSREF_PASSWORD)
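# Illustrative usage sketch (the preprint object and deposit URL below are
# placeholders, not values confirmed by this module):
#
#     client = CrossRefClient(base_url='https://test.crossref.org/servlet/deposit')
#     doi = client.build_doi(preprint)                 # '<provider prefix>/<guid>'
#     xml = client.build_metadata(preprint)            # CrossRef 4.4.1 deposit XML
#     client.create_identifier(preprint, category='doi')  # uploads the XML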
|
|
'Toolkit for automatic-differentiation of Python functions'
# Resources for automatic-differentiation:
# https://en.wikipedia.org/wiki/Automatic_differentiation
# https://justindomke.wordpress.com/2009/02/17/
# http://www.autodiff.org/
from __future__ import division
import math
## Dual Number Class #####################################################
class Num(float):
    ''' The auto-differentiation number class works like a float
for a function input, but all operations on that number
will concurrently compute the derivative.
Creating Nums
-------------
New numbers are created with: Num(x, dx)
Make constants (not varying with respect to x) with: Num(3.5)
Make variables (that vary with respect to x) with: Num(3.5, 1.0)
The short-cut for Num(3.5, 1.0) is: Var(3.5)
Accessing Nums
--------------
Convert a num back to a float with: float(n)
The derivative is accessed with: n.dx
Or with the short-cut function: d(n)
Functions of One Variable
-------------------------
>>> f = lambda x: cos(2.5 * x) ** 3
>>> y = f(Var(1.5)) # Evaluate at x=1.5
>>> y # f(1.5)
-0.552497105486732
>>> y.dx # f'(1.5)
2.88631746797551
Partial Derivatives and Gradients of Multi-variable Functions
-------------------------------------------------------------
The tool can also be used to compute gradients of multivariable
functions by making one of the inputs variable and keeping
the remaining inputs constant:
>>> f = lambda x, y: x*y + sin(x)
>>> f(2.5, 3.5) # Evaluate at (2.5, 3.5)
9.348472144103956
>>> d(f(Var(2.5), 3.5)) # Partial with respect to x
2.6988563844530664
>>> d(f(2.5, Var(3.5))) # Partial with respect to y
2.5
>>> gradient(f, (2.5, 3.5))
(2.6988563844530664, 2.5)
See: https://www.wolframalpha.com/input/?lk=3&i=grad(x*y+%2B+sin(x))
'''
# Tables of Derivatives:
# http://hyperphysics.phy-astr.gsu.edu/hbase/math/derfunc.html
# http://tutorial.math.lamar.edu/pdf/Common_Derivatives_Integrals.pdf
# http://www.nps.edu/Academics/Schools/GSEAS/Departments/Math/pdf_sources/BlueBook27.pdf
# https://www.wolframalpha.com/input/?lk=3&i=d%2Fdx(u(x)%5E(v(x)))
__slots__ = ['dx']
def __new__(cls, value, dx=0.0):
if isinstance(value, cls): return value
inst = float.__new__(cls, value)
inst.dx = dx
return inst
def __add__(u, v):
return Num(float(u) + float(v), d(u) + d(v))
def __sub__(u, v):
return Num(float(u) - float(v), d(u) - d(v))
def __mul__(u, v):
u, v, du, dv = float(u), float(v), d(u), d(v)
return Num(u * v, u * dv + v * du)
def __truediv__(u, v):
u, v, du, dv = float(u), float(v), d(u), d(v)
return Num(u / v, (v * du - u * dv) / v ** 2.0)
def __pow__(u, v):
u, v, du, dv = float(u), float(v), d(u), d(v)
return Num(u ** v,
(v * u ** (v - 1.0) * du if du else 0.0) +
(math.log(u) * u ** v * dv if dv else 0.0))
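# __pow__ applies the general power rule d(u**v) = v*u**(v-1)*du + log(u)*u**v*dv.
# The `if du` / `if dv` guards drop a term whose derivative factor is zero, which
# also avoids calling math.log(u) on u <= 0 when the exponent is a constant.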
def __floordiv__(u, v):
return Num(float(u) // float(v), 0.0)
def __mod__(u, v):
u, v, du, dv = float(u), float(v), d(u), d(v)
return Num(u % v, du - u // v * dv)
def __pos__(u):
return u
def __neg__(u):
return Num(-float(u), -d(u))
__radd__ = __add__
__rmul__ = __mul__
def __rsub__(self, other):
return -(self - other)
def __rtruediv__(self, other):
return Num(other) / self
def __rpow__(self, other):
return Num(other) ** self
def __rmod__(u, v):
return Num(v) % u
def __rfloordiv__(self, other):
return Num(other) // self
def __abs__(self):
return self if self >= 0.0 else -self
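# Illustrative sketch (comments only, so not collected by doctest.testmod):
# the overloads above combine value and derivative in a single pass.
#
#     >>> x = Num(3.0, 1.0)        # same as Var(3.0)
#     >>> y = x * x + 2.0 * x      # __mul__/__rmul__ and __add__ propagate dx
#     >>> float(y), y.dx           # f(x) = x**2 + 2*x, f'(3) = 2*3 + 2
#     (15.0, 8.0)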
## Convenience Functions #################################################
Var = lambda x: Num(x, 1.0)
d = lambda x: getattr(x, 'dx', 0.0)
## Math Module Functions and Constants ###################################
sqrt = lambda u: Num(math.sqrt(u), d(u) / (2.0 * math.sqrt(u)))
log = lambda u: Num(math.log(u), d(u) / float(u))
log2 = lambda u: Num(math.log2(u), d(u) / (float(u) * math.log(2.0)))
log10 = lambda u: Num(math.log10(u), d(u) / (float(u) * math.log(10.0)))
log1p = lambda u: Num(math.log1p(u), d(u) / (float(u) + 1.0))
exp = lambda u: Num(math.exp(u), math.exp(u) * d(u))
expm1 = lambda u: Num(math.expm1(u), math.exp(u) * d(u))
sin = lambda u: Num(math.sin(u), math.cos(u) * d(u))
cos = lambda u: Num(math.cos(u), -math.sin(u) * d(u))
tan = lambda u: Num(math.tan(u), d(u) / math.cos(u) ** 2.0)
sinh = lambda u: Num(math.sinh(u), math.cosh(u) * d(u))
cosh = lambda u: Num(math.cosh(u), math.sinh(u) * d(u))
tanh = lambda u: Num(math.tanh(u), d(u) / math.cosh(u) ** 2.0)
asin = lambda u: Num(math.asin(u), d(u) / math.sqrt(1.0 - float(u) ** 2.0))
acos = lambda u: Num(math.acos(u), -d(u) / math.sqrt(1.0 - float(u) ** 2.0))
atan = lambda u: Num(math.atan(u), d(u) / (1.0 + float(u) ** 2.0))
asinh = lambda u: Num(math.asinh(u), d(u) / math.hypot(u, 1.0))
acosh = lambda u: Num(math.acosh(u), d(u) / math.sqrt(float(u) ** 2.0 - 1.0))
atanh = lambda u: Num(math.atanh(u), d(u) / (1.0 - float(u) ** 2.0))
radians = lambda u: Num(math.radians(u), math.radians(d(u)))
degrees = lambda u: Num(math.degrees(u), math.degrees(d(u)))
erf = lambda u: Num(math.erf(u),
2.0 / math.sqrt(math.pi) * math.exp(-(float(u) ** 2.0)) * d(u))
erfc = lambda u: Num(math.erfc(u),
-2.0 / math.sqrt(math.pi) * math.exp(-(float(u) ** 2.0)) * d(u))
hypot = lambda u, v: Num(math.hypot(u, v),
(u * d(u) + v * d(v)) / math.hypot(u, v))
fsum = lambda u: Num(math.fsum(map(float, u)), math.fsum(map(d, u)))
fabs = lambda u: abs(Num(u))
fmod = lambda u, v: Num(u) % v
copysign = lambda u, v: Num(math.copysign(u, v),
d(u) if math.copysign(1.0, float(u) * float(v)) > 0.0 else -d(u))
ceil = lambda u: Num(math.ceil(u), 0.0)
floor = lambda u: Num(math.floor(u), 0.0)
trunc = lambda u: Num(math.trunc(u), 0.0)
pi = Num(math.pi)
e = Num(math.e)
## Backport Python 3 Math Module Functions ###############################
if not hasattr(math, 'isclose'):
math.isclose = lambda x, y, rel_tol=1e-09: abs(x/y - 1.0) <= rel_tol
if not hasattr(math, 'log2'):
math.log2 = lambda x: math.log(x) / math.log(2.0)
## Vector Functions ######################################################
def partial(func, point, index):
''' Partial derivative at a given point
>>> func = lambda x, y: x*y + sin(x)
>>> point = (2.5, 3.5)
>>> partial(func, point, 0) # Partial with respect to x
2.6988563844530664
>>> partial(func, point, 1) # Partial with respect to y
2.5
'''
return d(func(*[Num(x, i==index) for i, x in enumerate(point)]))
def gradient(func, point):
''' Vector of the partial derivatives of a scalar field
>>> func = lambda x, y: x*y + sin(x)
>>> point = (2.5, 3.5)
>>> gradient(func, point)
(2.6988563844530664, 2.5)
See: https://www.wolframalpha.com/input/?lk=3&i=grad(x*y+%2B+sin(x))
'''
return tuple(partial(func, point, index) for index in range(len(point)))
def directional_derivative(func, point, direction):
''' The dot product of the gradient and a direction vector.
Computed directly with a single function call.
>>> func = lambda x, y: x*y + sin(x)
>>> point = (2.5, 3.5)
>>> direction = (1.5, -2.2)
>>> directional_derivative(func, point, direction)
-1.4517154233204006
Same result as separately computing and dotting the gradient:
>>> math.fsum(g * d for g, d in zip(gradient(func, point), direction))
-1.4517154233204002
See: https://en.wikipedia.org/wiki/Directional_derivative
'''
return d(func(*map(Num, point, direction)))
def divergence(F, point):
''' Sum of the partial derivatives of a vector field
>>> F = lambda x, y, z: (x*y+sin(x)+3*x, x-y-5*x, cos(2*x)-sin(y)**2)
>>> divergence(F, (3.5, 2.1, -3.3))
3.163543312709203
# http://www.wolframalpha.com/input/?i=div+%7Bx*y%2Bsin(x)%2B3*x,+x-y-5*x,+cos(2*x)-sin(y)%5E2%7D
>>> x, y, z = (3.5, 2.1, -3.3)
>>> math.cos(x) + y + 2
3.1635433127092036
>>> F = lambda x, y, z: (8 * exp(-x), cosh(z), - y**2)
>>> divergence(F, (2, -1, 4))
-1.0826822658929016
# https://www.youtube.com/watch?v=S2rT2zK2bdo
>>> x, y, z = (2, -1, 4)
>>> -8 * math.exp(-x)
-1.0826822658929016
'''
return math.fsum(d(F(*[Num(x, i==index) for i, x in enumerate(point)])[index])
for index in range(len(point)))
def curl(F, point):
''' Rotation around a vector field
>>> F = lambda x, y, z: (x*y+sin(x)+3*x, x-y-5*x, cos(2*x)-sin(y)**2)
>>> curl(F, (3.5, 2.1, -3.3))
(0.8715757724135881, 1.3139731974375781, -7.5)
# http://www.wolframalpha.com/input/?i=curl+%7Bx*y%2Bsin(x)%2B3*x,+x-y-5*x,+cos(2*x)-sin(y)%5E2%7D
>>> x, y, z = (3.5, 2.1, -3.3)
>>> (-2 * math.sin(y) * math.cos(y), 2 * math.sin(2 * x), -x - 4)
(0.8715757724135881, 1.3139731974375781, -7.5)
# https://www.youtube.com/watch?v=UW4SQz29TDc
>>> F = lambda x, y, z: (y**4 - x**2 * z**2, x**2 + y**2, -x**2 * y * z)
>>> curl(F, (1, 3, -2))
(2.0, -8.0, -106.0)
>>> F = lambda x, y, z: (8 * exp(-x), cosh(z), - y**2)
>>> curl(F, (2, -1, 4))
(-25.289917197127753, 0.0, 0.0)
# https://www.youtube.com/watch?v=S2rT2zK2bdo
>>> x, y, z = (2, -1, 4)
>>> (-(x * y + math.sinh(z)), 0.0, 0.0)
(-25.289917197127753, 0.0, 0.0)
'''
x, y, z = point
_, Fyx, Fzx = map(d, F(Var(x), y, z))
Fxy, _, Fzy = map(d, F(x, Var(y), z))
Fxz, Fyz, _ = map(d, F(x, y, Var(z)))
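# Each call above varies one coordinate, so mapping d() over the returned triple
# yields one column of the Jacobian (naming convention: Fab = dF_a/db).  The curl
# is then (dFz/dy - dFy/dz, dFx/dz - dFz/dx, dFy/dx - dFx/dy).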
return (Fzy - Fyz, Fxz - Fzx, Fyx - Fxy)
if __name__ == '__main__':
# River flow example: https://www.youtube.com/watch?v=vvzTEbp9lrc
W = 20 # width of river in meters
C = 0.1 # max flow divided by (W/2)**2
F = lambda x, y=0, z=0: (0.0, C * x * (W - x), 0.0)
for x in range(W+1):
print('%d --> %r' % (x, curl(F, (x, 0.0, 0.0))))
def numeric_derivative(func, x, eps=0.001):
'Estimate the derivative using numerical methods'
y0 = func(x - eps)
y1 = func(x + eps)
return (y1 - y0) / (2.0 * eps)
def test(x_array, *testcases):
for f in testcases:
print(f.__name__.center(40))
print('-' * 40)
for x in map(Var, x_array):
y = f(x)
actual = d(y)
expected = numeric_derivative(f, x, 2**-16)
print('%7.3f %12.4f %12.4f' % (x, actual, expected))
assert math.isclose(expected, actual, rel_tol=1e-5)
print('')
def test_pow_const_base(x):
return 3.1 ** (2.3 * x + 0.4)
def test_pow_const_exp(x):
return (2.3 * x + 0.4) ** (-1.3)
def test_pow_general(x):
return (x / 3.5) ** sin(3.5 * x)
def test_hyperbolics(x):
return 3 * cosh(1/x) + 5 * sinh(x/2.5) ** 2 - 0.7 * tanh(1.7/x) ** 1.5
def test_sqrt(x):
return cos(sqrt(abs(sin(x) + 5)))
def test_conversions(x):
return degrees(x ** 1.5 + 18) * radians(0.83 ** x + 37)
def test_hypot(x):
return hypot(sin(x), cos(1.1 / x))
def test_erf(x):
return (sin(x) * erf(x**0.85 - 3.123) +
cos(x) * erfc(x**0.851 - 3.25))
def test_rounders(x):
return (tan(x) * floor(cos(x**2 + 0.37) * 2.7) +
log(x) * ceil(cos(x**3 + 0.31) * 12.1) * 10.1 +
exp(x) * trunc(sin(x**1.4 + 8.0)) * 1234.567)
def test_inv_trig(x):
return (atan((x - 0.303) ** 2.9 + 0.1234) +
acos((x - 4.1) / 3.113) * 5 +
asin((x - 4.3) / 3.717))
def test_mod(x):
return 137.1327 % (sin(x + 0.3) * 40.123) + cos(x) % 5.753
def test_logs(x):
return log2(fabs(sin(x))) + log10(fabs(cos(x))) + log1p(fabs(tan(x)))
def test_fsum(x):
import random
random.seed(8675309)
data = [Num(random.random()**x, random.random()**x) for i in range(100)]
return fsum(data)
def test_inv_hyperbolics(x):
return (acosh(x**1.1234567 + 0.89) + 3.51 * asinh(x**1.234567 + 8.9) +
atanh(x / 15.0823))
def test_copysign(x):
return (copysign(7.17 * x + 5.11, 1.0) + copysign(4.1 * x, 0.0) +
copysign(8.909 * x + 0.18, -0.0) + copysign(4.321 * x + .12, -1.0) +
copysign(-3.53 * x + 11.5, 1.0) + copysign(-1.4 * x + 2.1, 0.0) +
copysign(-9.089 * x + 0.813, -0.0) + copysign(-1.2347 * x, -1.0) +
copysign(sin(x), x - math.pi))
def test_combined(x):
return (1.7 - 3 * cos(x) ** 2 / sin(3 * x) * 0.1 * exp(+cos(x)) +
sqrt(abs(x - 4.13)) + tan(2.5 * x) * log(3.1 * x**1.5) +
(4.7 * x + 3.1) ** cos(0.43 * x + 8.1) - 2.9 + tan(-x) +
sqrt(radians(log(x) + 1.7)) + e / x + expm1(x / pi))
x_array = [2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5]
tests = [test_combined, test_pow_const_base, test_pow_const_exp,
test_pow_general, test_hyperbolics, test_sqrt, test_copysign,
test_inv_trig, test_conversions, test_hypot, test_rounders,
test_inv_hyperbolics, test_mod, test_logs, test_fsum, test_erf]
test(x_array, *tests)
# Run doctests when the underlying C math library matches the one used to
# generate the code examples (the approximation algorithms vary slightly).
if 2 + math.sinh(4) == 29.289917197127753:
import doctest
print(doctest.testmod())
|
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-statements
from azure.cli.core.commands.parameters import tags_type, get_enum_type, resource_group_name_type, name_type, get_location_type, get_three_state_flag, get_resource_name_completion_list
from azure.cli.core.commands.validators import get_default_location_from_resource_group
def load_arguments_sb(self, _):
from azure.cli.command_modules.servicebus._completers import get_queue_command_completion_list, \
get_rules_command_completion_list, get_subscriptions_command_completion_list, get_topic_command_completion_list
from azure.cli.command_modules.servicebus._validators import _validate_auto_delete_on_idle, \
_validate_duplicate_detection_history_time_window, \
_validate_default_message_time_to_live, \
_validate_lock_duration, validate_partner_namespace, validate_premiumsku_capacity, validate_target_namespace, validate_rights
from knack.arguments import CLIArgumentType
from azure.mgmt.servicebus.models import SkuName, AccessRights, KeyType, FilterType
rights_arg_type = CLIArgumentType(options_list=['--rights'], nargs='+', arg_type=get_enum_type(AccessRights), validator=validate_rights, help='Space-separated list of Authorization rule rights')
key_arg_type = CLIArgumentType(options_list=['--key'], arg_type=get_enum_type(KeyType), help='Specifies whether the Primary or Secondary key needs to be reset')
keyvalue_arg_type = CLIArgumentType(options_list=['--key-value'], help='Optional. If a key value is provided, it is set for the given KeyType; otherwise a key value is autogenerated for that KeyType.')
with self.argument_context('servicebus') as c:
c.argument('resource_group_name', arg_type=resource_group_name_type)
c.argument('namespace_name', options_list=['--namespace-name'], id_part='name', help='Name of Namespace')
with self.argument_context('servicebus namespace') as c:
c.argument('namespace_name', id_part='name', arg_type=name_type, completer=get_resource_name_completion_list('Microsoft.ServiceBus/namespaces'), help='Name of Namespace')
c.argument('default_action', help='Default action for network rule set.')
c.argument('tags', arg_type=tags_type)
c.argument('sku', arg_type=get_enum_type(SkuName), help='Namespace SKU.')
c.argument('capacity', type=int, choices=[1, 2, 4, 8, 16], help='Number of message units. This property is only applicable to namespaces of Premium SKU', validator=validate_premiumsku_capacity)
with self.argument_context('servicebus namespace exists') as c:
c.argument('name', arg_type=name_type, help='Namespace name. Name can contain only letters, numbers, and hyphens. The namespace must start with a letter, and it must end with a letter or number.')
with self.argument_context('servicebus namespace create') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group)
# region Namespace Authorization Rule
with self.argument_context('servicebus namespace authorization-rule list') as c:
c.argument('namespace_name', options_list=['--namespace-name'], id_part=None, help='Name of the Namespace')
with self.argument_context('servicebus namespace authorization-rule') as c:
c.argument('authorization_rule_name', arg_type=name_type, id_part='child_name_1', help='Name of Namespace Authorization Rule')
c.argument('namespace_name', id_part='name', options_list=['--namespace-name'], help='Name of Namespace')
for scope in ['servicebus namespace authorization-rule create', 'servicebus namespace authorization-rule update', 'servicebus queue authorization-rule create', 'servicebus queue authorization-rule update', 'servicebus topic authorization-rule create', 'servicebus topic authorization-rule update']:
with self.argument_context(scope) as c:
c.argument('name', arg_type=name_type, help='Name of Authorization Rule')
c.argument('rights', arg_type=rights_arg_type)
with self.argument_context('servicebus namespace authorization-rule keys renew') as c:
c.argument('name', arg_type=name_type, help='Name of Namespace Authorization Rule')
c.argument('key_type', arg_type=key_arg_type)
c.argument('key', arg_type=keyvalue_arg_type)
with self.argument_context('servicebus namespace authorization-rule keys list') as c:
c.argument('authorization_rule_name', arg_type=name_type, id_part=None, help='Name of Namespace Authorization Rule')
c.argument('namespace_name', id_part=None, options_list=['--namespace-name'], help='Name of Namespace')
# region Queue
with self.argument_context('servicebus queue') as c:
c.argument('queue_name', arg_type=name_type, id_part='child_name_1', completer=get_queue_command_completion_list, help='Name of Queue')
# region - Queue Create
for scope in ['create', 'update']:
with self.argument_context('servicebus queue {}'.format(scope)) as c:
c.argument('queue_name', arg_type=name_type, id_part='child_name_1', help='Name of Queue')
c.argument('lock_duration', validator=_validate_lock_duration, help='String ISO 8601 timespan or duration format for duration of a peek-lock; that is, the amount of time that the message is locked for other receivers. The maximum value for LockDuration is 5 minutes; the default value is 1 minute.')
c.argument('max_size_in_megabytes', options_list=['--max-size'], type=int, choices=[1024, 2048, 3072, 4096, 5120, 10240, 20480, 40960, 81920], help='Maximum size of queue in megabytes, which is the size of the memory allocated for the queue. Default is 1024. Max for Standard SKU is 5120 and for Premium SKU is 81920')
c.argument('requires_duplicate_detection', options_list=['--enable-duplicate-detection'], arg_type=get_three_state_flag(), help='A boolean value indicating if this queue requires duplicate detection.')
c.argument('requires_session', options_list=['--enable-session'], arg_type=get_three_state_flag(), help='A boolean value indicating whether the queue supports the concept of sessions.')
c.argument('default_message_time_to_live', validator=_validate_default_message_time_to_live, help='ISO 8601 timespan or duration time format for default message to live value. This is the duration after which the message expires, starting from when the message is sent to Service Bus. This is the default value used when TimeToLive is not set on a message itself.')
c.argument('dead_lettering_on_message_expiration', options_list=['--enable-dead-lettering-on-message-expiration'], arg_type=get_three_state_flag(), help='A boolean value that indicates whether this queue has dead letter support when a message expires.')
c.argument('duplicate_detection_history_time_window', validator=_validate_duplicate_detection_history_time_window, help='ISO 8601 timeSpan structure that defines the duration of the duplicate detection history. The default value is 10 minutes.')
c.argument('max_delivery_count', type=int, help='The maximum delivery count. A message is automatically deadlettered after this number of deliveries. default value is 10.')
c.argument('status', arg_type=get_enum_type(['Active', 'Disabled', 'SendDisabled']), help='Enumerates the possible values for the status of a messaging entity.')
c.argument('auto_delete_on_idle', validator=_validate_auto_delete_on_idle, help='ISO 8601 timeSpan or duration time format for idle interval after which the queue is automatically deleted. The minimum duration is 5 minutes.')
c.argument('enable_partitioning', arg_type=get_three_state_flag(), help='A boolean value that indicates whether the queue is to be partitioned across multiple message brokers.')
c.argument('enable_express', arg_type=get_three_state_flag(), help='A boolean value that indicates whether Express Entities are enabled. An express queue holds a message in memory temporarily before writing it to persistent storage.')
c.argument('forward_to', help='Queue/Topic name to forward the messages')
c.argument('forward_dead_lettered_messages_to', help='Queue/Topic name to forward the Dead Letter message')
c.argument('enable_batched_operations', arg_type=get_three_state_flag(), help='Allow server-side batched operations.')
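# Hypothetical CLI invocation exercising the queue create arguments registered
# above (resource names are placeholders):
#
#     az servicebus queue create --resource-group MyRG --namespace-name mynamespace \
#         --name myqueue --max-size 2048 --enable-session true --lock-duration PT1M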
with self.argument_context('servicebus queue list') as c:
c.argument('namespace_name', id_part=None, options_list=['--namespace-name'], help='Name of Namespace')
# region Queue Authorization Rule
with self.argument_context('servicebus queue authorization-rule') as c:
c.argument('authorization_rule_name', arg_type=name_type, id_part='child_name_2', help='Name of Queue Authorization Rule')
c.argument('queue_name', id_part='child_name_1', options_list=['--queue-name'], help='Name of Queue')
with self.argument_context('servicebus queue authorization-rule list') as c:
c.argument('namespace_name', id_part=None, options_list=['--namespace-name'], help='Name of Namespace')
c.argument('queue_name', id_part=None, options_list=['--queue-name'], help='Name of Queue')
with self.argument_context('servicebus queue authorization-rule keys renew') as c:
c.argument('name', arg_type=name_type, help='Name of Queue Authorization Rule')
c.argument('key_type', arg_type=key_arg_type)
c.argument('key', arg_type=keyvalue_arg_type)
with self.argument_context('servicebus queue authorization-rule keys list') as c:
c.argument('authorization_rule_name', arg_type=name_type, id_part=None, help='Name of Queue Authorization Rule')
c.argument('queue_name', id_part=None, options_list=['--queue-name'], help='Name of Queue')
c.argument('namespace_name', id_part=None, options_list=['--namespace-name'], help='Name of Namespace')
# region - Topic
for scope in ['servicebus topic show', 'servicebus topic delete']:
with self.argument_context(scope) as c:
c.argument('topic_name', arg_type=name_type, id_part='child_name_1', completer=get_topic_command_completion_list, help='Name of Topic')
# region - Topic Create
for scope in ['create', 'update']:
with self.argument_context('servicebus topic {}'.format(scope)) as c:
c.argument('topic_name', arg_type=name_type, id_part='child_name_1', completer=get_topic_command_completion_list, help='Name of Topic')
c.argument('default_message_time_to_live', validator=_validate_default_message_time_to_live, help='ISO 8601 or duration time format for Default message timespan to live value. This is the duration after which the message expires, starting from when the message is sent to Service Bus. This is the default value used when TimeToLive is not set on a message itself.')
c.argument('max_size_in_megabytes', options_list=['--max-size'], type=int, choices=[1024, 2048, 3072, 4096, 5120, 10240, 20480, 40960, 81920], help='Maximum size of topic in megabytes, which is the size of the memory allocated for the topic. Default is 1024. Max for Standard SKU is 5120 and for Premium SKU is 81920')
c.argument('requires_duplicate_detection', options_list=['--enable-duplicate-detection'], arg_type=get_three_state_flag(), help='A boolean value indicating if this topic requires duplicate detection.')
c.argument('duplicate_detection_history_time_window', validator=_validate_duplicate_detection_history_time_window, help='ISO 8601 timespan or duration time format for structure that defines the duration of the duplicate detection history. The default value is 10 minutes.')
c.argument('enable_batched_operations', arg_type=get_three_state_flag(), help='Allow server-side batched operations.')
c.argument('status', arg_type=get_enum_type(['Active', 'Disabled', 'SendDisabled']), help='Enumerates the possible values for the status of a messaging entity.')
c.argument('support_ordering', options_list=['--enable-ordering'], arg_type=get_three_state_flag(), help='A boolean value that indicates whether the topic supports ordering.')
c.argument('auto_delete_on_idle', validator=_validate_auto_delete_on_idle, help='ISO 8601 timespan or duration time format for idle interval after which the topic is automatically deleted. The minimum duration is 5 minutes.')
c.argument('enable_partitioning', arg_type=get_three_state_flag(), help='A boolean value that indicates whether the topic is to be partitioned across multiple message brokers.')
c.argument('enable_express', arg_type=get_three_state_flag(), help='A boolean value that indicates whether Express Entities are enabled. An express topic holds a message in memory temporarily before writing it to persistent storage.')
with self.argument_context('servicebus topic list') as c:
c.argument('namespace_name', id_part=None, options_list=['--namespace-name'], help='Name of Namespace')
# region Topic Authorization Rule
with self.argument_context('servicebus topic authorization-rule') as c:
c.argument('authorization_rule_name', arg_type=name_type, id_part='child_name_2', help='name of Topic Authorization Rule')
c.argument('topic_name', options_list=['--topic-name'], id_part='child_name_1', help='name of Topic')
with self.argument_context('servicebus topic authorization-rule list') as c:
c.argument('namespace_name', id_part=None, options_list=['--namespace-name'], help='Name of Namespace')
c.argument('topic_name', options_list=['--topic-name'], id_part=None, help='name of Topic')
with self.argument_context('servicebus topic authorization-rule keys renew') as c:
c.argument('name', arg_type=name_type, help='Name of Topic Authorization Rule')
c.argument('key_type', arg_type=key_arg_type)
c.argument('key', arg_type=keyvalue_arg_type)
with self.argument_context('servicebus topic authorization-rule keys list') as c:
c.argument('namespace_name', id_part=None, options_list=['--namespace-name'], help='Name of Namespace')
c.argument('authorization_rule_name', arg_type=name_type, id_part=None, help='name of Topic Authorization Rule')
c.argument('topic_name', options_list=['--topic-name'], id_part=None, help='Name of Topic')
with self.argument_context('servicebus topic subscription') as c:
c.argument('subscription_name', arg_type=name_type, id_part='child_name_2', completer=get_subscriptions_command_completion_list, help='Name of Subscription')
c.argument('topic_name', id_part='child_name_1', options_list=['--topic-name'], help='Name of Topic')
# region - Subscription Create and update
for scope in ['create', 'update']:
with self.argument_context('servicebus topic subscription {}'.format(scope)) as c:
c.argument('lock_duration', validator=_validate_lock_duration, help='ISO 8601 or duration format (day:minute:seconds) for lock duration timespan for the subscription. The default value is 1 minute.')
c.argument('requires_session', options_list=['--enable-session'], arg_type=get_three_state_flag(), help='A boolean value indicating if a subscription supports the concept of sessions.')
c.argument('default_message_time_to_live', validator=_validate_default_message_time_to_live, help='ISO 8601 or duration time format for Default message timespan to live value. This is the duration after which the message expires, starting from when the message is sent to Service Bus. This is the default value used when TimeToLive is not set on a message itself.')
c.argument('dead_lettering_on_message_expiration', options_list=['--enable-dead-lettering-on-message-expiration'], arg_type=get_three_state_flag(), help='A boolean Value that indicates whether a subscription has dead letter support when a message expires.')
c.argument('max_delivery_count', type=int, help='Number of maximum deliveries.')
c.argument('status', arg_type=get_enum_type(['Active', 'Disabled', 'SendDisabled', 'ReceiveDisabled']), help='Enumerates the possible values for the status of a messaging entity.')
c.argument('enable_batched_operations', arg_type=get_three_state_flag(), help='Allow server-side batched operations.')
c.argument('auto_delete_on_idle', validator=_validate_auto_delete_on_idle, options_list=['--auto-delete-on-idle'], help='ISO 8601 timeSpan or duration time format for idle interval after which the topic is automatically deleted. The minimum duration is 5 minutes.')
c.argument('forward_to', help='Queue/Topic name to forward the messages')
c.argument('forward_dead_lettered_messages_to', help='Queue/Topic name to forward the Dead Letter message')
c.argument('dead_lettering_on_filter_evaluation_exceptions', options_list=['--dead-letter-on-filter-exceptions'], arg_type=get_three_state_flag(), help='Allow dead lettering when filter evaluation exceptions occur.')
with self.argument_context('servicebus topic subscription list') as c:
c.argument('namespace_name', options_list=['--namespace-name'], id_part=None, help='Name of Namespace')
c.argument('topic_name', options_list=['--topic-name'], id_part=None, help='Name of Topic')
# Region Subscription Rules
# Region Rules Create
with self.argument_context('servicebus topic subscription rule') as c:
c.argument('rule_name', arg_type=name_type, id_part='child_name_3', completer=get_rules_command_completion_list, help='Name of Rule')
c.argument('subscription_name', options_list=['--subscription-name'], id_part='child_name_2', help='Name of Subscription')
c.argument('topic_name', options_list=['--topic-name'], id_part='child_name_1', help='Name of Topic')
for scope in ['servicebus topic subscription rule create', 'servicebus topic subscription rule update']:
with self.argument_context(scope, arg_group='Action') as c:
c.argument('filter_type', arg_type=get_enum_type(FilterType), help='Rule Filter types')
c.argument('action_sql_expression', help='Action SQL expression.')
c.argument('action_compatibility_level', type=int, help='This property is reserved for future use. An integer value showing the compatibility level, currently hard-coded to 20.')
c.argument('action_requires_preprocessing', options_list=['--enable-action-preprocessing'], arg_type=get_three_state_flag(), help='A boolean value that indicates whether the rule action requires preprocessing.')
with self.argument_context(scope, arg_group='SQL Filter') as c:
c.argument('filter_sql_expression', help='SQL expression. e.g. myproperty=test')
c.argument('filter_requires_preprocessing', options_list=['--enable-sql-preprocessing'], arg_type=get_three_state_flag(), help='A boolean value that indicates whether the rule action requires preprocessing.')
with self.argument_context(scope, arg_group='Correlation Filter') as c:
c.argument('correlation_id', help='Identifier of correlation.')
c.argument('message_id', help='Identifier of message.')
c.argument('to', help='Address to send to.')
c.argument('reply_to', help='Address of the queue to reply to.')
c.argument('label', help='Application specific label.')
c.argument('session_id', help='Session identifier')
c.argument('reply_to_session_id', help='Session identifier to reply to.')
c.argument('content_type', help='Content type of message.')
c.argument('requires_preprocessing', options_list=['--enable-correlation-preprocessing'], arg_type=get_three_state_flag(), help='A boolean value that indicates whether the rule action requires preprocessing.')
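# Hypothetical CLI invocation using the SQL filter arguments registered above
# (resource names are placeholders):
#
#     az servicebus topic subscription rule create --resource-group MyRG \
#         --namespace-name mynamespace --topic-name mytopic \
#         --subscription-name mysub --name myrule \
#         --filter-sql-expression "myproperty='test'"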
with self.argument_context('servicebus topic subscription rule list') as c:
c.argument('subscription_name', options_list=['--subscription-name'], id_part=None, help='Name of Subscription')
c.argument('topic_name', options_list=['--topic-name'], id_part=None, help='Name of Topic')
c.argument('namespace_name', options_list=['--namespace-name'], id_part=None, help='Name of Namespace')
# Geo DR - Disaster Recovery Configs - Alias : Region
with self.argument_context('servicebus georecovery-alias exists') as c:
c.argument('resource_group_name', arg_type=resource_group_name_type)
c.argument('namespace_name', options_list=['--namespace-name'], id_part='name', help='Name of Namespace')
c.argument('name', options_list=['--alias', '-a'], arg_type=name_type, help='Name of Geo-Disaster Recovery Configuration Alias to check availability')
with self.argument_context('servicebus georecovery-alias') as c:
c.argument('alias', options_list=['--alias', '-a'], id_part='child_name_1', help='Name of the Geo-Disaster Recovery Configuration Alias')
with self.argument_context('servicebus georecovery-alias set') as c:
c.argument('resource_group_name', arg_type=resource_group_name_type)
c.argument('namespace_name', options_list=['--namespace-name'], id_part='name', help='Name of Namespace')
c.argument('alias', options_list=['--alias', '-a'], help='Name of the Geo-Disaster Recovery Configuration Alias')
c.argument('partner_namespace', required=True, options_list=['--partner-namespace'], validator=validate_partner_namespace, help='Name (if within the same resource group) or ARM Id of Primary/Secondary Service Bus namespace name, which is part of GEO DR pairing')
c.argument('alternate_name', help='Alternate Name (Post failover) for Primary Namespace, when Namespace name and Alias name are same')
for scope in ['servicebus georecovery-alias authorization-rule show', 'servicebus georecovery-alias authorization-rule keys list']:
with self.argument_context(scope) as c:
c.argument('authorization_rule_name', arg_type=name_type, id_part='child_name_2', help='name of Namespace Authorization Rule')
with self.argument_context('servicebus georecovery-alias list') as c:
c.argument('namespace_name', options_list=['--namespace-name'], id_part=None, help='Name of Namespace')
with self.argument_context('servicebus georecovery-alias authorization-rule list') as c:
c.argument('alias', options_list=['--alias', '-a'], help='Name of Geo-Disaster Recovery Configuration Alias')
c.argument('namespace_name', options_list=['--namespace-name'], id_part=None, help='Name of Namespace')
with self.argument_context('servicebus georecovery-alias authorization-rule keys list') as c:
c.argument('alias', options_list=['--alias', '-a'], id_part=None, help='Name of Geo-Disaster Recovery Configuration Alias')
c.argument('namespace_name', options_list=['--namespace-name'], id_part=None, help='Name of Namespace')
c.argument('authorization_rule_name', arg_type=name_type, help='Name of Namespace AuthorizationRule')
# Standard to Premium Migration: Region
with self.argument_context('servicebus migration start') as c:
c.ignore('config_name')
c.argument('namespace_name', arg_type=name_type, help='Name of Standard Namespace used as source of the migration')
# c.argument('config_name', options_list=['--config-name'], id_part=None, help='Name of configuration. Should always be "$default"')
c.argument('target_namespace', options_list=['--target-namespace'], validator=validate_target_namespace, help='Name (if within the same resource group) or ARM Id of empty Premium Service Bus namespace name that will be target of the migration')
c.argument('post_migration_name', options_list=['--post-migration-name'], help='Post migration name is the name that can be used to connect to standard namespace after migration is complete.')
for scope in ['show', 'complete', 'abort']:
with self.argument_context('servicebus migration {}'.format(scope)) as c:
c.ignore('config_name')
c.argument('namespace_name', arg_type=name_type, help='Name of Standard Namespace')
# Region Namespace NetworkRuleSet
with self.argument_context('servicebus namespace network-rule') as c:
c.argument('namespace_name', options_list=['--namespace-name'], id_part=None, help='Name of the Namespace')
for scope in ['servicebus namespace network-rule add', 'servicebus namespace network-rule remove']:
with self.argument_context(scope) as c:
c.argument('subnet', arg_group='Virtual Network Rule', options_list=['--subnet'], help='Name or ID of subnet. If name is supplied, `--vnet-name` must be supplied.')
c.argument('ip_mask', arg_group='IP Address Rule', options_list=['--ip-address'], help='IPv4 address or CIDR range.')
c.argument('namespace_name', options_list=['--namespace-name'], id_part=None, help='Name of the Namespace')
c.extra('vnet_name', arg_group='Virtual Network Rule', options_list=['--vnet-name'], help='Name of the Virtual Network')
with self.argument_context('servicebus namespace network-rule add') as c:
c.argument('ignore_missing_vnet_service_endpoint', arg_group='Virtual Network Rule', options_list=['--ignore-missing-endpoint'], arg_type=get_three_state_flag(), help='A boolean value that indicates whether to ignore missing vnet Service Endpoint')
c.argument('action', arg_group='IP Address Rule', options_list=['--action'], arg_type=get_enum_type(['Allow']), help='Action of the IP rule')
|
|
import types
import sys
import os
from itertools import izip
import django.db.models.manager # Imported to register signal handler.
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned, FieldError, ValidationError, NON_FIELD_ERRORS
from django.core import validators
from django.db.models.fields import BaseAutoField, FieldDoesNotExist
from django.db.models.fields.related import OneToOneRel, ManyToOneRel, OneToOneField
from django.db.models.query import delete_objects, Q
from django.db.models.query_utils import CollectedObjects, DeferredAttribute
from django.db.models.options import Options
from django.db import connections, router, transaction, DatabaseError, DEFAULT_DB_ALIAS
from django.db.models import signals
from django.db.models.loading import register_models, get_model
from django.utils.translation import ugettext_lazy as _
import django.utils.copycompat as copy
from django.utils.functional import curry, update_wrapper
from django.utils.encoding import smart_str, force_unicode, smart_unicode
from django.utils.text import get_text_list, capfirst
from django.conf import settings
class ModelBase(type):
"""
Metaclass for all models.
"""
def __new__(cls, name, bases, attrs):
super_new = super(ModelBase, cls).__new__
parents = [b for b in bases if isinstance(b, ModelBase)]
if not parents:
# If this isn't a subclass of Model, don't do anything special.
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_class = super_new(cls, name, bases, {'__module__': module})
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
base_meta = getattr(new_class, '_meta', None)
if getattr(meta, 'app_label', None) is None:
# Figure out the app_label by looking one level up.
# For 'django.contrib.sites.models', this would be 'sites'.
model_module = sys.modules[new_class.__module__]
kwargs = {"app_label": model_module.__name__.split('.')[-2]}
else:
kwargs = {}
new_class.add_to_class('_meta', Options(meta, **kwargs))
if not abstract:
new_class.add_to_class('DoesNotExist', subclass_exception('DoesNotExist',
tuple(x.DoesNotExist
for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
or (ObjectDoesNotExist,), module))
new_class.add_to_class('MultipleObjectsReturned', subclass_exception('MultipleObjectsReturned',
tuple(x.MultipleObjectsReturned
for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
or (MultipleObjectsReturned,), module))
if base_meta and not base_meta.abstract:
# Non-abstract child classes inherit some attributes from their
# non-abstract parent (unless an ABC comes before it in the
# method resolution order).
if not hasattr(meta, 'ordering'):
new_class._meta.ordering = base_meta.ordering
if not hasattr(meta, 'get_latest_by'):
new_class._meta.get_latest_by = base_meta.get_latest_by
is_proxy = new_class._meta.proxy
if getattr(new_class, '_default_manager', None):
if not is_proxy:
# Multi-table inheritance doesn't inherit default manager from
# parents.
new_class._default_manager = None
new_class._base_manager = None
else:
# Proxy classes do inherit parent's default manager, if none is
# set explicitly.
new_class._default_manager = new_class._default_manager._copy_to_model(new_class)
new_class._base_manager = new_class._base_manager._copy_to_model(new_class)
# Bail out early if we have already created this class.
m = get_model(new_class._meta.app_label, name, False)
if m is not None:
return m
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
# All the fields of any type declared on this model
new_fields = new_class._meta.local_fields + \
new_class._meta.local_many_to_many + \
new_class._meta.virtual_fields
field_names = set([f.name for f in new_fields])
# Basic setup for proxy models.
if is_proxy:
base = None
for parent in [cls for cls in parents if hasattr(cls, '_meta')]:
if parent._meta.abstract:
if parent._meta.fields:
raise TypeError("Abstract base class containing model fields not permitted for proxy model '%s'." % name)
else:
continue
if base is not None:
raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
else:
base = parent
if base is None:
raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
if (new_class._meta.local_fields or
new_class._meta.local_many_to_many):
raise FieldError("Proxy model '%s' contains model fields." % name)
while base._meta.proxy:
base = base._meta.proxy_for_model
new_class._meta.setup_proxy(base)
# Do the appropriate setup for any model parents.
o2o_map = dict([(f.rel.to, f) for f in new_class._meta.local_fields
if isinstance(f, OneToOneField)])
for base in parents:
original_base = base
if not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
continue
parent_fields = base._meta.local_fields + base._meta.local_many_to_many
# Check for clashes between locally declared fields and those
# on the base classes (we cannot handle shadowed fields at the
# moment).
for field in parent_fields:
if field.name in field_names:
raise FieldError('Local field %r in class %r clashes '
'with field of similar name from '
'base class %r' %
(field.name, name, base.__name__))
if not base._meta.abstract:
# Concrete classes...
while base._meta.proxy:
# Skip over a proxy class to the "real" base it proxies.
base = base._meta.proxy_for_model
if base in o2o_map:
field = o2o_map[base]
elif not is_proxy:
attr_name = '%s_ptr' % base._meta.module_name
field = OneToOneField(base, name=attr_name,
auto_created=True, parent_link=True)
new_class.add_to_class(attr_name, field)
else:
field = None
new_class._meta.parents[base] = field
else:
# .. and abstract ones.
for field in parent_fields:
new_class.add_to_class(field.name, copy.deepcopy(field))
# Pass any non-abstract parent classes onto child.
new_class._meta.parents.update(base._meta.parents)
# Inherit managers from the abstract base classes.
new_class.copy_managers(base._meta.abstract_managers)
# Proxy models inherit the non-abstract managers from their base,
# unless they have redefined any of them.
if is_proxy:
new_class.copy_managers(original_base._meta.concrete_managers)
# Inherit virtual fields (like GenericForeignKey) from the parent
# class
for field in base._meta.virtual_fields:
if base._meta.abstract and field.name in field_names:
raise FieldError('Local field %r in class %r clashes '\
'with field of similar name from '\
'abstract base class %r' % \
(field.name, name, base.__name__))
new_class.add_to_class(field.name, copy.deepcopy(field))
if abstract:
# Abstract base models can't be instantiated and don't appear in
# the list of models for an app. We do the final setup for them a
# little differently from normal models.
attr_meta.abstract = False
new_class.Meta = attr_meta
return new_class
new_class._prepare()
register_models(new_class._meta.app_label, new_class)
# Because of the way imports happen (recursively), we may or may not be
# the first time this model tries to register with the framework. There
# should only be one class for each model, so we always return the
# registered version.
return get_model(new_class._meta.app_label, name, False)
def copy_managers(cls, base_managers):
# This is in-place sorting of an Options attribute, but that's fine.
base_managers.sort()
for _, mgr_name, manager in base_managers:
val = getattr(cls, mgr_name, None)
if not val or val is manager:
new_manager = manager._copy_to_model(cls)
cls.add_to_class(mgr_name, new_manager)
def add_to_class(cls, name, value):
if hasattr(value, 'contribute_to_class'):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
def _prepare(cls):
"""
Creates some methods once self._meta has been populated.
"""
opts = cls._meta
opts._prepare(cls)
if opts.order_with_respect_to:
cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)
setattr(opts.order_with_respect_to.rel.to, 'get_%s_order' % cls.__name__.lower(), curry(method_get_order, cls))
setattr(opts.order_with_respect_to.rel.to, 'set_%s_order' % cls.__name__.lower(), curry(method_set_order, cls))
# Give the class a docstring -- its definition.
if cls.__doc__ is None:
cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join([f.attname for f in opts.fields]))
if hasattr(cls, 'get_absolute_url'):
cls.get_absolute_url = update_wrapper(curry(get_absolute_url, opts, cls.get_absolute_url),
cls.get_absolute_url)
signals.class_prepared.send(sender=cls)
class ModelState(object):
"""
A class for storing instance state
"""
def __init__(self, db=None):
self.db = db
class Model(object):
__metaclass__ = ModelBase
_deferred = False
def __init__(self, *args, **kwargs):
signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs)
# Set up the storage for instance state
self._state = ModelState()
# There is a rather weird disparity here; if kwargs, it's set, then args
# overrides it. It should be one or the other; don't duplicate the work
# The reason for the kwargs check is that standard iterator passes in by
# args, and instantiation for iteration is 33% faster.
args_len = len(args)
if args_len > len(self._meta.fields):
# Daft, but matches old exception sans the err msg.
raise IndexError("Number of args exceeds number of fields")
fields_iter = iter(self._meta.fields)
if not kwargs:
# The ordering of the izip calls matter - izip throws StopIteration
# when an iter throws it. So if the first iter throws it, the second
# is *not* consumed. We rely on this, so don't change the order
# without changing the logic.
for val, field in izip(args, fields_iter):
setattr(self, field.attname, val)
else:
# Slower, kwargs-ready version.
for val, field in izip(args, fields_iter):
setattr(self, field.attname, val)
kwargs.pop(field.name, None)
# Maintain compatibility with existing calls.
if isinstance(field.rel, ManyToOneRel):
kwargs.pop(field.attname, None)
# Now we're left with the unprocessed fields that *must* come from
# keywords, or default.
for field in fields_iter:
is_related_object = False
# This slightly odd construct is so that we can access any
# data-descriptor object (DeferredAttribute) without triggering its
# __get__ method.
if (field.attname not in kwargs and
isinstance(self.__class__.__dict__.get(field.attname), DeferredAttribute)):
# This field will be populated on request.
continue
if kwargs:
if isinstance(field.rel, ManyToOneRel):
try:
# Assume object instance was passed in.
rel_obj = kwargs.pop(field.name)
is_related_object = True
except KeyError:
try:
# Object instance wasn't passed in -- must be an ID.
val = kwargs.pop(field.attname)
except KeyError:
val = field.get_default()
else:
# Object instance was passed in. Special case: You can
# pass in "None" for related objects if it's allowed.
if rel_obj is None and field.null:
val = None
else:
try:
val = kwargs.pop(field.attname)
except KeyError:
# This is done with an exception rather than the
# default argument on pop because we don't want
# get_default() to be evaluated, and then not used.
# Refs #12057.
val = field.get_default()
else:
val = field.get_default()
if is_related_object:
# If we are passed a related instance, set it using the
# field.name instead of field.attname (e.g. "user" instead of
# "user_id") so that the object gets properly cached (and type
# checked) by the RelatedObjectDescriptor.
setattr(self, field.name, rel_obj)
else:
setattr(self, field.attname, val)
if kwargs:
for prop in kwargs.keys():
try:
if isinstance(getattr(self.__class__, prop), property):
setattr(self, prop, kwargs.pop(prop))
except AttributeError:
pass
if kwargs:
raise TypeError("'%s' is an invalid keyword argument for this function" % kwargs.keys()[0])
signals.post_init.send(sender=self.__class__, instance=self)
def __repr__(self):
try:
u = unicode(self)
except (UnicodeEncodeError, UnicodeDecodeError):
u = '[Bad Unicode data]'
return smart_str(u'<%s: %s>' % (self.__class__.__name__, u))
def __str__(self):
if hasattr(self, '__unicode__'):
return force_unicode(self).encode('utf-8')
return '%s object' % self.__class__.__name__
def __eq__(self, other):
return isinstance(other, self.__class__) and self._get_pk_val() == other._get_pk_val()
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self._get_pk_val())
def __reduce__(self):
"""
Provide pickling support. Normally, this just dispatches to Python's
standard handling. However, for models with deferred field loading, we
need to do things manually, as they're dynamically created classes and
only module-level classes can be pickled by the default path.
"""
data = self.__dict__
model = self.__class__
# The obvious thing to do here is to invoke super().__reduce__()
# for the non-deferred case. Don't do that.
# On Python 2.4, there is something weird with __reduce__,
# and as a result, the super call will cause an infinite recursion.
# See #10547 and #12121.
defers = []
pk_val = None
if self._deferred:
from django.db.models.query_utils import deferred_class_factory
factory = deferred_class_factory
for field in self._meta.fields:
if isinstance(self.__class__.__dict__.get(field.attname),
DeferredAttribute):
defers.append(field.attname)
if pk_val is None:
# The pk_val and model values are the same for all
# DeferredAttribute classes, so we only need to do this
# once.
obj = self.__class__.__dict__[field.attname]
model = obj.model_ref()
else:
factory = simple_class_factory
return (model_unpickle, (model, defers, factory), data)
def _get_pk_val(self, meta=None):
if not meta:
meta = self._meta
return getattr(self, meta.pk.attname)
def _set_pk_val(self, value):
return setattr(self, self._meta.pk.attname, value)
pk = property(_get_pk_val, _set_pk_val)
def serializable_value(self, field_name):
"""
Returns the value of the field name for this instance. If the field is
a foreign key, returns the id value, instead of the object. If there's
no Field object with this name on the model, the model attribute's
value is returned directly.
Used to serialize a field's value (in the serializer, or form output,
for example). Normally, you would just access the attribute directly
and not use this method.
"""
try:
field = self._meta.get_field_by_name(field_name)[0]
except FieldDoesNotExist:
return getattr(self, field_name)
return getattr(self, field.attname)
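# Sketch (model and field names hypothetical): for a ForeignKey named 'author',
# obj.serializable_value('author') returns obj.author_id rather than the related
# instance, while obj.serializable_value('slug') simply returns obj.slug.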
def save(self, force_insert=False, force_update=False, using=None):
"""
Saves the current instance. Override this in a subclass if you want to
control the saving process.
The 'force_insert' and 'force_update' parameters can be used to insist
that the "save" must be an SQL insert or update (or equivalent for
non-SQL backends), respectively. Normally, they should not be set.
"""
if force_insert and force_update:
raise ValueError("Cannot force both insert and updating in model saving.")
self.save_base(using=using, force_insert=force_insert, force_update=force_update)
save.alters_data = True
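# Illustrative use of the flags documented above (the Article model is hypothetical):
#
#     article = Article(pk=10, title='Draft')
#     article.save(force_insert=True)   # always INSERT; fails if pk 10 already exists
#     article.save(force_update=True)   # always UPDATE; DatabaseError if no row matched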
def save_base(self, raw=False, cls=None, origin=None, force_insert=False,
force_update=False, using=None):
"""
Does the heavy-lifting involved in saving. Subclasses shouldn't need to
override this method. It's separate from save() in order to hide the
need for overrides of save() to pass around internal-only parameters
('raw', 'cls', and 'origin').
"""
using = using or router.db_for_write(self.__class__, instance=self)
connection = connections[using]
assert not (force_insert and force_update)
if cls is None:
cls = self.__class__
meta = cls._meta
if not meta.proxy:
origin = cls
else:
meta = cls._meta
if origin and not meta.auto_created:
signals.pre_save.send(sender=origin, instance=self, raw=raw, using=using)
# If we are in a raw save, save the object exactly as presented.
# That means that we don't try to be smart about saving attributes
# that might have come from the parent class - we just save the
# attributes we have been given to the class we have been given.
# We also go through this process to defer the save of proxy objects
# to their actual underlying model.
if not raw or meta.proxy:
if meta.proxy:
org = cls
else:
org = None
for parent, field in meta.parents.items():
# At this point, parent's primary key field may be unknown
# (for example, from administration form which doesn't fill
# this field). If so, fill it.
if field and getattr(self, parent._meta.pk.attname) is None and getattr(self, field.attname) is not None:
setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
self.save_base(cls=parent, origin=org, using=using)
if field:
setattr(self, field.attname, self._get_pk_val(parent._meta))
if meta.proxy:
return
if not meta.proxy:
non_pks = [f for f in meta.local_fields if not f.primary_key]
# First, try an UPDATE. If that doesn't update anything, do an INSERT.
pk_val = self._get_pk_val(meta)
pk_set = pk_val is not None
record_exists = True
manager = cls._base_manager
if pk_set:
# Determine whether a record with the primary key already exists.
if (force_update or (not force_insert and
manager.using(using).filter(pk=pk_val).exists())):
# It does already exist, so do an UPDATE.
if force_update or non_pks:
values = [(f, None, (raw and getattr(self, f.attname) or f.pre_save(self, False))) for f in non_pks]
rows = manager.using(using).filter(pk=pk_val)._update(values)
if force_update and not rows:
raise DatabaseError("Forced update did not affect any rows.")
else:
record_exists = False
if not pk_set or not record_exists:
if meta.order_with_respect_to:
# If this is a model with an order_with_respect_to
# autopopulate the _order field
field = meta.order_with_respect_to
order_value = manager.using(using).filter(**{field.name: getattr(self, field.attname)}).count()
setattr(self, '_order', order_value)
if not pk_set:
if force_update:
raise ValueError("Cannot force an update in save() with no primary key.")
values = [
(f, f.get_db_prep_save(raw and getattr(self, f.attname) or f.pre_save(self, True), connection=connection))
for f in meta.local_fields if not isinstance(f, BaseAutoField)
]
else:
values = [(f, f.get_db_prep_save(raw and getattr(self, f.attname) or f.pre_save(self, True), connection=connection))
for f in meta.local_fields]
record_exists = False
update_pk = bool(meta.has_auto_field and not pk_set)
if values:
# Create a new record.
result = manager._insert(values, return_id=update_pk, using=using)
else:
# Create a new record with defaults for everything.
result = manager._insert([(meta.pk, connection.ops.pk_default_value())], return_id=update_pk, raw_values=True, using=using)
if update_pk:
setattr(self, meta.pk.attname, result)
transaction.commit_unless_managed(using=using)
# Store the database on which the object was saved
self._state.db = using
# Signal that the save is complete
if origin and not meta.auto_created:
signals.post_save.send(sender=origin, instance=self,
created=(not record_exists), raw=raw, using=using)
save_base.alters_data = True
def _collect_sub_objects(self, seen_objs, parent=None, nullable=False):
"""
Recursively populates seen_objs with all objects related to this
object.
When done, seen_objs.items() will be in the format:
[(model_class, {pk_val: obj, pk_val: obj, ...}),
(model_class, {pk_val: obj, pk_val: obj, ...}), ...]
"""
pk_val = self._get_pk_val()
if seen_objs.add(self.__class__, pk_val, self,
type(parent), parent, nullable):
return
for related in self._meta.get_all_related_objects():
rel_opts_name = related.get_accessor_name()
if not related.field.rel.multiple:
try:
sub_obj = getattr(self, rel_opts_name)
except ObjectDoesNotExist:
pass
else:
sub_obj._collect_sub_objects(seen_objs, self, related.field.null)
else:
# To make sure we can access all elements, we can't use the
# normal manager on the related object. So we work directly
# with the descriptor object.
for cls in self.__class__.mro():
if rel_opts_name in cls.__dict__:
rel_descriptor = cls.__dict__[rel_opts_name]
break
else:
# in the case of a hidden fkey just skip it, it'll get
# processed as an m2m
if not related.field.rel.is_hidden():
raise AssertionError("Should never get here.")
else:
continue
delete_qs = rel_descriptor.delete_manager(self).all()
for sub_obj in delete_qs:
sub_obj._collect_sub_objects(seen_objs, self, related.field.null)
for related in self._meta.get_all_related_many_to_many_objects():
if related.field.rel.through:
db = router.db_for_write(related.field.rel.through.__class__, instance=self)
opts = related.field.rel.through._meta
reverse_field_name = related.field.m2m_reverse_field_name()
nullable = opts.get_field(reverse_field_name).null
filters = {reverse_field_name: self}
for sub_obj in related.field.rel.through._base_manager.using(db).filter(**filters):
sub_obj._collect_sub_objects(seen_objs, self, nullable)
for f in self._meta.many_to_many:
if f.rel.through:
db = router.db_for_write(f.rel.through.__class__, instance=self)
opts = f.rel.through._meta
field_name = f.m2m_field_name()
nullable = opts.get_field(field_name).null
filters = {field_name: self}
for sub_obj in f.rel.through._base_manager.using(db).filter(**filters):
sub_obj._collect_sub_objects(seen_objs, self, nullable)
else:
# An m2m-style field with no through table is a GenericRelation:
# cascade-delete its related objects.
for sub_obj in f.value_from_object(self).all():
# Generic relations are not enforced by db constraints, so we
# can set nullable=True; the collection order does not matter.
sub_obj._collect_sub_objects(seen_objs, self, True)
# Handle any ancestors (for the model-inheritance case). We do this by
# traversing to the most remote parent classes -- those with no parents
# themselves -- and then adding those instances to the collection. That
# will include all the child instances down to "self".
parent_stack = [p for p in self._meta.parents.values() if p is not None]
while parent_stack:
link = parent_stack.pop()
parent_obj = getattr(self, link.name)
if parent_obj._meta.parents:
parent_stack.extend(parent_obj._meta.parents.values())
continue
# At this point, parent_obj is a base class (no ancestor models). So
# delete it and all its descendants.
parent_obj._collect_sub_objects(seen_objs)
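# Illustrative sketch of the structure described in the docstring above
# (model names are hypothetical): after collection, seen_objs.items() looks like
#
#     [(Author, {1: <Author: 1>}),
#      (Book,   {3: <Book: 3>, 4: <Book: 4>})]
#
# i.e. one (model_class, {pk: instance, ...}) pair per model involved in the
# cascade.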
def delete(self, using=None):
using = using or router.db_for_write(self.__class__, instance=self)
assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." % (self._meta.object_name, self._meta.pk.attname)
# Find all the objects that need to be deleted.
seen_objs = CollectedObjects()
self._collect_sub_objects(seen_objs)
# Actually delete the objects.
delete_objects(seen_objs, using)
delete.alters_data = True
def _get_FIELD_display(self, field):
value = getattr(self, field.attname)
return force_unicode(dict(field.flatchoices).get(value, value), strings_only=True)
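# Hedged usage sketch for the curried display helper above (``Article`` and its
# ``status`` choices field are illustrative assumptions, not part of this module):
#
#     class Article(models.Model):
#         status = models.IntegerField(choices=((1, u'Draft'), (2, u'Published')))
#
#     Article(status=2).get_status_display()   # u'Published'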
def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
op = is_next and 'gt' or 'lt'
order = not is_next and '-' or ''
param = smart_str(getattr(self, field.attname))
q = Q(**{'%s__%s' % (field.name, op): param})
q = q|Q(**{field.name: param, 'pk__%s' % op: self.pk})
qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by('%s%s' % (order, field.name), '%spk' % order)
try:
return qs[0]
except IndexError:
raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name)
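# Hedged sketch: for date fields this helper is curried onto the model as
# get_next_by_<field>/get_previous_by_<field>. Assuming a hypothetical
# ``Article`` model with a ``pub_date`` field:
#
#     article.get_next_by_pub_date()              # next article by pub_date
#     article.get_previous_by_pub_date(status=2)  # extra filter kwargs are allowed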
def _get_next_or_previous_in_order(self, is_next):
cachename = "__%s_order_cache" % is_next
if not hasattr(self, cachename):
op = is_next and 'gt' or 'lt'
order = not is_next and '-_order' or '_order'
order_field = self._meta.order_with_respect_to
obj = self._default_manager.filter(**{
order_field.name: getattr(self, order_field.attname)
}).filter(**{
'_order__%s' % op: self._default_manager.values('_order').filter(**{
self._meta.pk.name: self.pk
})
}).order_by(order)[:1].get()
setattr(self, cachename, obj)
return getattr(self, cachename)
def prepare_database_save(self, unused):
return self.pk
def clean(self):
"""
Hook for doing any extra model-wide validation after clean() has been
called on every field by self.clean_fields. Any ValidationError raised
by this method will not be associated with a particular field; it will
have a special-case association with the field defined by NON_FIELD_ERRORS.
"""
pass
def validate_unique(self, exclude=None):
"""
Checks unique constraints on the model and raises ``ValidationError``
if any failed.
"""
unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
errors = self._perform_unique_checks(unique_checks)
date_errors = self._perform_date_checks(date_checks)
for k, v in date_errors.items():
errors.setdefault(k, []).extend(v)
if errors:
raise ValidationError(errors)
def _get_unique_checks(self, exclude=None):
"""
Gather a list of checks to perform. Since validate_unique could be
called from a ModelForm, some fields may have been excluded; we can't
perform a unique check on a model that is missing fields involved
in that check.
Fields that did not validate should also be excluded, but they need
to be passed in via the exclude argument.
"""
if exclude is None:
exclude = []
unique_checks = []
unique_togethers = [(self.__class__, self._meta.unique_together)]
for parent_class in self._meta.parents.keys():
if parent_class._meta.unique_together:
unique_togethers.append((parent_class, parent_class._meta.unique_together))
for model_class, unique_together in unique_togethers:
for check in unique_together:
for name in check:
# If this is an excluded field, don't add this check.
if name in exclude:
break
else:
unique_checks.append((model_class, tuple(check)))
# These are checks for the unique_for_<date/year/month>.
date_checks = []
# Gather a list of checks for fields declared as unique and add them to
# the list of checks.
fields_with_class = [(self.__class__, self._meta.local_fields)]
for parent_class in self._meta.parents.keys():
fields_with_class.append((parent_class, parent_class._meta.local_fields))
for model_class, fields in fields_with_class:
for f in fields:
name = f.name
if name in exclude:
continue
if f.unique:
unique_checks.append((model_class, (name,)))
if f.unique_for_date:
date_checks.append((model_class, 'date', name, f.unique_for_date))
if f.unique_for_year:
date_checks.append((model_class, 'year', name, f.unique_for_year))
if f.unique_for_month:
date_checks.append((model_class, 'month', name, f.unique_for_month))
return unique_checks, date_checks
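# Hedged example of the return value (model and field names are hypothetical):
#
#     unique_checks -> [(Article, ('slug',)), (Article, ('title', 'pub_date'))]
#     date_checks   -> [(Article, 'date', 'slug', 'pub_date')]
#
# i.e. plain unique/unique_together checks, plus one entry per
# unique_for_date/year/month declaration.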
def _perform_unique_checks(self, unique_checks):
errors = {}
for model_class, unique_check in unique_checks:
# Try to look up an existing object with the same values as this
# object's values for all the unique fields.
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname)
if lookup_value is None:
# no value, skip the lookup
continue
if f.primary_key and not getattr(self, '_adding', False):
# no need to check for unique primary key when editing
continue
lookup_kwargs[str(field_name)] = lookup_value
# some fields were skipped, no reason to do the check
if len(unique_check) != len(lookup_kwargs.keys()):
continue
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
if not getattr(self, '_adding', False) and self.pk is not None:
qs = qs.exclude(pk=self.pk)
if qs.exists():
if len(unique_check) == 1:
key = unique_check[0]
else:
key = NON_FIELD_ERRORS
errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
return errors
def _perform_date_checks(self, date_checks):
errors = {}
for model_class, lookup_type, field, unique_for in date_checks:
lookup_kwargs = {}
# there's a ticket to add a date lookup, we can remove this special
# case if that makes its way in
date = getattr(self, unique_for)
if lookup_type == 'date':
lookup_kwargs['%s__day' % unique_for] = date.day
lookup_kwargs['%s__month' % unique_for] = date.month
lookup_kwargs['%s__year' % unique_for] = date.year
else:
lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type)
lookup_kwargs[field] = getattr(self, field)
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
if not getattr(self, '_adding', False) and self.pk is not None:
qs = qs.exclude(pk=self.pk)
if qs.exists():
errors.setdefault(field, []).append(
self.date_error_message(lookup_type, field, unique_for)
)
return errors
def date_error_message(self, lookup_type, field, unique_for):
opts = self._meta
return _(u"%(field_name)s must be unique for %(date_field)s %(lookup)s.") % {
'field_name': unicode(capfirst(opts.get_field(field).verbose_name)),
'date_field': unicode(capfirst(opts.get_field(unique_for).verbose_name)),
'lookup': lookup_type,
}
def unique_error_message(self, model_class, unique_check):
opts = model_class._meta
model_name = capfirst(opts.verbose_name)
# A unique field
if len(unique_check) == 1:
field_name = unique_check[0]
field_label = capfirst(opts.get_field(field_name).verbose_name)
# Insert the error into the error dict, very sneaky
return _(u"%(model_name)s with this %(field_label)s already exists.") % {
'model_name': unicode(model_name),
'field_label': unicode(field_label)
}
# unique_together
else:
field_labels = map(lambda f: capfirst(opts.get_field(f).verbose_name), unique_check)
field_labels = get_text_list(field_labels, _('and'))
return _(u"%(model_name)s with this %(field_label)s already exists.") % {
'model_name': unicode(model_name),
'field_label': unicode(field_labels)
}
def full_clean(self, exclude=None):
"""
Calls clean_fields, clean, and validate_unique on the model,
and raises a ``ValidationError`` for any errors that occurred.
"""
errors = {}
if exclude is None:
exclude = []
try:
self.clean_fields(exclude=exclude)
except ValidationError, e:
errors = e.update_error_dict(errors)
# Form.clean() is run even if other validation fails, so do the
# same with Model.clean() for consistency.
try:
self.clean()
except ValidationError, e:
errors = e.update_error_dict(errors)
# Run unique checks, but only for fields that passed validation.
for name in errors.keys():
if name != NON_FIELD_ERRORS and name not in exclude:
exclude.append(name)
try:
self.validate_unique(exclude=exclude)
except ValidationError, e:
errors = e.update_error_dict(errors)
if errors:
raise ValidationError(errors)
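# Hedged usage sketch (the ``article`` instance is hypothetical): callers such
# as ModelForm run the three steps above via full_clean and inspect the
# resulting message_dict.
#
#     try:
#         article.full_clean(exclude=['slug'])
#     except ValidationError, e:
#         errors = e.message_dict   # e.g. {'title': [...], '__all__': [...]}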
def clean_fields(self, exclude=None):
"""
Cleans all fields and raises a ValidationError containing message_dict
of all validation errors if any occur.
"""
if exclude is None:
exclude = []
errors = {}
for f in self._meta.fields:
if f.name in exclude:
continue
# Skip validation for empty fields with blank=True. The developer
# is responsible for making sure they have a valid value.
raw_value = getattr(self, f.attname)
if f.blank and raw_value in validators.EMPTY_VALUES:
continue
try:
setattr(self, f.attname, f.clean(raw_value, self))
except ValidationError, e:
errors[f.name] = e.messages
if errors:
raise ValidationError(errors)
############################################
# HELPER FUNCTIONS (CURRIED MODEL METHODS) #
############################################
# ORDERING METHODS #########################
def method_set_order(ordered_obj, self, id_list, using=None):
if using is None:
using = DEFAULT_DB_ALIAS
rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
order_name = ordered_obj._meta.order_with_respect_to.name
# FIXME: It would be nice if there was an "update many" version of update
# for situations like this.
for i, j in enumerate(id_list):
ordered_obj.objects.filter(**{'pk': j, order_name: rel_val}).update(_order=i)
transaction.commit_unless_managed(using=using)
def method_get_order(ordered_obj, self):
rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
order_name = ordered_obj._meta.order_with_respect_to.name
pk_name = ordered_obj._meta.pk.name
return [r[pk_name] for r in
ordered_obj.objects.filter(**{order_name: rel_val}).values(pk_name)]
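# Hedged sketch: for Meta.order_with_respect_to these helpers are curried onto
# the pointed-to model as get_<relatedmodel>_order()/set_<relatedmodel>_order().
# Assuming hypothetical Question/Answer models where Answer declares
# ``order_with_respect_to = 'question'``:
#
#     question.get_answer_order()          # e.g. [1, 2, 3]
#     question.set_answer_order([3, 1, 2])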
##############################################
# HELPER FUNCTIONS (CURRIED MODEL FUNCTIONS) #
##############################################
def get_absolute_url(opts, func, self, *args, **kwargs):
return settings.ABSOLUTE_URL_OVERRIDES.get('%s.%s' % (opts.app_label, opts.module_name), func)(self, *args, **kwargs)
########
# MISC #
########
class Empty(object):
pass
def simple_class_factory(model, attrs):
"""Used to unpickle Models without deferred fields.
We need to do this the hard way, rather than just using
the default __reduce__ implementation, because of a
__deepcopy__ problem in Python 2.4
"""
return model
def model_unpickle(model, attrs, factory):
"""
Used to unpickle Model subclasses with deferred fields.
"""
cls = factory(model, attrs)
return cls.__new__(cls)
model_unpickle.__safe_for_unpickle__ = True
if sys.version_info < (2, 5):
# Prior to Python 2.5, Exception was an old-style class
def subclass_exception(name, parents, unused):
return types.ClassType(name, parents, {})
else:
def subclass_exception(name, parents, module):
return type(name, parents, {'__module__': module})
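# Hedged usage note: the model metaclass uses subclass_exception to build
# per-model exception classes, roughly (names illustrative):
#
#     DoesNotExist = subclass_exception('DoesNotExist',
#                                       (ObjectDoesNotExist,), 'myapp.models')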
|
|
"""
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from scipy.special import expit
import pytest
from sklearn import datasets
from sklearn.base import clone
from sklearn.base import BaseEstimator
from sklearn.datasets import (make_classification, fetch_california_housing,
make_regression)
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble._gradient_boosting import predict_stages
from sklearn.preprocessing import OneHotEncoder
from sklearn.svm import LinearSVC
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import skip_if_32bit
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import NotFittedError
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LinearRegression
from sklearn.svm import NuSVR
GRADIENT_BOOSTING_ESTIMATORS = [GradientBoostingClassifier,
GradientBoostingRegressor]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def check_classification_toy(presort, loss):
# Check classification on a toy dataset.
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1, presort=presort)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert 10 == len(clf.estimators_)
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert np.any(deviance_decrease >= 0.0)
leaves = clf.apply(X)
assert leaves.shape == (6, 10, 1)
@pytest.mark.parametrize('presort', ('auto', True, False))
@pytest.mark.parametrize('loss', ('deviance', 'exponential'))
def test_classification_toy(presort, loss):
check_classification_toy(presort, loss)
def test_classifier_parameter_checks():
# Check input parameter validation for GradientBoostingClassifier.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
allowed_presort = ('auto', True, False)
assert_raise_message(ValueError,
"'presort' should be in {}. "
"Got 'invalid' instead.".format(allowed_presort),
GradientBoostingClassifier(presort='invalid')
.fit, X, y)
def test_regressor_parameter_checks():
# Check input parameter validation for GradientBoostingRegressor
assert_raise_message(ValueError, "alpha must be in (0.0, 1.0) but was 1.2",
GradientBoostingRegressor(loss='huber', alpha=1.2)
.fit, X, y)
assert_raise_message(ValueError, "alpha must be in (0.0, 1.0) but was 1.2",
GradientBoostingRegressor(loss='quantile', alpha=1.2)
.fit, X, y)
assert_raise_message(ValueError, "Invalid value for max_features: "
"'invalid'. Allowed string values are 'auto', 'sqrt'"
" or 'log2'.",
GradientBoostingRegressor(max_features='invalid').fit,
X, y)
assert_raise_message(ValueError, "n_iter_no_change should either be None"
" or an integer. 'invalid' was passed",
GradientBoostingRegressor(n_iter_no_change='invalid')
.fit, X, y)
allowed_presort = ('auto', True, False)
assert_raise_message(ValueError,
"'presort' should be in {}. "
"Got 'invalid' instead.".format(allowed_presort),
GradientBoostingRegressor(presort='invalid')
.fit, X, y)
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def check_classification_synthetic(presort, loss):
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.09
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, subsample=0.5,
random_state=0,
presort=presort)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.08
@pytest.mark.parametrize('presort', ('auto', True, False))
@pytest.mark.parametrize('loss', ('deviance', 'exponential'))
def test_classification_synthetic(presort, loss):
check_classification_synthetic(presort, loss)
def check_boston(presort, loss, subsample):
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
ones = np.ones(len(boston.target))
last_y_pred = None
for sample_weight in None, ones, 2 * ones:
clf = GradientBoostingRegressor(n_estimators=100,
loss=loss,
max_depth=4,
subsample=subsample,
min_samples_split=2,
random_state=1,
presort=presort)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert leaves.shape == (506, 100)
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert mse < 6.0
if last_y_pred is not None:
assert_array_almost_equal(last_y_pred, y_pred)
last_y_pred = y_pred
@pytest.mark.parametrize('presort', ('auto', True, False))
@pytest.mark.parametrize('loss', ('ls', 'lad', 'huber'))
@pytest.mark.parametrize('subsample', (1.0, 0.5))
def test_boston(presort, loss, subsample):
check_boston(presort, loss, subsample)
def check_iris(presort, subsample, sample_weight):
# Check consistency on dataset iris.
clf = GradientBoostingClassifier(n_estimators=100,
loss='deviance',
random_state=1,
subsample=subsample,
presort=presort)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert score > 0.9
leaves = clf.apply(iris.data)
assert leaves.shape == (150, 100, 3)
@pytest.mark.parametrize('presort', ('auto', True, False))
@pytest.mark.parametrize('subsample', (1.0, 0.5))
@pytest.mark.parametrize('sample_weight', (None, 1))
def test_iris(presort, subsample, sample_weight):
if sample_weight == 1:
sample_weight = np.ones(len(iris.target))
check_iris(presort, subsample, sample_weight)
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
# `Bagging Predictors`. Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 2, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state,
noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
clf = GradientBoostingRegressor(presort=presort)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 5.0
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 1700.0
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 0.015
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
for presort in True, False:
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=2, random_state=1,
presort=presort)
clf.fit(X, y)
assert hasattr(clf, 'feature_importances_')
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
weight = [0, 0, 0, 1, 1, 1]
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
msg = ("y contains 1 class after sample_weight trimmed classes with "
"zero weights, while a minimum of 2 classes are required.")
assert_raise_message(ValueError, msg, clf.fit, X, y, sample_weight=weight)
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_inputs_predict_stages():
# check that predict_stages throws an error if the type of X is not
# supported
x, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
x_sparse_csc = csc_matrix(x)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(x, y)
score = np.zeros((y.shape)).reshape(-1, 1)
assert_raise_message(ValueError,
"When X is a sparse matrix, a CSR format is expected",
predict_stages, clf.estimators_, x_sparse_csc,
clf.learning_rate, score)
x_fortran = np.asfortranarray(x)
assert_raise_message(ValueError,
"X should be C-ordered np.ndarray",
predict_stages, clf.estimators_, x_fortran,
clf.learning_rate, score)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert deviance < 0.5, "GB failed with deviance %.4f" % deviance
@pytest.mark.network
def test_feature_importance_regression():
"""Test that Gini importance is calculated correctly.
This test follows the example from [1]_ (pg. 373).
.. [1] Friedman, J., Hastie, T., & Tibshirani, R. (2001). The elements
of statistical learning. New York: Springer series in statistics.
"""
california = fetch_california_housing()
X, y = california.data, california.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
reg = GradientBoostingRegressor(loss='huber', learning_rate=0.1,
max_leaf_nodes=6, n_estimators=100,
random_state=0)
reg.fit(X_train, y_train)
sorted_idx = np.argsort(reg.feature_importances_)[::-1]
sorted_features = [california.feature_names[s] for s in sorted_idx]
# The most important feature is the median income by far.
assert sorted_features[0] == 'MedInc'
# The three subsequent features are the following. Their relative ordering
# might change a bit depending on the randomness of the trees and the
# train / test split.
assert set(sorted_features[1:4]) == {'Longitude', 'AveOccup', 'Latitude'}
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert gbrt.max_features_ == int(np.sqrt(n_features))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert gbrt.max_features_ == n_features
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert gbrt.max_features_ == int(n_features * 0.3)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert gbrt.max_features_ == int(np.sqrt(n_features))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert gbrt.max_features_ == int(np.log2(n_features))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert gbrt.max_features_ == 1
def test_staged_predict():
# Test whether the staged predictions eventually give
# the same result as ``predict``.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert y.shape == y_pred.shape
assert_array_almost_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert y_test.shape == y_pred.shape
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert y_test.shape[0] == staged_proba.shape[0]
assert 2 == staged_proba.shape[1]
assert_array_almost_equal(clf.predict_proba(X_test), staged_proba)
@pytest.mark.parametrize('Estimator', GRADIENT_BOOSTING_ESTIMATORS)
def test_staged_functions_defensive(Estimator):
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
estimator = Estimator()
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert np.all(staged_result[0] != 0)
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert 100 == len(clf.estimators_)
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert 100 == len(clf.estimators_)
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert 100 == len(clf.estimators_)
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert 100 == len(clf.estimators_)
def test_shape_y():
# Test fit with a column-vector y (raises a DataConversionWarning).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise; otherwise the warning gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert 100 == len(clf.estimators_)
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert 100 == len(clf.estimators_)
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert 100 == len(clf.estimators_)
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert 100 == len(clf.estimators_)
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert 100 == len(clf.estimators_)
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert clf.oob_improvement_.shape[0] == 100
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test that accessing oob_improvement_ raises when subsample=1.0 (no OOB).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9
assert clf.oob_improvement_.shape[0] == clf.n_estimators
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from io import StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert true_header == header
n_lines = sum(1 for l in verbose_output.readlines())
# one line per iteration for 1-10 (10 lines), then one every 10th iteration for 20-100 (9 lines)
assert 10 + 9 == n_lines
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from io import StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert true_header == header
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert 100 == n_lines
@pytest.mark.parametrize('Cls', GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start(Cls):
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
if Cls is GradientBoostingRegressor:
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
else:
# Random state is preserved and hence predict_proba must also be
# same
assert_array_equal(est_ws.predict(X), est.predict(X))
assert_array_almost_equal(est_ws.predict_proba(X),
est.predict_proba(X))
@pytest.mark.parametrize('Cls', GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_n_estimators(Cls):
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
@pytest.mark.parametrize('Cls', GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_max_depth(Cls):
# Test that it is possible to fit trees of different depths in the ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert est.estimators_[0, 0].max_depth == 1
for i in range(1, 11):
assert est.estimators_[-i, 0].max_depth == 2
@pytest.mark.parametrize('Cls', GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_clear(Cls):
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
@pytest.mark.parametrize('Cls', GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_zero_n_estimators(Cls):
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
@pytest.mark.parametrize('Cls', GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_smaller_n_estimators(Cls):
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
@pytest.mark.parametrize('Cls', GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_equal_n_estimators(Cls):
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
@pytest.mark.parametrize('Cls', GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_oob_switch(Cls):
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
@pytest.mark.parametrize('Cls', GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_oob(Cls):
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
@pytest.mark.parametrize('Cls', GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_sparse(Cls):
# Test that all sparse matrix types are supported
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
sparse_matrix_type = [csr_matrix, csc_matrix, coo_matrix]
est_dense = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_dense.fit(X, y)
est_dense.predict(X)
est_dense.set_params(n_estimators=200)
est_dense.fit(X, y)
y_pred_dense = est_dense.predict(X)
for sparse_constructor in sparse_matrix_type:
X_sparse = sparse_constructor(X)
est_sparse = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_sparse.fit(X_sparse, y)
est_sparse.predict(X)
est_sparse.set_params(n_estimators=200)
est_sparse.fit(X_sparse, y)
y_pred_sparse = est_sparse.predict(X)
assert_array_almost_equal(est_dense.oob_improvement_[:100],
est_sparse.oob_improvement_[:100])
assert_array_almost_equal(y_pred_dense, y_pred_sparse)
@pytest.mark.parametrize('Cls', GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_fortran(Cls):
# Test that feeding a Fortran-ordered X gives the same results as a
# C-ordered X
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
est_c = Cls(n_estimators=1, random_state=1, warm_start=True)
est_fortran = Cls(n_estimators=1, random_state=1, warm_start=True)
est_c.fit(X, y)
est_c.set_params(n_estimators=11)
est_c.fit(X, y)
X_fortran = np.asfortranarray(X)
est_fortran.fit(X_fortran, y)
est_fortran.set_params(n_estimators=11)
est_fortran.fit(X_fortran, y)
assert_array_almost_equal(est_c.predict(X), est_fortran.predict(X))
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
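# A hedged sketch (not used by the tests below) of an alternative monitor: the
# callable receives (iteration, estimator, locals()) and training stops as soon
# as it returns True, e.g. stopping once the training loss stops improving:
#
#     def plateau_monitor(i, est, locals_):
#         return i > 10 and est.train_score_[i] > est.train_score_[i - 10]
#
#     GradientBoostingRegressor().fit(X, y, monitor=plateau_monitor)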
@pytest.mark.parametrize('Cls', GRADIENT_BOOSTING_ESTIMATORS)
def test_monitor_early_stopping(Cls):
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert est.n_estimators == 20 # this is not altered
assert est.estimators_.shape[0] == 10
assert est.train_score_.shape[0] == 10
assert est.oob_improvement_.shape[0] == 10
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert est.n_estimators == 30
assert est.estimators_.shape[0] == 30
assert est.train_score_.shape[0] == 30
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert est.n_estimators == 20
assert est.estimators_.shape[0] == 10
assert est.train_score_.shape[0] == 10
assert est.oob_improvement_.shape[0] == 10
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert est.n_estimators == 30
assert est.train_score_.shape[0] == 30
assert est.estimators_.shape[0] == 30
assert est.oob_improvement_.shape[0] == 30
def test_complete_classification():
# Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert tree.max_depth == k
assert (tree.children_left[tree.children_left == TREE_LEAF].shape[0] ==
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert (tree.children_left[tree.children_left == TREE_LEAF].shape[0] ==
k + 1)
def test_zero_estimator_reg():
# Test if init='zero' works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if init='zero' works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
@pytest.mark.parametrize('GBEstimator', GRADIENT_BOOSTING_ESTIMATORS)
def test_max_leaf_nodes_max_depth(GBEstimator):
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert tree.max_depth == 1
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert tree.max_depth == 1
@pytest.mark.parametrize('GBEstimator', GRADIENT_BOOSTING_ESTIMATORS)
def test_min_impurity_split(GBEstimator):
# Test if min_impurity_split of base estimators is set
# Regression test for #8006
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
est = GBEstimator(min_impurity_split=0.1)
est = assert_warns_message(DeprecationWarning, "min_impurity_decrease",
est.fit, X, y)
for tree in est.estimators_.flat:
assert tree.min_impurity_split == 0.1
@pytest.mark.parametrize('GBEstimator', GRADIENT_BOOSTING_ESTIMATORS)
def test_min_impurity_decrease(GBEstimator):
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
est = GBEstimator(min_impurity_decrease=0.1)
est.fit(X, y)
for tree in est.estimators_.flat:
# Simply check if the parameter is passed on correctly. Tree tests
# will suffice for the actual working of this param
assert tree.min_impurity_decrease == 0.1
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1], expit(2 * score))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2,
loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert gb.predict([[1, 0]])[0] > 0.5
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
def check_sparse_input(EstimatorClass, X, X_sparse, y):
dense = EstimatorClass(n_estimators=10, random_state=0,
max_depth=2).fit(X, y)
sparse = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort=False).fit(X_sparse, y)
auto = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort='auto').fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
assert_array_almost_equal(sparse.apply(X), auto.apply(X))
assert_array_almost_equal(sparse.predict(X), auto.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
auto.feature_importances_)
assert_array_almost_equal(sparse.predict(X_sparse), dense.predict(X))
assert_array_almost_equal(dense.predict(X_sparse), sparse.predict(X))
if issubclass(EstimatorClass, GradientBoostingClassifier):
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
assert_array_almost_equal(sparse.predict_proba(X),
auto.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
auto.predict_log_proba(X))
assert_array_almost_equal(sparse.decision_function(X_sparse),
sparse.decision_function(X))
assert_array_almost_equal(dense.decision_function(X_sparse),
sparse.decision_function(X))
for res_sparse, res in zip(sparse.staged_decision_function(X_sparse),
sparse.staged_decision_function(X)):
assert_array_almost_equal(res_sparse, res)
@skip_if_32bit
@pytest.mark.parametrize(
'EstimatorClass',
(GradientBoostingClassifier, GradientBoostingRegressor))
@pytest.mark.parametrize('sparse_matrix', (csr_matrix, csc_matrix, coo_matrix))
def test_sparse_input(EstimatorClass, sparse_matrix):
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
check_sparse_input(EstimatorClass, X, sparse_matrix(X), y)
def test_gradient_boosting_early_stopping():
X, y = make_classification(n_samples=1000, random_state=0)
gbc = GradientBoostingClassifier(n_estimators=1000,
n_iter_no_change=10,
learning_rate=0.1, max_depth=3,
random_state=42)
gbr = GradientBoostingRegressor(n_estimators=1000, n_iter_no_change=10,
learning_rate=0.1, max_depth=3,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42)
# Check if early_stopping works as expected
for est, tol, early_stop_n_estimators in ((gbc, 1e-1, 28), (gbr, 1e-1, 13),
(gbc, 1e-3, 70),
(gbr, 1e-3, 28)):
est.set_params(tol=tol)
est.fit(X_train, y_train)
assert est.n_estimators_ == early_stop_n_estimators
assert est.score(X_test, y_test) > 0.7
# Without early stopping
gbc = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1,
max_depth=3, random_state=42)
gbc.fit(X, y)
gbr = GradientBoostingRegressor(n_estimators=200, learning_rate=0.1,
max_depth=3, random_state=42)
gbr.fit(X, y)
assert gbc.n_estimators_ == 100
assert gbr.n_estimators_ == 200
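# Hedged note on the numbers above: with n_iter_no_change set, fitting stops
# once the validation-set loss fails to improve by more than ``tol`` for
# ``n_iter_no_change`` consecutive iterations, so a looser tol (1e-1) stops
# much earlier than a tighter one (1e-3); the exact stopping points
# (28/13/70/28) are regression values tied to random_state=42.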
def test_gradient_boosting_validation_fraction():
X, y = make_classification(n_samples=1000, random_state=0)
gbc = GradientBoostingClassifier(n_estimators=100,
n_iter_no_change=10,
validation_fraction=0.1,
learning_rate=0.1, max_depth=3,
random_state=42)
gbc2 = clone(gbc).set_params(validation_fraction=0.3)
gbc3 = clone(gbc).set_params(n_iter_no_change=20)
gbr = GradientBoostingRegressor(n_estimators=100, n_iter_no_change=10,
learning_rate=0.1, max_depth=3,
validation_fraction=0.1,
random_state=42)
gbr2 = clone(gbr).set_params(validation_fraction=0.3)
gbr3 = clone(gbr).set_params(n_iter_no_change=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# Check if validation_fraction has an effect
gbc.fit(X_train, y_train)
gbc2.fit(X_train, y_train)
assert gbc.n_estimators_ != gbc2.n_estimators_
gbr.fit(X_train, y_train)
gbr2.fit(X_train, y_train)
assert gbr.n_estimators_ != gbr2.n_estimators_
# Check if n_estimators_ increase monotonically with n_iter_no_change
# Set validation
gbc3.fit(X_train, y_train)
gbr3.fit(X_train, y_train)
assert gbr.n_estimators_ < gbr3.n_estimators_
assert gbc.n_estimators_ < gbc3.n_estimators_
def test_early_stopping_stratified():
# Make sure data splitting for early stopping is stratified
X = [[1, 2], [2, 3], [3, 4], [4, 5]]
y = [0, 0, 0, 1]
gbc = GradientBoostingClassifier(n_iter_no_change=5)
with pytest.raises(
ValueError,
match='The least populated class in y has only 1 member'):
gbc.fit(X, y)
class _NoSampleWeightWrapper(BaseEstimator):
def __init__(self, est):
self.est = est
def fit(self, X, y):
self.est.fit(X, y)
def predict(self, X):
return self.est.predict(X)
def predict_proba(self, X):
return self.est.predict_proba(X)
def _make_multiclass():
return make_classification(n_classes=3, n_clusters_per_class=1)
@pytest.mark.parametrize(
"gb, dataset_maker, init_estimator",
[(GradientBoostingClassifier, make_classification, DummyClassifier),
(GradientBoostingClassifier, _make_multiclass, DummyClassifier),
(GradientBoostingRegressor, make_regression, DummyRegressor)],
ids=["binary classification", "multiclass classification", "regression"])
def test_gradient_boosting_with_init(gb, dataset_maker, init_estimator):
# Check that GradientBoostingRegressor works when init is a sklearn
# estimator.
# Check that an error is raised if trying to fit with sample weight but
# the initial estimator does not support sample weights
X, y = dataset_maker()
sample_weight = np.random.RandomState(42).rand(100)
# init supports sample weights
init_est = init_estimator()
gb(init=init_est).fit(X, y, sample_weight=sample_weight)
# init does not support sample weights
init_est = _NoSampleWeightWrapper(init_estimator())
gb(init=init_est).fit(X, y) # ok no sample weights
with pytest.raises(ValueError,
match="estimator.*does not support sample weights"):
gb(init=init_est).fit(X, y, sample_weight=sample_weight)
def test_gradient_boosting_with_init_pipeline():
# Check that the init estimator can be a pipeline (see issue #13466)
X, y = make_regression(random_state=0)
init = make_pipeline(LinearRegression())
gb = GradientBoostingRegressor(init=init)
gb.fit(X, y) # pipeline without sample_weight works fine
with pytest.raises(
ValueError,
match='The initial estimator Pipeline does not support sample '
'weights'):
gb.fit(X, y, sample_weight=np.ones(X.shape[0]))
# Passing sample_weight to a pipeline raises a ValueError. This test makes
# sure we make the distinction between ValueError raised by a pipeline that
# was passed sample_weight, and a ValueError raised by a regular estimator
# whose input checking failed.
with pytest.raises(
ValueError,
match='nu <= 0 or nu > 1'):
# Note that NuSVR properly supports sample_weight
init = NuSVR(gamma='auto', nu=1.5)
gb = GradientBoostingRegressor(init=init)
gb.fit(X, y, sample_weight=np.ones(X.shape[0]))
@pytest.mark.parametrize('estimator, missing_method', [
(GradientBoostingClassifier(init=LinearSVC()), 'predict_proba'),
(GradientBoostingRegressor(init=OneHotEncoder()), 'predict')
])
def test_gradient_boosting_init_wrong_methods(estimator, missing_method):
# Make sure error is raised if init estimators don't have the required
# methods (fit, predict, predict_proba)
message = ("The init parameter must be a valid estimator and support "
"both fit and " + missing_method)
with pytest.raises(ValueError, match=message):
estimator.fit(X, y)
def test_early_stopping_n_classes():
# when doing early stopping (_, _, y_train, _ = train_test_split(X, y))
# there might be classes in y that are missing in y_train. As the init
# estimator will be trained on y_train, we need to raise an error if this
# happens.
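    # With only 10 samples, the hold-out split performed for early stopping
    # can easily leave a training subset with no samples of the minority
    # class, which is the situation this error message guards against.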
X = [[1]] * 10
    y = [0, 0] + [1] * 8  # only 2 samples of the negative class out of 10
gb = GradientBoostingClassifier(n_iter_no_change=5, random_state=0,
validation_fraction=8)
with pytest.raises(
ValueError,
match='The training data after the early stopping split'):
gb.fit(X, y)
    # No error if we let the training data be big enough
    gb = GradientBoostingClassifier(n_iter_no_change=5, random_state=0,
                                    validation_fraction=4)
    gb.fit(X, y)
def test_gbr_degenerate_feature_importances():
# growing an ensemble of single node trees. See #13620
X = np.zeros((10, 10))
y = np.ones((10,))
gbr = GradientBoostingRegressor().fit(X, y)
assert_array_equal(gbr.feature_importances_,
np.zeros(10, dtype=np.float64))
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import sys
import unittest
from shutil import copyfile, copytree
from tempfile import TemporaryDirectory
import jmespath
import pytest
from parameterized import parameterized
from tests.helm_template_generator import render_chart
class PodTemplateFileTest(unittest.TestCase):
@classmethod
@pytest.fixture(autouse=True, scope="class")
def isolate_chart(cls):
with TemporaryDirectory() as tmp_dir:
cls.temp_chart_dir = tmp_dir + "/chart"
copytree(sys.path[0], cls.temp_chart_dir)
copyfile(
cls.temp_chart_dir + "/files/pod-template-file.kubernetes-helm-yaml",
cls.temp_chart_dir + "/templates/pod-template-file.yaml",
)
yield
def test_should_work(self):
docs = render_chart(
values={},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert re.search("Pod", docs[0]["kind"])
assert jmespath.search("spec.containers[0].image", docs[0]) is not None
assert "base" == jmespath.search("spec.containers[0].name", docs[0])
def test_should_add_an_init_container_if_git_sync_is_true(self):
docs = render_chart(
values={
"images": {
"gitSync": {
"repository": "test-registry/test-repo",
"tag": "test-tag",
"pullPolicy": "Always",
}
},
"dags": {
"gitSync": {
"enabled": True,
"containerName": "git-sync-test",
"wait": 66,
"maxFailures": 70,
"subPath": "path1/path2",
"rev": "HEAD",
"depth": 1,
"repo": "https://github.com/apache/airflow.git",
"branch": "test-branch",
"sshKeySecret": None,
"credentialsSecret": None,
"knownHosts": None,
}
},
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert re.search("Pod", docs[0]["kind"])
assert {
"name": "git-sync-test-init",
"securityContext": {"runAsUser": 65533},
"image": "test-registry/test-repo:test-tag",
"imagePullPolicy": "Always",
"env": [
{"name": "GIT_SYNC_REV", "value": "HEAD"},
{"name": "GIT_SYNC_BRANCH", "value": "test-branch"},
{"name": "GIT_SYNC_REPO", "value": "https://github.com/apache/airflow.git"},
{"name": "GIT_SYNC_DEPTH", "value": "1"},
{"name": "GIT_SYNC_ROOT", "value": "/git"},
{"name": "GIT_SYNC_DEST", "value": "repo"},
{"name": "GIT_SYNC_ADD_USER", "value": "true"},
{"name": "GIT_SYNC_WAIT", "value": "66"},
{"name": "GIT_SYNC_MAX_SYNC_FAILURES", "value": "70"},
{"name": "GIT_SYNC_ONE_TIME", "value": "true"},
],
"volumeMounts": [{"mountPath": "/git", "name": "dags"}],
"resources": {},
} == jmespath.search("spec.initContainers[0]", docs[0])
def test_should_not_add_init_container_if_dag_persistence_is_true(self):
docs = render_chart(
values={
"dags": {
"persistence": {"enabled": True},
"gitSync": {"enabled": True},
}
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert jmespath.search("spec.initContainers", docs[0]) is None
@parameterized.expand(
[
({"gitSync": {"enabled": True}}, True),
({"persistence": {"enabled": True}}, False),
(
{
"gitSync": {"enabled": True},
"persistence": {"enabled": True},
},
True,
),
]
)
def test_dags_mount(self, dag_values, expected_read_only):
docs = render_chart(
values={"dags": dag_values},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {
"mountPath": "/opt/airflow/dags",
"name": "dags",
"readOnly": expected_read_only,
} in jmespath.search("spec.containers[0].volumeMounts", docs[0])
def test_validate_if_ssh_params_are_added(self):
docs = render_chart(
values={
"dags": {
"gitSync": {
"enabled": True,
"containerName": "git-sync-test",
"sshKeySecret": "ssh-secret",
"knownHosts": None,
"branch": "test-branch",
}
}
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {"name": "GIT_SSH_KEY_FILE", "value": "/etc/git-secret/ssh"} in jmespath.search(
"spec.initContainers[0].env", docs[0]
)
assert {"name": "GIT_SYNC_SSH", "value": "true"} in jmespath.search(
"spec.initContainers[0].env", docs[0]
)
assert {"name": "GIT_KNOWN_HOSTS", "value": "false"} in jmespath.search(
"spec.initContainers[0].env", docs[0]
)
assert {
"name": "git-sync-ssh-key",
"mountPath": "/etc/git-secret/ssh",
"subPath": "gitSshKey",
"readOnly": True,
} in jmespath.search("spec.initContainers[0].volumeMounts", docs[0])
assert {
"name": "git-sync-ssh-key",
"secret": {"secretName": "ssh-secret", "defaultMode": 288},
} in jmespath.search("spec.volumes", docs[0])
def test_validate_if_ssh_known_hosts_are_added(self):
docs = render_chart(
values={
"dags": {
"gitSync": {
"enabled": True,
"containerName": "git-sync-test",
"sshKeySecret": "ssh-secret",
"knownHosts": "github.com ssh-rsa AAAABdummy",
"branch": "test-branch",
}
}
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {"name": "GIT_KNOWN_HOSTS", "value": "true"} in jmespath.search(
"spec.initContainers[0].env", docs[0]
)
assert {
"name": "GIT_SSH_KNOWN_HOSTS_FILE",
"value": "/etc/git-secret/known_hosts",
} in jmespath.search("spec.initContainers[0].env", docs[0])
assert {
"name": "config",
"mountPath": "/etc/git-secret/known_hosts",
"subPath": "known_hosts",
"readOnly": True,
} in jmespath.search("spec.initContainers[0].volumeMounts", docs[0])
def test_should_set_username_and_pass_env_variables(self):
docs = render_chart(
values={
"dags": {
"gitSync": {
"enabled": True,
"credentialsSecret": "user-pass-secret",
"sshKeySecret": None,
}
}
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {
"name": "GIT_SYNC_USERNAME",
"valueFrom": {"secretKeyRef": {"name": "user-pass-secret", "key": "GIT_SYNC_USERNAME"}},
} in jmespath.search("spec.initContainers[0].env", docs[0])
assert {
"name": "GIT_SYNC_PASSWORD",
"valueFrom": {"secretKeyRef": {"name": "user-pass-secret", "key": "GIT_SYNC_PASSWORD"}},
} in jmespath.search("spec.initContainers[0].env", docs[0])
def test_should_set_the_dags_volume_claim_correctly_when_using_an_existing_claim(self):
docs = render_chart(
values={"dags": {"persistence": {"enabled": True, "existingClaim": "test-claim"}}},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {"name": "dags", "persistentVolumeClaim": {"claimName": "test-claim"}} in jmespath.search(
"spec.volumes", docs[0]
)
def test_should_use_empty_dir_for_gitsync_without_persistence(self):
docs = render_chart(
values={"dags": {"gitSync": {"enabled": True}}},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {"name": "dags", "emptyDir": {}} in jmespath.search("spec.volumes", docs[0])
@parameterized.expand(
[
({"enabled": False}, {"emptyDir": {}}),
({"enabled": True}, {"persistentVolumeClaim": {"claimName": "RELEASE-NAME-logs"}}),
(
{"enabled": True, "existingClaim": "test-claim"},
{"persistentVolumeClaim": {"claimName": "test-claim"}},
),
]
)
def test_logs_persistence_changes_volume(self, log_persistence_values, expected):
docs = render_chart(
values={"logs": {"persistence": log_persistence_values}},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {"name": "logs", **expected} in jmespath.search("spec.volumes", docs[0])
def test_should_set_a_custom_image_in_pod_template(self):
docs = render_chart(
values={"images": {"pod_template": {"repository": "dummy_image", "tag": "latest"}}},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert re.search("Pod", docs[0]["kind"])
assert "dummy_image:latest" == jmespath.search("spec.containers[0].image", docs[0])
assert "base" == jmespath.search("spec.containers[0].name", docs[0])
def test_mount_airflow_cfg(self):
docs = render_chart(
values={},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert re.search("Pod", docs[0]["kind"])
assert {'configMap': {'name': 'RELEASE-NAME-airflow-config'}, 'name': 'config'} == jmespath.search(
"spec.volumes[1]", docs[0]
)
assert {
'name': 'config',
'mountPath': '/opt/airflow/airflow.cfg',
'subPath': 'airflow.cfg',
'readOnly': True,
} == jmespath.search("spec.containers[0].volumeMounts[1]", docs[0])
def test_should_create_valid_affinity_and_node_selector(self):
docs = render_chart(
values={
"affinity": {
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchExpressions": [
{"key": "foo", "operator": "In", "values": ["true"]},
]
}
]
}
}
},
"tolerations": [
{"key": "dynamic-pods", "operator": "Equal", "value": "true", "effect": "NoSchedule"}
],
"nodeSelector": {"diskType": "ssd"},
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert re.search("Pod", docs[0]["kind"])
assert "foo" == jmespath.search(
"spec.affinity.nodeAffinity."
"requiredDuringSchedulingIgnoredDuringExecution."
"nodeSelectorTerms[0]."
"matchExpressions[0]."
"key",
docs[0],
)
assert "ssd" == jmespath.search(
"spec.nodeSelector.diskType",
docs[0],
)
assert "dynamic-pods" == jmespath.search(
"spec.tolerations[0].key",
docs[0],
)
def test_should_add_fsgroup_to_the_pod_template(self):
docs = render_chart(
values={"gid": 5000},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
        assert 5000 == jmespath.search("spec.securityContext.fsGroup", docs[0])
def test_should_create_valid_volume_mount_and_volume(self):
docs = render_chart(
values={
"workers": {
"extraVolumes": [{"name": "test-volume", "emptyDir": {}}],
"extraVolumeMounts": [{"name": "test-volume", "mountPath": "/opt/test"}],
}
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert "test-volume" == jmespath.search(
"spec.volumes[2].name",
docs[0],
)
assert "test-volume" == jmespath.search(
"spec.containers[0].volumeMounts[2].name",
docs[0],
)
def test_should_add_env_for_gitsync(self):
docs = render_chart(
values={
"dags": {
"gitSync": {
"enabled": True,
"env": [{"name": "FOO", "value": "bar"}],
}
},
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {"name": "FOO", "value": "bar"} in jmespath.search("spec.initContainers[0].env", docs[0])
def test_no_airflow_local_settings_by_default(self):
docs = render_chart(show_only=["templates/pod-template-file.yaml"], chart_dir=self.temp_chart_dir)
volume_mounts = jmespath.search("spec.containers[0].volumeMounts", docs[0])
assert "airflow_local_settings.py" not in str(volume_mounts)
def test_airflow_local_settings(self):
docs = render_chart(
values={"airflowLocalSettings": "# Well hello!"},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {
"name": "config",
"mountPath": "/opt/airflow/config/airflow_local_settings.py",
"subPath": "airflow_local_settings.py",
"readOnly": True,
} in jmespath.search("spec.containers[0].volumeMounts", docs[0])
def test_airflow_pod_annotations(self):
docs = render_chart(
values={"airflowPodAnnotations": {"my_annotation": "annotated!"}},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
annotations = jmespath.search("metadata.annotations", docs[0])
assert "my_annotation" in annotations
assert "annotated!" in annotations["my_annotation"]
def test_should_add_extra_init_containers(self):
docs = render_chart(
values={
"workers": {
"extraInitContainers": [
{"name": "test-init-container", "image": "test-registry/test-repo:test-tag"}
],
},
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {
"name": "test-init-container",
"image": "test-registry/test-repo:test-tag",
} == jmespath.search("spec.initContainers[-1]", docs[0])
def test_should_add_pod_labels(self):
docs = render_chart(
values={"labels": {"label1": "value1", "label2": "value2"}},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {
"label1": "value1",
"label2": "value2",
"release": "RELEASE-NAME",
"component": "worker",
"tier": "airflow",
} == jmespath.search("metadata.labels", docs[0])
def test_should_add_resources(self):
docs = render_chart(
values={
"workers": {
"resources": {
"requests": {"memory": "2Gi", "cpu": "1"},
"limits": {"memory": "3Gi", "cpu": "2"},
}
}
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {
"limits": {
"cpu": "2",
"memory": "3Gi",
},
"requests": {
"cpu": "1",
"memory": "2Gi",
},
} == jmespath.search("spec.containers[0].resources", docs[0])
def test_empty_resources(self):
docs = render_chart(
values={},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert {} == jmespath.search("spec.containers[0].resources", docs[0])
def test_should_create_valid_affinity_tolerations_and_node_selector(self):
docs = render_chart(
values={
"executor": "KubernetesExecutor",
"workers": {
"affinity": {
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchExpressions": [
{"key": "foo", "operator": "In", "values": ["true"]},
]
}
]
}
}
},
"tolerations": [
{"key": "dynamic-pods", "operator": "Equal", "value": "true", "effect": "NoSchedule"}
],
"nodeSelector": {"diskType": "ssd"},
},
},
show_only=["templates/pod-template-file.yaml"],
chart_dir=self.temp_chart_dir,
)
assert "Pod" == jmespath.search("kind", docs[0])
assert "foo" == jmespath.search(
"spec.affinity.nodeAffinity."
"requiredDuringSchedulingIgnoredDuringExecution."
"nodeSelectorTerms[0]."
"matchExpressions[0]."
"key",
docs[0],
)
assert "ssd" == jmespath.search(
"spec.nodeSelector.diskType",
docs[0],
)
assert "dynamic-pods" == jmespath.search(
"spec.tolerations[0].key",
docs[0],
)
|
|
#!/usr/bin/env python
##
# Copyright (c) 2006-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from __future__ import print_function
# Suppress warning that occurs on Linux
import sys
if sys.platform.startswith("linux"):
from Crypto.pct_warnings import PowmInsecureWarning
import warnings
warnings.simplefilter("ignore", PowmInsecureWarning)
from getopt import getopt, GetoptError
import operator
import os
import uuid
from calendarserver.tools.cmdline import utilityMain, WorkerService
from calendarserver.tools.util import (
recordForPrincipalID, prettyRecord, action_addProxy, action_removeProxy
)
from twext.who.directory import DirectoryRecord
from twext.who.idirectory import RecordType, InvalidDirectoryRecordError
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue, succeed
from twistedcaldav.config import config
from twistedcaldav.cache import MemcacheChangeNotifier
from txdav.who.delegates import CachingDelegates
from txdav.who.idirectory import AutoScheduleMode
from txdav.who.groups import GroupCacherPollingWork
allowedAutoScheduleModes = {
"default": None,
"none": AutoScheduleMode.none,
"accept-always": AutoScheduleMode.accept,
"decline-always": AutoScheduleMode.decline,
"accept-if-free": AutoScheduleMode.acceptIfFree,
"decline-if-busy": AutoScheduleMode.declineIfBusy,
"automatic": AutoScheduleMode.acceptIfFreeDeclineIfBusy,
}
def usage(e=None):
if e:
print(e)
print("")
name = os.path.basename(sys.argv[0])
print("usage: %s [options] action_flags principal [principal ...]" % (name,))
print(" %s [options] --list-principal-types" % (name,))
print(" %s [options] --list-principals type" % (name,))
print("")
print(" Performs the given actions against the giving principals.")
print("")
print(" Principals are identified by one of the following:")
print(" Type and shortname (eg.: users:wsanchez)")
# print(" A principal path (eg.: /principals/users/wsanchez/)")
print(" A GUID (eg.: E415DBA7-40B5-49F5-A7CC-ACC81E4DEC79)")
print("")
print("options:")
print(" -h --help: print this help and exit")
print(" -f --config <path>: Specify caldavd.plist configuration path")
print(" -v --verbose: print debugging information")
print("")
print("actions:")
print(" --context <search-context>: {user|group|location|resource|attendee}; must be used in conjunction with --search")
print(" --search <search-tokens>: search using one or more tokens")
print(" --list-principal-types: list all of the known principal types")
print(" --list-principals type: list all principals of the given type")
print(" --list-read-proxies: list proxies with read-only access")
print(" --list-write-proxies: list proxies with read-write access")
print(" --list-proxies: list all proxies")
print(" --list-proxy-for: principals this principal is a proxy for")
print(" --add-read-proxy=principal: add a read-only proxy")
print(" --add-write-proxy=principal: add a read-write proxy")
print(" --remove-proxy=principal: remove a proxy")
print(" --set-auto-schedule-mode={default|none|accept-always|decline-always|accept-if-free|decline-if-busy|automatic}: set auto-schedule mode")
print(" --get-auto-schedule-mode: read auto-schedule mode")
print(" --set-auto-accept-group=principal: set auto-accept-group")
print(" --get-auto-accept-group: read auto-accept-group")
print(" --add {locations|resources|addresses} full-name record-name UID: add a principal")
print(" --remove: remove a principal")
print(" --set-name=name: set the name of a principal")
print(" --get-name: get the name of a principal")
print(" --set-geo=url: set the geo: url for an address (e.g. geo:37.331741,-122.030333)")
print(" --get-geo: get the geo: url for an address")
print(" --set-street-address=streetaddress: set the street address string for an address")
print(" --get-street-address: get the street address string for an address")
print(" --set-address=guid: associate principal with an address (by guid)")
print(" --get-address: get the associated address's guid")
print(" --refresh-groups: schedule a group membership refresh")
print(" --print-group-info <group principals>: prints group delegation and membership")
if e:
sys.exit(64)
else:
sys.exit(0)
class PrincipalService(WorkerService):
"""
Executes principals-related functions in a context which has access to the store
"""
function = None
params = []
@inlineCallbacks
def doWork(self):
"""
        Calls the function that's been assigned to "function" and passes it
        the store and whatever has been assigned to "params".
"""
if (
config.EnableResponseCache and
config.Memcached.Pools.Default.ClientEnabled
):
            # These class attributes need to be set up with our memcache
            # notifier
CachingDelegates.cacheNotifier = MemcacheChangeNotifier(None, cacheHandle="PrincipalToken")
if self.function is not None:
yield self.function(self.store, *self.params)
def main():
try:
(optargs, args) = getopt(
sys.argv[1:], "a:hf:P:v", [
"help",
"config=",
"add=",
"remove",
"context=",
"search",
"list-principal-types",
"list-principals=",
# Proxies
"list-read-proxies",
"list-write-proxies",
"list-proxies",
"list-proxy-for",
"add-read-proxy=",
"add-write-proxy=",
"remove-proxy=",
# Groups
"list-group-members",
"add-group-member=",
"remove-group-member=",
"print-group-info",
"refresh-groups",
# Scheduling
"set-auto-schedule-mode=",
"get-auto-schedule-mode",
"set-auto-accept-group=",
"get-auto-accept-group",
# Principal details
"set-name=",
"get-name",
"set-geo=",
"get-geo",
"set-address=",
"get-address",
"set-street-address=",
"get-street-address",
"verbose",
],
)
except GetoptError, e:
usage(e)
#
# Get configuration
#
configFileName = None
addType = None
listPrincipalTypes = False
listPrincipals = None
searchContext = None
searchTokens = None
printGroupInfo = False
scheduleGroupRefresh = False
principalActions = []
verbose = False
for opt, arg in optargs:
# Args come in as encoded bytes
arg = arg.decode("utf-8")
if opt in ("-h", "--help"):
usage()
elif opt in ("-v", "--verbose"):
verbose = True
elif opt in ("-f", "--config"):
configFileName = arg
elif opt in ("-a", "--add"):
addType = arg
elif opt in ("-r", "--remove"):
principalActions.append((action_removePrincipal,))
elif opt in ("", "--list-principal-types"):
listPrincipalTypes = True
elif opt in ("", "--list-principals"):
listPrincipals = arg
elif opt in ("", "--context"):
searchContext = arg
elif opt in ("", "--search"):
searchTokens = args
elif opt in ("", "--list-read-proxies"):
principalActions.append((action_listProxies, "read"))
elif opt in ("", "--list-write-proxies"):
principalActions.append((action_listProxies, "write"))
elif opt in ("-L", "--list-proxies"):
principalActions.append((action_listProxies, "read", "write"))
elif opt in ("--list-proxy-for"):
principalActions.append((action_listProxyFor, "read", "write"))
elif opt in ("--add-read-proxy", "--add-write-proxy"):
if "read" in opt:
proxyType = "read"
elif "write" in opt:
proxyType = "write"
else:
raise AssertionError("Unknown proxy type")
principalActions.append((action_addProxy, proxyType, arg))
elif opt in ("", "--remove-proxy"):
principalActions.append((action_removeProxy, arg))
elif opt in ("", "--list-group-members"):
principalActions.append((action_listGroupMembers,))
elif opt in ("--add-group-member"):
principalActions.append((action_addGroupMember, arg))
elif opt in ("", "--remove-group-member"):
principalActions.append((action_removeGroupMember, arg))
elif opt in ("", "--print-group-info"):
printGroupInfo = True
elif opt in ("", "--refresh-groups"):
scheduleGroupRefresh = True
elif opt in ("", "--set-auto-schedule-mode"):
try:
if arg not in allowedAutoScheduleModes:
raise ValueError("Unknown auto-schedule mode: {mode}".format(
mode=arg))
autoScheduleMode = allowedAutoScheduleModes[arg]
except ValueError, e:
abort(e)
principalActions.append((action_setAutoScheduleMode, autoScheduleMode))
elif opt in ("", "--get-auto-schedule-mode"):
principalActions.append((action_getAutoScheduleMode,))
elif opt in ("", "--set-auto-accept-group"):
principalActions.append((action_setAutoAcceptGroup, arg))
elif opt in ("", "--get-auto-accept-group"):
principalActions.append((action_getAutoAcceptGroup,))
elif opt in ("", "--set-name"):
principalActions.append((action_setValue, u"fullNames", [arg]))
elif opt in ("", "--get-name"):
principalActions.append((action_getValue, u"fullNames"))
elif opt in ("", "--set-geo"):
principalActions.append((action_setValue, u"geographicLocation", arg))
elif opt in ("", "--get-geo"):
principalActions.append((action_getValue, u"geographicLocation"))
elif opt in ("", "--set-street-address"):
principalActions.append((action_setValue, u"streetAddress", arg))
elif opt in ("", "--get-street-address"):
principalActions.append((action_getValue, u"streetAddress"))
elif opt in ("", "--set-address"):
principalActions.append((action_setValue, u"associatedAddress", arg))
elif opt in ("", "--get-address"):
principalActions.append((action_getValue, u"associatedAddress"))
else:
raise NotImplementedError(opt)
#
# List principals
#
if listPrincipalTypes:
if args:
usage("Too many arguments")
function = runListPrincipalTypes
params = ()
elif printGroupInfo:
function = printGroupCacherInfo
params = (args,)
elif scheduleGroupRefresh:
function = scheduleGroupRefreshJob
params = ()
elif addType:
try:
addType = matchStrings(
addType,
[
"locations", "resources", "addresses", "users", "groups"
]
)
except ValueError, e:
print(e)
return
try:
fullName, shortName, uid = parseCreationArgs(args)
except ValueError, e:
print(e)
return
if fullName is not None:
fullNames = [fullName]
else:
fullNames = ()
if shortName is not None:
shortNames = [shortName]
else:
shortNames = ()
function = runAddPrincipal
params = (addType, uid, shortNames, fullNames)
elif listPrincipals:
try:
listPrincipals = matchStrings(
listPrincipals,
["users", "groups", "locations", "resources", "addresses"]
)
except ValueError, e:
print(e)
return
if args:
usage("Too many arguments")
function = runListPrincipals
params = (listPrincipals,)
elif searchTokens:
function = runSearch
searchTokens = [t.decode("utf-8") for t in searchTokens]
params = (searchTokens, searchContext)
else:
if not args:
usage("No principals specified.")
unicodeArgs = [a.decode("utf-8") for a in args]
function = runPrincipalActions
params = (unicodeArgs, principalActions)
PrincipalService.function = function
PrincipalService.params = params
utilityMain(configFileName, PrincipalService, verbose=verbose)
def runListPrincipalTypes(service, store):
directory = store.directoryService()
for recordType in directory.recordTypes():
print(directory.recordTypeToOldName(recordType))
return succeed(None)
@inlineCallbacks
def runListPrincipals(service, store, listPrincipals):
directory = store.directoryService()
recordType = directory.oldNameToRecordType(listPrincipals)
try:
records = list((yield directory.recordsWithRecordType(recordType)))
if records:
printRecordList(records)
else:
print("No records of type %s" % (listPrincipals,))
except InvalidDirectoryRecordError, e:
usage(e)
returnValue(None)
@inlineCallbacks
def runPrincipalActions(service, store, principalIDs, actions):
directory = store.directoryService()
for principalID in principalIDs:
# Resolve the given principal IDs to records
try:
record = yield recordForPrincipalID(directory, principalID)
except ValueError:
record = None
if record is None:
sys.stderr.write("Invalid principal ID: %s\n" % (principalID,))
continue
# Performs requested actions
for action in actions:
(yield action[0](store, record, *action[1:]))
print("")
@inlineCallbacks
def runSearch(service, store, tokens, context=None):
directory = store.directoryService()
records = list(
(
yield directory.recordsMatchingTokens(
tokens, context=context
)
)
)
if records:
records.sort(key=operator.attrgetter('fullNames'))
print("{n} matches found:".format(n=len(records)))
for record in records:
print(
"\n{d} ({rt})".format(
d=record.displayName,
rt=record.recordType.name
)
)
print(" UID: {u}".format(u=record.uid,))
try:
print(
" Record name{plural}: {names}".format(
plural=("s" if len(record.shortNames) > 1 else ""),
names=(", ".join(record.shortNames))
)
)
except AttributeError:
pass
try:
if record.emailAddresses:
print(
" Email{plural}: {emails}".format(
plural=("s" if len(record.emailAddresses) > 1 else ""),
emails=(", ".join(record.emailAddresses))
)
)
except AttributeError:
pass
else:
print("No matches found")
print("")
@inlineCallbacks
def runAddPrincipal(service, store, addType, uid, shortNames, fullNames):
directory = store.directoryService()
recordType = directory.oldNameToRecordType(addType)
# See if that UID is in use
record = yield directory.recordWithUID(uid)
if record is not None:
print("UID already in use: {uid}".format(uid=uid))
returnValue(None)
# See if the shortnames are in use
for shortName in shortNames:
record = yield directory.recordWithShortName(recordType, shortName)
if record is not None:
print("Record name already in use: {name}".format(name=shortName))
returnValue(None)
fields = {
directory.fieldName.recordType: recordType,
directory.fieldName.uid: uid,
directory.fieldName.shortNames: shortNames,
directory.fieldName.fullNames: fullNames,
directory.fieldName.hasCalendars: True,
directory.fieldName.hasContacts: True,
}
record = DirectoryRecord(directory, fields)
yield record.service.updateRecords([record], create=True)
print("Added '{name}'".format(name=fullNames[0]))
@inlineCallbacks
def action_removePrincipal(store, record):
directory = store.directoryService()
fullName = record.displayName
shortNames = ",".join(record.shortNames)
yield directory.removeRecords([record.uid])
print(
"Removed '{full}' {shorts} {uid}".format(
full=fullName, shorts=shortNames, uid=record.uid
)
)
@inlineCallbacks
def action_listProxies(store, record, *proxyTypes):
directory = store.directoryService()
for proxyType in proxyTypes:
groupRecordType = {
"read": directory.recordType.readDelegateGroup,
"write": directory.recordType.writeDelegateGroup,
}.get(proxyType)
pseudoGroup = yield directory.recordWithShortName(
groupRecordType,
record.uid
)
proxies = yield pseudoGroup.members()
if proxies:
print("%s proxies for %s:" % (
{"read": "Read-only", "write": "Read/write"}[proxyType],
prettyRecord(record)
))
printRecordList(proxies)
print("")
else:
print("No %s proxies for %s" % (proxyType, prettyRecord(record)))
@inlineCallbacks
def action_listProxyFor(store, record, *proxyTypes):
directory = store.directoryService()
if record.recordType != directory.recordType.user:
print("You must pass a user principal to this command")
returnValue(None)
for proxyType in proxyTypes:
groupRecordType = {
"read": directory.recordType.readDelegatorGroup,
"write": directory.recordType.writeDelegatorGroup,
}.get(proxyType)
pseudoGroup = yield directory.recordWithShortName(
groupRecordType,
record.uid
)
proxies = yield pseudoGroup.members()
if proxies:
print("%s is a %s proxy for:" % (
prettyRecord(record),
{"read": "Read-only", "write": "Read/write"}[proxyType]
))
printRecordList(proxies)
print("")
else:
print(
"{r} is not a {t} proxy for anyone".format(
r=prettyRecord(record),
t={"read": "Read-only", "write": "Read/write"}[proxyType]
)
)
@inlineCallbacks
def action_listGroupMembers(store, record):
members = yield record.members()
if members:
print("Group members for %s:\n" % (
prettyRecord(record)
))
printRecordList(members)
print("")
else:
print("No group members for %s" % (prettyRecord(record),))
@inlineCallbacks
def action_addGroupMember(store, record, *memberIDs):
directory = store.directoryService()
existingMembers = yield record.members()
existingMemberUIDs = set([member.uid for member in existingMembers])
add = set()
for memberID in memberIDs:
memberRecord = yield recordForPrincipalID(directory, memberID)
if memberRecord is None:
print("Invalid member ID: %s" % (memberID,))
elif memberRecord.uid in existingMemberUIDs:
print("Existing member ID: %s" % (memberID,))
else:
add.add(memberRecord)
if add:
yield record.addMembers(add)
for memberRecord in add:
print(
"Added {member} for {record}".format(
member=prettyRecord(memberRecord),
record=prettyRecord(record)
)
)
yield record.service.updateRecords([record], create=False)
@inlineCallbacks
def action_removeGroupMember(store, record, *memberIDs):
directory = store.directoryService()
existingMembers = yield record.members()
existingMemberUIDs = set([member.uid for member in existingMembers])
remove = set()
for memberID in memberIDs:
memberRecord = yield recordForPrincipalID(directory, memberID)
if memberRecord is None:
print("Invalid member ID: %s" % (memberID,))
elif memberRecord.uid not in existingMemberUIDs:
print("Missing member ID: %s" % (memberID,))
else:
remove.add(memberRecord)
if remove:
yield record.removeMembers(remove)
for memberRecord in remove:
print(
"Removed {member} for {record}".format(
member=prettyRecord(memberRecord),
record=prettyRecord(record)
)
)
yield record.service.updateRecords([record], create=False)
@inlineCallbacks
def printGroupCacherInfo(service, store, principalIDs):
"""
Print all groups that have been delegated to, their cached members, and
who delegated to those groups.
"""
directory = store.directoryService()
txn = store.newTransaction()
if not principalIDs:
groupUIDs = yield txn.allGroupDelegates()
else:
groupUIDs = []
for principalID in principalIDs:
record = yield recordForPrincipalID(directory, principalID)
if record:
groupUIDs.append(record.uid)
for groupUID in groupUIDs:
group = yield txn.groupByUID(groupUID)
print("Group: \"{name}\" ({uid})".format(name=group.name, uid=group.groupUID))
for txt, readWrite in (("read-only", False), ("read-write", True)):
delegatorUIDs = yield txn.delegatorsToGroup(group.groupID, readWrite)
for delegatorUID in delegatorUIDs:
delegator = yield directory.recordWithUID(delegatorUID)
print(
"...has {rw} access to {rec}".format(
rw=txt, rec=prettyRecord(delegator)
)
)
print("Group members:")
memberUIDs = yield txn.groupMemberUIDs(group.groupID)
for memberUID in memberUIDs:
record = yield directory.recordWithUID(memberUID)
print(prettyRecord(record))
print("Last cached: {} GMT".format(group.modified))
print()
yield txn.commit()
@inlineCallbacks
def scheduleGroupRefreshJob(service, store):
"""
Schedule GroupCacherPollingWork
"""
txn = store.newTransaction()
print("Scheduling a group refresh")
yield GroupCacherPollingWork.reschedule(txn, 0, force=True)
yield txn.commit()
def action_getAutoScheduleMode(store, record):
print(
"Auto-schedule mode for {record} is {mode}".format(
record=prettyRecord(record),
mode=(
record.autoScheduleMode.description if record.autoScheduleMode
else "Default"
)
)
)
@inlineCallbacks
def action_setAutoScheduleMode(store, record, autoScheduleMode):
if record.recordType == RecordType.group:
print(
"Setting auto-schedule-mode for {record} is not allowed.".format(
record=prettyRecord(record)
)
)
elif (
record.recordType == RecordType.user and
not config.Scheduling.Options.AutoSchedule.AllowUsers
):
print(
"Setting auto-schedule-mode for {record} is not allowed.".format(
record=prettyRecord(record)
)
)
else:
print(
"Setting auto-schedule-mode to {mode} for {record}".format(
mode=("default" if autoScheduleMode is None else autoScheduleMode.description),
record=prettyRecord(record),
)
)
yield record.setAutoScheduleMode(autoScheduleMode)
@inlineCallbacks
def action_setAutoAcceptGroup(store, record, autoAcceptGroup):
if record.recordType == RecordType.group:
print(
"Setting auto-accept-group for {record} is not allowed.".format(
record=prettyRecord(record)
)
)
elif (
record.recordType == RecordType.user and
not config.Scheduling.Options.AutoSchedule.AllowUsers
):
print(
"Setting auto-accept-group for {record} is not allowed.".format(
record=prettyRecord(record)
)
)
else:
groupRecord = yield recordForPrincipalID(record.service, autoAcceptGroup)
if groupRecord is None or groupRecord.recordType != RecordType.group:
print("Invalid principal ID: {id}".format(id=autoAcceptGroup))
else:
print("Setting auto-accept-group to {group} for {record}".format(
group=prettyRecord(groupRecord),
record=prettyRecord(record),
))
# Get original fields
newFields = record.fields.copy()
# Set new values
newFields[record.service.fieldName.autoAcceptGroup] = groupRecord.uid
updatedRecord = DirectoryRecord(record.service, newFields)
yield record.service.updateRecords([updatedRecord], create=False)
@inlineCallbacks
def action_getAutoAcceptGroup(store, record):
if record.autoAcceptGroup:
groupRecord = yield record.service.recordWithUID(
record.autoAcceptGroup
)
if groupRecord is not None:
print(
"Auto-accept-group for {record} is {group}".format(
record=prettyRecord(record),
group=prettyRecord(groupRecord),
)
)
else:
print(
"Invalid auto-accept-group assigned: {uid}".format(
uid=record.autoAcceptGroup
)
)
else:
print(
"No auto-accept-group assigned to {record}".format(
record=prettyRecord(record)
)
)
@inlineCallbacks
def action_setValue(store, record, name, value):
displayValue = value
if isinstance(value, list):
displayValue = u", ".join(value)
print(
"Setting {name} to {value} for {record}".format(
name=name, value=displayValue, record=prettyRecord(record),
)
)
# Get original fields
newFields = record.fields.copy()
# Set new value
newFields[record.service.fieldName.lookupByName(name)] = value
updatedRecord = DirectoryRecord(record.service, newFields)
yield record.service.updateRecords([updatedRecord], create=False)
def action_getValue(store, record, name):
try:
value = record.fields[record.service.fieldName.lookupByName(name)]
if isinstance(value, list):
value = u", ".join(value)
print(
"{name} for {record} is {value}".format(
name=name, record=prettyRecord(record), value=value
)
)
except KeyError:
print(
"{name} is not set for {record}".format(
name=name, record=prettyRecord(record),
)
)
def abort(msg, status=1):
sys.stdout.write("%s\n" % (msg,))
try:
reactor.stop()
except RuntimeError:
pass
sys.exit(status)
def parseCreationArgs(args):
"""
Look at the command line arguments for --add; the first arg is required
and is the full name. If only that one arg is provided, generate a UUID
and use it for record name and uid. If two args are provided, use the
second arg as the record name and generate a UUID for the uid. If three
args are provided, the second arg is the record name and the third arg
is the uid.
"""
numArgs = len(args)
if numArgs == 0:
print(
"When adding a principal, you must provide the full-name"
)
sys.exit(64)
fullName = args[0].decode("utf-8")
if numArgs == 1:
shortName = uid = unicode(uuid.uuid4()).upper()
elif numArgs == 2:
shortName = args[1].decode("utf-8")
uid = unicode(uuid.uuid4()).upper()
else:
shortName = args[1].decode("utf-8")
uid = args[2].decode("utf-8")
return fullName, shortName, uid
def isUUID(value):
try:
uuid.UUID(value)
return True
except:
return False
def matchStrings(value, validValues):
for validValue in validValues:
if validValue.startswith(value):
return validValue
raise ValueError("'%s' is not a recognized value" % (value,))
def printRecordList(records):
results = []
for record in records:
try:
shortNames = record.shortNames
except AttributeError:
shortNames = []
results.append(
(record.displayName, record.recordType.name, record.uid, shortNames)
)
results.sort()
format = "%-22s %-10s %-20s %s"
print(format % ("Full name", "Type", "UID", "Short names"))
print(format % ("---------", "----", "---", "-----------"))
for fullName, recordType, uid, shortNames in results:
print(format % (fullName, recordType, uid, u", ".join(shortNames)))
if __name__ == "__main__":
main()
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for manipulating resource classes via the DB API"""
import json
import mock
import etcd
from etcd import Client as etcd_client
from oslo_config import cfg
from oslo_utils import uuidutils
import six
from zun.common import exception
import zun.conf
from zun.db import api as dbapi
from zun.tests.unit.db import base
from zun.tests.unit.db import utils
from zun.tests.unit.db.utils import FakeEtcdMultipleResult
from zun.tests.unit.db.utils import FakeEtcdResult
CONF = zun.conf.CONF
class DbResourceClassTestCase(base.DbTestCase):
def setUp(self):
cfg.CONF.set_override('db_type', 'sql')
super(DbResourceClassTestCase, self).setUp()
def test_create_resource_class(self):
utils.create_test_resource_class(context=self.context)
def test_create_resource_class_already_exists(self):
utils.create_test_resource_class(
context=self.context, uuid='123')
with self.assertRaisesRegex(exception.ResourceClassAlreadyExists,
'A resource class with uuid 123.*'):
utils.create_test_resource_class(
context=self.context, uuid='123')
def test_get_resource_class_by_uuid(self):
resource = utils.create_test_resource_class(context=self.context)
res = dbapi.get_resource_class(self.context, resource.uuid)
self.assertEqual(resource.uuid, res.uuid)
self.assertEqual(resource.name, res.name)
def test_get_resource_class_by_name(self):
resource = utils.create_test_resource_class(context=self.context)
res = dbapi.get_resource_class(self.context, resource.name)
self.assertEqual(resource.id, res.id)
self.assertEqual(resource.name, res.name)
def test_get_resource_class_that_does_not_exist(self):
self.assertRaises(exception.ResourceClassNotFound,
dbapi.get_resource_class,
self.context, uuidutils.generate_uuid())
def test_list_resource_classes(self):
names = []
for i in range(1, 6):
resource = utils.create_test_resource_class(
context=self.context,
uuid=uuidutils.generate_uuid(),
name='class'+str(i))
names.append(six.text_type(resource['name']))
res = dbapi.list_resource_classes(self.context)
res_names = [r.name for r in res]
self.assertEqual(sorted(names), sorted(res_names))
def test_list_resource_classes_sorted(self):
names = []
for i in range(5):
resource = utils.create_test_resource_class(
context=self.context,
uuid=uuidutils.generate_uuid(),
name='class'+str(i))
names.append(six.text_type(resource.name))
res = dbapi.list_resource_classes(self.context, sort_key='name')
res_names = [r.name for r in res]
self.assertEqual(sorted(names), res_names)
self.assertRaises(exception.InvalidParameterValue,
dbapi.list_resource_classes,
self.context,
sort_key='foo')
def test_destroy_resource_class(self):
resource = utils.create_test_resource_class(context=self.context)
dbapi.destroy_resource_class(self.context, resource.id)
self.assertRaises(exception.ResourceClassNotFound,
dbapi.get_resource_class,
self.context, resource.id)
def test_destroy_resource_class_that_does_not_exist(self):
bad_id = 1111111
self.assertRaises(exception.ResourceClassNotFound,
dbapi.destroy_resource_class, self.context,
bad_id)
def test_update_resource_class(self):
resource = utils.create_test_resource_class(context=self.context)
old_name = resource.name
new_name = 'new-name'
self.assertNotEqual(old_name, new_name)
res = dbapi.update_resource_class(
self.context, resource.id, {'name': new_name})
self.assertEqual(new_name, res.name)
def test_update_resource_class_not_found(self):
bad_id = 1111111
new_name = 'new-name'
self.assertRaises(exception.ResourceClassNotFound,
dbapi.update_resource_class, self.context,
bad_id, {'name': new_name})
class EtcdDbResourceClassTestCase(base.DbTestCase):
def setUp(self):
cfg.CONF.set_override('db_type', 'etcd')
super(EtcdDbResourceClassTestCase, self).setUp()
@mock.patch.object(etcd_client, 'read')
@mock.patch.object(etcd_client, 'write')
def test_create_resource_class(self, mock_write, mock_read):
mock_read.side_effect = etcd.EtcdKeyNotFound
utils.create_test_resource_class(context=self.context)
@mock.patch.object(etcd_client, 'read')
@mock.patch.object(etcd_client, 'write')
def test_create_resource_class_already_exists(self, mock_write,
mock_read):
mock_read.side_effect = etcd.EtcdKeyNotFound
utils.create_test_resource_class(context=self.context, name='123')
mock_read.side_effect = lambda *args: None
self.assertRaises(exception.ResourceExists,
utils.create_test_resource_class,
context=self.context, name='123')
@mock.patch.object(etcd_client, 'read')
@mock.patch.object(etcd_client, 'write')
def test_get_resource_class_by_uuid(self, mock_write, mock_read):
mock_read.side_effect = etcd.EtcdKeyNotFound
resource_class = utils.create_test_resource_class(
context=self.context)
mock_read.side_effect = lambda *args: FakeEtcdResult(
resource_class.as_dict())
res = dbapi.get_resource_class(self.context, resource_class.uuid)
self.assertEqual(resource_class.uuid, res.uuid)
self.assertEqual(resource_class.name, res.name)
@mock.patch.object(etcd_client, 'read')
@mock.patch.object(etcd_client, 'write')
def test_get_resource_class_by_name(self, mock_write, mock_read):
mock_read.side_effect = etcd.EtcdKeyNotFound
rcs = utils.create_test_resource_class(context=self.context)
mock_read.side_effect = lambda *args: FakeEtcdMultipleResult(
[rcs.as_dict()])
res = dbapi.get_resource_class(self.context, rcs.name)
self.assertEqual(rcs.uuid, res.uuid)
@mock.patch.object(etcd_client, 'read')
def test_get_resource_class_that_does_not_exist(self, mock_read):
mock_read.side_effect = etcd.EtcdKeyNotFound
self.assertRaises(exception.ResourceClassNotFound,
dbapi.get_resource_class,
self.context, 'fake-ident')
@mock.patch.object(etcd_client, 'read')
@mock.patch.object(etcd_client, 'write')
def test_list_resource_classes(self, mock_write, mock_read):
names = []
resource_classes = []
mock_read.side_effect = etcd.EtcdKeyNotFound
for i in range(1, 6):
res_class = utils.create_test_resource_class(
context=self.context, name='class'+str(i))
resource_classes.append(res_class.as_dict())
names.append(six.text_type(res_class['name']))
mock_read.side_effect = lambda *args: FakeEtcdMultipleResult(
resource_classes)
res = dbapi.list_resource_classes(self.context)
res_names = [r.name for r in res]
self.assertEqual(sorted(names), sorted(res_names))
@mock.patch.object(etcd_client, 'read')
@mock.patch.object(etcd_client, 'write')
def test_list_resource_classes_sorted(self, mock_write, mock_read):
names = []
resource_classes = []
mock_read.side_effect = etcd.EtcdKeyNotFound
for i in range(1, 6):
res_class = utils.create_test_resource_class(
context=self.context, name='class'+str(i))
resource_classes.append(res_class.as_dict())
names.append(six.text_type(res_class['name']))
mock_read.side_effect = lambda *args: FakeEtcdMultipleResult(
resource_classes)
res = dbapi.list_resource_classes(self.context, sort_key='name')
res_names = [r.name for r in res]
self.assertEqual(sorted(names), res_names)
@mock.patch.object(etcd_client, 'read')
@mock.patch.object(etcd_client, 'write')
@mock.patch.object(etcd_client, 'delete')
def test_destroy_resource_class(self, mock_delete,
mock_write, mock_read):
mock_read.side_effect = etcd.EtcdKeyNotFound
resource_class = utils.create_test_resource_class(
context=self.context)
mock_read.side_effect = lambda *args: FakeEtcdResult(
resource_class.as_dict())
dbapi.destroy_resource_class(self.context, resource_class.uuid)
mock_delete.assert_called_once_with(
'/resource_classes/%s' % resource_class.uuid)
@mock.patch.object(etcd_client, 'read')
def test_destroy_resource_class_that_does_not_exist(self, mock_read):
mock_read.side_effect = etcd.EtcdKeyNotFound
self.assertRaises(exception.ResourceClassNotFound,
dbapi.destroy_resource_class,
self.context,
'ca3e2a25-2901-438d-8157-de7ffd68d535')
@mock.patch.object(etcd_client, 'read')
@mock.patch.object(etcd_client, 'write')
@mock.patch.object(etcd_client, 'update')
def test_update_resource_class(self, mock_update,
mock_write, mock_read):
mock_read.side_effect = etcd.EtcdKeyNotFound
resource_class = utils.create_test_resource_class(
context=self.context)
old_name = resource_class.name
new_name = 'new-name'
self.assertNotEqual(old_name, new_name)
mock_read.side_effect = lambda *args: FakeEtcdResult(
resource_class.as_dict())
dbapi.update_resource_class(
self.context, resource_class.uuid, {'name': new_name})
self.assertEqual(new_name, json.loads(
mock_update.call_args_list[0][0][0].value)['name'])
@mock.patch.object(etcd_client, 'read')
def test_update_resource_class_not_found(self, mock_read):
mock_read.side_effect = etcd.EtcdKeyNotFound
new_name = 'new-name'
self.assertRaises(exception.ResourceClassNotFound,
dbapi.update_resource_class,
self.context,
'ca3e2a25-2901-438d-8157-de7ffd68d535',
{'name': new_name})
@mock.patch.object(etcd_client, 'read')
@mock.patch.object(etcd_client, 'write')
def test_update_resource_class_uuid(self, mock_write, mock_read):
mock_read.side_effect = etcd.EtcdKeyNotFound
resource_class = utils.create_test_resource_class(
context=self.context)
self.assertRaises(exception.InvalidParameterValue,
dbapi.update_resource_class,
self.context, resource_class.uuid,
{'uuid': ''})
|
|
import logging
import os
import socket
import sqlite3
from datetime import date, datetime, time, timedelta
from os.path import normpath, split
from uuid import uuid4
import icalendar
from atomicwrites import AtomicWriter
from dateutil.tz import tzlocal
from todoman import exceptions
logger = logging.getLogger(name=__name__)
# Initialize this only once
# We were doing this all over the place (even if unused!), so at least only do
# it once.
LOCAL_TIMEZONE = tzlocal()
class UnsafeOperationException(Exception):
"""
Raised when attempting to perform an unsafe operation.
    A typical example of an unsafe operation is attempting to save a
    partially-loaded todo.
"""
pass
class AlreadyExists(Exception):
"""
    Raised when two objects have the same identity.
    This can occur when two lists have the same name, or when two Todos have
the same path.
"""
pass
class cached_property: # noqa
'''A read-only @property that is only evaluated once. Only usable on class
instances' methods.
'''
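    # Illustrative usage (hypothetical class, not part of this module):
    #
    #   class Report:
    #       @cached_property
    #       def rows(self):
    #           return load_rows()  # evaluated once, then stored in __dict__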
def __init__(self, fget, doc=None):
self.__name__ = fget.__name__
self.__module__ = fget.__module__
self.__doc__ = doc or fget.__doc__
self.fget = fget
def __get__(self, obj, cls):
if obj is None:
return self
obj.__dict__[self.__name__] = result = self.fget(obj)
return result
class Todo:
"""
    Represents a task/todo, and wraps around icalendar.Todo.
All text attributes are always treated as text, and "" will be returned if
they are not defined.
Date attributes are treated as datetime objects, and None will be returned
if they are not defined.
All datetime objects have tzinfo, either the one defined in the file, or
the local system's one.
"""
def __init__(self, filename=None, mtime=None, new=False, list=None):
"""
        Creates a new todo.
:param str filename: The name of the file for this todo. Defaults to
the <uid>.ics
        :param int mtime: The last modified time for the file backing this
Todo.
:param bool new: Indicate that a new Todo is being created and should
be populated with default values.
:param List list: The list to which this Todo belongs.
"""
self.list = list
now = datetime.now(LOCAL_TIMEZONE)
self.uid = '{}@{}'.format(uuid4().hex, socket.gethostname())
if new:
self.created_at = now
else:
self.created_at = None
# Default values for supported fields
self.categories = []
self.completed_at = None
self.description = ''
self.dtstamp = now
self.due = None
self.id = None
self.last_modified = None
self.location = ''
self.percent_complete = 0
self.priority = 0
self.sequence = 0
self.start = None
self.status = 'NEEDS-ACTION'
self.summary = ''
self.filename = filename or "{}.ics".format(self.uid)
if os.path.basename(self.filename) != self.filename:
raise ValueError(
                'Must not be an absolute path: {}'.format(self.filename)
)
self.mtime = mtime or datetime.now()
def clone(self):
"""
Returns a clone of this todo
        Returns a copy of this todo, which is almost identical, except that it
has a different UUID and filename.
"""
todo = Todo(new=True, list=self.list)
fields = (
Todo.STRING_FIELDS +
Todo.INT_FIELDS +
Todo.LIST_FIELDS +
Todo.DATETIME_FIELDS
)
fields.remove('uid')
for field in fields:
setattr(todo, field, getattr(self, field))
return todo
STRING_FIELDS = [
'description',
'location',
'status',
'summary',
'uid',
]
INT_FIELDS = [
'percent_complete',
'priority',
'sequence',
]
LIST_FIELDS = [
'categories',
]
DATETIME_FIELDS = [
'completed_at',
'created_at',
'dtstamp',
'start',
'due',
'last_modified',
]
ALL_SUPPORTED_FIELDS = (
DATETIME_FIELDS +
INT_FIELDS +
LIST_FIELDS +
STRING_FIELDS
)
VALID_STATUSES = (
"CANCELLED",
"COMPLETED",
"IN-PROCESS",
"NEEDS-ACTION",
)
def __setattr__(self, name, value):
# Avoids accidentally setting a field to None when that's not a valid
# attribute.
if not value:
if name in Todo.STRING_FIELDS:
return object.__setattr__(self, name, '')
if name in Todo.INT_FIELDS:
return object.__setattr__(self, name, 0)
if name in Todo.LIST_FIELDS:
return object.__setattr__(self, name, [])
return object.__setattr__(self, name, value)
@property
def is_completed(self):
return (
bool(self.completed_at) or
self.status in ('CANCELLED', 'COMPLETED')
)
@is_completed.setter
def is_completed(self, val):
if val:
# Don't fiddle with completed_at if this was already completed:
if not self.is_completed:
self.completed_at = datetime.now(tz=LOCAL_TIMEZONE)
self.percent_complete = 100
self.status = 'COMPLETED'
else:
self.completed_at = None
self.percent_complete = None
self.status = 'NEEDS-ACTION'
@cached_property
def path(self):
return os.path.join(self.list.path, self.filename)
def cancel(self):
self.status = 'CANCELLED'
class VtodoWritter:
"""Writes a Todo as a VTODO file."""
"""Maps Todo field names to VTODO field names"""
FIELD_MAP = {
'summary': 'summary',
'priority': 'priority',
'sequence': 'sequence',
'uid': 'uid',
'categories': 'categories',
'completed_at': 'completed',
'description': 'description',
'dtstamp': 'dtstamp',
'start': 'dtstart',
'due': 'due',
'location': 'location',
'percent_complete': 'percent-complete',
'status': 'status',
'created_at': 'created',
'last_modified': 'last-modified',
}
def __init__(self, todo):
self.todo = todo
def normalize_datetime(self, dt):
'''
Eliminate several differences between dates, times and datetimes which
are hindering comparison:
- Convert everything to datetime
- Add missing timezones
'''
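        # e.g. date(2021, 3, 1) becomes
        # datetime(2021, 3, 1, 0, 0, tzinfo=LOCAL_TIMEZONE)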
if isinstance(dt, date) and not isinstance(dt, datetime):
dt = datetime(dt.year, dt.month, dt.day)
if not dt.tzinfo:
dt = dt.replace(tzinfo=LOCAL_TIMEZONE)
return dt
def serialize_field(self, name, value):
if name in Todo.DATETIME_FIELDS:
return self.normalize_datetime(value)
if name in Todo.LIST_FIELDS:
return ','.join(value)
if name in Todo.INT_FIELDS:
return int(value)
if name in Todo.STRING_FIELDS:
return value
raise Exception('Unknown field {} serialized.'.format(name))
def set_field(self, name, value):
if name in self.vtodo:
del(self.vtodo[name])
if value:
logger.debug("Setting field %s to %s.", name, value)
self.vtodo.add(name, value)
def serialize(self, original=None):
"""Serialize a Todo into a VTODO."""
if not original:
original = icalendar.Todo()
self.vtodo = original
for source, target in self.FIELD_MAP.items():
if getattr(self.todo, source):
self.set_field(
target,
self.serialize_field(source, getattr(self.todo, source)),
)
return self.vtodo
def _read(self, path):
with open(path, 'rb') as f:
cal = f.read()
cal = icalendar.Calendar.from_ical(cal)
for component in cal.walk('VTODO'):
return component
def write(self):
if os.path.exists(self.todo.path):
self._write_existing(self.todo.path)
else:
self._write_new(self.todo.path)
return self.vtodo
def _write_existing(self, path):
original = self._read(path)
vtodo = self.serialize(original)
with open(path, 'rb') as f:
cal = icalendar.Calendar.from_ical(f.read())
for index, component in enumerate(cal.subcomponents):
if component.get('uid', None) == self.todo.uid:
cal.subcomponents[index] = vtodo
with AtomicWriter(path, overwrite=True).open() as f:
f.write(cal.to_ical().decode("UTF-8"))
def _write_new(self, path):
vtodo = self.serialize()
c = icalendar.Calendar()
c.add_component(vtodo)
with AtomicWriter(path).open() as f:
c.add('prodid', 'io.barrera.todoman')
c.add('version', '2.0')
f.write(c.to_ical().decode("UTF-8"))
return vtodo
class Cache:
"""
    Caches Todos for faster reads and a simpler querying interface.
    The Cache class persists relevant[1] fields into an SQL database, which is
    only updated if the actual file has been modified. This greatly reduces
    load times and, more importantly, provides a simpler interface for
    filtering/querying/sorting.
[1]: Relevant fields are those we show when listing todos, or those which
may be used for filtering/sorting.
"""
SCHEMA_VERSION = 4
def __init__(self, path):
self.cache_path = str(path)
os.makedirs(os.path.dirname(self.cache_path), exist_ok=True)
self._conn = sqlite3.connect(self.cache_path)
self._conn.row_factory = sqlite3.Row
self._conn.execute("PRAGMA foreign_keys = ON")
self.create_tables()
def save_to_disk(self):
self._conn.commit()
def is_latest_version(self):
"""Checks if the cache DB schema is the latest version."""
try:
return self._conn.execute(
'SELECT version FROM meta WHERE version = ?',
(Cache.SCHEMA_VERSION,),
).fetchone()
except sqlite3.OperationalError:
return False
def create_tables(self):
if self.is_latest_version():
return
self._conn.executescript('''
DROP TABLE IF EXISTS lists;
DROP TABLE IF EXISTS files;
DROP TABLE IF EXISTS todos;
''')
self._conn.execute(
'CREATE TABLE IF NOT EXISTS meta ("version" INT)'
)
self._conn.execute(
'INSERT INTO meta (version) VALUES (?)',
(Cache.SCHEMA_VERSION,),
)
self._conn.execute('''
CREATE TABLE IF NOT EXISTS lists (
"name" TEXT PRIMARY KEY,
"path" TEXT,
"colour" TEXT,
CONSTRAINT path_unique UNIQUE (path)
);
''')
self._conn.execute('''
CREATE TABLE IF NOT EXISTS files (
"path" TEXT PRIMARY KEY,
"list_name" TEXT,
"mtime" INTEGER,
CONSTRAINT path_unique UNIQUE (path),
FOREIGN KEY(list_name) REFERENCES lists(name) ON DELETE CASCADE
);
''')
self._conn.execute('''
CREATE TABLE IF NOT EXISTS todos (
"file_path" TEXT,
"id" INTEGER PRIMARY KEY,
"uid" TEXT,
"summary" TEXT,
"due" INTEGER,
"start" INTEGER,
"priority" INTEGER,
"created_at" INTEGER,
"completed_at" INTEGER,
"percent_complete" INTEGER,
"dtstamp" INTEGER,
"status" TEXT,
"description" TEXT,
"location" TEXT,
"categories" TEXT,
"sequence" INTEGER,
"last_modified" INTEGER,
FOREIGN KEY(file_path) REFERENCES files(path) ON DELETE CASCADE
);
''')
def clear(self):
self._conn.close()
os.remove(self.cache_path)
self._conn = None
def add_list(self, name, path, colour):
"""
Inserts a new list into the cache.
Returns the name of the list, inserting it first if it does not exist.
"""
result = self._conn.execute(
'SELECT name FROM lists WHERE path = ?',
(path,),
).fetchone()
if result:
return result['name']
try:
self._conn.execute(
"INSERT INTO lists (name, path, colour) VALUES (?, ?, ?)",
(name, path, colour,),
)
except sqlite3.IntegrityError as e:
raise AlreadyExists(name) from e
return self.add_list(name, path, colour)
def add_file(self, list_name, path, mtime):
try:
self._conn.execute('''
INSERT INTO files (
list_name,
path,
mtime
) VALUES (?, ?, ?);
''', (
list_name,
path,
mtime,
))
except sqlite3.IntegrityError as e:
raise AlreadyExists(list_name) from e
def _serialize_datetime(self, todo, field):
dt = todo.decoded(field, None)
if not dt:
return None
if isinstance(dt, date) and not isinstance(dt, datetime):
dt = datetime(dt.year, dt.month, dt.day)
# XXX: Can we actually read times from files?
elif isinstance(dt, time):
dt = datetime.combine(date.today(), dt)
if not dt.tzinfo:
dt = dt.replace(tzinfo=LOCAL_TIMEZONE)
return dt.timestamp()
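# Example of the conversion above (illustrative): a DUE value stored as
# date(2021, 5, 1) becomes midnight of that day in LOCAL_TIMEZONE and is
# persisted as a POSIX timestamp in the corresponding column.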
def add_vtodo(self, todo, file_path, id=None):
"""
Adds a todo into the cache.
:param icalendar.Todo todo: The icalendar component object on which
the cached entry is based.
"""
sql = '''
INSERT INTO todos (
{}
file_path,
uid,
summary,
due,
start,
priority,
created_at,
completed_at,
percent_complete,
dtstamp,
status,
description,
location,
categories,
sequence,
last_modified
) VALUES ({}?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
'''
due = self._serialize_datetime(todo, 'due')
start = self._serialize_datetime(todo, 'dtstart')
if start and due:
start = None if start >= due else start
params = (
file_path,
todo.get('uid'),
todo.get('summary'),
due,
start,
todo.get('priority', 0) or None,
self._serialize_datetime(todo, 'created'),
self._serialize_datetime(todo, 'completed'),
todo.get('percent-complete', None),
self._serialize_datetime(todo, 'dtstamp'),
todo.get('status', 'NEEDS-ACTION'),
todo.get('description', None),
todo.get('location', None),
todo.get('categories', None),
todo.get('sequence', 1),
self._serialize_datetime(todo, 'last-modified'),
)
if id:
params = (id,) + params
sql = sql.format('id,\n', '?, ')
else:
sql = sql.format('', '')
cursor = self._conn.cursor()
try:
cursor.execute(sql, params)
rv = cursor.lastrowid
finally:
cursor.close()
return rv
def todos(self, lists=[], priority=None, location='', category='', grep='',
sort=[], reverse=True, due=None, start=None, startable=False,
status=['NEEDS-ACTION', 'IN-PROCESS']):
"""
Returns filtered cached todos, in a specified order.
If no order is specified, todos are sorted by the following fields::
completed_at
-priority
due
-created_at
:param list lists: Only return todos for these lists.
:param str location: Only return todos with a location containing this
string.
:param str category: Only return todos with a category containing this
string.
:param str grep: Filter common fields with this substring.
:param list sort: Order returned todos by these fields. Field names
with a ``-`` prepended will be used to sort in reverse order.
:param bool reverse: Reverse the order of the todos after sorting.
:param int due: Return only todos due within ``due`` hours.
:param str priority: Only return todos with priority at least as
high as specified.
:param tuple(bool, datetime) start: Return only todos starting before
or after ``start``, depending on the boolean flag.
:param bool startable: Only return todos that have no start date or
whose start date has already passed.
:param list(str) status: Return only todos with any of the given
statuses.
:return: A sorted, filtered list of todos.
:rtype: generator
"""
extra_where = []
params = []
if 'ANY' not in status:
extra_where.append(
'AND status IN ({})'.format(', '.join(['?'] * len(status)))
)
params.extend(s.upper() for s in status)
if lists:
lists = [l.name if isinstance(l, List) else l for l in lists]
q = ', '.join(['?'] * len(lists))
extra_where.append('AND files.list_name IN ({})'.format(q))
params.extend(lists)
if priority:
extra_where.append('AND PRIORITY > 0 AND PRIORITY <= ?')
params.append('{}'.format(priority))
if location:
extra_where.append('AND location LIKE ?')
params.append('%{}%'.format(location))
if category:
extra_where.append('AND categories LIKE ?')
params.append('%{}%'.format(category))
if grep:
# # requires sqlite with pcre, which won't be available everywhere:
# extra_where.append('AND summary REGEXP ?')
# params.append(grep)
extra_where.append('AND summary LIKE ?')
params.append('%{}%'.format(grep))
if due:
max_due = (datetime.now() + timedelta(hours=due)).timestamp()
extra_where.append('AND due IS NOT NULL AND due < ?')
params.append(max_due)
if start:
is_before, dt = start
dt = dt.timestamp()
if is_before:
extra_where.append('AND start <= ?')
params.append(dt)
else:
extra_where.append('AND start >= ?')
params.append(dt)
if startable:
extra_where.append('AND (start IS NULL OR start <= ?)')
params.append(datetime.now().timestamp())
if sort:
order = []
for s in sort:
if s.startswith('-'):
order.append(' {} ASC'.format(s[1:]))
else:
order.append(' {} DESC'.format(s))
order = ','.join(order)
else:
order = '''
completed_at DESC,
priority IS NOT NULL, priority DESC,
due IS NOT NULL, due DESC,
created_at ASC
'''
if not reverse:
# Note the change in case to avoid swapping all of them. sqlite
# doesn't care about casing anyway.
order = order.replace(' DESC', ' asc').replace(' ASC', ' desc')
query = '''
SELECT todos.*, files.list_name, files.path
FROM todos, files
WHERE todos.file_path = files.path {}
ORDER BY {}
'''.format(' '.join(extra_where), order,)
logger.debug(query)
logger.debug(params)
result = self._conn.execute(query, params)
seen_paths = set()
warned_paths = set()
for row in result:
todo = self._todo_from_db(row)
path = row['path']
if path in seen_paths and path not in warned_paths:
logger.warning('Todo is in read-only mode because there are '
'multiple todos in %s', path)
warned_paths.add(path)
seen_paths.add(path)
yield todo
def _dt_from_db(self, dt):
if dt:
return datetime.fromtimestamp(dt, LOCAL_TIMEZONE)
return None
def _todo_from_db(self, row):
todo = Todo()
todo.id = row['id']
todo.uid = row['uid']
todo.summary = row['summary']
todo.due = self._dt_from_db(row['due'])
todo.start = self._dt_from_db(row['start'])
todo.priority = row['priority']
todo.created_at = self._dt_from_db(row['created_at'])
todo.completed_at = self._dt_from_db(row['completed_at'])
todo.dtstamp = self._dt_from_db(row['dtstamp'])
todo.percent_complete = row['percent_complete']
todo.status = row['status']
todo.description = row['description']
todo.location = row['location']
todo.sequence = row['sequence']
todo.last_modified = row['last_modified']
todo.list = self.lists_map[row['list_name']]
todo.filename = os.path.basename(row['path'])
return todo
def lists(self):
result = self._conn.execute("SELECT * FROM lists")
for row in result:
yield List(
name=row['name'],
path=row['path'],
colour=row['colour'],
)
@cached_property
def lists_map(self):
return {l.name: l for l in self.lists()}
def expire_lists(self, paths):
results = self._conn.execute("SELECT path, name from lists")
for result in results:
if result['path'] not in paths:
self.delete_list(result['name'])
def delete_list(self, name):
self._conn.execute("DELETE FROM lists WHERE lists.name = ?", (name,))
def todo(self, id, read_only=False):
# XXX: DON'T USE READ_ONLY
result = self._conn.execute('''
SELECT todos.*, files.list_name, files.path
FROM todos, files
WHERE files.path = todos.file_path
AND todos.id = ?
''', (id,)
).fetchone()
if not result:
raise exceptions.NoSuchTodo(id)
if not read_only:
count = self._conn.execute('''
SELECT count(id) AS c
FROM files, todos
WHERE todos.file_path = files.path
AND path=?
''', (result['path'],)
).fetchone()
if count['c'] > 1:
raise exceptions.ReadOnlyTodo(result['path'])
return self._todo_from_db(result)
def expire_files(self, paths_to_mtime):
"""Remove stale cache entries based on the given fresh data."""
result = self._conn.execute("SELECT path, mtime FROM files")
for row in result:
path, mtime = row['path'], row['mtime']
if paths_to_mtime.get(path, None) != mtime:
self.expire_file(path)
def expire_file(self, path):
self._conn.execute("DELETE FROM files WHERE path = ?", (path,))
class List:
def __init__(self, name, path, colour=None):
self.path = path
self.name = name or List.name_for_path(path)
self.colour = colour or List.colour_for_path(self.path)
@staticmethod
def colour_for_path(path):
try:
with open(os.path.join(path, 'color')) as f:
return f.read().strip()
except (OSError, IOError):
logger.debug('No colour for list %s', path)
@staticmethod
def name_for_path(path):
try:
with open(os.path.join(path, 'displayname')) as f:
return f.read().strip()
except (OSError, IOError):
return split(normpath(path))[1]
@cached_property
def color_rgb(self):
color = self.colour
if not color or not color.startswith('#'):
return
r = color[1:3]
g = color[3:5]
b = color[5:7]
if len(r) == len(g) == len(b) == 2:
return int(r, 16), int(g, 16), int(b, 16)
@cached_property
def color_ansi(self):
rv = self.color_rgb
if rv:
return '\33[38;2;{!s};{!s};{!s}m'.format(*rv)
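# Example (illustrative): a list colour of '#ff8000' gives color_rgb
# (255, 128, 0) and color_ansi '\33[38;2;255;128;0m', i.e. a 24-bit ANSI
# foreground escape.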
def __eq__(self, other):
if isinstance(other, List):
return self.name == other.name
return object.__eq__(self, other)
def __str__(self):
return self.name
class Database:
"""
This class is essentially a wrapper around all the lists (which in turn,
contain all the todos).
Caching is abstracted inside this class, and is transparent to outside
classes.
"""
def __init__(self, paths, cache_path):
self.cache = Cache(cache_path)
self.paths = [str(path) for path in paths]
self.update_cache()
def update_cache(self):
self.cache.expire_lists(self.paths)
paths_to_mtime = {}
paths_to_list_name = {}
for path in self.paths:
list_name = self.cache.add_list(
List.name_for_path(path),
path,
List.colour_for_path(path),
)
for entry in os.listdir(path):
if not entry.endswith('.ics'):
continue
entry_path = os.path.join(path, entry)
mtime = _getmtime(entry_path)
paths_to_mtime[entry_path] = mtime
paths_to_list_name[entry_path] = list_name
self.cache.expire_files(paths_to_mtime)
for entry_path, mtime in paths_to_mtime.items():
list_name = paths_to_list_name[entry_path]
try:
self.cache.add_file(list_name, entry_path, mtime)
except AlreadyExists:
logger.debug('File already in cache: %s', entry_path)
continue
with open(entry_path, 'rb') as f:
try:
cal = f.read()
cal = icalendar.Calendar.from_ical(cal)
for component in cal.walk('VTODO'):
self.cache.add_vtodo(component, entry_path)
except Exception as e:
logger.exception("Failed to read entry %s.", entry_path)
self.cache.save_to_disk()
def todos(self, **kwargs):
return self.cache.todos(**kwargs)
def todo(self, id, **kwargs):
return self.cache.todo(id, **kwargs)
def lists(self):
return self.cache.lists()
def move(self, todo, new_list, from_list=None):
from_list = from_list or todo.list
orig_path = os.path.join(from_list.path, todo.filename)
dest_path = os.path.join(new_list.path, todo.filename)
os.rename(orig_path, dest_path)
def delete(self, todo):
path = os.path.join(todo.list.path, todo.filename)
os.remove(path)
def flush(self):
for todo in self.todos(status=['ANY']):
if todo.is_completed:
yield todo
self.delete(todo)
self.cache.clear()
self.cache = None
def save(self, todo):
todo.sequence += 1
todo.last_modified = datetime.now(LOCAL_TIMEZONE)
vtodo = VtodoWritter(todo).write()
self.cache.expire_file(todo.path)
mtime = _getmtime(todo.path)
self.cache.add_file(todo.list.name, todo.path, mtime)
todo.id = self.cache.add_vtodo(vtodo, todo.path, todo.id)
self.cache.save_to_disk()
def _getmtime(path):
stat = os.stat(path)
return getattr(stat, 'st_mtime_ns', stat.st_mtime)
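# Rough end-to-end sketch of how Database, Cache and List fit together
# (illustrative; the paths are invented):
#
#     db = Database(['/home/user/.calendars/personal'],
#                   '/home/user/.cache/todoman/cache.sqlite3')
#     for todo in db.todos(due=48):
#         print(todo.summary)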
|
|
import ctypes
import fcntl
from .base import DrmObject
from .drm_h import DRM_IOCTL_MODE_OBJ_GETPROPERTIES, DRM_IOCTL_MODE_GETPROPERTY, DRM_IOCTL_MODE_OBJ_SETPROPERTY
from .drm_mode_h import DrmModeObjGetPropertiesC, DrmModeObjGetPropertyC, DrmModePropertyEnumC, DrmModeObjSetPropertyC
from .drm_mode_h import DRM_MODE_PROP_PENDING, DRM_MODE_PROP_RANGE, DRM_MODE_PROP_IMMUTABLE, DRM_MODE_PROP_ENUM, DRM_MODE_PROP_BLOB, DRM_MODE_PROP_BITMASK
from .drm_mode_h import DRM_MODE_PROP_EXTENDED_TYPE, DRM_MODE_PROP_OBJECT, DRM_MODE_PROP_SIGNED_RANGE
class DrmProperty(DrmObject):
def __init__(self, drm, id, obj_id, obj_type, arg=None):
self._drm = drm
self.id = int(id)
self.name = "%d" % id
self.obj_id = obj_id
self.obj_type = obj_type
self._arg = arg
self.immutable = True
self.fetch()
@property
def value(self):
return self.decode(self.get())
@value.setter
def value(self, value):
if self.immutable:
raise ValueError("Can't set an immutable property: %s" % self.name)
self.set(self.encode(value))
def decode(self, value):
return value
def encode(self, value):
return value
def get(self):
arg = DrmModeObjGetPropertiesC()
arg.obj_id = self.obj_id
arg.obj_type = self.obj_type
fcntl.ioctl(self._drm.fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, arg)
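# The first GETPROPERTIES ioctl only fills in count_props; arrays of that
# size are then allocated and the same ioctl is issued again to fetch the
# actual property ids and values.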
prop_ids = (ctypes.c_uint32*arg.count_props)()
arg.props_ptr = ctypes.cast(ctypes.pointer(prop_ids), ctypes.c_void_p).value
prop_values = (ctypes.c_uint64*arg.count_props)()
arg.prop_values_ptr = ctypes.cast(ctypes.pointer(prop_values), ctypes.c_void_p).value
fcntl.ioctl(self._drm.fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, arg)
self._arg = arg
vals = [v for i, v in zip(prop_ids, prop_values) if i == self.id]
return vals[0]
def set(self, value):
arg = DrmModeObjSetPropertyC()
arg.value = value
arg.prop_id = self.id
arg.obj_id = self.obj_id
arg.obj_type = self.obj_type
fcntl.ioctl(self._drm.fd, DRM_IOCTL_MODE_OBJ_SETPROPERTY, arg)
self._arg = arg
class DrmPropertyRange(DrmProperty):
def fetch(self):
self.type_name = "range"
self.name = self._arg.name
#raise NotImplementedError
def get(self):
return 0
def set(self, value):
pass
class DrmPropertyEnum(DrmProperty):
def fetch(self):
self.type_name = "enum"
arg = DrmModeObjGetPropertyC()
arg.prop_id = self.id
fcntl.ioctl(self._drm.fd, DRM_IOCTL_MODE_GETPROPERTY, arg)
if not (arg.count_enum_blobs and (arg.flags & DRM_MODE_PROP_ENUM)):
raise ValueError("not an enum property")
if arg.count_values != arg.count_enum_blobs:
raise ValueError("count_values != count_enum_blobs")
values = (ctypes.c_uint64*arg.count_values)()
arg.values_ptr = ctypes.cast(ctypes.pointer(values), ctypes.c_void_p).value
enum_blobs = (DrmModePropertyEnumC*arg.count_enum_blobs)()
arg.enum_blob_ptr = ctypes.cast(ctypes.pointer(enum_blobs), ctypes.c_void_p).value
fcntl.ioctl(self._drm.fd, DRM_IOCTL_MODE_GETPROPERTY, arg)
self.enum = {}
for i in range(arg.count_enum_blobs):
self.enum[int(values[i])] = enum_blobs[i].name
self._arg = arg
self.name = arg.name
self.flags = arg.flags
self.immutable = bool(arg.flags & DRM_MODE_PROP_IMMUTABLE)
class DrmPropertyBitmask(DrmProperty):
def fetch(self):
self.type_name = "bitmask"
self.name = self._arg.name
#raise NotImplementedError
def get(self):
return 0
def set(self, value):
pass
class DrmPropertyBlob(DrmProperty):
def fetch(self):
self.type_name = "blob"
self.name = self._arg.name
#raise NotImplementedError
def get(self):
return 0
def set(self, value):
pass
class DrmPropertyObject(DrmProperty):
def fetch(self):
self.type_name = "Object"
self.name = self._arg.name
#raise NotImplementedError
def get(self):
return 0
def set(self, value):
pass
class DrmPropertySignedRange(DrmProperty):
def fetch(self):
self.type_name = "SignedRange"
self.name = self._arg.name
#raise NotImplementedError
def get(self):
return 0
def set(self, value):
pass
# if (prop->flags & DRM_MODE_PROP_PENDING)
# printf(" pending");
# if (prop->flags & DRM_MODE_PROP_IMMUTABLE)
# printf(" immutable");
# if (drm_property_type_is(prop, DRM_MODE_PROP_SIGNED_RANGE))
# printf(" signed range");
# if (drm_property_type_is(prop, DRM_MODE_PROP_RANGE))
# printf(" range");
# if (drm_property_type_is(prop, DRM_MODE_PROP_ENUM))
# printf(" enum");
# if (drm_property_type_is(prop, DRM_MODE_PROP_BITMASK))
# printf(" bitmask");
# if (drm_property_type_is(prop, DRM_MODE_PROP_BLOB))
# printf(" blob");
# if (drm_property_type_is(prop, DRM_MODE_PROP_OBJECT))
# printf(" object");
class DrmProperties(DrmObject):
def __init__(self, drm, id_, type_):
self._drm = drm
self.id = id_
self.type = type_
self.props = []
arg = DrmModeObjGetPropertiesC()
arg.obj_id = self.id
arg.obj_type = self.type
fcntl.ioctl(self._drm.fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, arg)
if arg.count_props == 0:
#print("DrmProperties(%d, 0x%x): arg.count_props=%d" % (self.id, self.type, arg.count_props))
return
prop_ids = (ctypes.c_uint32*arg.count_props)()
arg.props_ptr = ctypes.cast(ctypes.pointer(prop_ids), ctypes.c_void_p).value
prop_values = (ctypes.c_uint64*arg.count_props)()
arg.prop_values_ptr = ctypes.cast(ctypes.pointer(prop_values), ctypes.c_void_p).value
fcntl.ioctl(self._drm.fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, arg)
self._arg = arg
for i in range(arg.count_props):
propid = int(prop_ids[i])
propc = DrmModeObjGetPropertyC()
propc.prop_id = propid
fcntl.ioctl(self._drm.fd, DRM_IOCTL_MODE_GETPROPERTY, propc)
if propc.count_enum_blobs:
if propc.flags & DRM_MODE_PROP_ENUM:
prop = DrmPropertyEnum(self._drm, propid, self.id, self.type)
elif propc.flags & DRM_MODE_PROP_BITMASK:
prop = DrmPropertyBitmask(self._drm, propid, self.id, self.type, propc)
else:
import sys
sys.stderr.write("Skipping unsupported property: propc.flags=0x%x\n" % propc.flags)
continue
elif propc.flags & DRM_MODE_PROP_RANGE:
prop = DrmPropertyRange(self._drm, propid, self.id, self.type, propc)
elif propc.flags & DRM_MODE_PROP_BLOB:
prop = DrmPropertyBlob(self._drm, propid, self.id, self.type, propc)
else:
flags = propc.flags & DRM_MODE_PROP_EXTENDED_TYPE
if flags == DRM_MODE_PROP_OBJECT:
prop = DrmPropertyObject(self._drm, propid, self.id, self.type, propc)
elif flags == DRM_MODE_PROP_SIGNED_RANGE:
prop = DrmPropertySignedRange(self._drm, propid, self.id, self.type, propc)
else:
import sys
sys.stderr.write("Skipping unsupported property: propc.flags=0x%x\n" % propc.flags)
continue
self.props.append(prop)
def __iter__(self):
return self.props.__iter__()
def get(self, name):
for prop in self.props:
if prop.name == name:
return prop
raise AttributeError("no such property: %s" % name)
def __repr__(self):
return "%s" % [prop.name for prop in self.props]
|
|
import aredis
import asyncio
import uvloop
import time
import sys
from functools import wraps
from argparse import ArgumentParser
if sys.version_info[0] == 3:
long = int
def parse_args():
parser = ArgumentParser()
parser.add_argument('-n',
type=int,
help='Total number of requests (default 100000)',
default=100000)
parser.add_argument('-P',
type=int,
help=('Pipeline <numreq> requests.'
' Default 1 (no pipeline).'),
default=1)
parser.add_argument('-s',
type=int,
help='Data size of SET/GET value in bytes (default 2)',
default=2)
args = parser.parse_args()
print(args)
return args
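# Typical invocation (illustrative; assumes this script is saved as bench.py
# and a Redis server is reachable on the default localhost:6379):
#
#     python bench.py -n 100000 -P 100 -s 2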
async def run():
args = parse_args()
r = aredis.StrictRedis()
await r.flushall()
await set_str(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
await set_int(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
await get_str(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
await get_int(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
await incr(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
await lpush(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
await lrange_300(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
await lpop(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
await hmset(conn=r, num=args.n, pipeline_size=args.P, data_size=args.s)
def timer(func):
@wraps(func)
async def wrapper(*args, **kwargs):
start = time.perf_counter()  # time.clock() was removed in Python 3.8
ret = await func(*args, **kwargs)
duration = time.perf_counter() - start
if 'num' in kwargs:
count = kwargs['num']
else:
count = args[1]
print('{0} - {1} Requests'.format(func.__name__, count))
print('Duration = {}'.format(duration))
print('Rate = {}'.format(count/duration))
print('')
return ret
return wrapper
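# Each @timer-decorated benchmark prints a block of the form (numbers are
# illustrative):
#
#     set_str - 100000 Requests
#     Duration = 12.3
#     Rate = 8130.081300813008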
@timer
async def set_str(conn, num, pipeline_size, data_size):
if pipeline_size > 1:
conn = await conn.pipeline()
format_str = '{:0<%d}' % data_size
set_data = format_str.format('a')
for i in range(num):
await conn.set('set_str:%d' % i, set_data)
if pipeline_size > 1 and i % pipeline_size == 0:
await conn.execute()
if pipeline_size > 1:
await conn.execute()
await conn.reset()
@timer
async def set_int(conn, num, pipeline_size, data_size):
if pipeline_size > 1:
conn = await conn.pipeline()
format_str = '{:0<%d}' % data_size
set_data = int(format_str.format('1'))
for i in range(num):
await conn.set('set_int:%d' % i, set_data)
if pipeline_size > 1 and i % pipeline_size == 0:
await conn.execute()
if pipeline_size > 1:
await conn.execute()
await conn.reset()
@timer
async def get_str(conn, num, pipeline_size, data_size):
if pipeline_size > 1:
conn = await conn.pipeline()
for i in range(num):
await conn.get('set_str:%d' % i)
if pipeline_size > 1 and i % pipeline_size == 0:
await conn.execute()
if pipeline_size > 1:
await conn.execute()
await conn.reset()
@timer
async def get_int(conn, num, pipeline_size, data_size):
if pipeline_size > 1:
conn = await conn.pipeline()
for i in range(num):
await conn.get('set_int:%d' % i)
if pipeline_size > 1 and i % pipeline_size == 0:
await conn.execute()
if pipeline_size > 1:
await conn.execute()
await conn.reset()
@timer
async def incr(conn, num, pipeline_size, *args, **kwargs):
if pipeline_size > 1:
conn = await conn.pipeline()
for i in range(num):
await conn.incr('incr_key')
if pipeline_size > 1 and i % pipeline_size == 0:
await conn.execute()
if pipeline_size > 1:
await conn.execute()
await conn.reset()
@timer
async def lpush(conn, num, pipeline_size, data_size):
if pipeline_size > 1:
conn = await conn.pipeline()
format_str = '{:0<%d}' % data_size
set_data = int(format_str.format('1'))
for i in range(num):
await conn.lpush('lpush_key', set_data)
if pipeline_size > 1 and i % pipeline_size == 0:
await conn.execute()
if pipeline_size > 1:
await conn.execute()
await conn.reset()
@timer
async def lrange_300(conn, num, pipeline_size, data_size):
if pipeline_size > 1:
conn = await conn.pipeline()
for i in range(num):
await conn.lrange('lpush_key', i, i+300)
if pipeline_size > 1 and i % pipeline_size == 0:
await conn.execute()
if pipeline_size > 1:
await conn.execute()
await conn.reset()
@timer
async def lpop(conn, num, pipeline_size, data_size):
if pipeline_size > 1:
conn = await conn.pipeline()
for i in range(num):
await conn.lpop('lpush_key')
if pipeline_size > 1 and i % pipeline_size == 0:
await conn.execute()
if pipeline_size > 1:
await conn.execute()
await conn.reset()
@timer
async def hmset(conn, num, pipeline_size, data_size):
if pipeline_size > 1:
conn = await conn.pipeline()
set_data = {'str_value': 'string',
'int_value': 123456,
'long_value': long(123456),
'float_value': 123456.0}
for i in range(num):
await conn.hmset('hmset_key', set_data)
if pipeline_size > 1 and i % pipeline_size == 0:
await conn.execute()
if pipeline_size > 1:
await conn.execute()
await conn.reset()
if __name__ == '__main__':
print('WITH ASYNCIO ONLY:')
loop = asyncio.get_event_loop()
loop.run_until_complete(run())
print('WITH UVLOOP:')
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
loop = asyncio.get_event_loop()
loop.run_until_complete(run())
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, json
from frappe import _dict
import frappe.share
class UserPermissions:
"""
A user permission object can be accessed as `frappe.get_user()`
"""
def __init__(self, name=''):
self.defaults = None
self.name = name or frappe.session.get('user')
self.roles = []
self.all_read = []
self.can_create = []
self.can_read = []
self.can_write = []
self.can_cancel = []
self.can_delete = []
self.can_search = []
self.can_get_report = []
self.can_import = []
self.can_export = []
self.can_print = []
self.can_email = []
self.can_set_user_permissions = []
self.allow_modules = []
self.in_create = []
self.setup_user()
def setup_user(self):
def get_user_doc():
user = None
try:
user = frappe.get_doc("User", self.name).as_dict()
except frappe.DoesNotExistError:
pass
except Exception as e:
# MySQL error 1146 (table does not exist) can occur during install
if e.args[0] != 1146: raise
return user
if not frappe.flags.in_install_db and not frappe.flags.in_test:
user_doc = frappe.cache().hget("user_doc", self.name, get_user_doc)
if user_doc:
self.doc = frappe.get_doc(user_doc)
def get_roles(self):
"""get list of roles"""
if not self.roles:
self.roles = get_roles(self.name)
return self.roles
def build_doctype_map(self):
"""build map of special doctype properties"""
self.doctype_map = {}
for r in frappe.db.sql("""select name, in_create, issingle, istable,
read_only, module from tabDocType""", as_dict=1):
self.doctype_map[r['name']] = r
def build_perm_map(self):
"""build map of permissions at level 0"""
self.perm_map = {}
roles = self.get_roles()
for r in frappe.db.sql("""select * from tabDocPerm where docstatus=0
and ifnull(permlevel,0)=0
and role in ({roles})""".format(roles=", ".join(["%s"]*len(roles))), tuple(roles), as_dict=1):
dt = r['parent']
if dt not in self.perm_map:
self.perm_map[dt] = {}
for k in frappe.permissions.rights:
if not self.perm_map[dt].get(k):
self.perm_map[dt][k] = r.get(k)
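# After build_perm_map() the structure is roughly
# {'<DocType>': {'read': 1, 'write': 1, 'create': 0, ...}, ...},
# i.e. one entry per doctype aggregating the level-0 permissions granted by
# any of the user's roles (the doctype name shown is a placeholder).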
def build_permissions(self):
"""build lists of what the user can read / write / create
quirks:
read_only => Not in Search
in_create => Not in create
"""
self.build_doctype_map()
self.build_perm_map()
user_shared = frappe.share.get_shared_doctypes()
for dt in self.doctype_map:
dtp = self.doctype_map[dt]
p = self.perm_map.get(dt, {})
if not p.get("read") and (dt in user_shared):
p["read"] = 1
if not dtp.get('istable'):
if p.get('create') and not dtp.get('issingle'):
if dtp.get('in_create'):
self.in_create.append(dt)
else:
self.can_create.append(dt)
elif p.get('write'):
self.can_write.append(dt)
elif p.get('read'):
if dtp.get('read_only'):
self.all_read.append(dt)
else:
self.can_read.append(dt)
if p.get('cancel'):
self.can_cancel.append(dt)
if p.get('delete'):
self.can_delete.append(dt)
if (p.get('read') or p.get('write') or p.get('create')):
if p.get('report'):
self.can_get_report.append(dt)
for key in ("import", "export", "print", "email", "set_user_permissions"):
if p.get(key):
getattr(self, "can_" + key).append(dt)
if not dtp.get('istable'):
if not dtp.get('issingle') and not dtp.get('read_only'):
self.can_search.append(dt)
if dtp.get('module') not in self.allow_modules:
self.allow_modules.append(dtp.get('module'))
self.can_write += self.can_create
self.can_write += self.in_create
self.can_read += self.can_write
self.shared = frappe.db.sql_list("""select distinct share_doctype from `tabDocShare`
where `user`=%s and `read`=1""", self.name)
self.can_read = list(set(self.can_read + self.shared))
self.all_read += self.can_read
if "System Manager" in self.roles:
self.can_import = frappe.db.sql_list("""select name from `tabDocType`
where allow_import = 1""")
def get_defaults(self):
import frappe.defaults
self.defaults = frappe.defaults.get_defaults(self.name)
return self.defaults
# update recent documents
def update_recent(self, dt, dn):
rdl = frappe.cache().hget("user_recent", self.name) or []
new_rd = [dt, dn]
# clear if exists
for i in range(len(rdl)):
rd = rdl[i]
if rd==new_rd:
del rdl[i]
break
if len(rdl) > 19:
rdl = rdl[:19]
rdl = [new_rd] + rdl
frappe.cache().hset("user_recent", self.name, rdl)
def _get(self, key):
if not self.can_read:
self.build_permissions()
return getattr(self, key)
def get_can_read(self):
"""return list of doctypes that the user can read"""
if not self.can_read:
self.build_permissions()
return self.can_read
def load_user(self):
d = frappe.db.sql("""select email, first_name, last_name,
email_signature, user_type, language, background_image, background_style, mute_sounds
from tabUser where name = %s""", (self.name,), as_dict=1)[0]
if not self.can_read:
self.build_permissions()
d.name = self.name
d.recent = json.dumps(frappe.cache().hget("user_recent", self.name) or [])
d.roles = self.get_roles()
d.defaults = self.get_defaults()
for key in ("can_create", "can_write", "can_read", "can_cancel", "can_delete",
"can_get_report", "allow_modules", "all_read", "can_search",
"in_create", "can_export", "can_import", "can_print", "can_email",
"can_set_user_permissions"):
d[key] = list(set(getattr(self, key)))
d.all_reports = self.get_all_reports()
return d
def get_all_reports(self):
reports = frappe.db.sql("""select name, report_type, ref_doctype from tabReport
where ref_doctype in ('{0}') and disabled = 0""".format("', '".join(self.can_get_report)), as_dict=1)
return frappe._dict((d.name, d) for d in reports)
def get_user_fullname(user):
fullname = frappe.db.sql("SELECT CONCAT_WS(' ', first_name, last_name) FROM `tabUser` WHERE name=%s", (user,))
return fullname and fullname[0][0] or ''
def get_fullname_and_avatar(user):
first_name, last_name, avatar, name = frappe.db.get_value("User",
user, ["first_name", "last_name", "user_image", "name"])
return _dict({
"fullname": " ".join(filter(None, [first_name, last_name])),
"avatar": avatar,
"name": name
})
def get_system_managers(only_name=False):
"""returns all system manager's user details"""
import email.utils
from frappe.core.doctype.user.user import STANDARD_USERS
system_managers = frappe.db.sql("""select distinct name,
concat_ws(" ", if(first_name="", null, first_name), if(last_name="", null, last_name))
as fullname from tabUser p
where docstatus < 2 and enabled = 1
and name not in ({})
and exists (select * from tabUserRole ur
where ur.parent = p.name and ur.role="System Manager")""".format(", ".join(["%s"]*len(STANDARD_USERS))),
STANDARD_USERS, as_dict=True)
if only_name:
return [p.name for p in system_managers]
else:
return [email.utils.formataddr((p.fullname, p.name)) for p in system_managers]
def add_role(user, role):
frappe.get_doc("User", user).add_roles(role)
def add_system_manager(email, first_name=None, last_name=None, send_welcome_email=False):
# add user
user = frappe.new_doc("User")
user.update({
"name": email,
"email": email,
"enabled": 1,
"first_name": first_name or email,
"last_name": last_name,
"user_type": "System User",
"send_welcome_email": 1 if send_welcome_email else 0
})
user.insert()
# add roles
roles = frappe.db.sql_list("""select name from `tabRole`
where name not in ("Administrator", "Guest", "All")""")
user.add_roles(*roles)
def get_roles(user=None, with_standard=True):
"""get roles of current user"""
if not user:
user = frappe.session.user
if user=='Guest':
return ['Guest']
def get():
return [r[0] for r in frappe.db.sql("""select role from tabUserRole
where parent=%s and role not in ('All', 'Guest')""", (user,))] + ['All', 'Guest']
roles = frappe.cache().hget("roles", user, get)
# filter standard if required
if not with_standard:
roles = filter(lambda x: x not in ['All', 'Guest', 'Administrator'], roles)
return roles
def get_enabled_system_users():
return frappe.db.sql("""select * from tabUser where
user_type='System User' and enabled=1 and name not in ('Administrator', 'Guest')""", as_dict=1)
def is_website_user():
return frappe.db.get_value('User', frappe.session.user, 'user_type') == "Website User"
def is_system_user(username):
return frappe.db.get_value("User", {"name": username, "enabled": 1, "user_type": "System User"})
def get_users():
from frappe.core.doctype.user.user import get_system_users
users = []
system_managers = frappe.utils.user.get_system_managers(only_name=True)
for user in get_system_users():
users.append({
"full_name": frappe.utils.user.get_user_fullname(user),
"email": user,
"is_system_manager": 1 if (user in system_managers) else 0
})
return users
def set_last_active_to_now(user):
from frappe.utils import now_datetime
frappe.db.set_value("User", user, "last_active", now_datetime())
|
|
import copy
import math
import pickle
import numpy
import pytest
import chainerx
import chainerx.testing
from chainerx_tests import array_utils
def _check_array(
array, expected_dtype, expected_shape, expected_data_list=None,
device=None):
expected_dtype = chainerx.dtype(expected_dtype)
assert isinstance(array.dtype, chainerx.dtype)
assert isinstance(array.shape, tuple)
assert array.dtype == expected_dtype
assert array.shape == expected_shape
assert array.itemsize == expected_dtype.itemsize
assert array.size == array_utils.total_size(expected_shape)
assert array.nbytes == expected_dtype.itemsize * \
array_utils.total_size(expected_shape)
if expected_data_list is not None:
assert array._debug_flat_data == expected_data_list
assert array.is_contiguous
array_utils.check_device(array, device)
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_init(shape, dtype_spec):
array = chainerx.ndarray(shape, dtype_spec)
_check_array(array, dtype_spec, shape)
@pytest.mark.parametrize(
'device', [None, 'native:1', chainerx.get_device('native:1')])
@chainerx.testing.parametrize_dtype_specifier('dtype_spec')
def test_init_with_device(shape, dtype_spec, device):
array = chainerx.ndarray(shape, dtype_spec, device=device)
_check_array(array, dtype_spec, shape, device=device)
@pytest.mark.parametrize('value', [
0, 1, -1, 0.1, 0.9, -0.1, -0.9, 1.1, -1.1, 1.9, -1.9,
True, False, float('inf'), -float('inf'), float('nan'), -0.0
])
@pytest.mark.parametrize('shape', [
(), (1,), (1, 1, 1)
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_cast_scalar(device, value, shape, dtype):
np_dtype = numpy.dtype(dtype)
try:
np_value = np_dtype.type(value)
except (ValueError, OverflowError):
return
a_np = numpy.asarray([np_value], dtype).reshape(shape)
a_chx = chainerx.array(a_np)
def should_cast_succeed(typ):
try:
typ(np_value)
return True
except (ValueError, OverflowError):
return False
# Cast to float
if should_cast_succeed(float):
assert type(float(a_chx)) is float
if math.isnan(float(a_np)):
assert math.isnan(float(a_chx))
else:
assert float(a_np) == float(a_chx)
# Cast to int
if should_cast_succeed(int):
assert type(int(a_chx)) is int
assert int(a_np) == int(a_chx)
# Cast to bool
if should_cast_succeed(bool):
assert type(bool(a_chx)) is bool
assert bool(a_np) == bool(a_chx)
# item()
item_actual = a_chx.item()
np_dtype = numpy.dtype(dtype)
item_expected = np_dtype.type(value).item()
assert isinstance(item_actual, type(item_expected))
assert (
(numpy.isnan(item_actual) and numpy.isnan(item_expected))
or item_actual == item_expected)
@pytest.mark.parametrize('shape', [
(0,), (1, 0), (2,), (1, 2), (2, 3),
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_cast_scalar_invalid(device, shape):
dtype = chainerx.float32
a = chainerx.ones(shape, dtype)
with pytest.raises(chainerx.DimensionError):
float(a)
a = chainerx.ones(shape, dtype)
with pytest.raises(chainerx.DimensionError):
int(a)
a = chainerx.ones(shape, dtype)
with pytest.raises(chainerx.DimensionError):
bool(a)
a = chainerx.ones(shape, dtype)
with pytest.raises(chainerx.DimensionError):
a.item()
def test_to_device():
a = chainerx.ones((2,), chainerx.float32, device='native:0')
dst_device = chainerx.get_device('native:1')
b0 = a.to_device(dst_device) # by device instance
assert b0.device is dst_device
chainerx.testing.assert_array_equal_ex(a, b0)
b1 = a.to_device('native:1') # by device name
assert b1.device is dst_device
chainerx.testing.assert_array_equal_ex(a, b1)
b2 = a.to_device('native', 1) # by backend name and index
assert b2.device is dst_device
chainerx.testing.assert_array_equal_ex(a, b2)
def _check_to_numpy(a_np, a_chx, device, copy):
chainerx.testing.assert_array_equal_ex(a_chx, a_np, strides_check=False)
if a_np.size > 0:
# test buffer is shared or not
a_np.fill(1)
expected = not copy and device.backend.name == 'native'
actual = numpy.array_equal(a_np, chainerx.to_numpy(a_chx))
assert expected == actual
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('copy', [True, False])
def test_to_numpy(shape, dtype, device, copy):
a_chx = array_utils.create_dummy_ndarray(chainerx, shape, dtype)
a_np = chainerx.to_numpy(a_chx, copy)
_check_to_numpy(a_np, a_chx, device, copy)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('copy', [True, False])
def test_to_numpy_non_contiguous(shape, dtype, device, copy):
a_chx = array_utils.create_dummy_ndarray(chainerx, shape, dtype).T
a_np = chainerx.to_numpy(a_chx, copy)
_check_to_numpy(a_np, a_chx, device, copy)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('copy', [True, False])
def test_to_numpy_positive_offset(device, copy):
a_chx = chainerx.arange(6).reshape(2, 3)[:, 1:]
a_np = chainerx.to_numpy(a_chx, copy)
_check_to_numpy(a_np, a_chx, device, copy)
def test_view(shape, dtype):
array = array_utils.create_dummy_ndarray(chainerx, shape, dtype)
view = array.view()
chainerx.testing.assert_array_equal_ex(view, array)
assert view.device is chainerx.get_default_device()
# inplace modification
if array.size > 0:
array *= array
assert array._debug_flat_data == view._debug_flat_data
def test_view_must_not_share_properties():
array = chainerx.array([3.0], chainerx.float32)
view = array.view()
# Test preconditions
assert not array.is_grad_required()
assert not view.is_grad_required()
assert not array.is_backprop_required()
assert not view.is_backprop_required()
array.require_grad()
assert array.is_grad_required()
assert array.is_backprop_required()
assert not view.is_grad_required(
), 'A view must not share is_grad_required with the original array.'
assert not view.is_backprop_required(
), 'A view must not share is_backprop_required with the original array.'
@chainerx.testing.numpy_chainerx_array_equal(strides_check=False)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('copy', [False, True])
# TODO(beam2d): use fixtures.
@pytest.mark.parametrize(
'src_dtype',
['bool_', 'uint8', 'int8', 'int16', 'int32', 'int64', 'float16',
'float32', 'float64'])
@pytest.mark.parametrize(
'dst_dtype',
['bool_', 'uint8', 'int8', 'int16', 'int32', 'int64', 'float16',
'float32', 'float64'])
def test_astype(xp, shape, device, copy, src_dtype, dst_dtype):
a = array_utils.create_dummy_ndarray(xp, shape, src_dtype)
# Casting negative value to unsigned int behaves different in CUDA
if device.name == 'cuda:0' and \
src_dtype in chainerx.testing.signed_dtypes and \
dst_dtype in chainerx.testing.unsigned_dtypes:
a = xp.maximum(a, 0)
b = a.astype(dst_dtype, copy=copy)
assert a is b if src_dtype == dst_dtype and not copy else a is not b
return b
def test_as_grad_stopped_copy(shape, float_dtype):
dtype = float_dtype
def check(array_a, array_b):
chainerx.testing.assert_array_equal_ex(
array_a, array_b, strides_check=False)
assert array_b.is_contiguous
# Check memory addresses only if >0 bytes are allocated
if array_a.size > 0:
assert (array_a._debug_data_memory_address
!= array_b._debug_data_memory_address)
# Stop gradients on all graphs
with chainerx.backprop_scope('bp1') as bp1, \
chainerx.backprop_scope('bp2') as bp2, \
chainerx.backprop_scope('bp3') as bp3:
a = array_utils.create_dummy_ndarray(chainerx, shape, dtype)
a.require_grad(bp1)
a.require_grad(bp2)
assert a.is_grad_required(bp1)
assert a.is_grad_required(bp2)
assert a.is_backprop_required(bp1)
assert a.is_backprop_required(bp2)
b = a.as_grad_stopped(copy=True)
check(a, b)
assert not b.is_grad_required(bp1)
assert not b.is_grad_required(bp2)
assert not b.is_backprop_required(bp1)
assert not b.is_backprop_required(bp2)
assert a.is_grad_required(bp1)
assert a.is_grad_required(bp2)
assert a.is_backprop_required(bp1)
assert a.is_backprop_required(bp2)
# Stop gradients on some graphs
a = array_utils.create_dummy_ndarray(chainerx, shape, dtype)
a.require_grad(bp1)
a.require_grad(bp2)
a.require_grad(bp3)
assert a.is_grad_required(bp1)
assert a.is_grad_required(bp2)
assert a.is_grad_required(bp3)
assert a.is_backprop_required(bp1)
assert a.is_backprop_required(bp2)
assert a.is_backprop_required(bp3)
b = a.as_grad_stopped([bp1, bp2], copy=True)
check(a, b)
assert not b.is_grad_required(bp1)
assert not b.is_grad_required(bp2)
assert not b.is_grad_required(bp3)
assert not b.is_backprop_required(bp1)
assert not b.is_backprop_required(bp2)
assert b.is_backprop_required(bp3)
assert a.is_grad_required(bp1)
assert a.is_grad_required(bp2)
assert a.is_grad_required(bp3)
assert a.is_backprop_required(bp1)
assert a.is_backprop_required(bp2)
assert a.is_backprop_required(bp3)
def test_as_grad_stopped_view(shape, float_dtype):
dtype = float_dtype
# Stop gradients on all graphs
with chainerx.backprop_scope('bp1') as bp1, \
chainerx.backprop_scope('bp2') as bp2, \
chainerx.backprop_scope('bp3') as bp3:
a = array_utils.create_dummy_ndarray(chainerx, shape, dtype)
a.require_grad(bp1)
a.require_grad(bp2)
assert a.is_grad_required(bp1)
assert a.is_grad_required(bp2)
assert a.is_backprop_required(bp1)
assert a.is_backprop_required(bp2)
b = a.as_grad_stopped(copy=False)
chainerx.testing.assert_array_equal_ex(a, b)
assert b.device is a.device
assert not b.is_grad_required(bp1)
assert not b.is_grad_required(bp2)
assert not b.is_backprop_required(bp1)
assert not b.is_backprop_required(bp2)
assert a.is_backprop_required(bp1)
assert a.is_backprop_required(bp2)
# Stop gradients on some graphs
a = array_utils.create_dummy_ndarray(chainerx, shape, dtype)
a.require_grad(bp1)
a.require_grad(bp2)
a.require_grad(bp3)
assert a.is_grad_required(bp1)
assert a.is_grad_required(bp2)
assert a.is_grad_required(bp3)
assert a.is_backprop_required(bp1)
assert a.is_backprop_required(bp2)
assert a.is_backprop_required(bp3)
b = a.as_grad_stopped([bp1, bp2], copy=False)
chainerx.testing.assert_array_equal_ex(a, b)
assert b.device is a.device
assert not b.is_grad_required(bp1)
assert not b.is_grad_required(bp2)
assert not b.is_grad_required(bp3)
assert not b.is_backprop_required(bp1)
assert not b.is_backprop_required(bp2)
assert b.is_backprop_required(bp3)
assert a.is_grad_required(bp1)
assert a.is_grad_required(bp2)
assert a.is_grad_required(bp3)
assert a.is_backprop_required(bp1)
assert a.is_backprop_required(bp2)
assert a.is_backprop_required(bp3)
def test_array_repr():
array = chainerx.array([], chainerx.bool_)
assert ('array([], shape=(0,), dtype=bool, '
'device=\'native:0\')' == str(array))
array = chainerx.array([False], chainerx.bool_)
assert ('array([False], shape=(1,), dtype=bool, '
'device=\'native:0\')' == str(array))
array = chainerx.array([[0, 1, 2], [3, 4, 5]], chainerx.int8)
assert ('array([[0, 1, 2],\n'
' [3, 4, 5]], shape=(2, 3), dtype=int8, '
'device=\'native:0\')') == str(array)
array = chainerx.array([[0, 1, 2], [3.25, 4, 5]], chainerx.float32)
assert ('array([[0. , 1. , 2. ],\n'
' [3.25, 4. , 5. ]], shape=(2, 3), dtype=float32, '
'device=\'native:0\')') == str(array)
def test_array_repr_default_backprop_id():
array = chainerx.array([3.0], chainerx.float32)
array.require_grad()
assert ('array([3.], shape=(1,), dtype=float32, device=\'native:0\', '
'backprop_ids=[\'<default>\'])' == str(array))
def test_array_repr_expired_backprop_id():
with chainerx.backprop_scope('bp1') as bp1:
array = chainerx.array([3.0], chainerx.float32)
array.require_grad(bp1)
assert ('array([3.], shape=(1,), dtype=float32, device=\'native:0\', '
'backprop_ids=[\'<expired>\'])' == str(array))
@pytest.mark.parametrize('backprop_args', [(None,), ()])
def test_array_require_grad_without_backprop_id(backprop_args):
array = chainerx.array([1, 1, 1], chainerx.float32)
assert not array.is_grad_required(*backprop_args)
assert not array.is_backprop_required(*backprop_args)
assert not array.is_backprop_required(chainerx.anygraph)
array.require_grad(*backprop_args)
assert array.is_grad_required(*backprop_args)
assert array.is_backprop_required(*backprop_args)
assert array.is_backprop_required(chainerx.anygraph)
# Repeated calls should not fail, but do nothing
array.require_grad(*backprop_args)
assert array.is_grad_required(*backprop_args)
assert array.is_backprop_required(*backprop_args)
assert array.is_backprop_required(chainerx.anygraph)
def test_array_require_grad_with_backprop_id():
array = chainerx.array([1, 1, 1], chainerx.float32)
with chainerx.backprop_scope('bp1') as bp1:
assert not array.is_backprop_required(bp1)
array.require_grad(bp1)
assert array.is_grad_required(bp1)
assert array.is_backprop_required(bp1)
# Repeated calls should not fail, but do nothing
array.require_grad(bp1)
assert array.is_grad_required(bp1)
assert array.is_backprop_required(bp1)
# keyword arguments
with chainerx.backprop_scope('bp2') as bp2:
assert not array.is_backprop_required(backprop_id=bp2)
array.require_grad(backprop_id=bp2)
assert array.is_grad_required(bp2)
assert array.is_grad_required(backprop_id=bp2)
assert array.is_backprop_required(bp2)
assert array.is_backprop_required(backprop_id=bp2)
# Repeated calls should not fail, but do nothing
array.require_grad(backprop_id=bp2)
assert array.is_grad_required(backprop_id=bp2)
assert array.is_backprop_required(backprop_id=bp2)
@pytest.mark.parametrize('backprop_args', [(None,), ()])
def test_array_grad_without_backprop_id(backprop_args):
array = chainerx.array([1., 1., 1.], chainerx.float32)
grad = chainerx.array([0.5, 0.5, 0.5], chainerx.float32)
with pytest.raises(chainerx.ChainerxError):
array.get_grad(*backprop_args)
with pytest.raises(chainerx.ChainerxError):
array.set_grad(grad, *backprop_args)
with pytest.raises(chainerx.ChainerxError):
array.cleargrad(*backprop_args)
# Gradient methods
array.require_grad().set_grad(grad, *backprop_args)
assert array.get_grad(*backprop_args) is not None
assert array.get_grad(
*backprop_args)._debug_flat_data == grad._debug_flat_data
array.cleargrad(*backprop_args) # clear
assert array.get_grad(*backprop_args) is None
array.set_grad(grad, *backprop_args)
assert array.get_grad(*backprop_args) is not None
assert array.get_grad(
*backprop_args)._debug_flat_data == grad._debug_flat_data
array.set_grad(None, *backprop_args) # clear
assert array.get_grad(*backprop_args) is None
# Gradient attributes
array.grad = grad
assert array.get_grad(*backprop_args) is not None
assert array.get_grad(*backprop_args) is array.grad
array.grad = None # clear
assert array.get_grad(*backprop_args) is None
def test_array_grad_with_backprop_id():
array = chainerx.array([1., 1., 1.], chainerx.float32)
grad = chainerx.array([0.5, 0.5, 0.5], chainerx.float32)
with chainerx.backprop_scope('bp1') as bp1:
with pytest.raises(chainerx.ChainerxError):
array.get_grad(bp1)
with pytest.raises(chainerx.ChainerxError):
array.set_grad(grad, bp1)
with pytest.raises(chainerx.ChainerxError):
array.cleargrad(bp1)
array.require_grad(bp1).set_grad(grad, bp1)
assert array.get_grad(bp1) is not None
assert array.get_grad(bp1)._debug_flat_data == grad._debug_flat_data
array.cleargrad(bp1) # clear
assert array.get_grad(bp1) is None
# keyword arguments
with chainerx.backprop_scope('bp2') as bp2:
with pytest.raises(chainerx.ChainerxError):
array.get_grad(backprop_id=bp2)
with pytest.raises(chainerx.ChainerxError):
array.set_grad(grad, backprop_id=bp2)
with pytest.raises(chainerx.ChainerxError):
array.cleargrad(backprop_id=bp2)
array.require_grad(backprop_id=bp2).set_grad(grad, backprop_id=bp2)
assert array.get_grad(bp2) is not None
assert array.get_grad(backprop_id=bp2) is not None
assert array.get_grad(bp2)._debug_flat_data == grad._debug_flat_data
assert array.get_grad(
backprop_id=bp2)._debug_flat_data == grad._debug_flat_data
array.cleargrad(backprop_id=bp2) # clear
assert array.get_grad(bp2) is None
assert array.get_grad(backprop_id=bp2) is None
def test_array_grad_no_deepcopy():
dtype = chainerx.float32
array = chainerx.array([2, 5, 1], dtype)
grad = chainerx.array([5, 7, 8], dtype)
# Set grad
array.require_grad().set_grad(grad)
# Retrieve grad twice and assert they share the same underlying data
grad1 = array.get_grad()
grad2 = array.get_grad()
grad1 *= chainerx.array([2, 2, 2], dtype)
assert grad2._debug_flat_data == [
10, 14, 16], 'grad getter must not incur a copy'
def test_array_cleargrad():
dtype = chainerx.float32
array = chainerx.array([2, 5, 1], dtype)
grad = chainerx.array([5, 7, 8], dtype)
# Set grad, get it and save it
array.require_grad().set_grad(grad)
del grad
saved_grad = array.get_grad()
# Clear grad
array.cleargrad()
assert array.get_grad() is None
assert saved_grad._debug_flat_data == [
5, 7, 8], 'Clearing grad must not affect previously retrieved grad'
def test_array_grad_identity():
array = chainerx.array([1., 1., 1.], chainerx.float32)
grad = chainerx.array([0.5, 0.5, 0.5], chainerx.float32)
array.require_grad().set_grad(grad)
assert array.get_grad() is grad, (
'grad must preserve physical identity')
assert array.get_grad() is grad, (
'grad must preserve physical identity in repeated retrieval')
# array.grad and grad share the same data
grad += chainerx.array([2, 2, 2], chainerx.float32)
assert array.get_grad()._debug_flat_data == [
2.5, 2.5, 2.5], 'A modification to grad must affect array.grad'
array_grad = array.get_grad()
array_grad += chainerx.array([1, 1, 1], chainerx.float32)
assert grad._debug_flat_data == [
3.5, 3.5, 3.5], 'A modification to array.grad must affect grad'
def test_array_require_grad_multiple_graphs_forward():
x1 = chainerx.array([1, 1, 1], chainerx.float32)
x2 = chainerx.array([1, 1, 1], chainerx.float32)
with chainerx.backprop_scope('bp1') as bp1, \
chainerx.backprop_scope('bp2') as bp2, \
chainerx.backprop_scope('bp3') as bp3:
x1.require_grad(bp1)
x2.require_grad(bp2)
assert x1.is_grad_required(bp1)
assert x2.is_grad_required(bp2)
assert x1.is_backprop_required(bp1)
assert x2.is_backprop_required(bp2)
assert not x1.is_grad_required(bp2)
assert not x2.is_grad_required(bp1)
assert not x1.is_backprop_required(bp2)
assert not x2.is_backprop_required(bp1)
y = x1 * x2
assert not y.is_grad_required(bp1)
assert not y.is_grad_required(bp2)
assert y.is_backprop_required(bp1)
assert y.is_backprop_required(bp2)
# No unspecified graphs are generated
assert not y.is_backprop_required(None)
assert not y.is_backprop_required(bp3)
@pytest.mark.parametrize(
'invalid_shape,invalid_dtype,invalid_device',
[
(None, chainerx.float32, None),
((2, 1), None, None),
(None, None, 'native:1'),
])
def test_array_grad_invalid_grad(invalid_shape, invalid_dtype, invalid_device):
shape = (3, 1)
dtype = chainerx.float64
device = 'native:0'
array = chainerx.ones(shape, dtype, device=device)
array.require_grad()
grad_shape = shape if invalid_shape is None else invalid_shape
grad_dtype = dtype if invalid_dtype is None else invalid_dtype
grad_device = device if invalid_device is None else invalid_device
invalid_grad = chainerx.ones(
grad_shape, grad_dtype, device=grad_device)
with pytest.raises(chainerx.GradientError):
array.set_grad(invalid_grad)
with pytest.raises(chainerx.GradientError):
array.grad = invalid_grad
def test_array_backward():
with chainerx.backprop_scope('bp1') as bp1:
x1 = chainerx.array(
[1, 1, 1], chainerx.float32).require_grad(backprop_id=bp1)
x2 = chainerx.array(
[1, 1, 1], chainerx.float32).require_grad(backprop_id=bp1)
y = x1 * x2
y.backward(backprop_id=bp1, enable_double_backprop=True)
gx1 = x1.get_grad(backprop_id=bp1)
x1.set_grad(None, backprop_id=bp1)
gx1.backward(backprop_id=bp1)
with pytest.raises(chainerx.ChainerxError):
gx1.get_grad(backprop_id=bp1)
@chainerx.testing.numpy_chainerx_array_equal(strides_check=False)
@pytest.mark.parametrize(
'value', [-1, 0, 1, 2, 2.3, float('inf'), float('nan')])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_fill(xp, shape, dtype, value, device):
a = xp.empty(shape, dtype)
a.fill(value)
return a
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize(
'slice1', [(0, 30, 1), (30, 0, -1), (10, 40, 7), (40, 10, -7)])
@pytest.mark.parametrize(
'slice2', [(0, 50, 1), (50, 0, -1), (10, 40, 7), (40, 10, -7)])
def test_array_to_numpy_identity(device, slice1, slice2):
start1, end1, step1 = slice1
start2, end2, step2 = slice2
x = numpy.arange(1500).reshape((30, 50))[
start1:end1:step1, start2:end2:step2]
y = chainerx.array(x)
z = chainerx.to_numpy(y)
chainerx.testing.assert_array_equal_ex(x, y, strides_check=False)
chainerx.testing.assert_array_equal_ex(x, z, strides_check=False)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize(
'slice1', [(0, 30, 1), (30, 0, -1), (10, 40, 7), (40, 10, -7)])
@pytest.mark.parametrize(
'slice2', [(0, 50, 1), (50, 0, -1), (10, 40, 7), (40, 10, -7)])
def test_asarray_to_numpy_identity(device, slice1, slice2):
start1, end1, step1 = slice1
start2, end2, step2 = slice2
x = numpy.arange(1500).reshape((30, 50))[
start1:end1:step1, start2:end2:step2]
y = chainerx.asarray(x)
z = chainerx.to_numpy(y)
chainerx.testing.assert_array_equal_ex(x, y)
chainerx.testing.assert_array_equal_ex(x, z, strides_check=False)
# TODO(niboshi): Add pickle test involving context destruction and re-creation
@pytest.mark.parametrize_device(['native:0', 'native:1', 'cuda:0'])
def test_array_pickle(device):
arr = chainerx.array([1, 2], chainerx.float32, device=device)
s = pickle.dumps(arr)
del arr
arr2 = pickle.loads(s)
assert isinstance(arr2, chainerx.ndarray)
assert arr2.device is device
assert arr2.dtype == chainerx.float32
chainerx.testing.assert_array_equal(
arr2,
chainerx.array([1, 2], chainerx.float32))
# TODO(niboshi): Add pickle test involving context destruction and re-creation
@pytest.mark.parametrize_device_name(['native:0', 'native:1', 'cuda:0'])
def test_array_pickle_device_name(device_name):
arr = chainerx.array([1, 2], chainerx.float32, device=device_name)
s = pickle.dumps(arr)
del arr
arr2 = pickle.loads(s)
assert isinstance(arr2, chainerx.ndarray)
assert arr2.device.name == device_name
assert arr2.dtype == chainerx.float32
chainerx.testing.assert_array_equal(
arr2,
chainerx.array([1, 2], chainerx.float32))
# TODO(niboshi): Add deepcopy test with arbitrary context
@pytest.mark.parametrize_device(['native:0', 'native:1', 'cuda:0'])
def test_array_deepcopy(device):
arr = chainerx.array([1, 2], chainerx.float32, device=device)
arr2 = copy.deepcopy(arr)
assert isinstance(arr2, chainerx.ndarray)
assert arr2.device is device
assert arr2.dtype == chainerx.float32
chainerx.testing.assert_array_equal(
arr2,
chainerx.array([1, 2], chainerx.float32))
def test_is_chained():
arr = chainerx.array([1, 2], chainerx.float32)
with pytest.raises(chainerx.ChainerxError):
arr._is_chained()
arr.require_grad()
assert not arr._is_chained()
arr2 = 2 * arr
assert arr2._is_chained()
|
|
# Code in this file is copied and adapted from
# https://github.com/openai/evolution-strategies-starter.
from collections import namedtuple
import logging
import numpy as np
import time
import ray
from ray.rllib.agents import Trainer, with_common_config
from ray.rllib.agents.es import optimizers, utils
from ray.rllib.agents.es.es_tf_policy import ESTFPolicy, rollout
from ray.rllib.env.env_context import EnvContext
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.rllib.utils import FilterManager
from ray.rllib.utils.annotations import override
logger = logging.getLogger(__name__)
Result = namedtuple("Result", [
"noise_indices", "noisy_returns", "sign_noisy_returns", "noisy_lengths",
"eval_returns", "eval_lengths"
])
# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
"action_noise_std": 0.01,
"l2_coeff": 0.005,
"noise_stdev": 0.02,
"episodes_per_batch": 1000,
"train_batch_size": 10000,
"eval_prob": 0.003,
"return_proc_mode": "centered_rank",
"num_workers": 10,
"stepsize": 0.01,
"observation_filter": "MeanStdFilter",
"noise_size": 250000000,
"report_length": 10,
# ES will use Trainer's evaluation WorkerSet (if evaluation_interval > 0).
# Therefore, we must be careful not to use more than 1 env per eval worker
# (would break ESPolicy's compute_action method) and to not do obs-
# filtering.
"evaluation_config": {
"num_envs_per_worker": 1,
"observation_filter": "NoFilter"
},
})
# __sphinx_doc_end__
# yapf: enable
@ray.remote
def create_shared_noise(count):
"""Create a large array of noise to be shared by all workers."""
seed = 123
noise = np.random.RandomState(seed).randn(count).astype(np.float32)
return noise
class SharedNoiseTable:
def __init__(self, noise):
self.noise = noise
assert self.noise.dtype == np.float32
def get(self, i, dim):
return self.noise[i:i + dim]
def sample_index(self, dim):
return np.random.randint(0, len(self.noise) - dim + 1)
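# Illustrative-only sketch (not part of the original module): shows why a
# single integer index is enough to communicate a perturbation between the
# driver and the workers. Each side wraps the same underlying noise array in a
# SharedNoiseTable, so get(i, dim) resolves to identical slices everywhere; the
# array here is rebuilt from the same seed only to keep the sketch standalone,
# and its size is a toy value.
def _shared_noise_table_sketch():
    shared = np.random.RandomState(123).randn(10000).astype(np.float32)
    table = SharedNoiseTable(shared)
    idx = table.sample_index(dim=5)
    perturbation = table.get(idx, dim=5)  # same 5 floats wherever `shared` is held
    return idx, perturbation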
@ray.remote
class Worker:
def __init__(self,
config,
policy_params,
env_creator,
noise,
worker_index,
min_task_runtime=0.2):
self.min_task_runtime = min_task_runtime
self.config = config
self.config.update(policy_params)
self.config["single_threaded"] = True
self.noise = SharedNoiseTable(noise)
env_context = EnvContext(config["env_config"] or {}, worker_index)
self.env = env_creator(env_context)
from ray.rllib import models
self.preprocessor = models.ModelCatalog.get_preprocessor(
self.env, config["model"])
_policy_class = get_policy_class(config)
self.policy = _policy_class(self.env.observation_space,
self.env.action_space, config)
@property
def filters(self):
return {DEFAULT_POLICY_ID: self.policy.observation_filter}
def sync_filters(self, new_filters):
for k in self.filters:
self.filters[k].sync(new_filters[k])
def get_filters(self, flush_after=False):
return_filters = {}
for k, f in self.filters.items():
return_filters[k] = f.as_serializable()
if flush_after:
f.clear_buffer()
return return_filters
def rollout(self, timestep_limit, add_noise=True):
rollout_rewards, rollout_fragment_length = rollout(
self.policy,
self.env,
timestep_limit=timestep_limit,
add_noise=add_noise)
return rollout_rewards, rollout_fragment_length
def do_rollouts(self, params, timestep_limit=None):
# Set the network weights.
self.policy.set_flat_weights(params)
noise_indices, returns, sign_returns, lengths = [], [], [], []
eval_returns, eval_lengths = [], []
# Perform some rollouts with noise.
task_tstart = time.time()
while (len(noise_indices) == 0
or time.time() - task_tstart < self.min_task_runtime):
if np.random.uniform() < self.config["eval_prob"]:
# Do an evaluation run with no perturbation.
self.policy.set_flat_weights(params)
rewards, length = self.rollout(timestep_limit, add_noise=False)
eval_returns.append(rewards.sum())
eval_lengths.append(length)
else:
# Do a regular run with parameter perturbations.
noise_index = self.noise.sample_index(self.policy.num_params)
perturbation = self.config["noise_stdev"] * self.noise.get(
noise_index, self.policy.num_params)
# These two sampling steps could be done in parallel on
# different actors letting us update twice as frequently.
self.policy.set_flat_weights(params + perturbation)
rewards_pos, lengths_pos = self.rollout(timestep_limit)
self.policy.set_flat_weights(params - perturbation)
rewards_neg, lengths_neg = self.rollout(timestep_limit)
noise_indices.append(noise_index)
returns.append([rewards_pos.sum(), rewards_neg.sum()])
sign_returns.append(
[np.sign(rewards_pos).sum(),
np.sign(rewards_neg).sum()])
lengths.append([lengths_pos, lengths_neg])
return Result(
noise_indices=noise_indices,
noisy_returns=returns,
sign_noisy_returns=sign_returns,
noisy_lengths=lengths,
eval_returns=eval_returns,
eval_lengths=eval_lengths)
def get_policy_class(config):
if config["framework"] == "torch":
from ray.rllib.agents.es.es_torch_policy import ESTorchPolicy
policy_cls = ESTorchPolicy
else:
policy_cls = ESTFPolicy
return policy_cls
def validate_config(config):
if config["num_workers"] <= 0:
raise ValueError("`num_workers` must be > 0 for ES!")
if config["evaluation_config"]["num_envs_per_worker"] != 1:
raise ValueError(
"`evaluation_config.num_envs_per_worker` must always be 1 for "
"ES/ARS! To parallelize evaluation, increase "
"`evaluation_num_workers` to > 1.")
if config["evaluation_config"]["observation_filter"] != "NoFilter":
raise ValueError(
"`evaluation_config.observation_filter` must always be `NoFilter` "
"for ES/ARS!")
class ESTrainer(Trainer):
"""Large-scale implementation of Evolution Strategies in Ray."""
_name = "ES"
_default_config = DEFAULT_CONFIG
@override(Trainer)
def _init(self, config, env_creator):
validate_config(config)
env_context = EnvContext(config["env_config"] or {}, worker_index=0)
env = env_creator(env_context)
self._policy_class = get_policy_class(config)
self.policy = self._policy_class(
obs_space=env.observation_space,
action_space=env.action_space,
config=config)
self.optimizer = optimizers.Adam(self.policy, config["stepsize"])
self.report_length = config["report_length"]
# Create the shared noise table.
logger.info("Creating shared noise table.")
noise_id = create_shared_noise.remote(config["noise_size"])
self.noise = SharedNoiseTable(ray.get(noise_id))
# Create the actors.
logger.info("Creating actors.")
self._workers = [
Worker.remote(config, {}, env_creator, noise_id, idx + 1)
for idx in range(config["num_workers"])
]
self.episodes_so_far = 0
self.reward_list = []
self.tstart = time.time()
@override(Trainer)
def get_policy(self, policy=DEFAULT_POLICY_ID):
if policy != DEFAULT_POLICY_ID:
raise ValueError("ES has no policy '{}'! Use {} "
"instead.".format(policy, DEFAULT_POLICY_ID))
return self.policy
@override(Trainer)
def step(self):
config = self.config
theta = self.policy.get_flat_weights()
assert theta.dtype == np.float32
assert len(theta.shape) == 1
# Put the current policy weights in the object store.
theta_id = ray.put(theta)
# Use the actors to do rollouts, note that we pass in the ID of the
# policy weights.
results, num_episodes, num_timesteps = self._collect_results(
theta_id, config["episodes_per_batch"], config["train_batch_size"])
all_noise_indices = []
all_training_returns = []
all_training_lengths = []
all_eval_returns = []
all_eval_lengths = []
# Loop over the results.
for result in results:
all_eval_returns += result.eval_returns
all_eval_lengths += result.eval_lengths
all_noise_indices += result.noise_indices
all_training_returns += result.noisy_returns
all_training_lengths += result.noisy_lengths
assert len(all_eval_returns) == len(all_eval_lengths)
assert (len(all_noise_indices) == len(all_training_returns) ==
len(all_training_lengths))
self.episodes_so_far += num_episodes
# Assemble the results.
eval_returns = np.array(all_eval_returns)
eval_lengths = np.array(all_eval_lengths)
noise_indices = np.array(all_noise_indices)
noisy_returns = np.array(all_training_returns)
noisy_lengths = np.array(all_training_lengths)
# Process the returns.
if config["return_proc_mode"] == "centered_rank":
proc_noisy_returns = utils.compute_centered_ranks(noisy_returns)
else:
raise NotImplementedError(config["return_proc_mode"])
# Compute and take a step.
g, count = utils.batched_weighted_sum(
proc_noisy_returns[:, 0] - proc_noisy_returns[:, 1],
(self.noise.get(index, self.policy.num_params)
for index in noise_indices),
batch_size=500)
g /= noisy_returns.size
assert (g.shape == (self.policy.num_params, ) and g.dtype == np.float32
and count == len(noise_indices))
# Compute the new weights theta.
theta, update_ratio = self.optimizer.update(-g +
config["l2_coeff"] * theta)
# Set the new weights in the local copy of the policy.
self.policy.set_flat_weights(theta)
# Store the rewards
if len(all_eval_returns) > 0:
self.reward_list.append(np.mean(eval_returns))
# Now sync the filters
FilterManager.synchronize({
DEFAULT_POLICY_ID: self.policy.observation_filter
}, self._workers)
info = {
"weights_norm": np.square(theta).sum(),
"grad_norm": np.square(g).sum(),
"update_ratio": update_ratio,
"episodes_this_iter": noisy_lengths.size,
"episodes_so_far": self.episodes_so_far,
}
reward_mean = np.mean(self.reward_list[-self.report_length:])
result = dict(
episode_reward_mean=reward_mean,
episode_len_mean=eval_lengths.mean(),
timesteps_this_iter=noisy_lengths.sum(),
info=info)
return result
@override(Trainer)
def compute_action(self, observation, *args, **kwargs):
action, _, _ = self.policy.compute_actions([observation], update=False)
if kwargs.get("full_fetch"):
return action[0], [], {}
return action[0]
@override(Trainer)
def _sync_weights_to_workers(self, *, worker_set=None, workers=None):
# Broadcast the new policy weights to all evaluation workers.
assert worker_set is not None
logger.info("Synchronizing weights to evaluation workers.")
weights = ray.put(self.policy.get_flat_weights())
worker_set.foreach_policy(
lambda p, pid: p.set_flat_weights(ray.get(weights)))
@override(Trainer)
def cleanup(self):
# workaround for https://github.com/ray-project/ray/issues/1516
for w in self._workers:
w.__ray_terminate__.remote()
def _collect_results(self, theta_id, min_episodes, min_timesteps):
num_episodes, num_timesteps = 0, 0
results = []
while num_episodes < min_episodes or num_timesteps < min_timesteps:
logger.info(
"Collected {} episodes {} timesteps so far this iter".format(
num_episodes, num_timesteps))
rollout_ids = [
worker.do_rollouts.remote(theta_id) for worker in self._workers
]
# Get the results of the rollouts.
for result in ray.get(rollout_ids):
results.append(result)
# Update the number of episodes and the number of timesteps
# keeping in mind that result.noisy_lengths is a list of lists,
# where the inner lists have length 2.
num_episodes += sum(len(pair) for pair in result.noisy_lengths)
num_timesteps += sum(
sum(pair) for pair in result.noisy_lengths)
return results, num_episodes, num_timesteps
def __getstate__(self):
return {
"weights": self.policy.get_flat_weights(),
"filter": self.policy.observation_filter,
"episodes_so_far": self.episodes_so_far,
}
def __setstate__(self, state):
self.episodes_so_far = state["episodes_so_far"]
self.policy.set_flat_weights(state["weights"])
self.policy.observation_filter = state["filter"]
FilterManager.synchronize({
DEFAULT_POLICY_ID: self.policy.observation_filter
}, self._workers)
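# Illustrative-only sketch (not part of the original module): a plain-NumPy
# version of the gradient estimate assembled in ESTrainer.step(), with the
# centered-rank preprocessing (utils.compute_centered_ranks) and the batched
# summation (utils.batched_weighted_sum) left out for brevity. `noise_block`
# stands in for the rows returned by SharedNoiseTable.get() for each sampled
# index; `returns_pos` / `returns_neg` are the paired returns produced by
# Worker.do_rollouts() for the +perturbation and -perturbation rollouts.
def _es_gradient_sketch(noise_block, returns_pos, returns_neg):
    # weight each antithetic perturbation by the difference of its paired
    # returns, then average over all 2 * num_pairs rollouts
    weights = np.asarray(returns_pos) - np.asarray(returns_neg)  # shape: (num_pairs,)
    grad = np.dot(weights, noise_block)                          # shape: (num_params,)
    return grad / (2 * len(weights))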
|
|
# coding=utf-8
# Copyright 2022 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data generators for enwik8 data-set."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import zipfile
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_problems
from tensor2tensor.utils import registry
import tensorflow.compat.v1 as tf
def _maybe_download_corpus(tmp_dir):
"""Download and unpack the corpus.
Args:
tmp_dir: directory containing dataset.
Returns:
path to entire corpus as a text file.
"""
corpus_url = "http://mattmahoney.net/dc/enwik8.zip"
corpus_filename = os.path.basename(corpus_url)
compressed_filepath = generator_utils.maybe_download(
tmp_dir, corpus_filename, corpus_url)
zip_ref = zipfile.ZipFile(compressed_filepath, "r")
zip_ref.extractall(tmp_dir)
zip_ref.close()
return os.path.join(tmp_dir, "enwik8")
@registry.register_problem
class Enwik8L65k(text_problems.Text2SelfProblem):
"""Enwiki8, with examples up to 65,536 characters long."""
READ_MODE = "r"
DUPE_FACTOR = 4
@property
def is_generate_per_split(self):
return True
@property
def vocab_type(self):
return text_problems.VocabType.CHARACTER
def global_task_id(self):
return problem.TaskID.EN_CHR
@property
def dataset_splits(self):
"""Splits of data to produce and number of output shards for each."""
return [{
"split": problem.DatasetSplit.TRAIN,
"shards": 16,
}, {
"split": problem.DatasetSplit.EVAL,
"shards": 1,
}, {
"split": problem.DatasetSplit.TEST,
"shards": 1,
}]
def max_length(self, model_hparams):
return self.sequence_length
@property
def sequence_length(self):
"""Length of each example (number of characters)."""
return 65536
def generate_samples(self, data_dir, tmp_dir, dataset_split):
filepath = _maybe_download_corpus(tmp_dir)
with tf.io.gfile.GFile(filepath, mode=self.READ_MODE) as f:
data = f.read()
tf.logging.info("Length of enwik8 = %d", len(data))
num_test_chars = 5000000
if dataset_split == problem.DatasetSplit.TRAIN:
part = data[: -2 * num_test_chars]
elif dataset_split == problem.DatasetSplit.EVAL:
part = data[-2 * num_test_chars: -num_test_chars]
elif dataset_split == problem.DatasetSplit.TEST:
part = data[-num_test_chars:]
else:
raise ValueError("Undefined dataset_split")
tf.logging.info("Length of split '%s' = %d", dataset_split, len(part))
# TODO(kitaev): Better handling of evaluation data, to ensure that there is
# always context available.
if dataset_split == problem.DatasetSplit.TRAIN:
offset = self.sequence_length // self.DUPE_FACTOR
for start in range(0, len(part), offset):
yield {"targets": part[start:start+self.sequence_length]}
else:
for start in range(0, len(part), self.sequence_length):
yield {"targets": part[start:start+self.sequence_length]}
def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split):
generator = self.generate_samples(data_dir, tmp_dir, dataset_split)
vocab = self.get_or_create_vocab(data_dir, tmp_dir)
for sample in generator:
sample["targets"] = vocab.encode(sample["targets"])
yield sample
@registry.register_problem
class Enwik8L2k(Enwik8L65k):
"""Enwiki8, with examples up to 2048 characters long.
Reads the input byte-wise and chunks it into fragments of maximum
length of 2048. Does not shift byte indices (we do not assume cls or
pad are used), unlike the base class!
"""
READ_MODE = "rb"
@property
def sequence_length(self):
"""Length of each example (number of characters)."""
return 2048
def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split):
return self.generate_samples(data_dir, tmp_dir, dataset_split)
@registry.register_problem
class Enwik8L32k(Enwik8L2k):
@property
def sequence_length(self):
"""Length of each example (in tokens)."""
return 32768
@registry.register_problem
class Enwik8L16k(Enwik8L2k):
@property
def sequence_length(self):
"""Length of each example (in tokens)."""
return 16384
@registry.register_problem
class Enwik8L8k(Enwik8L2k):
@property
def sequence_length(self):
"""Length of each example (in tokens)."""
return 8192
@registry.register_problem
class Enwik8L4k(Enwik8L2k):
@property
def sequence_length(self):
"""Length of each example (in tokens)."""
return 4096
@registry.register_problem
class Enwik8L1k(Enwik8L2k):
@property
def sequence_length(self):
"""Length of each example (in tokens)."""
return 1024
@registry.register_problem
class Enwik8L512(Enwik8L2k):
@property
def sequence_length(self):
"""Length of each example (in tokens)."""
return 512
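# Illustrative-only sketch (not part of the original module): reproduces the
# chunking policy of Enwik8L65k.generate_samples() on an in-memory string, so
# the effect of DUPE_FACTOR (overlapping training windows versus disjoint
# eval/test windows) is easy to inspect. For example,
# _chunking_sketch("abcdefghij") returns
# ['abcdefgh', 'cdefghij', 'efghij', 'ghij', 'ij'].
def _chunking_sketch(data, sequence_length=8, dupe_factor=4, training=True):
  offset = sequence_length // dupe_factor if training else sequence_length
  return [data[start:start + sequence_length]
          for start in range(0, len(data), offset)]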
|
|
#!/usr/bin/python
#
# Author: Jarkko Vatjus-Anttila <jvatjusanttila@gmail.com>
#
# For conditions of distribution and use, see copyright notice in license.txt
#
import os
import sys
##########################################################################
# Class MaterialContainer
#
class MaterialContainer():
def __init__(self):
self.reset()
def reset(self):
self.materials = []
def addMaterial(self, material):
self.materials.append(material)
def getMaterials(self):
return self.materials
def toFile(self, filename, overwrite=False, append=True, LOD=5):
for m in self.materials:
m.toFile(filename, overwrite=overwrite, append=append, LOD=LOD)
def fromFile(self, filename):
self.reset()
try: f = open(filename, "r")
except IOError:
print "MaterialContainer: Error opening file %s" % filename
return
material = None
lastObject = ""
while 1:
line = f.readline()
if len(line) == 0: break
l = line.strip().split(" ")
try: key = l[0]
            except IndexError: continue
try: name = l[1].strip()
except IndexError: name = ""
if key == "material":
material = Material(l[1])
self.materials.append(material)
lastObject = key
elif key == "technique":
material.addTechnique(name)
lastObject = key
elif key == "pass":
material.addPass(name)
lastObject = key
elif key == "vertex_program_ref":
material.addVertexprogram(name)
lastObject = key
elif key == "fragment_program_ref":
material.addFragmentprogram(name)
lastObject = key
elif key == "texture_unit":
material.addTextureunit(name)
lastObject = key
elif key == "{": pass
elif key == "}":
lastObject = ""
elif key.startswith("//"): pass
else:
# regular parameters into last defined unit
d = { key:" ".join(l[1:]) }
if lastObject == "pass":
material.addPassParameters(d)
elif lastObject == "material":
material.addMaterialParameters(d)
elif lastObject == "technique":
material.addTechniqueParameters(d)
elif lastObject == "texture_unit":
material.addTextureunitParameters(d)
elif lastObject == "fragment_program_ref":
material.addFragmentprogramParameters(d)
elif lastObject == "vertex_program_ref":
material.addVertexprogramParameters(d)
##########################################################################
# Class Material
#
class Material():
def __init__(self, name):
self.reset(name)
def reset(self, name):
self.name = name
self.techniques = []
self.currenttechnique = None
self.indent = 0
self.currentparams = {}
self.lod1_params = [ ]
self.lod2_params = [ ]
self.lod3_params = [ ]
self.lod4_params = [ ]
self.lod5_params = [ "receive_shadows" ]
self.all_params = " ".join(self.lod1_params)
self.all_params += (" " + " ".join(self.lod2_params))
self.all_params += (" " + " ".join(self.lod3_params))
self.all_params += (" " + " ".join(self.lod4_params))
self.all_params += (" " + " ".join(self.lod5_params))
##########################################################################
# Subclasses for the material definition
# - Technique
# - Pass
# - TextureUnit
# - Vertexshader
# - Fragmentshader
#
class Technique():
def __init__(self, parentmaterial, name=""):
self.parentmaterial = parentmaterial
self.passes = []
self.currentpass = None
self.name = name
self.currentparams = {}
self.lod1_params = [ ]
self.lod2_params = [ ]
self.lod3_params = [ ]
self.lod4_params = [ ]
self.lod5_params = [ ]
self.all_params = " ".join(self.lod1_params)
self.all_params += (" " + " ".join(self.lod2_params))
self.all_params += (" " + " ".join(self.lod3_params))
self.all_params += (" " + " ".join(self.lod4_params))
self.all_params += (" " + " ".join(self.lod5_params))
def addPass(self, p):
self.passes.append(p)
self.currentpass = p
def addPassParameters(self, d):
self.currentpass.addPassParameters(d)
def addTextureunit(self, t):
self.currentpass.addTextureunit(t)
def addTextureunitParameters(self, d):
self.currentpass.addTextureunitParameters(d)
def addVertexprogram(self, vp):
self.currentpass.addVertexprogram(vp)
def addVertexprogramParameters(self, d):
self.currentpass.addVertexprogramParameters(d)
def addFragmentprogram(self, fp):
self.currentpass.addFragmentprogram(fp)
def addFragmentprogramParameters(self, d):
self.currentpass.addFragmentprogramParameters(d)
def addTechniqueParameters(self, d):
for key, value in d.items():
if key == "": continue # Suppress cosmetic warning
if key in self.all_params:
self.currentparams[key] = value
else:
print "Warning: Trying to set param '%s' for current Technique, but it is not a valid parameter" % key
##########################################################################
# Technique output methods
#
def startTechnique(self):
self.parentmaterial.writeMaterialString("technique %s" % self.name)
self.parentmaterial.writeMaterialString("{")
self.parentmaterial.increaseIndent()
def endTechnique(self):
self.parentmaterial.decreaseIndent()
self.parentmaterial.writeMaterialString("}")
def outputTechnique(self, LOD=5):
valid = " ".join(self.lod1_params)
if LOD >= 2: valid += (" " + " ".join(self.lod2_params))
if LOD >= 3: valid += (" " + " ".join(self.lod3_params))
if LOD >= 4: valid += (" " + " ".join(self.lod4_params))
if LOD >= 5: valid += (" " + " ".join(self.lod5_params))
for key, value in self.currentparams.items():
if key in valid:
self.parentmaterial.writeMaterialString("%s %s" % (key, value))
for p in self.passes:
p.startPass()
p.outputPass(LOD=LOD)
p.endPass()
##########################################################################
# Pass Subclass
#
class Pass():
def __init__(self, parentmaterial, name=""):
self.name = name
self.parentmaterial = parentmaterial
self.textureunits = []
self.vertexprograms = []
self.fragmentprograms = []
self.currentparams = {}
self.currenttextureunit = None
self.currentvertexprogram = None
self.currentfragmentprogram = None
self.lod1_params = [ "ambient", "diffuse", "cull_hardware", "depth_check", "depth_func", "colour_write" ]
self.lod2_params = [ "specular", "emissive", "polygon_mode", "shading", "alpha_to_coverage" ]
self.lod3_params = [ "scene_blend", "depth_write", "transparent_sorting", "illumination_stage" ]
self.lod4_params = [ "lighting", "alpha_rejection", "iteration", "scene_blend_op", "normalise_normals" ]
self.lod5_params = [ "cull_software", "fog_override", "light_scissor", "light_clip_planes" ]
self.all_params = " ".join(self.lod1_params)
self.all_params += (" " + " ".join(self.lod2_params))
self.all_params += (" " + " ".join(self.lod3_params))
self.all_params += (" " + " ".join(self.lod4_params))
self.all_params += (" " + " ".join(self.lod5_params))
def addPassParameters(self, d):
for key, value in d.items():
if key == "": continue # Suppress cosmetic warning
if key in self.all_params:
self.currentparams[key] = value
else:
print "Warning: Trying to set param '%s' for current Pass, but it is not a valid parameter" % key
def addTextureunit(self, t_unit):
self.textureunits.append(t_unit)
self.currenttextureunit = t_unit
def addTextureunitParameters(self, d):
self.currenttextureunit.addTextureunitParameters(d)
def addVertexprogram(self, vp):
self.vertexprograms.append(vp)
self.currentvertexprogram = vp
def addVertexprogramParameters(self, d):
self.currentvertexprogram.addVertexprogramParameters(d)
def addFragmentprogram(self, fp):
self.fragmentprograms.append(fp)
self.currentfragmentprogram = fp
def addFragmentprogramParameters(self, d):
self.currentfragmentprogram.addFragmentprogramParameters(d)
def startPass(self):
self.parentmaterial.writeMaterialString("pass %s" % self.name)
self.parentmaterial.writeMaterialString("{")
self.parentmaterial.increaseIndent()
def endPass(self):
self.parentmaterial.decreaseIndent()
self.parentmaterial.writeMaterialString("}")
def outputPass(self, LOD=5):
valid = " ".join(self.lod1_params)
if LOD >= 2: valid += (" " + " ".join(self.lod2_params))
if LOD >= 3: valid += (" " + " ".join(self.lod3_params))
if LOD >= 4: valid += (" " + " ".join(self.lod4_params))
if LOD >= 5: valid += (" " + " ".join(self.lod5_params))
for key, value in self.currentparams.items():
if key in valid:
self.parentmaterial.writeMaterialString("%s %s" % (key, value))
for vp in self.vertexprograms:
vp.startVertexprogram()
vp.outputVertexprogram(LOD=LOD)
vp.endVertexprogram()
for fp in self.fragmentprograms:
fp.startFragmentprogram()
fp.outputFragmentprogram(LOD=LOD)
fp.endFragmentprogram()
for t in self.textureunits:
t.startTextureunit()
t.outputTextureunit(LOD=LOD)
t.endTextureunit()
##########################################################################
# Vertexprogram Subclass
#
class Vertexprogram():
def __init__(self, parentmaterial, name=""):
self.name = name
self.parentmaterial = parentmaterial
self.currentparams = {}
self.lod1_params = [ "param_named", "param_named_auto" ]
self.lod2_params = [ ]
self.lod3_params = [ ]
self.lod4_params = [ ]
self.lod5_params = [ ]
self.all_params = " ".join(self.lod1_params)
self.all_params += (" " + " ".join(self.lod2_params))
self.all_params += (" " + " ".join(self.lod3_params))
self.all_params += (" " + " ".join(self.lod4_params))
self.all_params += (" " + " ".join(self.lod5_params))
def addVertexprogramParameters(self, d):
for key, value in d.items():
if key == "": continue # Suppress cosmetic warning
if key in self.all_params:
self.currentparams[key] = value
else:
print "Trying to set param '%s' for current Vertexprogram, but it is not a valid parameter" % key
def startVertexprogram(self):
self.parentmaterial.writeMaterialString("vertex_program_ref %s" % self.name)
self.parentmaterial.writeMaterialString("{")
self.parentmaterial.increaseIndent()
def endVertexprogram(self):
self.parentmaterial.decreaseIndent()
self.parentmaterial.writeMaterialString("}")
def outputVertexprogram(self, LOD=5):
valid = " ".join(self.lod1_params)
if LOD >= 2: valid += (" " + " ".join(self.lod2_params))
if LOD >= 3: valid += (" " + " ".join(self.lod3_params))
if LOD >= 4: valid += (" " + " ".join(self.lod4_params))
if LOD >= 5: valid += (" " + " ".join(self.lod5_params))
for key, value in self.currentparams.items():
if key in valid:
self.parentmaterial.writeMaterialString("%s %s" % (key, value))
##########################################################################
# Fragmentprogram Subclass
#
class Fragmentprogram():
def __init__(self, parentmaterial, name=""):
self.name = name
self.parentmaterial = parentmaterial
self.currentparams = {}
self.lod1_params = [ "param_named", "param_named_auto" ]
self.lod2_params = [ ]
self.lod3_params = [ ]
self.lod4_params = [ ]
self.lod5_params = [ ]
self.all_params = " ".join(self.lod1_params)
self.all_params += (" " + " ".join(self.lod2_params))
self.all_params += (" " + " ".join(self.lod3_params))
self.all_params += (" " + " ".join(self.lod4_params))
self.all_params += (" " + " ".join(self.lod5_params))
def addFragmentprogramParameters(self, d):
for key, value in d.items():
if key == "": continue # Suppress cosmetic warning
if key in self.all_params:
self.currentparams[key] = value
else:
print "Trying to set param '%s' for current Fragmentprogram, but it is not a valid parameter" % key
def startFragmentprogram(self):
self.parentmaterial.writeMaterialString("fragment_program_ref %s" % self.name)
self.parentmaterial.writeMaterialString("{")
self.parentmaterial.increaseIndent()
def endFragmentprogram(self):
self.parentmaterial.decreaseIndent()
self.parentmaterial.writeMaterialString("}")
def outputFragmentprogram(self, LOD=5):
valid = " ".join(self.lod1_params)
if LOD >= 2: valid += (" " + " ".join(self.lod2_params))
if LOD >= 3: valid += (" " + " ".join(self.lod3_params))
if LOD >= 4: valid += (" " + " ".join(self.lod4_params))
if LOD >= 5: valid += (" " + " ".join(self.lod5_params))
for key, value in self.currentparams.items():
if key in valid:
self.parentmaterial.writeMaterialString("%s %s" % (key, value))
##########################################################################
# Textureunit Subclass
#
class Textureunit():
def __init__(self, parentmaterial, name=""):
self.parentmaterial = parentmaterial
self.name = name
self.currentparams = {}
self.lod1_params = [ "texture", "texture_alias", "content_type", "scale" ]
self.lod2_params = [ "wave_xform", "colour_op", "tex_coord_set" ]
self.lod3_params = [ "tex_address_mode", "filtering", "cubic_texture" ]
self.lod4_params = [ "rotate_anim", "cubic_texture" ]
self.lod5_params = [ "scroll_anim", "alpha_op_ex", "colour_op_ex", "env_map" ]
self.all_params = " ".join(self.lod1_params)
self.all_params += (" " + " ".join(self.lod2_params))
self.all_params += (" " + " ".join(self.lod3_params))
self.all_params += (" " + " ".join(self.lod4_params))
self.all_params += (" " + " ".join(self.lod5_params))
def addTextureunitParameters(self, d):
for key, value in d.items():
if key == "": continue # Suppress cosmetic warning
if key in self.all_params:
self.currentparams[key] = value
else:
print "Trying to set param '%s' for current Texture_unit, but it is not a valid parameter" % key
def startTextureunit(self):
self.parentmaterial.writeMaterialString("texture_unit %s" % self.name)
self.parentmaterial.writeMaterialString("{")
self.parentmaterial.increaseIndent()
def endTextureunit(self):
self.parentmaterial.decreaseIndent()
self.parentmaterial.writeMaterialString("}")
def outputTextureunit(self, LOD=5):
valid = " ".join(self.lod1_params)
if LOD >= 2: valid += (" " + " ".join(self.lod2_params))
if LOD >= 3: valid += (" " + " ".join(self.lod3_params))
if LOD >= 4: valid += (" " + " ".join(self.lod4_params))
if LOD >= 5: valid += (" " + " ".join(self.lod5_params))
for key, value in self.currentparams.items():
if key in valid:
self.parentmaterial.writeMaterialString("%s %s" % (key, value))
##########################################################################
# Material class private methods
#
def __startMaterial(self):
self.writeMaterialString("material %s" % self.name)
self.writeMaterialString("{")
self.increaseIndent()
def __endMaterial(self):
self.decreaseIndent()
self.writeMaterialString("}")
def writeMaterialString(self, string):
s = ""
for i in range(self.indent): s += " "
self.file.write(s + string + "\n")
def increaseIndent(self):
self.indent += 1
def decreaseIndent(self):
self.indent -= 1
##########################################################################
# Material generator API
#
def toFile(self, filename, overwrite=False, append=False, LOD=5):
if os.path.exists(filename):
if overwrite == False and append == False:
sys.stderr.write("MaterialGenerator: ERROR: output file '%s' already exists!\n" % filename)
return
elif overwrite == True:
os.remove(filename)
filemode = "w"
if append == True: filemode = "a"
try: self.file = open(filename, filemode)
except IOError:
sys.stderr.write("MaterialGenerator: ERROR: Unable to open file '%s' for writing!" % filename)
return
self.__startMaterial()
valid = " ".join(self.lod1_params)
if LOD >= 2: valid += (" " + " ".join(self.lod2_params))
if LOD >= 3: valid += (" " + " ".join(self.lod3_params))
if LOD >= 4: valid += (" " + " ".join(self.lod4_params))
if LOD >= 5: valid += (" " + " ".join(self.lod5_params))
for key, value in self.currentparams.items():
if key in valid:
self.writeMaterialString("%s %s" % (key, value))
for t in self.techniques:
t.startTechnique()
t.outputTechnique(LOD=LOD)
t.endTechnique()
self.__endMaterial()
self.file.close()
def addMaterialParameters(self, d):
for key, value in d.items():
if key == "": continue # Suppress cosmetic warning
if key in self.all_params:
self.currentparams[key] = value
else:
print "Warning: Trying to set param '%s' for current Material, but it is not a valid parameter" % key
def addTechnique(self, name=""):
t = Material.Technique(self, name=name)
self.techniques.append(t)
self.currenttechnique = t
def addTechniqueParameters(self, d={}):
self.currenttechnique.addTechniqueParameters(d)
def addPass(self, name=""):
p = Material.Pass(self)
self.currenttechnique.addPass(p)
def addPassParameters(self, d={}):
self.currenttechnique.addPassParameters(d)
def addTextureunit(self, name=""):
t = Material.Textureunit(self, name=name)
self.currenttechnique.addTextureunit(t)
def addTextureunitParameters(self, d={}):
self.currenttechnique.addTextureunitParameters(d)
def addVertexprogram(self, name=""):
vp = Material.Vertexprogram(self, name=name)
self.currenttechnique.addVertexprogram(vp)
def addVertexprogramParameters(self, d={}):
self.currenttechnique.addVertexprogramParameters(d)
def addFragmentprogram(self, name=""):
fp = Material.Fragmentprogram(self, name=name)
self.currenttechnique.addFragmentprogram(fp)
def addFragmentprogramParameters(self, d={}):
self.currenttechnique.addFragmentprogramParameters(d)
##########################################################################
# Material generator pre-defined macros for simple generation of certain
# types of materials. Parameters in these macros are limited. If it seems
# too restricting for you, then use the above API to generate more custom
# materials
#
def createMaterial_Diffuseonly(self, name, diffusecolor="1.0 1.0 1.0"):
self.reset(name)
self.addTechnique()
self.addPass()
self.addPassParameters({"diffuse":diffusecolor})
def createMaterial_Textureonly(self, name, texture, diffusecolor="1.0 1.0 1.0", ambientcolor="0.5 0.5 0.5"):
self.reset(name)
self.addTechnique()
self.addPass()
self.addPassParameters({"diffuse":diffusecolor, "ambient":ambientcolor})
self.addTextureunit(texture)
self.addTextureunitParameters({"texture":texture})
def createMaterial_4channelTerrain(self, name, t1, t2, t3, t4, weightmap):
self.reset(name)
self.addTechnique("TerrainPCF")
self.addPass()
self.addPassParameters({"ambient":"0.0 0.0 0.0 1.0"})
self.addVertexprogram("Rex/TerrainPCFVS_weighted")
self.addFragmentprogram("Rex/TerrainPCFFS_weighted")
self.addTextureunit("weights")
self.addTextureunitParameters({"texture_alias":"weights", "texture":weightmap})
self.addTextureunit("detail0")
self.addTextureunitParameters({"texture_alias":"detail0", "texture":t1})
self.addTextureunit("detail1")
self.addTextureunitParameters({"texture_alias":"detail1", "texture":t2})
self.addTextureunit("detail2")
self.addTextureunitParameters({"texture_alias":"detail2", "texture":t3})
self.addTextureunit("detail3")
self.addTextureunitParameters({"texture_alias":"detail3", "texture":t4})
self.addTextureunit("shadowMap0")
self.addTextureunitParameters({"texture_alias":"shadowMap0", "tex_address_mode":"clamp", "content_type":"shadow"})
self.addTextureunit("shadowMap1")
self.addTextureunitParameters({"texture_alias":"shadowMap1", "tex_address_mode":"clamp", "content_type":"shadow"})
self.addTextureunit("shadowMap2")
##########################################################################
# Material unit test case
#
if __name__ == "__main__":
m = Material("testmaterial")
m.addTechnique()
m.addPass()
m.addPassParameters({"ambient":"0.5 0.5 0.5", "diffuse":"1.0 1.0 1.0"})
m.addTextureunit()
m.addTextureunitParameters({"texture":"image.png", "scroll_anim":"0.1 0.0", "wave_xform":"scale sine 0.0 0.7 0.0 1.0"})
m.addTextureunit()
m.addTextureunitParameters({"texture":"wobbly.png", "rotate_anim":"0.25", "colour_op":"add"})
m.toFile("./resources/testmaterial.material", overwrite=True)
m.createMaterial_4channelTerrain("terrainsample", "t1.png", "t2.png", "t3.png", "t4.png", "weight.png")
m.toFile("./resources/4channelterrainsample.material", overwrite=True)
m.createMaterial_Diffuseonly("diffuse")
m.toFile("./resources/diffuseonly.material", overwrite=True)
m.createMaterial_Textureonly("textureonly", "tex.png")
m.toFile("./resources/textureonly.material", overwrite=True)
mc = MaterialContainer()
mc.fromFile("./resources/terrainsample.material")
mc.toFile("./resources/terrainsample2.material", overwrite=True, append=True, LOD=5)
mc.fromFile("./resources/twinmaterial.material")
mc.toFile("./resources/twinmaterial2.material", overwrite=False, append=True, LOD=5)
print "Done"
|
|
import six
import pytest
import verta
from verta._protos.public.modeldb import ExperimentRunService_pb2 as _ExperimentRunService
OPERATORS = six.viewkeys(verta.tracking.entities.ExperimentRuns._OP_MAP)
class TestFind:
def test_reject_unsupported_keys(self, client, floats):
# known unsupported keys
all_keys = {
attr
for attr
in _ExperimentRunService.ExperimentRun.__dict__.keys()
if not attr[0].isupper()
and not attr.startswith('_')
}
unsupported_keys = all_keys - verta.tracking.entities.ExperimentRuns._VALID_QUERY_KEYS
proj = client.set_project()
expt = client.set_experiment()
for _ in range(3):
client.set_experiment_run()
for expt_runs in (proj.expt_runs, expt.expt_runs):
for key in unsupported_keys:
for op, val in zip(OPERATORS, floats):
with pytest.raises(ValueError):
expt_runs.find("{} {} {}".format(key, op, val))
def test_reject_random_keys(self, client, strs, floats):
proj = client.set_project()
expt = client.set_experiment()
for _ in range(3):
client.set_experiment_run()
for expt_runs in (proj.expt_runs, expt.expt_runs):
for key in strs:
for op, val in zip(OPERATORS, floats):
with pytest.raises(ValueError):
expt_runs.find("{} {} {}".format(key, op, val))
def test_id(self, client):
proj = client.set_project()
client.set_experiment()
runs = [client.set_experiment_run() for _ in range(3)]
for run_id in (run.id for run in runs):
result = proj.expt_runs.find("id == '{}'".format(run_id))
assert len(result) == 1
assert result[0].id == run_id
def test_project_id(self, client):
proj = client.set_project()
client.set_experiment()
runs = [client.set_experiment_run() for _ in range(3)]
client.set_experiment()
runs.extend([client.set_experiment_run() for _ in range(3)])
result = proj.expt_runs.find("project_id == '{}'".format(proj.id))
assert set(run.id for run in result) == set(run.id for run in runs)
def test_experiment_id(self, client):
proj = client.set_project()
client.set_experiment()
[client.set_experiment_run() for _ in range(3)]
expt = client.set_experiment()
runs = [client.set_experiment_run() for _ in range(3)]
result = proj.expt_runs.find("experiment_id == '{}'".format(expt.id))
assert set(run.id for run in result) == set(run.id for run in runs)
def test_name(self, client):
proj = client.set_project()
run = client.set_experiment_run()
# no quotes around value!
result = proj.expt_runs.find("name == {}".format(run.name))
assert len(result) == 1
assert result[0].id == run.id
@pytest.mark.skip(reason="not implemented")
def test_date_created(self, client):
key = "date_created"
@pytest.mark.skip(reason="not implemented")
def test_date_updated(self, client):
key = "date_updated"
@pytest.mark.skip(reason="not implemented")
def test_start_time(self, client):
key = "start_time"
@pytest.mark.skip(reason="not implemented")
def test_end_time(self, client):
key = "end_time"
@pytest.mark.skip(reason="not implemented")
def test_tags(self, client, strs):
tags = strs[:5]
proj = client.set_project()
client.set_experiment()
for i in range(1, len(tags)+1):
client.set_experiment_run(tags=tags[:i])
expt_runs = proj.expt_runs
for tag in tags:
# contains tag
result = expt_runs.find("tags == '{}'".format(tag))
runs = [run for run in expt_runs if tag in run.get_tags()]
assert set(run.id for run in result) == set(run.id for run in runs)
# does not contain tag
result = expt_runs.find("tags != '{}'".format(tag))
runs = [run for run in expt_runs if tag not in run.get_tags()]
assert set(run.id for run in result) == set(run.id for run in runs)
@pytest.mark.skip(reason="not implemented")
def test_attributes(self, client):
key = "attributes"
def test_metrics_and_hyperparameters(self, client, strs, bools, floats):
proj = client.set_project()
client.set_experiment()
metric_vals = [floats.pop() for _ in range(5)]
hyperparam_vals = list(reversed([floats.pop() for _ in range(5)]))
for metric_val, hyperparam_val in zip(metric_vals, hyperparam_vals):
run = client.set_experiment_run()
run.log_metric('val', metric_val)
run.log_hyperparameter('val', hyperparam_val)
expt_runs = proj.expt_runs
threshold = int(metric_vals[len(metric_vals)//2])
local_filtered_run_ids = set(run.id for run in expt_runs if run.get_metric('val') >= threshold)
backend_filtered_run_ids = set(run.id for run in expt_runs.find("metrics.val >= {}".format(threshold)))
assert local_filtered_run_ids == backend_filtered_run_ids
threshold = int(hyperparam_vals[len(hyperparam_vals)//2])
local_filtered_run_ids = set(run.id for run in expt_runs if run.get_hyperparameter('val') >= threshold)
backend_filtered_run_ids = set(run.id for run in expt_runs.find("hyperparameters.val >= {}".format(threshold)))
assert local_filtered_run_ids == backend_filtered_run_ids
def test_negative_values(self, client):
"""There was a bug that rejected negative numbers as values."""
proj = client.set_project()
client.set_experiment()
for val in range(-6, 0):
client.set_experiment_run().log_metric('val', val)
expt_runs = proj.expt_runs
threshold = -3
local_filtered_run_ids = set(run.id for run in expt_runs if run.get_metric('val') >= threshold)
backend_filtered_run_ids = set(run.id for run in expt_runs.find("metrics.val >= {}".format(threshold)))
assert local_filtered_run_ids == backend_filtered_run_ids
class TestSort:
@pytest.mark.skip("back end sorts numbers lexicographically")
def test_metrics_and_hyperparameters(self, client, floats):
proj = client.set_project()
client.set_experiment()
metric_vals = [floats.pop() for _ in range(5)]
hyperparam_vals = list(reversed([floats.pop() for _ in range(5)]))
for metric_val, hyperparam_val in zip(metric_vals, hyperparam_vals):
run = client.set_experiment_run()
run.log_metric('val', metric_val)
run.log_hyperparameter('val', hyperparam_val)
expt_runs = proj.expt_runs
# by metric
sorted_run_ids = [
run.id
for run in sorted(expt_runs,
key=lambda run: run.get_metric('val'))
]
for run_id, run in zip(sorted_run_ids, expt_runs.sort("metrics.val")):
assert run_id == run.id
# by hyperparameter, descending
sorted_run_ids = [
run.id
for run in sorted(expt_runs,
key=lambda run: run.get_hyperparameter('val'),
reverse=True)
]
for run_id, run in zip(sorted_run_ids, expt_runs.sort("hyperparameters.val", descending=True)):
assert run_id == run.id
class TestChain:
def test_chain(self, client):
client.set_project()
expt = client.set_experiment()
for acc, loss in zip(range(6), reversed(range(6))):
run = client.set_experiment_run()
run.log_metric('acc', acc)
run.log_metric('loss', loss)
# chain *_k()
runs = expt.expt_runs.bottom_k("metrics.acc", 4).top_k("metrics.acc", 2)
assert [run.get_metric('acc') for run in runs] == [3, 2]
# *_k() overrides prior sort()
runs = expt.expt_runs.sort('metrics.loss').top_k("metrics.acc", 2)
assert [run.get_metric('acc') for run in runs] == [5, 4]
runs = expt.expt_runs.sort('metrics.loss', descending=True).top_k("metrics.acc", 2)
assert [run.get_metric('acc') for run in runs] == [5, 4]
# sort() overrides prior sort()
runs = expt.expt_runs.sort('metrics.loss').sort("metrics.acc")
assert [run.get_metric('acc') for run in runs] == [0, 1, 2, 3, 4, 5]
runs = expt.expt_runs.sort('metrics.acc').sort("metrics.loss")
assert [run.get_metric('loss') for run in runs] == [0, 1, 2, 3, 4, 5]
class TestTopK:
@pytest.mark.skip("back end sorts numbers lexicographically")
def test_metrics_and_hyperparameters(self, client, floats):
k = 3
proj = client.set_project()
client.set_experiment()
metric_vals = [floats.pop() for _ in range(5)]
hyperparam_vals = list(reversed([floats.pop() for _ in range(5)]))
for metric_val, hyperparam_val in zip(metric_vals, hyperparam_vals):
run = client.set_experiment_run()
run.log_metric('val', metric_val)
run.log_hyperparameter('val', hyperparam_val)
expt_runs = proj.expt_runs
# by metric
top_run_ids = [
run.id
for run in sorted(expt_runs,
key=lambda run: run.get_metric('val'),
reverse=True)
][:k]
for run_id, run in zip(top_run_ids, expt_runs.top_k("metrics.val", k)):
assert run_id == run.id
# by hyperparameter
top_run_ids = [
run.id
for run in sorted(expt_runs,
key=lambda run: run.get_hyperparameter('val'),
reverse=True)
][:k]
for run_id, run in zip(top_run_ids, expt_runs.top_k("hyperparameters.val", k)):
assert run_id == run.id
class TestBottomK:
@pytest.mark.skip("back end sorts numbers lexicographically")
def test_metrics_and_hyperparameters(self, client, floats):
k = 3
proj = client.set_project()
client.set_experiment()
metric_vals = [floats.pop() for _ in range(5)]
hyperparam_vals = list(reversed([floats.pop() for _ in range(5)]))
for metric_val, hyperparam_val in zip(metric_vals, hyperparam_vals):
run = client.set_experiment_run()
run.log_metric('val', metric_val)
run.log_hyperparameter('val', hyperparam_val)
expt_runs = proj.expt_runs
# by metric
bottom_run_ids = [
run.id
for run in sorted(expt_runs,
key=lambda run: run.get_metric('val'))
][:k]
for run_id, run in zip(bottom_run_ids, expt_runs.bottom_k("metrics.val", k)):
assert run_id == run.id
# by hyperparameter
bottom_run_ids = [
run.id
for run in sorted(expt_runs,
key=lambda run: run.get_hyperparameter('val'))
][:k]
for run_id, run in zip(bottom_run_ids, expt_runs.bottom_k("hyperparameters.val", k)):
assert run_id == run.id
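# Illustrative note (not part of the original tests): the query-string grammar
# exercised above is "<key> <operator> <value>", with dotted keys reaching into
# logged metrics and hyperparameters, e.g.:
#
#     proj.expt_runs.find("metrics.val >= 0.5")
#     proj.expt_runs.sort("hyperparameters.val", descending=True)
#     proj.expt_runs.top_k("metrics.acc", 2)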
|
|
#!/usr/bin/python
#
# linearize-data.py: Construct a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Gtacoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function, division
import json
import struct
import re
import os
import os.path
import base64
import httplib
import sys
import hashlib
import datetime
import time
from collections import namedtuple
settings = {}
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
def calc_hdr_hash(blk_hdr):
hash1 = hashlib.sha256()
hash1.update(blk_hdr)
hash1_o = hash1.digest()
hash2 = hashlib.sha256()
hash2.update(hash1_o)
hash2_o = hash2.digest()
return hash2_o
def calc_hash_str(blk_hdr):
hash = calc_hdr_hash(blk_hdr)
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
return hash_str
def get_blk_dt(blk_hdr):
members = struct.unpack("<I", blk_hdr[68:68+4])
nTime = members[0]
dt = datetime.datetime.fromtimestamp(nTime)
dt_ym = datetime.datetime(dt.year, dt.month, 1)
return (dt_ym, nTime)
def get_block_hashes(settings):
blkindex = []
f = open(settings['hashlist'], "r")
for line in f:
line = line.rstrip()
blkindex.append(line)
print("Read " + str(len(blkindex)) + " hashes")
return blkindex
def mkblockmap(blkindex):
blkmap = {}
for height,hash in enumerate(blkindex):
blkmap[hash] = height
return blkmap
# Block header and extent on disk
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
class BlockDataCopier:
def __init__(self, settings, blkindex, blkmap):
self.settings = settings
self.blkindex = blkindex
self.blkmap = blkmap
self.inFn = 0
self.inF = None
self.outFn = 0
self.outsz = 0
self.outF = None
self.outFname = None
self.blkCountIn = 0
self.blkCountOut = 0
self.lastDate = datetime.datetime(2000, 1, 1)
self.highTS = 1408893517 - 315360000
self.timestampSplit = False
self.fileOutput = True
self.setFileTime = False
self.maxOutSz = settings['max_out_sz']
if 'output' in settings:
self.fileOutput = False
if settings['file_timestamp'] != 0:
self.setFileTime = True
if settings['split_timestamp'] != 0:
self.timestampSplit = True
# Extents and cache for out-of-order blocks
self.blockExtents = {}
self.outOfOrderData = {}
self.outOfOrderSize = 0 # running total size for items in outOfOrderData
def writeBlock(self, inhdr, blk_hdr, rawblock):
blockSizeOnDisk = len(inhdr) + len(blk_hdr) + len(rawblock)
if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
self.outF.close()
if self.setFileTime:
                os.utime(self.outFname, (int(time.time()), self.highTS))
self.outF = None
self.outFname = None
self.outFn = self.outFn + 1
self.outsz = 0
(blkDate, blkTS) = get_blk_dt(blk_hdr)
        if self.timestampSplit and (blkDate > self.lastDate):
            print("New month " + blkDate.strftime("%Y-%m"))
            self.lastDate = blkDate
            if self.outF:
                self.outF.close()
                if self.setFileTime:
                    os.utime(self.outFname, (int(time.time()), self.highTS))
                self.outF = None
                self.outFname = None
                self.outFn = self.outFn + 1
                self.outsz = 0
        if not self.outF:
            if self.fileOutput:
                self.outFname = self.settings['output_file']
            else:
                self.outFname = os.path.join(self.settings['output'], "blk%05d.dat" % self.outFn)
            print("Output file " + self.outFname)
            self.outF = open(self.outFname, "wb")
self.outF.write(inhdr)
self.outF.write(blk_hdr)
self.outF.write(rawblock)
self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock)
self.blkCountOut = self.blkCountOut + 1
if blkTS > self.highTS:
self.highTS = blkTS
if (self.blkCountOut % 1000) == 0:
print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
(self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))
def inFileName(self, fn):
return os.path.join(self.settings['input'], "blk%05d.dat" % fn)
def fetchBlock(self, extent):
'''Fetch block contents from disk given extents'''
with open(self.inFileName(extent.fn), "rb") as f:
f.seek(extent.offset)
return f.read(extent.size)
def copyOneBlock(self):
'''Find the next block to be written in the input, and copy it to the output.'''
extent = self.blockExtents.pop(self.blkCountOut)
if self.blkCountOut in self.outOfOrderData:
# If the data is cached, use it from memory and remove from the cache
rawblock = self.outOfOrderData.pop(self.blkCountOut)
self.outOfOrderSize -= len(rawblock)
else: # Otherwise look up data on disk
rawblock = self.fetchBlock(extent)
self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)
def run(self):
while self.blkCountOut < len(self.blkindex):
if not self.inF:
fname = self.inFileName(self.inFn)
print("Input file " + fname)
try:
self.inF = open(fname, "rb")
except IOError:
print("Premature end of block data")
return
inhdr = self.inF.read(8)
if (not inhdr or (inhdr[0] == "\0")):
self.inF.close()
self.inF = None
self.inFn = self.inFn + 1
continue
inMagic = inhdr[:4]
if (inMagic != self.settings['netmagic']):
print("Invalid magic: " + inMagic.encode('hex'))
return
inLenLE = inhdr[4:]
su = struct.unpack("<I", inLenLE)
inLen = su[0] - 80 # length without header
blk_hdr = self.inF.read(80)
inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)
hash_str = calc_hash_str(blk_hdr)
            if not hash_str in self.blkmap:
print("Skipping unknown block " + hash_str)
self.inF.seek(inLen, os.SEEK_CUR)
continue
blkHeight = self.blkmap[hash_str]
self.blkCountIn += 1
if self.blkCountOut == blkHeight:
# If in-order block, just copy
rawblock = self.inF.read(inLen)
self.writeBlock(inhdr, blk_hdr, rawblock)
# See if we can catch up to prior out-of-order blocks
while self.blkCountOut in self.blockExtents:
self.copyOneBlock()
else: # If out-of-order, skip over block data for now
self.blockExtents[blkHeight] = inExtent
if self.outOfOrderSize < self.settings['out_of_order_cache_sz']:
# If there is space in the cache, read the data
# Reading the data in file sequence instead of seeking and fetching it later is preferred,
# but we don't want to fill up memory
self.outOfOrderData[blkHeight] = self.inF.read(inLen)
self.outOfOrderSize += inLen
else: # If no space in cache, seek forward
self.inF.seek(inLen, os.SEEK_CUR)
print("Done (%i blocks written)" % (self.blkCountOut))
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-data.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'netmagic' not in settings:
settings['netmagic'] = 'f9beb4d9'
if 'genesis' not in settings:
settings['genesis'] = '000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f'
if 'input' not in settings:
settings['input'] = 'input'
if 'hashlist' not in settings:
settings['hashlist'] = 'hashlist.txt'
if 'file_timestamp' not in settings:
settings['file_timestamp'] = 0
if 'split_timestamp' not in settings:
settings['split_timestamp'] = 0
if 'max_out_sz' not in settings:
settings['max_out_sz'] = 1000L * 1000 * 1000
if 'out_of_order_cache_sz' not in settings:
settings['out_of_order_cache_sz'] = 100 * 1000 * 1000
settings['max_out_sz'] = long(settings['max_out_sz'])
settings['split_timestamp'] = int(settings['split_timestamp'])
settings['file_timestamp'] = int(settings['file_timestamp'])
settings['netmagic'] = settings['netmagic'].decode('hex')
settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])
if 'output_file' not in settings and 'output' not in settings:
print("Missing output file / directory")
sys.exit(1)
blkindex = get_block_hashes(settings)
blkmap = mkblockmap(blkindex)
if not settings['genesis'] in blkmap:
print("Genesis block not found in hashlist")
else:
BlockDataCopier(settings, blkindex, blkmap).run()
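# Illustrative example only (not part of the original script): a hypothetical
# CONFIG-FILE accepted by the key=value parser in __main__ above. Omitted keys
# fall back to the defaults assigned there; the paths are placeholders.
#
#   netmagic=f9beb4d9
#   genesis=000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f
#   input=/path/to/blocks
#   hashlist=hashlist.txt
#   output_file=bootstrap.dat
#   max_out_sz=1000000000
#   split_timestamp=0
#   file_timestamp=0
#   out_of_order_cache_sz=100000000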
|
|
import maya.cmds as mc
def findAllModules(sDir):
    # find the available modules in the given path
    # returns: list of module names excluding the .py extension
aModuleList = []
aAllPyFiles = findAllFiles(sDir, '.py')
for file in aAllPyFiles:
if file != '__init__':
aModuleList.append(file)
return aModuleList
def findAllModuleNames(sDir):
    # take the list of available modules and
    # return a tuple: (list of module names, list of module CLASS_NAMEs)
validModules = findAllModules(sDir)
validModuleNames = []
packageDir = sDir.partition('/Modules/')[2]
for module in validModules:
mod = __import__(packageDir+'.' + module, {}, {}, [module])
reload(mod)
validModuleNames.append(mod.CLASS_NAME)
return(validModules, validModuleNames)
def findAllFiles(sDir, sFileExtension):
# Search the given directory for all files with given file extension
# returns: list of file names excluding the extension
import os
sFileDirectory = os.environ['RIGGING_TOOL_ROOT'] + '/' + sDir + '/'
allFiles = os.listdir(sFileDirectory)
# filter list
aReturnList = []
for file in allFiles:
splitString = str(file).rpartition(sFileExtension)
if not splitString[1] == '' and splitString[2] == '':
aReturnList.append(splitString[0])
return aReturnList
def findHighestIndex(aNames, sBaseName):
import re
iHighestValue = 0
for n in aNames:
if n.find(sBaseName) == 0:
suffix = n.partition(sBaseName)[2]
#suffix = sBaseName
#print 'suffix ' + suffix
if re.match('^[0-9]*$', suffix):
iIndex = int(suffix)
if iIndex > iHighestValue:
iHighestValue = iIndex
print 'highest value found for base name {} is: {}'.format(sBaseName, str(iHighestValue))
return iHighestValue
def checkNamespaceIndex(sBaseName):
    # scan the scene's top-level namespaces and return the next free index for
    # the given base name (e.g. returns 3 if 'module1' and 'module2' exist)
    mc.namespace(setNamespace=':')
    namespaces = mc.namespaceInfo(listOnlyNamespaces=True)
    for i in range(len(namespaces)):
        if namespaces[i].find('__') != -1:
            namespaces[i] = namespaces[i].partition('__')[2]
    return findHighestIndex(namespaces, sBaseName) + 1
def stripLeadingNamespace(sNode):
# returns [0] the first namespace in the node
# [1] everything after the first ":"
if str(sNode).find(':') == -1:
return None
splitString = str(sNode).partition(':')
return [splitString[0], splitString[2]]
def stripAllNamespaces(sNode):
# returns [0] all the namespaces in the node. Everything before the last ":"
# [1] the last name. What's after the last ":"
if str(sNode).find(':') == -1:
return None
splitString = str(sNode).rpartition(':')
return [splitString[0], splitString[2]]
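# Illustrative examples (not part of the original module) of the two helpers
# above, using a hypothetical node name:
#   stripLeadingNamespace('module__inst1:joints:root_jnt')
#       -> ['module__inst1', 'joints:root_jnt']
#   stripAllNamespaces('module__inst1:joints:root_jnt')
#       -> ['module__inst1:joints', 'root_jnt']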
def basicStretchyIK(sRootJoint,
sEndJoint,
sContainer=None,
bMinLengthLock=True,
poleVectorObj=None,
sScaleCorrectionAttr=None):
# calculate the length between the joints passed in
from math import fabs
containedNodes = []
fTotalOrigLen = 0.0
done = False
parent = sRootJoint
childJoints = []
    # loop through the joints below (but not including) the root joint, adding up their tx values
while not done:
children = mc.listRelatives(parent, children=True)
children = mc.ls(children, type='joint')
if len(children) == 0:
done = True
else:
child = children[0]
childJoints.append(child)
fTotalOrigLen += fabs(mc.getAttr(child+'.tx'))
parent = child
if child == sEndJoint:
done = True
# create RP Ik chain
ikNodes = mc.ikHandle(startJoint=sRootJoint, endEffector=sEndJoint, sol='ikRPsolver', n=sRootJoint+'_ikHandle')
ikNodes[1] = mc.rename(ikNodes[1],sRootJoint+'_ikEffector')
ikEffector = ikNodes[1]
ikHandle = ikNodes[0]
mc.setAttr(ikHandle+'.v', 0)
containedNodes.extend(ikNodes)
# create the pole vector
if poleVectorObj == None:
poleVectorObj = mc.spaceLocator(n=ikHandle+'_pvLoc')[0]
containedNodes.append(poleVectorObj)
mc.xform(poleVectorObj, ws=True, absolute=True, t=mc.xform(sRootJoint, q=True, ws=True, t=True))
mc.xform(poleVectorObj, ws=True, r=True, t=[0.0,1.0,0.0])
#mc.setAttr(poleVectorObj+'.v', 0)
pvConstraint = mc.poleVectorConstraint(poleVectorObj, ikHandle)[0]
containedNodes.append(pvConstraint)
# create the start and end locators
rootLoc = mc.spaceLocator(n=sRootJoint+'_rootPosLoc')[0]
rootLoc_pCon = mc.pointConstraint(sRootJoint, rootLoc, mo=False, n=rootLoc+'_pConst')[0]
endLoc = mc.spaceLocator(n=sEndJoint+'_endPosLoc')[0]
mc.xform(endLoc, ws=True, absolute=True, t=mc.xform(ikHandle, q=True, ws=True, t=True))
ikHandle_pCon = mc.pointConstraint(endLoc, ikHandle, mo=False, n=ikHandle+'_pConst')[0]
containedNodes.extend([rootLoc, endLoc, rootLoc_pCon, ikHandle_pCon])
mc.setAttr(rootLoc+'.v', 0)
mc.setAttr(endLoc+'.v', 0)
# find distance between the locators
rootLoc_noNs = stripAllNamespaces(rootLoc)[1]
endLoc_noNs = stripAllNamespaces(endLoc)[1]
moduleNamespace = stripAllNamespaces(sRootJoint)[0]
distNode = mc.shadingNode('distanceBetween', asUtility=True,
n=moduleNamespace+':distBetween_'+rootLoc_noNs+'_'+endLoc_noNs)
mc.connectAttr(rootLoc+'Shape.worldPosition[0]', distNode+'.point1')
mc.connectAttr(endLoc+'Shape.worldPosition[0]', distNode+'.point2')
containedNodes.append(distNode)
scaleAttr = distNode+'.distance'
# divide distance by total original length = scale factor
scaleFactorMd = mc.shadingNode('multiplyDivide', asUtility=True, n=ikHandle+'_scaleFactor')
containedNodes.append(scaleFactorMd)
mc.setAttr(scaleFactorMd+'.operation', 2) # divide
mc.connectAttr(scaleAttr, scaleFactorMd+'.input1X')
mc.setAttr(scaleFactorMd+'.input2X', fTotalOrigLen)
translationDriver = scaleFactorMd + '.outputX'
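# Stretch math: outputX = distance(rootLoc, endLoc) / fTotalOrigLen, so each child
# joint's translateX below becomes originalTx * outputX and the chain scales uniformly.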
# connect joints to stretchy calculations
for joint in childJoints:
multNode = mc.shadingNode('multiplyDivide', asUtility=True, n=joint+'_multScale')
containedNodes.append(multNode)
mc.setAttr(multNode+'.input1X', mc.getAttr(joint+'.tx'))
mc.connectAttr(translationDriver, multNode+'.input2X')
mc.connectAttr(multNode+'.outputX', joint+'.tx')
# add everything to the container and build return dict
if sContainer:
addNodeToContainer(sContainer, containedNodes, ihb=True)
dReturn = {}
dReturn['ikHandle'] = ikHandle
dReturn['ikEffector'] = ikEffector
dReturn['rootLoc'] = rootLoc
dReturn['endLoc'] = endLoc
dReturn['pvObj'] = poleVectorObj
dReturn['ikHandle_pCon'] = ikHandle_pCon
dReturn['rootLoc_pCon'] = rootLoc_pCon
return dReturn
def forceSceneUpdate():
mc.setToolTo('moveSuperContext')
nodes = mc.ls()
for node in nodes:
mc.select(node, replace=True)
mc.select(cl=True)
mc.setToolTo('selectSuperContext')
def addNodeToContainer(sContainer, sNodesIn, includeShapes=False, ihb=False, force=False):
if isinstance(sNodesIn, list):
nodes = list(sNodesIn)
else:
nodes = [sNodesIn]
conversionNodes = []
for node in nodes:
node_conversionNodes = mc.listConnections(node, s=True, d=True) or []
node_conversionNodes = mc.ls(node_conversionNodes, type='unitConversion')
conversionNodes.extend(node_conversionNodes)
nodes.extend(conversionNodes)
mc.container(sContainer, edit=True, addNode=nodes, includeHierarchyBelow=ihb, includeShapes=includeShapes, force=force)
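# Illustrative call (hypothetical names): add an ikHandle plus its unit-conversion
# nodes to a module's asset container, including the hierarchy below it:
#   addNodeToContainer('arm_module_container', ['arm_ikHandle'], ihb=True)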
def doesBpUserSpecifiedNameExist(sName):
mc.namespace(setNamespace=':')
namespaces = mc.namespaceInfo(listOnlyNamespaces=True)
names = []
for namespace in namespaces:
if namespace.find('__') != -1:
names.append(namespace.partition('__')[2])
return sName in names
def Rp_2segment_stretchy_IK(rootJoint, hingeJoint, endJoint, container=None, scaleCorrectionAttribute=None):
''' Function that takes 3 joints and creates a RPsolver IK system on them that is both stretchy and
stays on a single plane.
'''
moduleNamespaceInfo = stripAllNamespaces(rootJoint)
moduleNamespace = ''
if moduleNamespaceInfo != None:
moduleNamespace = moduleNamespaceInfo[0]
rootLocation = mc.xform(rootJoint, q=True, ws=True, t=True)
elbowLocation = mc.xform(hingeJoint, q=True, ws=True, t=True)
endLocation = mc.xform(endJoint, q=True, ws=True, t=True)
ikNodes = mc.ikHandle(sj=rootJoint, ee=endJoint, n=rootJoint+'_ikHandle', solver='ikRPsolver')
ikNodes[1] = mc.rename(ikNodes[1], rootJoint+'_ikEffector')
ikEffector = ikNodes[1]
ikHandle = ikNodes[0]
mc.setAttr(ikHandle+'.v', 0)
rootLoc = mc.spaceLocator(n=rootJoint+'_positionLoc')[0]
mc.xform(rootLoc, ws=True, absolute=True, translation=rootLocation)
mc.parent(rootJoint, rootLoc, absolute=True)
endLoc = mc.spaceLocator(n=ikHandle+'_positionLoc')[0]
mc.xform(endLoc, ws=True, absolute=True, translation=endLocation)
mc.parent(ikHandle, endLoc, absolute=True)
elbowLoc = mc.spaceLocator(n=hingeJoint+'_positionLoc')[0]
mc.xform(elbowLoc, ws=True, absolute=True, translation=elbowLocation)
elbowLocConstraint = mc.poleVectorConstraint(elbowLoc, ikHandle)[0]
# setup stretchyness
from math import fabs # floating point absolute
utilityNodes = []
for locators in ((rootLoc, elbowLoc, hingeJoint), (elbowLoc, endLoc, endJoint)):
startLocNamespaceInfo = stripAllNamespaces(locators[0])
startLocWithoutNamespace = ''
if startLocNamespaceInfo != None:
startLocWithoutNamespace = startLocNamespaceInfo[1]
endLocNamespaceInfo = stripAllNamespaces(locators[1])
endLocWithoutNamespace = ''
if endLocNamespaceInfo != None:
endLocWithoutNamespace = endLocNamespaceInfo[1]
startLocShape = locators[0]+'Shape'
endLocShape = locators[1]+'Shape'
# distance between
distNode = mc.shadingNode('distanceBetween', asUtility=True,
n=moduleNamespace+':distBetween_'+startLocWithoutNamespace+'_'+endLocWithoutNamespace)
mc.connectAttr(startLocShape+'.worldPosition[0]', distNode+'.point1')
mc.connectAttr(endLocShape+'.worldPosition[0]', distNode+'.point2')
utilityNodes.append(distNode)
# scale factor
scaleFactor = mc.shadingNode('multiplyDivide', asUtility=True,
n=distNode+'_scaleFactor')
mc.setAttr(scaleFactor+'.operation', 2) # divide
originalLength = mc.getAttr(locators[2]+'.tx')
mc.connectAttr(distNode+'.distance', scaleFactor+'.input1X')
mc.setAttr(scaleFactor+'.input2X', originalLength)
utilityNodes.append(scaleFactor)
translationDriver = scaleFactor + '.outputX'
# the scale factor is multiplied by abs(originalLength) and that drives the end joint's translateX
translateX = mc.shadingNode('multiplyDivide', asUtility=True,
n=distNode+'_translationValue')
mc.setAttr(translateX+'.input1X', fabs(originalLength))
mc.connectAttr(translationDriver, translateX+'.input2X')
mc.connectAttr(translateX+'.outputX', locators[2]+'.tx')
utilityNodes.append(translateX)
if container != None:
containedNodes = list(utilityNodes)
containedNodes.extend(ikNodes)
containedNodes.extend( [rootLoc, elbowLoc, endLoc])
containedNodes.append(elbowLocConstraint)
# addNodeToContainer(container, containedNodes, ihb=True)
return (rootLoc, elbowLoc, endLoc, utilityNodes)
def findJointChain(rootJoint):
joints = [rootJoint]
parent = rootJoint
done = False
while not done:
children = mc.listRelatives(parent, children=1) or []
children = mc.ls(children, type='joint')
if not children:
done = True
else:
child = children[0]
joints.append(child)
parent = child
return joints
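# Illustrative result (hypothetical joint names):
#   findJointChain('module:root_jnt') -> ['module:root_jnt', 'module:mid_jnt', 'module:end_jnt']
# Note that only the first joint child is followed at each level, so branching chains are not traversed.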
|
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from .common import BaseTest
class ReplInstance(BaseTest):
def test_describe_augment_no_tags(self):
session_factory = self.replay_flight_data(
"test_dms_repl_instance_describe_sans_tags"
)
p = self.load_policy(
{"name": "dms-replinstance", "resource": "dms-instance"},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
resources[0]["ReplicationInstanceIdentifier"], "replication-instance-1"
)
def test_describe_get_resources(self):
session_factory = self.replay_flight_data("test_dms_repl_instance_delete")
p = self.load_policy(
{"name": "dms-replinstance", "resource": "dms-instance"},
session_factory=session_factory,
)
resources = p.resource_manager.get_resources(["replication-instance-1"])
self.assertEqual(len(resources), 1)
self.assertEqual(
resources[0]["ReplicationInstanceIdentifier"], "replication-instance-1"
)
def test_delete(self):
session_factory = self.replay_flight_data("test_dms_repl_instance_delete")
client = session_factory().client("dms")
p = self.load_policy(
{
"name": "dms-replinstance",
"resource": "dms-instance",
"actions": ["delete"],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
resources[0]["ReplicationInstanceIdentifier"], "replication-instance-1"
)
instances = client.describe_replication_instances().get("ReplicationInstances")
self.assertEqual(instances[0]["ReplicationInstanceStatus"], "deleting")
def test_modify(self):
session_factory = self.replay_flight_data("test_dms_repl_instance_modify")
client = session_factory().client("dms")
p = self.load_policy(
{
"name": "dms-replinstance",
"resource": "dms-instance",
"filters": [
{"AutoMinorVersionUpgrade": False},
{"ReplicationInstanceClass": "dms.t2.small"},
],
"actions": [
{
"type": "modify-instance",
"ApplyImmediately": True,
"AutoMinorVersionUpgrade": True,
"ReplicationInstanceClass": "dms.t2.medium",
"PreferredMaintenanceWindow": "Mon:23:00-Mon:23:59",
}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["ReplicationInstanceIdentifier"], "rep-inst-1")
ri = client.describe_replication_instances().get("ReplicationInstances")
self.assertEqual(
[
ri[0]["AutoMinorVersionUpgrade"],
ri[0]["PendingModifiedValues"]["ReplicationInstanceClass"],
ri[0]["PreferredMaintenanceWindow"],
],
[True, "dms.t2.medium", "mon:23:00-mon:23:59"],
)
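# The inline policy above corresponds to the following YAML, which is how the same
# policy would normally be written in a custodian policy file (illustrative only):
#
#   policies:
#     - name: dms-replinstance
#       resource: dms-instance
#       filters:
#         - AutoMinorVersionUpgrade: false
#         - ReplicationInstanceClass: dms.t2.small
#       actions:
#         - type: modify-instance
#           ApplyImmediately: true
#           AutoMinorVersionUpgrade: true
#           ReplicationInstanceClass: dms.t2.medium
#           PreferredMaintenanceWindow: "Mon:23:00-Mon:23:59"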
class ReplicationInstanceTagging(BaseTest):
def test_replication_instance_tag(self):
session_factory = self.replay_flight_data("test_dms_tag")
p = self.load_policy(
{
"name": "tag-dms-instance",
"resource": "dms-instance",
"filters": [{"tag:RequiredTag": "absent"}],
"actions": [
{"type": "tag", "key": "RequiredTag", "value": "RequiredValue"}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory(region="us-east-1").client("dms")
tag_list = client.list_tags_for_resource(
ResourceArn=resources[0]["ReplicationInstanceArn"]
)[
"TagList"
]
tag_value = [t["Value"] for t in tag_list if t["Key"] == "RequiredTag"]
self.assertEqual(tag_value[0], "RequiredValue")
def test_remove_replication_instance_tag(self):
session_factory = self.replay_flight_data("test_dms_tag_remove")
p = self.load_policy(
{
"name": "remove-dms-tag",
"resource": "dms-instance",
"filters": [{"tag:RequiredTag": "RequiredValue"}],
"actions": [{"type": "remove-tag", "tags": ["RequiredTag"]}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory(region="us-east-1").client("dms")
tag_list = client.list_tags_for_resource(
ResourceArn=resources[0]["ReplicationInstanceArn"]
)[
"TagList"
]
self.assertFalse([t for t in tag_list if t["Key"] == "RequiredTag"])
def test_replication_instance_markforop(self):
session_factory = self.replay_flight_data("test_dms_mark_for_op")
p = self.load_policy(
{
"name": "dms-instance-markforop",
"resource": "dms-instance",
"filters": [{"tag:RequiredTag": "absent"}],
"actions": [
{
"type": "mark-for-op",
"tag": "custodian_cleanup",
"op": "delete",
"days": 2,
}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory(region="us-east-1").client("dms")
tag_list = client.list_tags_for_resource(
ResourceArn=resources[0]["ReplicationInstanceArn"]
)[
"TagList"
]
self.assertTrue(
[t["Value"] for t in tag_list if t["Key"] == "custodian_cleanup"]
)
def test_replication_instance_markedforop(self):
session_factory = self.replay_flight_data("test_dms_marked_for_op")
p = self.load_policy(
{
"name": "dms-instance-markedforop",
"resource": "dms-instance",
"filters": [
{
"type": "marked-for-op",
"tag": "custodian_cleanup",
"op": "delete",
"skew": 2,
}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(
resources[0]["ReplicationInstanceIdentifier"], "replication-instance-1"
)
class DmsEndpointTests(BaseTest):
def test_resource_query(self):
session_factory = self.replay_flight_data("test_dms_resource_query")
p = self.load_policy(
{"name": "dms-endpoint-query", "resource": "dms-endpoint"},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_endpoint_modify_sql(self):
session_factory = self.replay_flight_data("test_dms_modify_endpoint_sql")
p = self.load_policy(
{
"name": "dms-sql-ssl",
"resource": "dms-endpoint",
"filters": [
{"EndpointIdentifier": "c7n-dms-sql-ep"},
{"ServerName": "c7n-sql-db"},
],
"actions": [
{
"type": "modify-endpoint",
"Port": 3305,
"SslMode": "require",
"Username": "admin",
"Password": "sqlpassword",
"ServerName": "c7n-sql-db-02",
"DatabaseName": "c7n-db-02",
}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = session_factory(region="us-east-1").client("dms")
ep = client.describe_endpoints()["Endpoints"][0]
self.assertEqual(
[
ep["Port"],
ep["SslMode"],
ep["Username"],
ep["ServerName"],
ep["DatabaseName"],
],
[3305, "require", "admin", "c7n-sql-db-02", "c7n-db-02"],
)
def test_endpoint_tag_filter(self):
session_factory = self.replay_flight_data("test_dms_tag_filter")
p = self.load_policy(
{
"name": "dms-sql-ssl",
"resource": "dms-endpoint",
"filters": [
{"tag:Owner": "pikachu"},
]
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['Tags'], [{'Key': 'Owner', 'Value': 'pikachu'}])
def test_dms_endpoint_delete(self):
session_factory = self.replay_flight_data("test_dms_endpoint_delete")
policy = {
"name": "dms-delete-endpoint",
"resource": "dms-endpoint",
"filters": [{"EndpointIdentifier": "c7n-test"}],
"actions": ["delete"],
}
policy = self.load_policy(policy, session_factory=session_factory)
resources = policy.run()
self.assertEqual(len(resources), 1)
client = session_factory(region="us-east-1").client("dms")
ep = client.describe_endpoints(
Filters=[{"Name": "endpoint-id", "Values": ["c7n-test"]}]
)[
"Endpoints"
][
0
]
self.assertEqual(
[ep["EndpointIdentifier"], ep["Status"]], ["c7n-test", "deleting"]
)
|
|
# Copyright (c) 2013 dotCloud, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import socket
import mock
from oslo.serialization import jsonutils
from oslo.utils import units
from nova.compute import task_states
from nova import context
from nova import exception
from nova import test
import nova.tests.image.fake
from nova.tests import matchers
from nova.tests import utils
from nova.tests.virt.test_virt_drivers import _VirtDriverTestCase
from novadocker.tests.virt.docker import mock_client
import novadocker.virt.docker
from novadocker.virt.docker import hostinfo
from novadocker.virt.docker import network
class DockerDriverTestCase(_VirtDriverTestCase, test.TestCase):
driver_module = 'novadocker.virt.docker.DockerDriver'
def setUp(self):
super(DockerDriverTestCase, self).setUp()
self.mock_client = mock_client.MockClient()
self.stubs.Set(novadocker.virt.docker.driver.DockerDriver, 'docker',
self.mock_client)
def fake_plug_vifs(self, instance, network_info):
return
self.stubs.Set(novadocker.virt.docker.driver.DockerDriver,
'plug_vifs',
fake_plug_vifs)
def fake_attach_vifs(self, instance, network_info):
return
self.stubs.Set(novadocker.virt.docker.driver.DockerDriver,
'_attach_vifs',
fake_attach_vifs)
# Note: using mock.patch.object on the class throws
# errors in test_virt_drivers
def fake_teardown_network(container_id):
return
self.stubs.Set(network, 'teardown_network', fake_teardown_network)
self.context = context.RequestContext('fake_user', 'fake_project')
self.connection.init_host(None)
def test_driver_capabilities(self):
self.assertFalse(self.connection.capabilities['has_imagecache'])
self.assertFalse(self.connection.capabilities['supports_recreate'])
# NOTE(bcwaldon): This exists only because _get_running_instance on the
# base class will not let us set a custom disk/container_format.
def _get_running_instance(self, obj=False, image_name=None, flavor=None):
instance_ref = utils.get_test_instance(obj=obj, flavor=flavor)
network_info = utils.get_test_network_info()
network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = (
'1.1.1.1')
image_info = utils.get_test_image_info(None, instance_ref)
image_info['disk_format'] = 'raw'
image_info['container_format'] = 'docker'
if image_name:
image_info['name'] = image_name
self.connection.spawn(self.ctxt, jsonutils.to_primitive(instance_ref),
image_info, [], 'herp',
network_info=network_info)
return instance_ref, network_info
def test_get_host_stats(self):
self.mox.StubOutWithMock(socket, 'gethostname')
socket.gethostname().AndReturn('foo')
socket.gethostname().AndReturn('bar')
self.mox.ReplayAll()
self.assertEqual('foo',
self.connection.get_host_stats()['host_hostname'])
self.assertEqual('foo',
self.connection.get_host_stats()['host_hostname'])
def test_get_available_resource(self):
memory = {
'total': 4 * units.Mi,
'used': 1 * units.Mi
}
disk = {
'total': 50 * units.Gi,
'available': 25 * units.Gi,
'used': 25 * units.Gi
}
# create the mocks
with contextlib.nested(
mock.patch.object(hostinfo, 'get_memory_usage',
return_value=memory),
mock.patch.object(hostinfo, 'get_disk_usage',
return_value=disk)
) as (
get_memory_usage,
get_disk_usage
):
# run the code
stats = self.connection.get_available_resource(nodename='test')
# make our assertions
get_memory_usage.assert_called_once_with()
get_disk_usage.assert_called_once_with()
expected_stats = {
'vcpus': 1,
'vcpus_used': 0,
'memory_mb': 4,
'memory_mb_used': 1,
'local_gb': 50L,
'local_gb_used': 25L,
'disk_available_least': 25L,
'hypervisor_type': 'docker',
'hypervisor_version': 1000,
'hypervisor_hostname': 'test',
'cpu_info': '?',
'supported_instances': ('[["i686", "docker", "lxc"],'
' ["x86_64", "docker", "lxc"]]')
}
self.assertEqual(expected_stats, stats)
def test_create_container(self, image_info=None, instance_href=None):
if instance_href is None:
instance_href = utils.get_test_instance()
if image_info is None:
image_info = utils.get_test_image_info(None, instance_href)
image_info['disk_format'] = 'raw'
image_info['container_format'] = 'docker'
self.connection.spawn(self.context, instance_href, image_info,
'fake_files', 'fake_password')
self._assert_cpu_shares(instance_href)
self.assertEqual(self.mock_client.name, "nova-{0}".format(
instance_href['uuid']))
def test_create_container_vcpus_2(self, image_info=None):
flavor = utils.get_test_flavor(options={
'name': 'vcpu_2',
'flavorid': 'vcpu_2',
'vcpus': 2
})
instance_href = utils.get_test_instance(flavor=flavor)
if image_info is None:
image_info = utils.get_test_image_info(None, instance_href)
image_info['disk_format'] = 'raw'
image_info['container_format'] = 'docker'
self.connection.spawn(self.context, instance_href, image_info,
'fake_files', 'fake_password')
self._assert_cpu_shares(instance_href, vcpus=2)
self.assertEqual(self.mock_client.name, "nova-{0}".format(
instance_href['uuid']))
def _assert_cpu_shares(self, instance_href, vcpus=4):
container_id = self.connection._find_container_by_name(
instance_href['name']).get('id')
container_info = self.connection.docker.inspect_container(container_id)
self.assertEqual(vcpus * 1024, container_info['Config']['CpuShares'])
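# The expected value mirrors how the driver sizes containers: CpuShares is set to
# vcpus * 1024 (1024 being Docker's default per-CPU share weight), so a 2-vcpu
# flavor should yield 2048 shares in the container config.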
@mock.patch('novadocker.virt.docker.driver.DockerDriver.plug_vifs',
side_effect=Exception)
def test_create_container_net_setup_fails(self, mock_plug_vifs):
self.assertRaises(exception.InstanceDeployFailure,
self.test_create_container)
self.assertEqual(0, len(self.mock_client.list_containers()))
def test_create_container_wrong_image(self):
instance_href = utils.get_test_instance()
image_info = utils.get_test_image_info(None, instance_href)
image_info['disk_format'] = 'raw'
image_info['container_format'] = 'invalid_format'
self.assertRaises(exception.InstanceDeployFailure,
self.test_create_container,
image_info, instance_href)
@mock.patch.object(novadocker.virt.docker.driver.DockerDriver,
'cleanup')
@mock.patch.object(novadocker.virt.docker.driver.DockerDriver,
'_find_container_by_name',
return_value={'id': 'fake_id'})
def test_destroy_container(self, byname_mock, cleanup_mock):
instance = utils.get_test_instance()
self.connection.destroy(self.context, instance, 'fake_networkinfo')
cleanup_mock.assert_called_with(self.context, instance,
'fake_networkinfo', None, True)
@mock.patch.object(network, 'teardown_network')
@mock.patch.object(novadocker.virt.docker.driver.DockerDriver,
'unplug_vifs')
@mock.patch.object(novadocker.virt.docker.driver.DockerDriver,
'_find_container_by_name',
return_value={'id': 'fake_id'})
def test_cleanup_container(self, byname_mock, unplug_mock, teardown_mock):
instance = utils.get_test_instance()
self.connection.cleanup(self.context, instance, 'fake_networkinfo')
byname_mock.assert_called_with(instance['name'])
teardown_mock.assert_called_with('fake_id')
def test_soft_delete_restore_container(self):
instance_href = utils.get_test_instance()
image_info = utils.get_test_image_info(None, instance_href)
image_info['disk_format'] = 'raw'
image_info['container_format'] = 'docker'
self.connection.spawn(self.context, instance_href, image_info,
'fake_files', 'fake_password')
container_id = self.connection._find_container_by_name(
instance_href['name']).get('id')
self.connection.soft_delete(instance_href)
info = self.connection.docker.inspect_container(container_id)
self.assertFalse(info['State']['Running'])
self.connection.restore(instance_href)
info = self.connection.docker.inspect_container(container_id)
self.assertTrue(info['State']['Running'])
def test_get_memory_limit_from_sys_meta_in_object(self):
instance = utils.get_test_instance(obj=True)
limit = self.connection._get_memory_limit_bytes(instance)
self.assertEqual(2048 * units.Mi, limit)
def test_get_memory_limit_from_sys_meta_in_db_instance(self):
instance = utils.get_test_instance(obj=False)
limit = self.connection._get_memory_limit_bytes(instance)
self.assertEqual(2048 * units.Mi, limit)
def test_list_instances(self):
instance_href = utils.get_test_instance()
image_info = utils.get_test_image_info(None, instance_href)
image_info['disk_format'] = 'raw'
image_info['container_format'] = 'docker'
self.connection.spawn(self.context, instance_href, image_info,
'fake_files', 'fake_password')
instances = self.connection.list_instances()
self.assertIn(instance_href.name, instances)
def test_list_instances_none(self):
instances = self.connection.list_instances()
self.assertIsInstance(instances, list)
self.assertFalse(instances)
def test_list_instances_no_inspect_race(self):
"""Assures containers that cannot be inspected are not listed."""
instance_href = utils.get_test_instance()
image_info = utils.get_test_image_info(None, instance_href)
image_info['disk_format'] = 'raw'
image_info['container_format'] = 'docker'
self.connection.spawn(self.context, instance_href, image_info,
'fake_files', 'fake_password')
with mock.patch('novadocker.tests.virt.docker.mock_client.'
'MockClient.inspect_container',
return_value={}):
instances = self.connection.list_instances()
self.assertFalse(instances)
def test_find_container_pid(self):
driver = novadocker.virt.docker.driver.DockerDriver(None)
with mock.patch.object(driver.docker,
"inspect_container") as inspect_container:
inspect_container.return_value = {'State': {'Pid': '12345'}}
pid = driver._find_container_pid("fake_container_id")
self.assertEqual(pid, '12345')
@mock.patch.object(novadocker.tests.virt.docker.mock_client.MockClient,
'load_repository')
@mock.patch.object(novadocker.tests.virt.docker.mock_client.MockClient,
'get_image')
@mock.patch.object(novadocker.virt.docker.driver.DockerDriver,
'_find_container_by_name',
return_value={'id': 'fake_id'})
def test_snapshot(self, byname_mock, getimage_mock, loadrepo_mock):
# Use mix-case to test that mixed-case image names succeed.
snapshot_name = 'tEsT-SnAp'
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
instance_ref = utils.get_test_instance()
properties = {'instance_id': instance_ref['id'],
'user_id': str(self.context.user_id)}
sent_meta = {'name': snapshot_name, 'is_public': False,
'status': 'creating', 'properties': properties}
# Because the docker driver doesn't push directly into Glance, we
# cannot check that the images are correctly configured in the
# fake image service, but we can ensure that naming and other
# conventions are accurate.
image_service = nova.tests.image.fake.FakeImageService()
recv_meta = image_service.create(self.context, sent_meta)
self.connection.snapshot(self.context, instance_ref, recv_meta['id'],
func_call_matcher.call)
snapshot = image_service.show(self.context, recv_meta['id'])
# self.assertIsNone(func_call_matcher.match())
self.assertEqual(snapshot['properties']['image_state'], 'available')
self.assertEqual(snapshot['status'], 'active')
self.assertEqual(snapshot['disk_format'], 'raw')
self.assertEqual(snapshot['container_format'], 'docker')
self.assertEqual(snapshot['name'], snapshot_name)
def test_get_image_name(self):
instance_ref = utils.get_test_instance()
image_info = utils.get_test_image_info(None, instance_ref)
image_info['container_format'] = 'docker'
image_info['name'] = 'MiXeDcAsE-image'
repo = self.connection._get_image_name(self.context,
instance_ref, image_info)
# image_name = repo.split("/")[1]
self.assertEqual(image_info['name'], repo)
def test_get_host_uptime_returns_exec_result(self):
result = '4294967296'
with mock.patch('nova.utils.execute',
return_value=(result, None)):
uptime = self.connection.get_host_uptime(None)
self.assertEqual(result, uptime)
|
|
import re
from collections import defaultdict
from pdfminer.layout import LTComponent, LTImage
from pt_law_parser.auxiliar import eq, middle_x
class Paragraph(object):
@staticmethod
def sanitize(text):
# hyphens in words are getting a space from PDFminer. Remove it.
return re.sub(ur' (\-\w+?)', ur'\1', text, flags=re.U)
def __init__(self, text):
assert(text[-1] != '\n')
self._text = self.sanitize(text.strip())
def merge(self, other_line):
text = other_line.text
if self.text[-1] == '-':
self._text = self._text[:-1]
# no hyphen: make sure a space separates the two merged lines
elif text[0] != ' ' and self._text[-1] != ' ':
text = ' ' + text
self._text += self.sanitize(text)
@property
def text(self):
return self._text
def as_html(self):
return '<p>%s</p>' % self.text
class Header(Paragraph):
def as_html(self):
return '<h1>%s</h1>' % self.text
class Table(LTComponent):
"""
A table has the following interface:
1. receives a network and converts it to a set of cells (__init__)
2. receives items and maps them to the correct cells (add)
3. represents itself in HTML (as_html)
"""
class Element(object):
"""
Represents an element of an HTML table. It has a colspan and rowspan.
"""
def __init__(self, cell):
self.cell = cell
self.row = None
self.column = None
self.colspan = 0
self.rowspan = 0
self._lines = []
self._min_x = 0
self._min_y = 0
@property
def lines(self):
return self._lines
def add(self, row, column):
if self.row is None:
self.row = row
self.column = column
else:
if self.row == row:
self.colspan += 1
if self.column == column:
self.rowspan += 1
def add_line(self, item, bbox):
"""
Adds a line to the cell assuming a bounding box bbox.
"""
# todo: this code is similar to _parse_line. Common implementation?
def remove_dots(text):
return text.replace(' .', '')
text = remove_dots(item.get_text())
if text == '.':
return
line = Paragraph(text)
if not self._lines:
# cell is empty
self._lines.append(line)
self._min_x = item.x0
else:
middle_x_cell = middle_x(bbox)
middle_x_line = middle_x(item.bbox)
is_centered = eq(middle_x_cell, middle_x_line, 1)
if is_centered:
if self._min_y - item.y1 < 0:
self._lines[-1].merge(line)
else:
self._lines.append(line)
elif eq(self._min_x, item.x0, 1):
self._lines.append(line)
else:
self._lines[-1].merge(line)
self._min_y = item.y0
class EmptyTableError(Exception):
"""
Raised by constructor when construction fails because table has no
cells. This means that the constructed network does not constitute a
table and should be ignored.
"""
pass
def __init__(self, network):
if len(network) <= 2:
raise self.EmptyTableError
# construct rows and columns borders by distinct x and y's.
self._rows_borders = sorted(list(
set(point.y for point in network.points)))
self._columns_borders = sorted(list(
set(point.x for point in network.points)))
LTComponent.__init__(self, (self._columns_borders[0],
self._rows_borders[0],
self._columns_borders[-1],
self._rows_borders[-1]))
self._cells = self._create_cells(network)
self._elements = self._build_elements(self._cells)
@staticmethod
def _create_cells(network):
"""
Creates cells from the network and returns them
as LTComponents.
"""
squares_taken = defaultdict(set)
cells = set()
def city_distance(point, point_prime):
return abs(point.x - point_prime.x) + abs(point.y - point_prime.y)
def is_perpendicular(v1_x, v1_y, v2_x, v2_y):
return v1_x*v2_x + v1_y*v2_y == 0
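# Cell-detection sketch: for each grid point, pair it with a neighbour l1 and a
# perpendicular neighbour l2 (both tried nearest-first); any point linked to both
# l1 and l2 closes a rectangle with the original point. The smallest such rectangle
# becomes a cell, and each corner's quadrant is recorded in squares_taken so the
# same cell is not created twice.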
for point in sorted(network, key=lambda p: (p.x, p.y)):
for l1 in sorted(network.links[point],
key=lambda p: city_distance(p, point)):
valid_links = [
link for link in network.links[point] if link != l1 and
is_perpendicular(link.x - point.x, link.y - point.y,
l1.x - point.x, l1.y - point.y)]
for l2 in sorted(valid_links,
key=lambda p: city_distance(p, point)):
inter = network.links[l2].intersection(network.links[l1])
intersection = list(inter)
# remove initial point
intersection.remove(point)
if len(intersection) == 0:
continue
# sort by areas: smallest area first
area = lambda p: (p.x - point.x)*(p.y - point.y)
intersection.sort(key=area)
# square is formed by [point, l1, l2, last_point], in this
# order.
points = [point, l1, l2, intersection[0]]
# compute middle position of the square
middle_x = sum(point.x for point in points)/4.
middle_y = sum(point.y for point in points)/4.
# check if any point already has one of its squares
# (at most 4) used.
is_taken = False
square = [None] * 4
for i in range(4):
# compute the position of the point in relation to the
# middle corresponding to one of the following squares
# position: [(1,1), (-1,1), (1,-1), (-1,-1)]
vx = middle_x - points[i].x
vy = middle_y - points[i].y
square[i] = (int(vx/abs(vx)), int(vy/abs(vy)))
belongs = square[i] in squares_taken[points[i]]
is_taken = is_taken or belongs
if not is_taken:
cell = LTComponent((point.x, point.y,
intersection[0].x, intersection[0].y))
cells.add(cell)
for i in range(4):
squares_taken[points[i]].add(square[i])
break
return cells
def _build_elements(self, cells):
"""
Converts the cells into elements.
"""
elements = []
for cell in cells:
elements.append(self.Element(cell))
for row in reversed(self._rows_borders[:-1]):
for column in self._columns_borders[:-1]:
for cell_index, cell in enumerate(cells):
if cell.y0 < row + 0.1 < cell.y1 and\
cell.x0 < column + 0.1 < cell.x1:
elements[cell_index].add(row, column)
return sorted(elements, key=lambda e: (e.cell.x0, e.cell.y0))
@property
def cells(self):
return self._cells
def add(self, item):
"""
Adds a text item to the table, inserting it into the correct cell.
"""
for element in self._elements:
if element.cell.is_hoverlap(item) and element.cell.is_voverlap(item):
element.add_line(item, element.cell.bbox)
break
def as_html(self):
string = ''
for row in reversed(self._rows_borders[:-1]):
string += '<tr>\n'
for column in self._columns_borders[:-1]:
for element in self._elements:
if element.column == column and element.row == row:
lines = element.lines
colspan = element.colspan
rowspan = element.rowspan
text = '\n'.join(line.as_html() for line in lines)
if colspan:
colspan = 'colspan="%d"' % (colspan + 1)
else:
colspan = ''
if rowspan:
rowspan = 'rowspan="%d"' % (rowspan + 1)
else:
rowspan = ''
attributes = ''
if rowspan or colspan:
attributes = ' '
if rowspan and colspan:
attributes += rowspan + ' ' + colspan
else:
attributes += rowspan + colspan
string += '<td%s>%s</td>\n' % (attributes, text)
string += '</tr>\n'
return '<table>\n%s</table>' % string
class BlockquoteStart(object):
def as_html(self):
return '<blockquote>'
class BlockquoteEnd(object):
def as_html(self):
return '</blockquote>'
class SimpleImage(LTImage):
def __init__(self, ltimage):
assert(isinstance(ltimage, LTImage))
LTComponent.__init__(self, ltimage.bbox)
self._name = ltimage.name
self._stream = ltimage.stream
def as_html(self):
return '<p>(Ver imagem no documento original.)</p>'
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from ebstall.deployers.openvpn import OpenVpnConfig
__author__ = 'dusanklinec'
test1 = """client-to-client
server 10.8.0.0 255.255.255.0
;server 10.7.0.0 255.255.255.0
key server.key # This file should be kept secret
;key server.key # This file should be kept secret
# test"""
test2 = """;persist-tun"""
test3 = """persist-tun"""
test4 = """;key server.key # This file should be kept secret"""
test5 = """push alpha
push beta
push gamma
push delta
push zetta"""
test6 = """remote [(${vpn_hostname})] 1194
resolv-retry infinite"""
test7 = """remote [(${vpn_hostname})] 1194
resolv-retry infinite
<ca>
line1
line2
line3
</ca>
persist-tun"""
test8 = 'proto udp'
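# Line types asserted below (inferred from the expectations, not from the parser
# source): ltype 1 ~ plain comment/blank line, ltype 2 ~ commented-out directive
# (';...'), ltype 3 ~ active directive.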
class OpenVpnParserTest(unittest.TestCase):
"""Simple test from the readme"""
def __init__(self, *args, **kwargs):
super(OpenVpnParserTest, self).__init__(*args, **kwargs)
def setUp(self):
pass
def tearDown(self):
pass
def test1(self):
parser = OpenVpnConfig(static_config=test1)
parser.load()
data = parser.config_data
# Simple parser test
self.assertEqual(len(data), 6, 'Number of parsed lines does not match')
self.assertEqual(data[0].ltype, 3, 'Parsed command has invalid type')
self.assertEqual(data[0].cmd, 'client-to-client')
self.assertEqual(data[0].params, None)
self.assertEqual(data[0].comment, None)
self.assertEqual(data[1].ltype, 3)
self.assertEqual(data[1].cmd, 'server')
self.assertEqual(data[1].params, '10.8.0.0 255.255.255.0')
self.assertEqual(data[1].comment, None)
self.assertEqual(data[2].ltype, 2)
self.assertEqual(data[2].cmd, 'server')
self.assertEqual(data[2].params, '10.7.0.0 255.255.255.0')
self.assertEqual(data[2].comment, None)
self.assertEqual(data[3].ltype, 3)
self.assertEqual(data[3].cmd, 'key')
self.assertEqual(data[3].params, 'server.key')
self.assertEqual(data[3].comment, '# This file should be kept secret')
self.assertEqual(data[4].ltype, 2)
self.assertEqual(data[4].cmd, 'key')
self.assertEqual(data[4].params, 'server.key')
self.assertEqual(data[4].comment, '# This file should be kept secret')
self.assertEqual(data[5].ltype, 1)
test1x = parser.dump()
parser2 = OpenVpnConfig(static_config=test1x)
parser2.load()
data2 = parser2.config_data
self.assertEqual(data2, data, 'Parser did not return the same data')
def test1_remove_single(self):
parser = OpenVpnConfig(static_config=test1)
parser.load()
parser.set_config_value('client-to-client', remove=True)
ctr_comm = 0
for rec in parser.config_data:
if rec.cmd == 'client-to-client':
self.assertEqual(rec.ltype, 2, 'Directive is still active')
if rec.ltype == 2 and rec.cmd == 'client-to-client':
ctr_comm += 1
self.assertLessEqual(ctr_comm, 1, 'Commented out value should be max 1')
def test1_remove_key(self):
parser = OpenVpnConfig(static_config=test1)
parser.load()
parser.set_config_value('key', remove=True)
ctr_comm = 0
for rec in parser.config_data:
if rec.cmd == 'key':
self.assertEqual(rec.ltype, 2, 'Directive is still active')
if rec.ltype == 2 and rec.cmd == 'key':
ctr_comm += 1
self.assertLessEqual(ctr_comm, 2, 'Commented out value should be max 2')
def test2_remove_removed(self):
parser = OpenVpnConfig(static_config=test2)
parser.load()
parser.set_config_value('persist-tun', remove=True)
data = parser.config_data
self.assertEqual(len(data), 1)
self.assertEqual(data[0].ltype, 2)
def test2_add_removed_single(self):
parser = OpenVpnConfig(static_config=test2)
parser.load()
parser.set_config_value('persist-tun')
data = parser.config_data
self.assertEqual(len(data), 1)
self.assertEqual(data[0].ltype, 3)
def test3_add_added(self):
parser = OpenVpnConfig(static_config=test3)
parser.load()
parser.set_config_value('persist-tun')
data = parser.config_data
self.assertEqual(len(data), 1)
self.assertEqual(data[0].ltype, 3)
def test3_remove_added(self):
parser = OpenVpnConfig(static_config=test3)
parser.load()
parser.set_config_value('persist-tun', remove=True)
data = parser.config_data
self.assertEqual(len(data), 1)
self.assertEqual(data[0].ltype, 2)
def test4_add_key(self):
parser = OpenVpnConfig(static_config=test4)
parser.load()
parser.set_config_value('key', 'server.key')
data = parser.config_data
self.assertEqual(len(data), 1)
self.assertEqual(data[0].ltype, 3)
def test5_push(self):
parser = OpenVpnConfig(static_config=test5)
parser.load()
vals = ['alpha', 'beta', 'delta', 'secret']
parser.set_config_value('push', vals)
data = parser.config_data
self.assertEqual(len(data), 6)
vals_present = [False] * len(vals)
for cur in data:
if cur.ltype == 3:
self.assertTrue(cur.params in vals)
vals_present[vals.index(cur.params)] = True
self.assertEqual(vals_present, [True] * len(vals))
def test5_push_remove(self):
parser = OpenVpnConfig(static_config=test5)
parser.load()
vals = ['alpha', 'secret']
parser.set_config_value('push', vals, remove=True)
data = parser.config_data
self.assertEqual(len(data), 5)
vals_present = [False] * len(vals)
for cur in data:
if cur.ltype == 3 and cur.params in vals:
vals_present[vals.index(cur.params)] = True
self.assertEqual(vals_present, [False] * len(vals))
def test6(self):
parser = OpenVpnConfig(static_config=test6)
parser.load()
data = parser.config_data
self.assertEqual(len(data), 2, 'Number of parsed lines does not match')
self.assertEqual(data[0].ltype, 3)
self.assertEqual(data[0].cmd, 'remote')
self.assertEqual(data[1].ltype, 3)
self.assertEqual(parser.dump(), test6, 'Parser did not return the same data')
def test7(self):
parser = OpenVpnConfig(static_config=test7)
parser.load()
data = parser.config_data
self.assertEqual(parser.dump().strip(), test7.strip(), 'Parser did not return the same data')
testx = parser.dump()
parser2 = OpenVpnConfig(static_config=testx)
parser2.load()
data2 = parser2.config_data
self.assertEqual(data2, data, 'Parser did not return the same data')
def test8(self):
parser = OpenVpnConfig(static_config=test8)
parser.load()
data = parser.config_data
self.assertEqual(parser.dump().strip(), test8.strip(), 'Parser did not return the same data')
testx = parser.dump()
parser2 = OpenVpnConfig(static_config=testx)
parser2.load()
data2 = parser2.config_data
self.assertEqual(data2, data, 'Parser did not return the same data')
parser.set_config_value('proto', 'tcp')
data = parser.config_data
self.assertEqual(len(data), 2)
self.assertEqual(data[0].ltype, 2)
self.assertEqual(data[1].ltype, 3)
if __name__ == "__main__":
unittest.main() # pragma: no cover
|
|
from django.test import TestCase
from django.core.exceptions import ValidationError
from .models import Part, PartCategory, PartParameter, PartParameterTemplate
class CategoryTest(TestCase):
"""
Tests to ensure that the relational category tree functions correctly.
Loads the following test fixtures:
- category.yaml
- part.yaml
- location.yaml
- params.yaml
"""
fixtures = [
'category',
'part',
'location',
'params',
]
def setUp(self):
# Extract some interesting categories for time-saving
self.electronics = PartCategory.objects.get(name='Electronics')
self.mechanical = PartCategory.objects.get(name='Mechanical')
self.resistors = PartCategory.objects.get(name='Resistors')
self.capacitors = PartCategory.objects.get(name='Capacitors')
self.fasteners = PartCategory.objects.get(name='Fasteners')
self.ic = PartCategory.objects.get(name='IC')
self.transceivers = PartCategory.objects.get(name='Transceivers')
def test_parents(self):
""" Test that the parent fields are properly set,
based on the test fixtures """
self.assertEqual(self.resistors.parent, self.electronics)
self.assertEqual(self.capacitors.parent, self.electronics)
self.assertEqual(self.electronics.parent, None)
self.assertEqual(self.fasteners.parent, self.mechanical)
def test_children_count(self):
""" Test that categories have the correct number of children """
self.assertTrue(self.electronics.has_children)
self.assertTrue(self.mechanical.has_children)
self.assertEqual(len(self.electronics.children.all()), 3)
self.assertEqual(len(self.mechanical.children.all()), 1)
def test_unique_childs(self):
""" Test the 'unique_children' functionality """
childs = [item.pk for item in self.electronics.getUniqueChildren()]
self.assertIn(self.transceivers.id, childs)
self.assertIn(self.ic.id, childs)
self.assertNotIn(self.fasteners.id, childs)
def test_unique_parents(self):
""" Test the 'unique_parents' functionality """
parents = [item.pk for item in self.transceivers.getUniqueParents()]
self.assertIn(self.electronics.id, parents)
self.assertIn(self.ic.id, parents)
self.assertNotIn(self.fasteners.id, parents)
def test_path_string(self):
""" Test that the category path string works correctly """
self.assertEqual(str(self.resistors), 'Electronics/Resistors - Resistors')
self.assertEqual(str(self.transceivers.pathstring), 'Electronics/IC/Transceivers')
def test_url(self):
""" Test that the PartCategory URL works """
self.assertEqual(self.capacitors.get_absolute_url(), '/part/category/3/')
def test_part_count(self):
""" Test that the Category part count works """
self.assertTrue(self.resistors.has_parts)
self.assertTrue(self.fasteners.has_parts)
self.assertFalse(self.transceivers.has_parts)
self.assertEqual(self.fasteners.partcount(), 2)
self.assertEqual(self.capacitors.partcount(), 1)
self.assertEqual(self.electronics.partcount(), 3)
self.assertEqual(self.mechanical.partcount(), 9)
self.assertEqual(self.mechanical.partcount(active=True), 8)
self.assertEqual(self.mechanical.partcount(False), 7)
self.assertEqual(self.electronics.item_count, self.electronics.partcount())
def test_parameters(self):
""" Test that the Category parameters are correctly fetched """
# Check number of SQL queries to iterate other parameters
with self.assertNumQueries(7):
# Prefetch: 3 queries (parts, parameters and parameters_template)
fasteners = self.fasteners.prefetch_parts_parameters()
# Iterate through all parts and parameters
for fastener in fasteners:
self.assertIsInstance(fastener, Part)
for parameter in fastener.parameters.all():
self.assertIsInstance(parameter, PartParameter)
self.assertIsInstance(parameter.template, PartParameterTemplate)
# Test number of unique parameters
self.assertEqual(len(self.fasteners.get_unique_parameters(prefetch=fasteners)), 1)
# Test number of parameters found for each part
parts_parameters = self.fasteners.get_parts_parameters(prefetch=fasteners)
part_infos = ['pk', 'name', 'description']
for part_parameter in parts_parameters:
# Remove part information
for item in part_infos:
part_parameter.pop(item)
self.assertEqual(len(part_parameter), 1)
def test_invalid_name(self):
# Test that an illegal character is prohibited in a category name
cat = PartCategory(name='test/with/illegal/chars', description='Test category', parent=None)
with self.assertRaises(ValidationError) as err:
cat.full_clean()
cat.save() # pragma: no cover
self.assertIn('Illegal character in name', str(err.exception.error_dict.get('name')))
cat.name = 'good name'
cat.save()
def test_delete(self):
""" Test that category deletion moves the children properly """
# Delete the 'IC' category and 'Transceiver' should move to be under 'Electronics'
self.assertEqual(self.transceivers.parent, self.ic)
self.assertEqual(self.ic.parent, self.electronics)
self.ic.delete()
# Get the data again
transceivers = PartCategory.objects.get(name='Transceivers')
self.assertEqual(transceivers.parent, self.electronics)
# Now delete the 'fasteners' category - the parts should move to 'mechanical'
self.fasteners.delete()
fasteners = Part.objects.filter(description__contains='screw')
for f in fasteners:
self.assertEqual(f.category, self.mechanical)
def test_default_locations(self):
""" Test traversal for default locations """
self.assertEqual(str(self.fasteners.default_location), 'Office/Drawer_1 - In my desk')
# Any part under electronics should default to 'Home'
r1 = Part.objects.get(name='R_2K2_0805')
self.assertIsNone(r1.default_location)
self.assertEqual(r1.get_default_location().name, 'Home')
# But one part has a default_location set
r2 = Part.objects.get(name='R_4K7_0603')
self.assertEqual(r2.get_default_location().name, 'Bathroom')
# And one part should have no default location at all
w = Part.objects.get(name='Widget')
self.assertIsNone(w.get_default_location())
def test_category_tree(self):
"""
Unit tests for the part category tree structure (MPTT)
Ensure that the MPTT structure is rebuilt correctly,
and the correct ancestor tree is observed.
"""
# Clear out any existing parts
Part.objects.all().delete()
# First, create a structured tree of part categories
A = PartCategory.objects.create(
name='A',
description='Top level category',
)
B1 = PartCategory.objects.create(name='B1', parent=A)
B2 = PartCategory.objects.create(name='B2', parent=A)
B3 = PartCategory.objects.create(name='B3', parent=A)
C11 = PartCategory.objects.create(name='C11', parent=B1)
C12 = PartCategory.objects.create(name='C12', parent=B1)
C13 = PartCategory.objects.create(name='C13', parent=B1)
C21 = PartCategory.objects.create(name='C21', parent=B2)
C22 = PartCategory.objects.create(name='C22', parent=B2)
C23 = PartCategory.objects.create(name='C23', parent=B2)
C31 = PartCategory.objects.create(name='C31', parent=B3)
C32 = PartCategory.objects.create(name='C32', parent=B3)
C33 = PartCategory.objects.create(name='C33', parent=B3)
# Check that the tree_id value is correct
for cat in [B1, B2, B3, C11, C22, C33]:
self.assertEqual(cat.tree_id, A.tree_id)
self.assertEqual(cat.level, cat.parent.level + 1)
self.assertEqual(cat.get_ancestors().count(), cat.level)
# Spot check for C31
ancestors = C31.get_ancestors(include_self=True)
self.assertEqual(ancestors.count(), 3)
self.assertEqual(ancestors[0], A)
self.assertEqual(ancestors[1], B3)
self.assertEqual(ancestors[2], C31)
# At this point, we are confident that the tree is correctly structured
# Add some parts to category B3
for i in range(10):
Part.objects.create(
name=f'Part {i}',
description='A test part',
category=B3,
)
self.assertEqual(Part.objects.filter(category=B3).count(), 10)
self.assertEqual(Part.objects.filter(category=A).count(), 0)
# Delete category B3
B3.delete()
# Child parts have been moved to category A
self.assertEqual(Part.objects.filter(category=A).count(), 10)
for cat in [C31, C32, C33]:
# These categories should now be directly under A
cat.refresh_from_db()
self.assertEqual(cat.parent, A)
self.assertEqual(cat.level, 1)
self.assertEqual(cat.get_ancestors().count(), 1)
self.assertEqual(cat.get_ancestors()[0], A)
# Now, delete category A
A.delete()
# Parts have now been moved to the top-level category
self.assertEqual(Part.objects.filter(category=None).count(), 10)
for loc in [B1, B2, C31, C32, C33]:
# These should now all be "top level" categories
loc.refresh_from_db()
self.assertEqual(loc.level, 0)
self.assertEqual(loc.parent, None)
# Check descendants for B1
descendants = B1.get_descendants()
self.assertEqual(descendants.count(), 3)
for loc in [C11, C12, C13]:
self.assertTrue(loc in descendants)
# Check category C1x, should be B1 -> C1x
for loc in [C11, C12, C13]:
loc.refresh_from_db()
self.assertEqual(loc.level, 1)
self.assertEqual(loc.parent, B1)
ancestors = loc.get_ancestors(include_self=True)
self.assertEqual(ancestors.count(), 2)
self.assertEqual(ancestors[0], B1)
self.assertEqual(ancestors[1], loc)
# Check category C2x, should be B2 -> C2x
for loc in [C21, C22, C23]:
loc.refresh_from_db()
self.assertEqual(loc.level, 1)
self.assertEqual(loc.parent, B2)
ancestors = loc.get_ancestors(include_self=True)
self.assertEqual(ancestors.count(), 2)
self.assertEqual(ancestors[0], B2)
self.assertEqual(ancestors[1], loc)
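# Summary of the behaviour exercised above: deleting a category re-parents both its
# child categories and its parts onto the deleted category's parent (or to the top
# level / "no category" when a root is deleted), and the MPTT level and ancestor
# data are rebuilt accordingly.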
|
|
"""The tests for the Recorder component."""
# pylint: disable=protected-access
import asyncio
from datetime import datetime, timedelta
import sqlite3
from unittest.mock import patch
import pytest
from sqlalchemy.exc import DatabaseError, OperationalError, SQLAlchemyError
from homeassistant.components import recorder
from homeassistant.components.recorder import (
CONF_AUTO_PURGE,
CONF_DB_URL,
CONFIG_SCHEMA,
DOMAIN,
KEEPALIVE_TIME,
SERVICE_DISABLE,
SERVICE_ENABLE,
SERVICE_PURGE,
SERVICE_PURGE_ENTITIES,
SQLITE_URL_PREFIX,
Recorder,
run_information,
run_information_from_instance,
run_information_with_session,
)
from homeassistant.components.recorder.const import DATA_INSTANCE
from homeassistant.components.recorder.models import (
Events,
RecorderRuns,
States,
StatisticsRuns,
process_timestamp,
)
from homeassistant.components.recorder.util import session_scope
from homeassistant.const import (
EVENT_HOMEASSISTANT_FINAL_WRITE,
EVENT_HOMEASSISTANT_STARTED,
EVENT_HOMEASSISTANT_STOP,
MATCH_ALL,
STATE_LOCKED,
STATE_UNLOCKED,
)
from homeassistant.core import Context, CoreState, HomeAssistant, callback
from homeassistant.setup import async_setup_component, setup_component
from homeassistant.util import dt as dt_util
from .common import (
async_wait_recording_done,
async_wait_recording_done_without_instance,
corrupt_db_file,
wait_recording_done,
)
from .conftest import SetupRecorderInstanceT
from tests.common import (
async_fire_time_changed,
async_init_recorder_component,
fire_time_changed,
get_test_home_assistant,
)
def _default_recorder(hass):
"""Return a recorder with reasonable defaults."""
return Recorder(
hass,
auto_purge=True,
keep_days=7,
commit_interval=1,
uri="sqlite://",
db_max_retries=10,
db_retry_wait=3,
entity_filter=CONFIG_SCHEMA({DOMAIN: {}}),
exclude_t=[],
)
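# Test pattern used throughout this module: write a state or fire an event, drain
# the recorder queue with (async_)wait_recording_done, then assert on the rows read
# back inside a session_scope() block.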
async def test_shutdown_before_startup_finishes(hass):
"""Test shutdown before recorder starts is clean."""
hass.state = CoreState.not_running
await async_init_recorder_component(hass)
await hass.data[DATA_INSTANCE].async_db_ready
await hass.async_block_till_done()
session = await hass.async_add_executor_job(hass.data[DATA_INSTANCE].get_session)
with patch.object(hass.data[DATA_INSTANCE], "engine"):
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
await hass.async_block_till_done()
await hass.async_stop()
run_info = await hass.async_add_executor_job(run_information_with_session, session)
assert run_info.run_id == 1
assert run_info.start is not None
assert run_info.end is not None
async def test_state_gets_saved_when_set_before_start_event(
hass: HomeAssistant, async_setup_recorder_instance: SetupRecorderInstanceT
):
"""Test we can record an event when starting with not running."""
hass.state = CoreState.not_running
await async_init_recorder_component(hass)
entity_id = "test.recorder"
state = "restoring_from_db"
attributes = {"test_attr": 5, "test_attr_10": "nice"}
hass.states.async_set(entity_id, state, attributes)
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await async_wait_recording_done_without_instance(hass)
with session_scope(hass=hass) as session:
db_states = list(session.query(States))
assert len(db_states) == 1
assert db_states[0].event_id > 0
async def test_saving_state(
hass: HomeAssistant, async_setup_recorder_instance: SetupRecorderInstanceT
):
"""Test saving and restoring a state."""
instance = await async_setup_recorder_instance(hass)
entity_id = "test.recorder"
state = "restoring_from_db"
attributes = {"test_attr": 5, "test_attr_10": "nice"}
hass.states.async_set(entity_id, state, attributes)
await async_wait_recording_done(hass, instance)
with session_scope(hass=hass) as session:
db_states = list(session.query(States))
assert len(db_states) == 1
assert db_states[0].event_id > 0
state = db_states[0].to_native()
assert state == _state_empty_context(hass, entity_id)
async def test_saving_many_states(
hass: HomeAssistant, async_setup_recorder_instance: SetupRecorderInstanceT
):
"""Test we expire after many commits."""
instance = await async_setup_recorder_instance(hass)
entity_id = "test.recorder"
attributes = {"test_attr": 5, "test_attr_10": "nice"}
with patch.object(
hass.data[DATA_INSTANCE].event_session, "expire_all"
) as expire_all, patch.object(recorder, "EXPIRE_AFTER_COMMITS", 2):
for _ in range(3):
hass.states.async_set(entity_id, "on", attributes)
await async_wait_recording_done(hass, instance)
hass.states.async_set(entity_id, "off", attributes)
await async_wait_recording_done(hass, instance)
assert expire_all.called
with session_scope(hass=hass) as session:
db_states = list(session.query(States))
assert len(db_states) == 6
assert db_states[0].event_id > 0
async def test_saving_state_with_intermixed_time_changes(
hass: HomeAssistant, async_setup_recorder_instance: SetupRecorderInstanceT
):
"""Test saving states with intermixed time changes."""
instance = await async_setup_recorder_instance(hass)
entity_id = "test.recorder"
state = "restoring_from_db"
attributes = {"test_attr": 5, "test_attr_10": "nice"}
attributes2 = {"test_attr": 10, "test_attr_10": "mean"}
for _ in range(KEEPALIVE_TIME + 1):
async_fire_time_changed(hass, dt_util.utcnow())
hass.states.async_set(entity_id, state, attributes)
for _ in range(KEEPALIVE_TIME + 1):
async_fire_time_changed(hass, dt_util.utcnow())
hass.states.async_set(entity_id, state, attributes2)
await async_wait_recording_done(hass, instance)
with session_scope(hass=hass) as session:
db_states = list(session.query(States))
assert len(db_states) == 2
assert db_states[0].event_id > 0
def test_saving_state_with_exception(hass, hass_recorder, caplog):
"""Test saving and restoring a state."""
hass = hass_recorder()
entity_id = "test.recorder"
state = "restoring_from_db"
attributes = {"test_attr": 5, "test_attr_10": "nice"}
def _throw_if_state_in_session(*args, **kwargs):
for obj in hass.data[DATA_INSTANCE].event_session:
if isinstance(obj, States):
raise OperationalError(
"insert the state", "fake params", "forced to fail"
)
with patch("time.sleep"), patch.object(
hass.data[DATA_INSTANCE].event_session,
"flush",
side_effect=_throw_if_state_in_session,
):
hass.states.set(entity_id, "fail", attributes)
wait_recording_done(hass)
assert "Error executing query" in caplog.text
assert "Error saving events" not in caplog.text
caplog.clear()
hass.states.set(entity_id, state, attributes)
wait_recording_done(hass)
with session_scope(hass=hass) as session:
db_states = list(session.query(States))
assert len(db_states) >= 1
assert "Error executing query" not in caplog.text
assert "Error saving events" not in caplog.text
def test_saving_state_with_sqlalchemy_exception(hass, hass_recorder, caplog):
"""Test saving state when there is an SQLAlchemyError."""
hass = hass_recorder()
entity_id = "test.recorder"
state = "restoring_from_db"
attributes = {"test_attr": 5, "test_attr_10": "nice"}
def _throw_if_state_in_session(*args, **kwargs):
for obj in hass.data[DATA_INSTANCE].event_session:
if isinstance(obj, States):
raise SQLAlchemyError(
"insert the state", "fake params", "forced to fail"
)
with patch("time.sleep"), patch.object(
hass.data[DATA_INSTANCE].event_session,
"flush",
side_effect=_throw_if_state_in_session,
):
hass.states.set(entity_id, "fail", attributes)
wait_recording_done(hass)
assert "SQLAlchemyError error processing task" in caplog.text
caplog.clear()
hass.states.set(entity_id, state, attributes)
wait_recording_done(hass)
with session_scope(hass=hass) as session:
db_states = list(session.query(States))
assert len(db_states) >= 1
assert "Error executing query" not in caplog.text
assert "Error saving events" not in caplog.text
assert "SQLAlchemyError error processing task" not in caplog.text
async def test_force_shutdown_with_queue_of_writes_that_generate_exceptions(
hass, async_setup_recorder_instance, caplog
):
"""Test forcing shutdown."""
instance = await async_setup_recorder_instance(hass)
entity_id = "test.recorder"
attributes = {"test_attr": 5, "test_attr_10": "nice"}
await async_wait_recording_done(hass, instance)
with patch.object(instance, "db_retry_wait", 0.2), patch.object(
instance.event_session,
"flush",
side_effect=OperationalError(
"insert the state", "fake params", "forced to fail"
),
):
for _ in range(100):
hass.states.async_set(entity_id, "on", attributes)
hass.states.async_set(entity_id, "off", attributes)
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
hass.bus.async_fire(EVENT_HOMEASSISTANT_FINAL_WRITE)
await hass.async_block_till_done()
assert "Error executing query" in caplog.text
assert "Error saving events" not in caplog.text
def test_saving_event(hass, hass_recorder):
"""Test saving and restoring an event."""
hass = hass_recorder()
event_type = "EVENT_TEST"
event_data = {"test_attr": 5, "test_attr_10": "nice"}
events = []
@callback
def event_listener(event):
"""Record events from eventbus."""
if event.event_type == event_type:
events.append(event)
hass.bus.listen(MATCH_ALL, event_listener)
hass.bus.fire(event_type, event_data)
wait_recording_done(hass)
assert len(events) == 1
event = events[0]
hass.data[DATA_INSTANCE].block_till_done()
with session_scope(hass=hass) as session:
db_events = list(session.query(Events).filter_by(event_type=event_type))
assert len(db_events) == 1
db_event = db_events[0].to_native()
assert event.event_type == db_event.event_type
assert event.data == db_event.data
assert event.origin == db_event.origin
# Recorder uses SQLite and stores datetimes as integer unix timestamps
assert event.time_fired.replace(microsecond=0) == db_event.time_fired.replace(
microsecond=0
)
def test_saving_state_with_commit_interval_zero(hass_recorder):
"""Test saving a state with a commit interval of zero."""
hass = hass_recorder({"commit_interval": 0})
assert hass.data[DATA_INSTANCE].commit_interval == 0
entity_id = "test.recorder"
state = "restoring_from_db"
attributes = {"test_attr": 5, "test_attr_10": "nice"}
hass.states.set(entity_id, state, attributes)
wait_recording_done(hass)
with session_scope(hass=hass) as session:
db_states = list(session.query(States))
assert len(db_states) == 1
assert db_states[0].event_id > 0
def _add_entities(hass, entity_ids):
"""Add entities."""
attributes = {"test_attr": 5, "test_attr_10": "nice"}
for idx, entity_id in enumerate(entity_ids):
hass.states.set(entity_id, f"state{idx}", attributes)
wait_recording_done(hass)
with session_scope(hass=hass) as session:
return [st.to_native() for st in session.query(States)]
def _add_events(hass, events):
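    """Clear existing events, fire the given event types, and return what was recorded."""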
with session_scope(hass=hass) as session:
session.query(Events).delete(synchronize_session=False)
for event_type in events:
hass.bus.fire(event_type)
wait_recording_done(hass)
with session_scope(hass=hass) as session:
return [ev.to_native() for ev in session.query(Events)]
def _state_empty_context(hass, entity_id):
# We don't restore context unless we need it by joining the
# events table on the event_id for state_changed events
state = hass.states.get(entity_id)
state.context = Context(id=None)
return state
# pylint: disable=redefined-outer-name,invalid-name
def test_saving_state_include_domains(hass_recorder):
"""Test saving and restoring a state."""
hass = hass_recorder({"include": {"domains": "test2"}})
states = _add_entities(hass, ["test.recorder", "test2.recorder"])
assert len(states) == 1
assert _state_empty_context(hass, "test2.recorder") == states[0]
def test_saving_state_include_domains_globs(hass_recorder):
"""Test saving and restoring a state."""
hass = hass_recorder(
{"include": {"domains": "test2", "entity_globs": "*.included_*"}}
)
states = _add_entities(
hass, ["test.recorder", "test2.recorder", "test3.included_entity"]
)
assert len(states) == 2
assert _state_empty_context(hass, "test2.recorder") == states[0]
assert _state_empty_context(hass, "test3.included_entity") == states[1]
def test_saving_state_incl_entities(hass_recorder):
"""Test saving and restoring a state."""
hass = hass_recorder({"include": {"entities": "test2.recorder"}})
states = _add_entities(hass, ["test.recorder", "test2.recorder"])
assert len(states) == 1
assert _state_empty_context(hass, "test2.recorder") == states[0]
def test_saving_event_exclude_event_type(hass_recorder):
"""Test saving and restoring an event."""
hass = hass_recorder(
{
"exclude": {
"event_types": [
"service_registered",
"homeassistant_start",
"component_loaded",
"core_config_updated",
"homeassistant_started",
"test",
]
}
}
)
events = _add_events(hass, ["test", "test2"])
assert len(events) == 1
assert events[0].event_type == "test2"
def test_saving_state_exclude_domains(hass_recorder):
"""Test saving and restoring a state."""
hass = hass_recorder({"exclude": {"domains": "test"}})
states = _add_entities(hass, ["test.recorder", "test2.recorder"])
assert len(states) == 1
assert _state_empty_context(hass, "test2.recorder") == states[0]
def test_saving_state_exclude_domains_globs(hass_recorder):
"""Test saving and restoring a state."""
hass = hass_recorder(
{"exclude": {"domains": "test", "entity_globs": "*.excluded_*"}}
)
states = _add_entities(
hass, ["test.recorder", "test2.recorder", "test2.excluded_entity"]
)
assert len(states) == 1
assert _state_empty_context(hass, "test2.recorder") == states[0]
def test_saving_state_exclude_entities(hass_recorder):
"""Test saving and restoring a state."""
hass = hass_recorder({"exclude": {"entities": "test.recorder"}})
states = _add_entities(hass, ["test.recorder", "test2.recorder"])
assert len(states) == 1
assert _state_empty_context(hass, "test2.recorder") == states[0]
def test_saving_state_exclude_domain_include_entity(hass_recorder):
"""Test saving and restoring a state."""
hass = hass_recorder(
{"include": {"entities": "test.recorder"}, "exclude": {"domains": "test"}}
)
states = _add_entities(hass, ["test.recorder", "test2.recorder"])
assert len(states) == 2
def test_saving_state_exclude_domain_glob_include_entity(hass_recorder):
"""Test saving and restoring a state."""
hass = hass_recorder(
{
"include": {"entities": ["test.recorder", "test.excluded_entity"]},
"exclude": {"domains": "test", "entity_globs": "*._excluded_*"},
}
)
states = _add_entities(
hass, ["test.recorder", "test2.recorder", "test.excluded_entity"]
)
assert len(states) == 3
def test_saving_state_include_domain_exclude_entity(hass_recorder):
"""Test saving and restoring a state."""
hass = hass_recorder(
{"exclude": {"entities": "test.recorder"}, "include": {"domains": "test"}}
)
states = _add_entities(hass, ["test.recorder", "test2.recorder", "test.ok"])
assert len(states) == 1
assert _state_empty_context(hass, "test.ok") == states[0]
assert _state_empty_context(hass, "test.ok").state == "state2"
def test_saving_state_include_domain_glob_exclude_entity(hass_recorder):
"""Test saving and restoring a state."""
hass = hass_recorder(
{
"exclude": {"entities": ["test.recorder", "test2.included_entity"]},
"include": {"domains": "test", "entity_globs": "*._included_*"},
}
)
states = _add_entities(
hass, ["test.recorder", "test2.recorder", "test.ok", "test2.included_entity"]
)
assert len(states) == 1
assert _state_empty_context(hass, "test.ok") == states[0]
assert _state_empty_context(hass, "test.ok").state == "state2"
def test_saving_state_and_removing_entity(hass, hass_recorder):
"""Test saving the state of a removed entity."""
hass = hass_recorder()
entity_id = "lock.mine"
hass.states.set(entity_id, STATE_LOCKED)
hass.states.set(entity_id, STATE_UNLOCKED)
hass.states.remove(entity_id)
wait_recording_done(hass)
with session_scope(hass=hass) as session:
states = list(session.query(States))
assert len(states) == 3
assert states[0].entity_id == entity_id
assert states[0].state == STATE_LOCKED
assert states[1].entity_id == entity_id
assert states[1].state == STATE_UNLOCKED
assert states[2].entity_id == entity_id
assert states[2].state is None
def test_recorder_setup_failure(hass):
"""Test some exceptions."""
with patch.object(Recorder, "_setup_connection") as setup, patch(
"homeassistant.components.recorder.time.sleep"
):
setup.side_effect = ImportError("driver not found")
rec = _default_recorder(hass)
rec.async_initialize()
rec.start()
rec.join()
hass.stop()
def test_recorder_setup_failure_without_event_listener(hass):
"""Test recorder setup failure when the event listener is not setup."""
with patch.object(Recorder, "_setup_connection") as setup, patch(
"homeassistant.components.recorder.time.sleep"
):
setup.side_effect = ImportError("driver not found")
rec = _default_recorder(hass)
rec.start()
rec.join()
hass.stop()
async def test_defaults_set(hass):
"""Test the config defaults are set."""
recorder_config = None
async def mock_setup(hass, config):
"""Mock setup."""
nonlocal recorder_config
recorder_config = config["recorder"]
return True
with patch("homeassistant.components.recorder.async_setup", side_effect=mock_setup):
assert await async_setup_component(hass, "history", {})
assert recorder_config is not None
# pylint: disable=unsubscriptable-object
assert recorder_config["auto_purge"]
assert recorder_config["purge_keep_days"] == 10
def run_tasks_at_time(hass, test_time):
"""Advance the clock and wait for any callbacks to finish."""
fire_time_changed(hass, test_time)
hass.block_till_done()
hass.data[DATA_INSTANCE].block_till_done()
def test_auto_purge(hass_recorder):
"""Test periodic purge scheduling."""
hass = hass_recorder()
original_tz = dt_util.DEFAULT_TIME_ZONE
tz = dt_util.get_time_zone("Europe/Copenhagen")
dt_util.set_default_time_zone(tz)
# Purging is scheduled to happen at 4:12am every day. Exercise this behavior by
# firing time changed events and advancing the clock around this time. Pick an
# arbitrary year in the future to avoid boundary conditions relative to the current
# date.
#
# The clock is started at 4:15am then advanced forward below
now = dt_util.utcnow()
test_time = datetime(now.year + 2, 1, 1, 4, 15, 0, tzinfo=tz)
run_tasks_at_time(hass, test_time)
with patch(
"homeassistant.components.recorder.purge.purge_old_data", return_value=True
) as purge_old_data, patch(
"homeassistant.components.recorder.perodic_db_cleanups"
) as perodic_db_cleanups:
# Advance one day, and the purge task should run
test_time = test_time + timedelta(days=1)
run_tasks_at_time(hass, test_time)
assert len(purge_old_data.mock_calls) == 1
assert len(perodic_db_cleanups.mock_calls) == 1
purge_old_data.reset_mock()
perodic_db_cleanups.reset_mock()
# Advance one day, and the purge task should run again
test_time = test_time + timedelta(days=1)
run_tasks_at_time(hass, test_time)
assert len(purge_old_data.mock_calls) == 1
assert len(perodic_db_cleanups.mock_calls) == 1
purge_old_data.reset_mock()
perodic_db_cleanups.reset_mock()
# Advance less than one full day. The alarm should not yet fire.
test_time = test_time + timedelta(hours=23)
run_tasks_at_time(hass, test_time)
assert len(purge_old_data.mock_calls) == 0
assert len(perodic_db_cleanups.mock_calls) == 0
# Advance to the next day and fire the alarm again
test_time = test_time + timedelta(hours=1)
run_tasks_at_time(hass, test_time)
assert len(purge_old_data.mock_calls) == 1
assert len(perodic_db_cleanups.mock_calls) == 1
dt_util.set_default_time_zone(original_tz)
def test_auto_purge_disabled(hass_recorder):
"""Test periodic db cleanup still run when auto purge is disabled."""
hass = hass_recorder({CONF_AUTO_PURGE: False})
original_tz = dt_util.DEFAULT_TIME_ZONE
tz = dt_util.get_time_zone("Europe/Copenhagen")
dt_util.set_default_time_zone(tz)
    # Purging is scheduled to happen at 4:12am every day. We want to verify
    # that periodic db cleanups are still scheduled when auto purge is disabled.
#
# The clock is started at 4:15am then advanced forward below
now = dt_util.utcnow()
test_time = datetime(now.year + 2, 1, 1, 4, 15, 0, tzinfo=tz)
run_tasks_at_time(hass, test_time)
with patch(
"homeassistant.components.recorder.purge.purge_old_data", return_value=True
) as purge_old_data, patch(
"homeassistant.components.recorder.perodic_db_cleanups"
) as perodic_db_cleanups:
# Advance one day, and the purge task should run
test_time = test_time + timedelta(days=1)
run_tasks_at_time(hass, test_time)
assert len(purge_old_data.mock_calls) == 0
assert len(perodic_db_cleanups.mock_calls) == 1
purge_old_data.reset_mock()
perodic_db_cleanups.reset_mock()
dt_util.set_default_time_zone(original_tz)
@pytest.mark.parametrize("enable_statistics", [True])
def test_auto_statistics(hass_recorder):
"""Test periodic statistics scheduling."""
hass = hass_recorder()
original_tz = dt_util.DEFAULT_TIME_ZONE
tz = dt_util.get_time_zone("Europe/Copenhagen")
dt_util.set_default_time_zone(tz)
    # Statistics compilation is scheduled to run every 5 minutes. Exercise this behavior by
# firing time changed events and advancing the clock around this time. Pick an
# arbitrary year in the future to avoid boundary conditions relative to the current
# date.
#
# The clock is started at 4:16am then advanced forward below
now = dt_util.utcnow()
test_time = datetime(now.year + 2, 1, 1, 4, 16, 0, tzinfo=tz)
run_tasks_at_time(hass, test_time)
with patch(
"homeassistant.components.recorder.statistics.compile_statistics",
return_value=True,
) as compile_statistics:
# Advance 5 minutes, and the statistics task should run
test_time = test_time + timedelta(minutes=5)
run_tasks_at_time(hass, test_time)
assert len(compile_statistics.mock_calls) == 1
compile_statistics.reset_mock()
# Advance 5 minutes, and the statistics task should run again
test_time = test_time + timedelta(minutes=5)
run_tasks_at_time(hass, test_time)
assert len(compile_statistics.mock_calls) == 1
compile_statistics.reset_mock()
# Advance less than 5 minutes. The task should not run.
test_time = test_time + timedelta(minutes=3)
run_tasks_at_time(hass, test_time)
assert len(compile_statistics.mock_calls) == 0
# Advance 5 minutes, and the statistics task should run again
test_time = test_time + timedelta(minutes=5)
run_tasks_at_time(hass, test_time)
assert len(compile_statistics.mock_calls) == 1
dt_util.set_default_time_zone(original_tz)
def test_statistics_runs_initiated(hass_recorder):
"""Test statistics_runs is initiated when DB is created."""
now = dt_util.utcnow()
with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=now):
hass = hass_recorder()
wait_recording_done(hass)
with session_scope(hass=hass) as session:
statistics_runs = list(session.query(StatisticsRuns))
assert len(statistics_runs) == 1
last_run = process_timestamp(statistics_runs[0].start)
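            # The first run is recorded for the start of the previous five-minute
            # period: now rounded down to a 5-minute boundary, minus 5 minutes.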
assert process_timestamp(last_run) == now.replace(
minute=now.minute - now.minute % 5, second=0, microsecond=0
) - timedelta(minutes=5)
def test_compile_missing_statistics(tmpdir):
"""Test missing statistics are compiled on startup."""
now = dt_util.utcnow().replace(minute=0, second=0, microsecond=0)
test_db_file = tmpdir.mkdir("sqlite").join("test_run_info.db")
dburl = f"{SQLITE_URL_PREFIX}//{test_db_file}"
with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=now):
hass = get_test_home_assistant()
setup_component(hass, DOMAIN, {DOMAIN: {CONF_DB_URL: dburl}})
hass.start()
wait_recording_done(hass)
wait_recording_done(hass)
with session_scope(hass=hass) as session:
statistics_runs = list(session.query(StatisticsRuns))
assert len(statistics_runs) == 1
last_run = process_timestamp(statistics_runs[0].start)
assert last_run == now - timedelta(minutes=5)
wait_recording_done(hass)
wait_recording_done(hass)
hass.stop()
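    # Start a second instance one hour later; the recorder should compile the
    # statistics runs that were missed while Home Assistant was down.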
with patch(
"homeassistant.components.recorder.dt_util.utcnow",
return_value=now + timedelta(hours=1),
):
hass = get_test_home_assistant()
setup_component(hass, DOMAIN, {DOMAIN: {CONF_DB_URL: dburl}})
hass.start()
wait_recording_done(hass)
wait_recording_done(hass)
with session_scope(hass=hass) as session:
statistics_runs = list(session.query(StatisticsRuns))
            assert len(statistics_runs) == 13  # the initial run plus 12 new 5-minute runs
last_run = process_timestamp(statistics_runs[1].start)
assert last_run == now
wait_recording_done(hass)
wait_recording_done(hass)
hass.stop()
def test_saving_sets_old_state(hass_recorder):
"""Test saving sets old state."""
hass = hass_recorder()
hass.states.set("test.one", "on", {})
hass.states.set("test.two", "on", {})
wait_recording_done(hass)
hass.states.set("test.one", "off", {})
hass.states.set("test.two", "off", {})
wait_recording_done(hass)
with session_scope(hass=hass) as session:
states = list(session.query(States))
assert len(states) == 4
assert states[0].entity_id == "test.one"
assert states[1].entity_id == "test.two"
assert states[2].entity_id == "test.one"
assert states[3].entity_id == "test.two"
assert states[0].old_state_id is None
assert states[1].old_state_id is None
assert states[2].old_state_id == states[0].state_id
assert states[3].old_state_id == states[1].state_id
def test_saving_state_with_serializable_data(hass_recorder, caplog):
"""Test saving data that cannot be serialized does not crash."""
hass = hass_recorder()
hass.bus.fire("bad_event", {"fail": CannotSerializeMe()})
hass.states.set("test.one", "on", {"fail": CannotSerializeMe()})
wait_recording_done(hass)
hass.states.set("test.two", "on", {})
wait_recording_done(hass)
hass.states.set("test.two", "off", {})
wait_recording_done(hass)
with session_scope(hass=hass) as session:
states = list(session.query(States))
assert len(states) == 2
assert states[0].entity_id == "test.two"
assert states[1].entity_id == "test.two"
assert states[0].old_state_id is None
assert states[1].old_state_id == states[0].state_id
assert "State is not JSON serializable" in caplog.text
def test_run_information(hass_recorder):
"""Ensure run_information returns expected data."""
before_start_recording = dt_util.utcnow()
hass = hass_recorder()
run_info = run_information_from_instance(hass)
assert isinstance(run_info, RecorderRuns)
assert run_info.closed_incorrect is False
with session_scope(hass=hass) as session:
run_info = run_information_with_session(session)
assert isinstance(run_info, RecorderRuns)
assert run_info.closed_incorrect is False
run_info = run_information(hass)
assert isinstance(run_info, RecorderRuns)
assert run_info.closed_incorrect is False
hass.states.set("test.two", "on", {})
wait_recording_done(hass)
run_info = run_information(hass)
assert isinstance(run_info, RecorderRuns)
assert run_info.closed_incorrect is False
run_info = run_information(hass, before_start_recording)
assert run_info is None
run_info = run_information(hass, dt_util.utcnow())
assert isinstance(run_info, RecorderRuns)
assert run_info.closed_incorrect is False
def test_has_services(hass_recorder):
"""Test the services exist."""
hass = hass_recorder()
assert hass.services.has_service(DOMAIN, SERVICE_DISABLE)
assert hass.services.has_service(DOMAIN, SERVICE_ENABLE)
assert hass.services.has_service(DOMAIN, SERVICE_PURGE)
assert hass.services.has_service(DOMAIN, SERVICE_PURGE_ENTITIES)
def test_service_disable_events_not_recording(hass, hass_recorder):
"""Test that events are not recorded when recorder is disabled using service."""
hass = hass_recorder()
assert hass.services.call(
DOMAIN,
SERVICE_DISABLE,
{},
blocking=True,
)
event_type = "EVENT_TEST"
events = []
@callback
def event_listener(event):
"""Record events from eventbus."""
if event.event_type == event_type:
events.append(event)
hass.bus.listen(MATCH_ALL, event_listener)
event_data1 = {"test_attr": 5, "test_attr_10": "nice"}
hass.bus.fire(event_type, event_data1)
wait_recording_done(hass)
assert len(events) == 1
event = events[0]
with session_scope(hass=hass) as session:
db_events = list(session.query(Events).filter_by(event_type=event_type))
assert len(db_events) == 0
assert hass.services.call(
DOMAIN,
SERVICE_ENABLE,
{},
blocking=True,
)
event_data2 = {"attr_one": 5, "attr_two": "nice"}
hass.bus.fire(event_type, event_data2)
wait_recording_done(hass)
assert len(events) == 2
assert events[0] != events[1]
assert events[0].data != events[1].data
with session_scope(hass=hass) as session:
db_events = list(session.query(Events).filter_by(event_type=event_type))
assert len(db_events) == 1
db_event = db_events[0].to_native()
event = events[1]
assert event.event_type == db_event.event_type
assert event.data == db_event.data
assert event.origin == db_event.origin
assert event.time_fired.replace(microsecond=0) == db_event.time_fired.replace(
microsecond=0
)
def test_service_disable_states_not_recording(hass, hass_recorder):
"""Test that state changes are not recorded when recorder is disabled using service."""
hass = hass_recorder()
assert hass.services.call(
DOMAIN,
SERVICE_DISABLE,
{},
blocking=True,
)
hass.states.set("test.one", "on", {})
wait_recording_done(hass)
with session_scope(hass=hass) as session:
assert len(list(session.query(States))) == 0
assert hass.services.call(
DOMAIN,
SERVICE_ENABLE,
{},
blocking=True,
)
hass.states.set("test.two", "off", {})
wait_recording_done(hass)
with session_scope(hass=hass) as session:
db_states = list(session.query(States))
assert len(db_states) == 1
assert db_states[0].event_id > 0
assert db_states[0].to_native() == _state_empty_context(hass, "test.two")
def test_service_disable_run_information_recorded(tmpdir):
"""Test that runs are still recorded when recorder is disabled."""
test_db_file = tmpdir.mkdir("sqlite").join("test_run_info.db")
dburl = f"{SQLITE_URL_PREFIX}//{test_db_file}"
hass = get_test_home_assistant()
setup_component(hass, DOMAIN, {DOMAIN: {CONF_DB_URL: dburl}})
hass.start()
wait_recording_done(hass)
with session_scope(hass=hass) as session:
db_run_info = list(session.query(RecorderRuns))
assert len(db_run_info) == 1
assert db_run_info[0].start is not None
assert db_run_info[0].end is None
assert hass.services.call(
DOMAIN,
SERVICE_DISABLE,
{},
blocking=True,
)
wait_recording_done(hass)
hass.stop()
hass = get_test_home_assistant()
setup_component(hass, DOMAIN, {DOMAIN: {CONF_DB_URL: dburl}})
hass.start()
wait_recording_done(hass)
with session_scope(hass=hass) as session:
db_run_info = list(session.query(RecorderRuns))
assert len(db_run_info) == 2
assert db_run_info[0].start is not None
assert db_run_info[0].end is not None
assert db_run_info[1].start is not None
assert db_run_info[1].end is None
hass.stop()
class CannotSerializeMe:
"""A class that the JSONEncoder cannot serialize."""
async def test_database_corruption_while_running(hass, tmpdir, caplog):
"""Test we can recover from sqlite3 db corruption."""
def _create_tmpdir_for_test_db():
return tmpdir.mkdir("sqlite").join("test.db")
test_db_file = await hass.async_add_executor_job(_create_tmpdir_for_test_db)
dburl = f"{SQLITE_URL_PREFIX}//{test_db_file}"
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_DB_URL: dburl}})
await hass.async_block_till_done()
caplog.clear()
hass.states.async_set("test.lost", "on", {})
sqlite3_exception = DatabaseError("statement", {}, [])
sqlite3_exception.__cause__ = sqlite3.DatabaseError()
with patch.object(
hass.data[DATA_INSTANCE].event_session,
"close",
side_effect=OperationalError("statement", {}, []),
):
await async_wait_recording_done_without_instance(hass)
await hass.async_add_executor_job(corrupt_db_file, test_db_file)
await async_wait_recording_done_without_instance(hass)
with patch.object(
hass.data[DATA_INSTANCE].event_session,
"commit",
side_effect=[sqlite3_exception, None],
):
# This state will not be recorded because
# the database corruption will be discovered
# and we will have to rollback to recover
hass.states.async_set("test.one", "off", {})
await async_wait_recording_done_without_instance(hass)
assert "Unrecoverable sqlite3 database corruption detected" in caplog.text
assert "The system will rename the corrupt database file" in caplog.text
assert "Connected to recorder database" in caplog.text
# This state should go into the new database
hass.states.async_set("test.two", "on", {})
await async_wait_recording_done_without_instance(hass)
def _get_last_state():
with session_scope(hass=hass) as session:
db_states = list(session.query(States))
assert len(db_states) == 1
assert db_states[0].event_id > 0
return db_states[0].to_native()
state = await hass.async_add_executor_job(_get_last_state)
assert state.entity_id == "test.two"
assert state.state == "on"
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
await hass.async_block_till_done()
hass.stop()
def test_entity_id_filter(hass_recorder):
"""Test that entity ID filtering filters string and list."""
hass = hass_recorder(
{"include": {"domains": "hello"}, "exclude": {"domains": "hidden_domain"}}
)
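    # Each of the entity_id variants below should be recorded, since none of
    # them resolves exclusively to the excluded domain.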
for idx, data in enumerate(
(
{},
{"entity_id": "hello.world"},
{"entity_id": ["hello.world"]},
{"entity_id": ["hello.world", "hidden_domain.person"]},
{"entity_id": {"unexpected": "data"}},
)
):
hass.bus.fire("hello", data)
wait_recording_done(hass)
with session_scope(hass=hass) as session:
db_events = list(session.query(Events).filter_by(event_type="hello"))
assert len(db_events) == idx + 1, data
for data in (
{"entity_id": "hidden_domain.person"},
{"entity_id": ["hidden_domain.person"]},
):
hass.bus.fire("hello", data)
wait_recording_done(hass)
with session_scope(hass=hass) as session:
db_events = list(session.query(Events).filter_by(event_type="hello"))
            # Keep comparing against idx + 1, as no new events should have been added
assert len(db_events) == idx + 1, data
async def test_database_lock_and_unlock(hass: HomeAssistant, tmp_path):
"""Test writing events during lock getting written after unlocking."""
    # Use a file-backed DB; an in-memory DB cannot do write locks.
config = {recorder.CONF_DB_URL: "sqlite:///" + str(tmp_path / "pytest.db")}
await async_init_recorder_component(hass, config)
await hass.async_block_till_done()
instance: Recorder = hass.data[DATA_INSTANCE]
assert await instance.lock_database()
assert not await instance.lock_database()
event_type = "EVENT_TEST"
event_data = {"test_attr": 5, "test_attr_10": "nice"}
hass.bus.fire(event_type, event_data)
task = asyncio.create_task(async_wait_recording_done(hass, instance))
# Recording can't be finished while lock is held
with pytest.raises(asyncio.TimeoutError):
await asyncio.wait_for(asyncio.shield(task), timeout=1)
with session_scope(hass=hass) as session:
db_events = list(session.query(Events).filter_by(event_type=event_type))
assert len(db_events) == 0
assert instance.unlock_database()
await task
with session_scope(hass=hass) as session:
db_events = list(session.query(Events).filter_by(event_type=event_type))
assert len(db_events) == 1
async def test_database_lock_and_overflow(hass: HomeAssistant, tmp_path):
"""Test writing events during lock leading to overflow the queue causes the database to unlock."""
    # Use a file-backed DB; an in-memory DB cannot do write locks.
config = {recorder.CONF_DB_URL: "sqlite:///" + str(tmp_path / "pytest.db")}
await async_init_recorder_component(hass, config)
await hass.async_block_till_done()
instance: Recorder = hass.data[DATA_INSTANCE]
with patch.object(recorder, "MAX_QUEUE_BACKLOG", 1), patch.object(
recorder, "DB_LOCK_QUEUE_CHECK_TIMEOUT", 0.1
):
await instance.lock_database()
event_type = "EVENT_TEST"
event_data = {"test_attr": 5, "test_attr_10": "nice"}
hass.bus.fire(event_type, event_data)
        # Check that this causes the queue to overflow and the write succeeds
        # even before unlocking.
await async_wait_recording_done(hass, instance)
with session_scope(hass=hass) as session:
db_events = list(session.query(Events).filter_by(event_type=event_type))
assert len(db_events) == 1
assert not instance.unlock_database()
async def test_database_lock_timeout(hass):
"""Test locking database timeout when recorder stopped."""
await async_init_recorder_component(hass)
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
await hass.async_block_till_done()
instance: Recorder = hass.data[DATA_INSTANCE]
with patch.object(recorder, "DB_LOCK_TIMEOUT", 0.1):
try:
with pytest.raises(TimeoutError):
await instance.lock_database()
finally:
instance.unlock_database()
|
|
#__BEGIN_LICENSE__
# Copyright (c) 2015, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The xGDS platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#__END_LICENSE__
import re
import os
import shutil
import gdal
import untangle
import traceback
import copy
from treebeard.mp_tree import MP_Node
from django.core.urlresolvers import reverse
from django.core.validators import MaxValueValidator
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
# from django.db import models
from django.contrib.gis.db import models
from django.conf import settings
from geocamUtil.models.UuidField import UuidField
from geocamUtil.models.managers import ModelCollectionManager
from geocamUtil.modelJson import modelToJson, modelsToJson, modelToDict, dictToJson
from geocamUtil.models.ExtrasDotField import ExtrasDotField
from xgds_core.couchDbStorage import CouchDbStorage
from xgds_core.util import insertIntoPath
# pylint: disable=C1001
LOGO_REGEXES = None
class AbstractNode(models.Model):
"""
Abstract Map Node base class
"""
name = models.CharField('name', max_length=128, db_index=True)
description = models.CharField('description', max_length=1024, blank=True)
creator = models.CharField('creator', max_length=200) # TODO really??? THIS SHOULD BE A FK TO A USER
creation_time = models.DateTimeField(null=True, blank=True, db_index=True)
minLat = models.FloatField(blank=True, null=True, db_index=True)
minLon = models.FloatField(blank=True, null=True, db_index=True)
maxLat = models.FloatField(blank=True, null=True, db_index=True)
maxLon = models.FloatField(blank=True, null=True, db_index=True)
region = models.ForeignKey('geocamUtil.SiteFrame', null=True)
def __unicode__(self):
return self.name
class Meta:
abstract = True
ordering = ['name']
class Place(AbstractNode, MP_Node):
"""
Represents a Place with name, used for geocoding
https://developers.arcgis.com/rest/geocode/api-reference/geocoding-category-filtering.htm
Extending treebeard MP_Node provides the hierarchy
"""
verified = models.BooleanField(default=False) # true if a user has verified this place, ie by editing it manually
extras = ExtrasDotField(default='') # a dictionary of name/value pairs to support flexible extra metadata
geometry = ExtrasDotField(default='') # a dictionary of name/value pairs to support geometry
node_order_by = ['name'] # for treebeard ordering
class AbstractMapNode(AbstractNode):
"""
Abstract Map Node for an entry in the map tree, which can have a parent.
"""
modifier = models.CharField('modifier', max_length=200, null=True, blank=True)
modification_time = models.DateTimeField(null=True, blank=True, db_index=True)
deleted = models.BooleanField(blank=True, default=False)
uuid = UuidField(primary_key=True)
@property
def parent(self):
""" child classes must define parent"""
return None
def getEditHref(self):
""" child classes must define edit href
"""
return None
def getStart(self):
""" If this is a map layer with time, return the start time """
return None
def getEnd(self):
""" If this is a map layer with time, return the end time """
return None
def getInterval(self):
""" If this is a map layer with time, return the interval in decimal seconds """
return None
def getTimeUrl(self, theTime):
""" If this is a map layer with time, return the url to get the data for that time """
return None
def get_tree_json(self):
""" Get the json block that the fancy tree needs to render this node """
result = {"title": self.name,
"key": self.uuid,
"tooltip": self.description,
"data": {"type": self.__class__.__name__,
"parentId": None,
"href": self.getEditHref()}
}
if self.parent:
result['data']['parentId'] = self.parent.uuid
# this slowed the tree down ... for now just put in overlaytime
# if self.getStart():
# result['data']['start'] = self.getStart()
# if self.getEnd():
# result['data']['end'] = self.getEnd()
# if self.getInterval():
# result['data']['interval'] = self.getInterval()
if self.getKmlUrl():
result['data']['kmlFile'] = self.getKmlUrl()
return result
def getKmlUrl(self):
""" If this element has an url which returns kml, override this function to return that url. """
return None
def __unicode__(self):
return self.name
class Meta:
abstract = True
ordering = ['name']
class MapGroup(AbstractMapNode):
"""
A Map Group, or folder in the map tree.
"""
parent = models.ForeignKey('self', db_column='parentId',
null=True, blank=True,
verbose_name='parent group')
def get_tree_json(self):
""" Get the json block that the fancy tree needs to render this node """
result = super(MapGroup, self).get_tree_json()
result["folder"] = True
return result
def getEditHref(self):
return reverse('folderDetail', kwargs={'groupID': self.uuid})
class AbstractMap(AbstractMapNode):
"""
Abstract Map for an entry in a MapGroup (which is not a group, but something we can render)
"""
locked = models.BooleanField(blank=True, default=False)
visible = models.BooleanField(blank=False, default=False)
transparency = models.PositiveSmallIntegerField(default=0, validators=[MaxValueValidator(100)], help_text="100=transparent") #100=fully transparent, 0=fully opaque
parent = models.ForeignKey(MapGroup, db_column='parentId',
null=True, blank=True,
verbose_name='group')
def get_tree_json(self):
""" Get the json block that the fancy tree needs to render this node """
result = super(AbstractMap, self).get_tree_json()
result["data"]["transparency"] = self.transparency
result["selected"] = self.visible
return result
def getUrl(self):
""" subclass must implement """
pass
def getGoogleEarthUrl(self):
# make sure the subclass provides this, or overrides
return insertIntoPath(self.getUrl(), 'rest')
class Meta:
abstract = True
couchStore = CouchDbStorage()
class KmlMap(AbstractMap):
"""
A reference to an external or local KML file. Note we can't render all KML features in all libraries
"""
kmlFile = models.CharField('KML File', max_length=200) # actual name of the kml file
localFile = models.FileField(upload_to=settings.XGDS_MAP_SERVER_MEDIA_SUBDIR, max_length=256,
null=True, blank=True, storage=couchStore)
openable = models.BooleanField(default=True)
hasNetworkLink = models.BooleanField(default=False) # if something has a network link, right now do not include it for openlayers
def getEditHref(self):
return reverse('mapDetail', kwargs={'mapID': self.uuid})
@property
def isLogo(self):
global LOGO_REGEXES
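        # Compile the configured logo-name patterns once and cache them at module level.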
if LOGO_REGEXES is None:
LOGO_REGEXES = [re.compile(pattern)
for pattern in settings.XGDS_MAP_SERVER_LOGO_PATTERNS]
return any([r.search(self.name)
for r in LOGO_REGEXES])
def getGoogleEarthUrl(self, request):
if self.localFile:
restString = insertIntoPath(self.localFile.url, 'rest')
return request.build_absolute_uri(restString)
elif self.kmlFile:
if (self.kmlFile.startswith('/')):
# starts with a slash, is a feed from our server.
# inject the word rest after the first slash
restString = insertIntoPath(self.kmlFile, 'rest')
return request.build_absolute_uri(restString)
else:
# not sure what these files are
return request.build_absolute_uri(self.kmlFile.url)
return ''
def getUrl(self):
try:
if self.kmlFile and not self.localFile:
if (self.kmlFile.startswith('/')):
return self.kmlFile
else:
return settings.DATA_URL + settings.XGDS_MAP_SERVER_DATA_SUBDIR + self.kmlFile
elif self.localFile:
return self.localFile.url
except:
print 'problem getting url from local file'
traceback.print_exc()
return ''
def getKmlUrl(self):
""" If this element has an url which returns kml, override this function to return that url. """
return self.getUrl()
def get_tree_json(self):
""" Get the json block that the fancy tree needs to render this node """
if self.hasNetworkLink:
return None
result = super(KmlMap, self).get_tree_json()
result["data"]["openable"] = self.openable
# result["data"]["transparency"] = self.transparency
if self.localFile:
try:
result["data"]["localFile"] = self.localFile.url
except:
print 'problem getting url from localfile'
traceback.print_exc()
return result
class AbstractWMS(AbstractMap):
local = models.BooleanField(default=False) # locally hosted on geoserver
sourceFile = models.FileField(upload_to=settings.XGDS_MAP_SERVER_GEOTIFF_SUBDIR, max_length=256,
null=True, blank=True)
projectionName = models.CharField(null=True, max_length=32, blank=True)
wmsUrl = models.CharField(null=True, max_length=512)
layers = models.CharField(null=True, max_length=64, db_index=True)
# geoserver specific stuff
workspaceName = models.CharField(null=True, max_length=32, blank=True)
storeName = models.CharField(null=True, max_length=32, blank=True)
format = models.CharField(max_length=16, default="image/png") # the format on geoserver
tileWidth = models.IntegerField(default=256)
tileHeight = models.IntegerField(default=256)
minLevel = models.IntegerField(default=0)
maxLevel = models.IntegerField(null=True, blank=True)
wmsVersion = models.CharField(default='1.1.1', max_length=16) # wms version, 1.1.1, or could be 1.3.0
srs = models.CharField(null=True, blank=True, max_length=32) # srs or crs if we are wms version 1.3.0 or higher
hasTime = models.BooleanField(default=False) #whether or not this supports time ie lunaserv time wms
start = models.DateTimeField(null=True, blank=True, db_index=True)
end = models.DateTimeField(null=True, blank=True, db_index=True)
interval = models.FloatField(null=True, blank=True)
def getUrl(self):
return self.wmsUrl
def get_tree_json(self):
""" Get the json block that the fancy tree needs to render this node """
result = super(AbstractWMS, self).get_tree_json()
result["data"]["tileURL"] = self.getUrl()
result["data"]["format"] = self.format
result["data"]["layers"] = self.layers
result["data"]["tileWidth"] = self.tileWidth
result["data"]["tileHeight"] = self.tileHeight
result["data"]["wmsVersion"] = self.wmsVersion
result["data"]["srs"] = self.srs
result["data"]["hasTime"] = self.hasTime
if self.hasTime:
result["data"]["start"] = self.start
result["data"]["end"] = self.end
result["data"]["interval"] = self.interval
if self.minLevel > 0:
result["data"]["minLevel"] = self.minLevel
if self.maxLevel:
result["data"]["maxLevel"] = self.maxLevel
if self.projectionName:
result["data"]["projectionName"] = self.projectionName
if self.minLat:
result['data']['miny'] = self.minLat
result['data']['minx'] = self.minLon
result['data']['maxy'] = self.maxLat
result['data']['maxx'] = self.maxLon
return result
class Meta:
abstract = True
class AbstractGeotiff(AbstractMap):
sourceFile = models.FileField(upload_to=settings.XGDS_MAP_SERVER_GEOTIFF_SUBDIR, max_length=256, null=True, blank=True)
projectionName = models.CharField(null=True, max_length=32, default="EPSG:3857")
wmsUrl = models.CharField(null=True, max_length=512, blank=True)
layers = models.CharField(null=True, max_length=64, db_index=True, blank=True)
format = models.CharField(max_length=16, default="image/png") # the format on geoserver
tileWidth = models.IntegerField(default=256)
tileHeight = models.IntegerField(default=256)
minLevel = models.IntegerField(default=0)
maxLevel = models.IntegerField(null=True, blank=True)
wmsVersion = models.CharField(default='1.1.1', max_length=16) # wms version, 1.1.1, or could be 1.3.0
srs = models.CharField(null=True, blank=True, max_length=32) # srs or crs if we are wms version 1.3.0 or higher
colorized = models.BooleanField(default=False)
minimumValue = models.FloatField(null=True, blank=True)
maximumValue = models.FloatField(null=True, blank=True)
colorPalette = models.CharField(null=True, max_length=64, default="red_to_blue", blank=True)
minimumColor = models.CharField(null=True, max_length=64, blank=True)
maximumColor = models.CharField(null=True, max_length=64, blank=True)
def getUrl(self):
return self.wmsUrl
def get_tree_json(self):
""" Get the json block that the fancy tree needs to render this node """
result = super(AbstractGeotiff, self).get_tree_json()
result["data"]["tileURL"] = self.getUrl()
result["data"]["format"] = self.format
result["data"]["layers"] = self.layers
result["data"]["tileWidth"] = self.tileWidth
result["data"]["tileHeight"] = self.tileHeight
result["data"]["wmsVersion"] = self.wmsVersion
result["data"]["srs"] = self.srs
result["data"]["hasTime"] = False
result["data"]["styles"] = self.colorPalette
result["data"]["href"] = reverse('mapEditGeotiff', kwargs={'geotiffID': self.uuid})
if self.minLevel > 0:
result["data"]["minLevel"] = self.minLevel
if self.maxLevel:
result["data"]["maxLevel"] = self.maxLevel
if self.projectionName:
result["data"]["projectionName"] = self.projectionName
if self.minLat:
result['data']['miny'] = self.minLat
result['data']['minx'] = self.minLon
result['data']['maxy'] = self.maxLat
result['data']['maxx'] = self.maxLon
return result
class Meta:
abstract = True
class Geotiff(AbstractGeotiff):
class Meta:
abstract = False
class WMSTile(AbstractWMS):
class Meta:
abstract = False
class WMTSTile(AbstractWMS):
urlPattern = models.CharField(max_length=256, null=True, blank=True) # file naming pattern that takes the time and converts it to the file we want, use in strftime
style = models.CharField(null=True, blank=True, max_length=128)
tileMatrixSetID = models.CharField(null=True, blank=True, max_length=128)
tileMatrixLabels = models.CharField(null=True, blank=True, max_length=256)
subdomains = models.CharField(null=True, blank=True, max_length=256)
start = models.DateTimeField(null=True, blank=True, db_index=True)
end = models.DateTimeField(null=True, blank=True, db_index=True)
interval = models.FloatField(null=True, blank=True)
def get_tree_json(self):
""" Get the json block that the fancy tree needs to render this node """
result = super(WMTSTile, self).get_tree_json()
#result["data"]["urlPattern"] = self.urlPattern
result["data"]["style"] = self.style
result["data"]["start"] = self.start
result["data"]["end"] = self.end
result["data"]["interval"] = self.interval
result["data"]["tileMatrixSetID"] = self.tileMatrixSetID
result["data"]["tileMatrixLabels"] = self.tileMatrixLabels
result["data"]["subdomains"] = self.subdomains
return result
class Meta:
abstract = False
class AbstractMapTile(AbstractMap):
"""
A reference to a tiled geoTiff.
"""
sourceFile = models.FileField(upload_to=settings.XGDS_MAP_SERVER_GEOTIFF_SUBDIR, max_length=256,
null=True, blank=True)
processed = models.BooleanField(default=False)
minx = models.FloatField(null=True)
miny = models.FloatField(null=True)
maxx = models.FloatField(null=True)
maxy = models.FloatField(null=True)
resolutions = models.CharField(null=True, max_length=256)
projectionName = models.CharField(null=True, max_length=32) # If this is not a normal WGS84 projection ... handle it in your front end with this name
def initResolutions(self):
if not self.resolutions:
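            # tilemapresource.xml is assumed to be the TMS metadata written when the
            # GeoTIFF was tiled (e.g. by gdal2tiles); each TileSet entry carries a
            # units-per-pixel value that we collect as the layer's resolutions.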
try:
result = ''
filepath = os.path.join(settings.DATA_ROOT, settings.XGDS_MAP_SERVER_GEOTIFF_SUBDIR, self.name.replace(' ', '_'), 'tilemapresource.xml')
tilemapresource = untangle.parse(str(filepath))
for t in tilemapresource.TileMap.TileSets.children:
result += str(int(float(t['units-per-pixel']))) + ' '
result = result.strip()
if result:
self.resolutions = result
self.save()
except:
traceback.print_exc()
pass
@property
def intResolutions(self):
self.initResolutions()
if self.resolutions:
return [int(n) for n in self.resolutions.split()]
def initBounds(self):
if not self.minx:
try:
bounds = self.getBounds()
self.minx = bounds[0][0]
self.miny = bounds[0][1]
self.maxx = bounds[1][0]
self.maxy = bounds[1][1]
self.save()
except:
pass
def getBounds(self):
src = gdal.Open(self.sourceFile.path)
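        # GetGeoTransform() returns (origin x, pixel width, row rotation,
        # origin y, column rotation, pixel height); pixel height is negative
        # for north-up rasters, which is why miny is derived from maxy below.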
minx, xres, xskew, maxy, yskew, yres = src.GetGeoTransform()
maxx = minx + (src.RasterXSize * xres)
miny = maxy + (src.RasterYSize * yres)
return [[minx, miny], [maxx, maxy]]
@property
def sourceFileLink(self):
if self.sourceFile:
return "<a href='%s'>Download %s (%d MB)</a>" % (self.sourceFile.url, os.path.basename(self.sourceFile.name), self.sourceFile.size / 1000000)
else:
return "No Source File"
def getUrl(self):
return self.getXYZTileSourceUrl()
def getXYZTileSourceUrl(self):
result = os.path.join(self.getTilePath(), '{z}/{x}/{-y}.png')
return result
def getTilePath(self):
result = os.path.join(settings.DATA_URL, settings.XGDS_MAP_SERVER_GEOTIFF_SUBDIR, self.name.replace(' ', '_'))
return result
def get_tree_json(self):
""" Get the json block that the fancy tree needs to render this node """
result = super(AbstractMapTile, self).get_tree_json()
result["data"]["tileURL"] = self.getUrl()
result["data"]["tilePath"] = self.getTilePath()
if self.minx:
result["data"]["minx"] = self.minx
result["data"]["miny"] = self.miny
result["data"]["maxx"] = self.maxx
result["data"]["maxy"] = self.maxy
if self.resolutions:
result["data"]["resolutions"] = self.intResolutions
if self.projectionName:
result["data"]["projectionName"] = self.projectionName
return result
def rename(self, newName):
oldPath = os.path.join(settings.PROJ_ROOT, self.getTilePath()[1:])
self.name = newName
newPath = os.path.join(settings.PROJ_ROOT, self.getTilePath()[1:])
shutil.move(oldPath, newPath)
class Meta:
abstract = True
class MapTile(AbstractMapTile):
def getEditHref(self):
return reverse('mapEditTile', kwargs={'tileID': self.uuid})
class GroundOverlayTime(AbstractMap):
"""
A reference to a ground overlay (image on a rectangle), with time data
"""
sourcePath = models.CharField(max_length=256) # path to the root of the image files
urlPattern = models.CharField(max_length=256, null=True, blank=True) # file naming pattern that takes the time and converts it to the file we want, use in strftime
start = models.DateTimeField(null=True, blank=True, db_index=True)
end = models.DateTimeField(null=True, blank=True, db_index=True)
interval = models.FloatField(null=True, blank=True)
width = models.IntegerField(db_index=True, default=0)
height = models.IntegerField(db_index=True, default=0)
projectionName = models.CharField(null=True, max_length=32) # If this is not a normal WGS84 projection ... handle it in your front end with this name
def getStart(self):
""" If this is a map layer with time, return the start time """
return self.start
def getEnd(self):
""" If this is a map layer with time, return the end time """
return self.end
def getInterval(self):
""" If this is a map layer with time, return the interval in decimal seconds """
return self.interval
def get_tree_json(self):
""" Get the json block that the fancy tree needs to render this node """
result = super(GroundOverlayTime, self).get_tree_json()
result['data']['minLat'] = self.minLat
result['data']['minLon'] = self.minLon
result['data']['maxLat'] = self.maxLat
result['data']['maxLon'] = self.maxLon
result['data']['width'] = self.width
result['data']['height'] = self.height
if self.getStart():
result['data']['start'] = self.getStart()
if self.getEnd():
result['data']['end'] = self.getEnd()
if self.getInterval():
result['data']['interval'] = self.getInterval()
result['data']['projectionName'] = self.projectionName
result['data']['timeUrl'] = '/xgds_map_server/overlayTime/' + self.uuid + '/{Time}' # for Cesium
result['data']['imageUrl'] = '/xgds_map_server/overlayTimeImage/' + self.uuid + '/{Time}' # for Cesium
return result
def updateTimeFromInterval(self, inputTime):
""" Right now we support even numbers of hours, minutes or seconds as intervals
We do not support fractional numbers, ie 1.5 hours or 2.5 minutes etc.
"""
t = copy.copy(inputTime)
if self.interval > 3600: # more than one hour
hourInterval = self.interval / 3600
mod = int(t.hour % hourInterval)
t = t.replace(hour=t.hour - mod, minute=0, second=0, microsecond=0)
elif self.interval > 60: # more than one minute
minuteInterval = self.interval / 60
mod = int(t.minute % minuteInterval)
t = t.replace(minute=t.minute - mod)
elif self.interval > 1: # more than one second
mod = int(t.second % self.interval)
t = t.replace(second=t.second - mod)
return t
def getTimeForImage(self, theTime):
""" Default to the start time if no time is given """
if not theTime:
cleanTime = self.start
else:
# check the bounds
valid = self.start <= theTime <= self.end
if not valid:
cleanTime = None
else:
cleanTime = self.updateTimeFromInterval(theTime)
return cleanTime
def getImagePath(self, theTime, rest=False):
cleanTime = self.getTimeForImage(theTime)
if not cleanTime:
return None
specificFile = cleanTime.strftime(self.urlPattern)
if not rest:
prefix = self.sourcePath
else:
prefix = insertIntoPath(self.sourcePath)
result = os.path.join(prefix, specificFile)
return result
class MapDataTile(AbstractMapTile):
"""
    A MapTile layer that has meaningful data: an optional legend, a file containing the data, and JavaScript to render the data value below the map.
"""
dataFile = models.FileField(upload_to=settings.XGDS_MAP_SERVER_MAPDATA_SUBDIR, max_length=256,
null=True, blank=True)
legendFile = models.FileField(upload_to=settings.XGDS_MAP_SERVER_MAPDATA_SUBDIR, max_length=256,
null=True, blank=True)
legendDefaultVisible = models.BooleanField(default=True, verbose_name='Show Legend:', help_text='Check to show legend when tile layer is turned on')
valueLabel = models.CharField(max_length=128, null=True, blank=True)
unitsLabel = models.CharField(max_length=32, null=True, blank=True)
jsFunction = models.TextField(null=True, blank=True)
jsRawFunction = models.TextField(null=True, blank=True)
def getDataFileUrl(self):
if self.dataFile:
return self.dataFile.url
return None
def getLegendFileUrl(self):
if self.legendFile:
return self.legendFile.url
return None
def get_tree_json(self):
""" Get the json block that the fancy tree needs to render this node """
result = super(MapDataTile, self).get_tree_json()
result["data"]["dataFileURL"] = self.getDataFileUrl()
result["data"]["legendFileURL"] = self.getLegendFileUrl()
result["data"]["legendVisible"] = self.legendDefaultVisible
result["data"]["valueLabel"] = self.valueLabel
result["data"]["unitsLabel"] = self.unitsLabel
result["data"]["jsFunction"] = self.jsFunction
result["data"]["jsRawFunction"] = self.jsRawFunction
return result
def getEditHref(self):
return reverse('mapEditDataTile', kwargs={'tileID': self.uuid})
class MapLayer(AbstractMap):
""" A map layer which will have a collection of features that have content in them. """
jsonFeatures = ExtrasDotField()
defaultColor = models.CharField(max_length=32, null=True, blank=True)
def getEditHref(self):
return reverse('mapEditLayer', kwargs={'layerID': self.uuid})
def toDict(self):
result = modelToDict(self)
result['uuid'] = self.uuid
return result
# A bit hacky, but... toMapDict() returns metadata info on the layer object so getMappedObjectsJson() can be used
# to return a summary of the available layers if you pass it (e.g.) xgds_map_server.MapLayer as a param.
def toMapDict(self):
result = {"maxLat": self.maxLat, "minLat": self.minLat,
"maxLon": self.maxLon, "minLon": self.minLon,
"parent": self.parent.uuid, "creator":self.creator,
"defaultColor": self.defaultColor,
"description": self.description,
"creation_time": self.creation_time,
"uuid": self.uuid, "visible": self.visible,
"modification_time": self.modification_time,
"region": self.region,
"name": self.name}
return result
def get_tree_json(self):
""" Get the json block that the fancy tree needs to render this node """
result = super(MapLayer, self).get_tree_json()
result["data"]["layerJSON"] = reverse('mapLayerJSON', kwargs={'layerID': self.uuid})
return result
def getGoogleEarthUrl(self, request):
theUrl = reverse('mapLayerKML', kwargs={'layerID': self.uuid})
theUrl = insertIntoPath(theUrl, 'rest')
return request.build_absolute_uri(theUrl)
def getFeatureJson(self):
return self.jsonFeatures
def getKmlUrl(self):
""" If this element has an url which returns kml, override this function to return that url. """
return reverse('mapLayerKML', kwargs={'layerID': self.uuid})
class MapCollection(AbstractMap):
"""
A layer that encapsulates a collection of found objects.
"""
collection = GenericRelation('MapCollectionItem', related_name='collection_set')
def get_collection_as_dicts(self):
result = []
for o in self.collection:
bd = o.getBroadcastData()
if bd:
result.append(bd)
return result
def getUrl(self):
return reverse('mapCollectionJSON', kwargs={'mapCollectionID': self.uuid})
#
# def getEditHref(self):
# return reverse('mapEditMapCollection', kwargs={'mapCollectionID': self.uuid})
def get_tree_json(self):
""" Get the json block that the fancy tree needs to render this node """
result = super(MapCollection, self).get_tree_json()
result["data"]["collectionJSON"] = self.getUrl()
return result
class MapCollectionItem(models.Model):
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField(db_index=True)
content_object = GenericForeignKey('content_type', 'object_id')
container = models.ForeignKey(MapCollection)
def __unicode__(self):
return ' %s contains %s:%s' % (self.content_object.type, str(self.content_object), self.container.name)
# class MapSearch(AbstractMap):
# TODO overhaul because this depended on xgds_data
# """
# A layer that represents a search which can be refreshed
# """
# requestLog = models.ForeignKey(RequestLog)
# mapBounded = models.BooleanField(blank=True, default=False) # true if you want to pass the map extent to the query and redo the search with that extent
#
# def getUrl(self):
# return reverse('mapSearchJSON', kwargs={'mapSearchID': self.uuid})
#
# def getEditHref(self):
# return reverse('mapEditMapSearch', kwargs={'mapSearchID': self.uuid})
#
# def get_tree_json(self):
# """ Get the json block that the fancy tree needs to render this node """
# result = super(MapSearch, self).get_tree_json()
# result["data"]["searchJSON"] = self.getUrl()
# # result["data"]["searchResultsJSON"] = reverse('data_searchResultsJSON', kwargs={'collectionID': self.requestLog.pk})
# return result
class MapLink(AbstractMap):
"""
    A layer that encapsulates a URL that returns JSON objects
"""
url = models.CharField('url', max_length=512) # url to give map renderable json objects
childNodesUrl = models.CharField('childNodesUrl', max_length=512) # if the tree should have child nodes, return the json for the children from this url
sseUrl = models.CharField('sseUrl', max_length=512) # url for sse data
    mapBounded = models.BooleanField(blank=True, default=False)  # true if you want to pass the map extent to the query and redo the search with that extent
@property
def openable(self):
        return self.childNodesUrl is not None
def getUrl(self):
if self.url:
return self.url
elif self.childNodesUrl:
return self.childNodesUrl
def getEditHref(self):
""" since we create map link ourselves do not provide a facility to edit them.
"""
return ""
def get_tree_json(self):
""" Get the json block that the fancy tree needs to render this node """
result = super(MapLink, self).get_tree_json()
if self.url:
result["data"]["json"] = self.url
if self.childNodesUrl:
result["data"]["childNodesUrl"] = self.childNodesUrl
result['folder'] = True
result['lazy'] = True
result["data"]["mapBounded"] = self.mapBounded
result["data"]["sseUrl"] = self.sseUrl
try:
del result["data"]["transparency"]
except:
pass
return result
class GeoJSON(AbstractMap):
geoJSON = models.TextField()
def get_tree_json(self):
        result = super(GeoJSON, self).get_tree_json()
result["data"]["geoJSON"] = self.geoJSON
return result
class Meta:
abstract = False
""" IMPORTANT These have to be defined after the models they refer to are defined."""
MAP_NODE_MANAGER = ModelCollectionManager(AbstractMapNode, [MapGroup, MapLayer, KmlMap, MapTile, MapDataTile, MapLink, GroundOverlayTime, WMSTile, WMTSTile, GeoJSON, Geotiff, MapCollection])
# this manager does not include groups
MAP_MANAGER = ModelCollectionManager(AbstractMap, [MapLayer, KmlMap, MapTile, MapDataTile, MapLink, GroundOverlayTime, WMSTile, WMTSTile, GeoJSON, Geotiff, MapCollection])
|
|
# Django settings for cms project.
from distutils.version import LooseVersion
import django
import os
PROJECT_DIR = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
)
CACHE_BACKEND = 'locmem:///'
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'cms.sqlite',
}
}
DATABASE_SUPPORTS_TRANSACTIONS = True
TIME_ZONE = 'America/Chicago'
SITE_ID = 1
USE_I18N = True
MEDIA_ROOT = os.path.join(PROJECT_DIR, 'media/')
STATIC_ROOT = os.path.join(PROJECT_DIR, 'static/')
CMS_MEDIA_ROOT = os.path.join(PROJECT_DIR, '../../cms/media/cms/')
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/static/admin/'
EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
FIXTURE_DIRS = [os.path.join(PROJECT_DIR, 'fixtures')]
SECRET_KEY = '*xq7m@)*f2awoj!spa0(jibsrz9%c0d=e(g)v*!17y(vx0ue_3'
#TEMPLATE_LOADERS = (
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
#)
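# Wrap the standard loaders in the cached loader so each template is compiled
# only once per process.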
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)),
)
TEMPLATE_CONTEXT_PROCESSORS = [
"django.core.context_processors.auth",
"django.core.context_processors.i18n",
"django.core.context_processors.debug",
"django.core.context_processors.request",
"django.core.context_processors.media",
'django.core.context_processors.csrf',
"cms.context_processors.media",
"sekizai.context_processors.sekizai",
]
INTERNAL_IPS = ('127.0.0.1',)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'cms.middleware.multilingual.MultilingualURLMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.doc.XViewMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'cms.middleware.user.CurrentUserMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
)
ROOT_URLCONF = 'project.urls'
TEMPLATE_DIRS = (
os.path.join(PROJECT_DIR, 'templates'),
)
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'django.contrib.sites',
'cms',
'menus',
'cms.plugins.text',
'cms.plugins.picture',
'cms.plugins.file',
'cms.plugins.flash',
'cms.plugins.link',
'cms.plugins.snippet',
'cms.plugins.googlemap',
'cms.plugins.teaser',
'cms.plugins.video',
'cms.plugins.twitter',
'cms.plugins.inherit',
'mptt',
'project.sampleapp',
'project.placeholderapp',
'project.pluginapp',
'project.pluginapp.plugins.manytomany_rel',
'project.fakemlng',
'project.fileapp',
'south',
'reversion',
'sekizai',
]
if LooseVersion(django.get_version()) >= LooseVersion('1.3'):
INSTALLED_APPS.append('django.contrib.staticfiles')
TEMPLATE_CONTEXT_PROCESSORS.append("django.core.context_processors.static")
else:
INSTALLED_APPS.append('staticfiles')
TEMPLATE_CONTEXT_PROCESSORS.append("staticfiles.context_processors.static")
gettext = lambda s: s
LANGUAGE_CODE = "en"
LANGUAGES = (
('en', gettext('English')),
('fr', gettext('French')),
('de', gettext('German')),
('pt-BR', gettext("Brazil")),
('nl', gettext("Dutch")),
)
CMS_LANGUAGE_CONF = {
'de':['fr', 'en'],
'en':['fr', 'de'],
}
CMS_SITE_LANGUAGES = {
1:['en','de','fr','pt-BR'],
2:['de','fr'],
3:['nl'],
}
APPEND_SLASH = True
CMS_TEMPLATES = (
('col_two.html', gettext('two columns')),
('col_three.html', gettext('three columns')),
('nav_playground.html', gettext('navigation examples')),
)
CMS_PLACEHOLDER_CONF = {
'col_sidebar': {
'plugins': ('FilePlugin', 'FlashPlugin', 'LinkPlugin', 'PicturePlugin',
'TextPlugin', 'SnippetPlugin'),
'name': gettext("sidebar column")
},
'col_left': {
'plugins': ('FilePlugin', 'FlashPlugin', 'LinkPlugin', 'PicturePlugin',
'TextPlugin', 'SnippetPlugin','GoogleMapPlugin',),
'name': gettext("left column")
},
'col_right': {
'plugins': ('FilePlugin', 'FlashPlugin', 'LinkPlugin', 'PicturePlugin',
'TextPlugin', 'SnippetPlugin','GoogleMapPlugin',),
'name': gettext("right column")
},
'extra_context': {
"plugins": ('TextPlugin',),
"extra_context": {"width": 250},
"name": "extra context"
},
}
CMS_SOFTROOT = True
CMS_MODERATOR = True
CMS_PERMISSION = True
CMS_PUBLIC_FOR = 'all'
CMS_CACHE_DURATIONS = {
'menus': 0,
'content': 0,
'permissions': 0,
}
CMS_REDIRECTS = True
CMS_SEO_FIELDS = True
CMS_FLAT_URLS = False
CMS_MENU_TITLE_OVERWRITE = True
CMS_HIDE_UNTRANSLATED = False
CMS_URL_OVERWRITE = True
CMS_SHOW_END_DATE = True
CMS_SHOW_START_DATE = True
CMS_PLUGIN_PROCESSORS = tuple()
CMS_PLUGIN_CONTEXT_PROCESSORS = tuple()
SOUTH_TESTS_MIGRATE = False
CMS_NAVIGATION_EXTENDERS = (
('project.sampleapp.menu_extender.get_nodes', 'SampleApp Menu'),
)
try:
from local_settings import *
except ImportError:
pass
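# A minimal sketch of what a sibling local_settings.py might contain: because of
# the wildcard import above, any module-level assignment there shadows the
# defaults defined earlier in this file. The values below are illustrative only.
#
#     # local_settings.py
#     DEBUG = False
#     TEMPLATE_DEBUG = False
#     CACHE_BACKEND = 'dummy:///'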
TEST_RUNNER = 'project.testrunner.CMSTestSuiteRunner'
TEST_OUTPUT_VERBOSE = True
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import re
import unittest
from airflow import DAG, AirflowException
from airflow.contrib.operators.dataproc_operator import \
DataprocClusterCreateOperator, \
DataprocClusterDeleteOperator, \
DataProcHadoopOperator, \
DataProcHiveOperator, \
DataProcPySparkOperator, \
DataProcSparkOperator, \
DataprocWorkflowTemplateInstantiateInlineOperator, \
DataprocWorkflowTemplateInstantiateOperator, \
DataprocClusterScaleOperator
from airflow.version import version
from copy import deepcopy
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
MagicMock = mock.MagicMock
Mock = mock.Mock
patch = mock.patch
TASK_ID = 'test-dataproc-operator'
CLUSTER_NAME = 'test-cluster-name'
PROJECT_ID = 'test-project-id'
NUM_WORKERS = 123
ZONE = 'us-central1-a'
NETWORK_URI = '/projects/project_id/regions/global/net'
SUBNETWORK_URI = '/projects/project_id/regions/global/subnet'
INTERNAL_IP_ONLY = True
TAGS = ['tag1', 'tag2']
STORAGE_BUCKET = 'gs://airflow-test-bucket/'
IMAGE_VERSION = '1.1'
MASTER_MACHINE_TYPE = 'n1-standard-2'
MASTER_DISK_SIZE = 100
MASTER_DISK_TYPE = 'pd-standard'
WORKER_MACHINE_TYPE = 'n1-standard-2'
WORKER_DISK_SIZE = 100
WORKER_DISK_TYPE = 'pd-standard'
NUM_PREEMPTIBLE_WORKERS = 2
GET_INIT_ACTION_TIMEOUT = "600s" # 10m
LABEL1 = {}
LABEL2 = {'application': 'test', 'year': 2017}
SERVICE_ACCOUNT_SCOPES = [
'https://www.googleapis.com/auth/bigquery',
'https://www.googleapis.com/auth/bigtable.data'
]
IDLE_DELETE_TTL = 321
AUTO_DELETE_TIME = datetime.datetime(2017, 6, 7)
AUTO_DELETE_TTL = 654
DEFAULT_DATE = datetime.datetime(2017, 6, 6)
REGION = 'test-region'
MAIN_URI = 'test-uri'
TEMPLATE_ID = 'template-id'
HOOK = 'airflow.contrib.operators.dataproc_operator.DataProcHook'
DATAPROC_JOB_ID = 'dataproc_job_id'
DATAPROC_JOB_TO_SUBMIT = {
'job': {
'reference': {
'projectId': PROJECT_ID,
'jobId': DATAPROC_JOB_ID,
},
'placement': {
'clusterName': CLUSTER_NAME
}
}
}
def _assert_dataproc_job_id(mock_hook, dataproc_task):
hook = mock_hook.return_value
job = MagicMock()
job.build.return_value = DATAPROC_JOB_TO_SUBMIT
hook.create_job_template.return_value = job
dataproc_task.execute(None)
assert dataproc_task.dataproc_job_id == DATAPROC_JOB_ID
class DataprocClusterCreateOperatorTest(unittest.TestCase):
# Unit test for the DataprocClusterCreateOperator
def setUp(self):
# instantiate two different test cases with different labels.
self.labels = [LABEL1, LABEL2]
self.dataproc_operators = []
self.mock_conn = Mock()
for labels in self.labels:
self.dataproc_operators.append(
DataprocClusterCreateOperator(
task_id=TASK_ID,
cluster_name=CLUSTER_NAME,
project_id=PROJECT_ID,
num_workers=NUM_WORKERS,
zone=ZONE,
network_uri=NETWORK_URI,
subnetwork_uri=SUBNETWORK_URI,
internal_ip_only=INTERNAL_IP_ONLY,
tags=TAGS,
storage_bucket=STORAGE_BUCKET,
image_version=IMAGE_VERSION,
master_machine_type=MASTER_MACHINE_TYPE,
master_disk_type=MASTER_DISK_TYPE,
master_disk_size=MASTER_DISK_SIZE,
worker_machine_type=WORKER_MACHINE_TYPE,
worker_disk_type=WORKER_DISK_TYPE,
worker_disk_size=WORKER_DISK_SIZE,
num_preemptible_workers=NUM_PREEMPTIBLE_WORKERS,
labels=deepcopy(labels),
service_account_scopes=SERVICE_ACCOUNT_SCOPES,
idle_delete_ttl=IDLE_DELETE_TTL,
auto_delete_time=AUTO_DELETE_TIME,
auto_delete_ttl=AUTO_DELETE_TTL
)
)
self.dag = DAG(
'test_dag',
default_args={
'owner': 'airflow',
'start_date': DEFAULT_DATE,
'end_date': DEFAULT_DATE,
},
schedule_interval='@daily')
def test_init(self):
"""Test DataProcClusterOperator instance is properly initialized."""
for suffix, dataproc_operator in enumerate(self.dataproc_operators):
self.assertEqual(dataproc_operator.cluster_name, CLUSTER_NAME)
self.assertEqual(dataproc_operator.project_id, PROJECT_ID)
self.assertEqual(dataproc_operator.num_workers, NUM_WORKERS)
self.assertEqual(dataproc_operator.zone, ZONE)
self.assertEqual(dataproc_operator.network_uri, NETWORK_URI)
self.assertEqual(dataproc_operator.subnetwork_uri, SUBNETWORK_URI)
self.assertEqual(dataproc_operator.tags, TAGS)
self.assertEqual(dataproc_operator.storage_bucket, STORAGE_BUCKET)
self.assertEqual(dataproc_operator.image_version, IMAGE_VERSION)
self.assertEqual(dataproc_operator.master_machine_type, MASTER_MACHINE_TYPE)
self.assertEqual(dataproc_operator.master_disk_size, MASTER_DISK_SIZE)
self.assertEqual(dataproc_operator.master_disk_type, MASTER_DISK_TYPE)
self.assertEqual(dataproc_operator.worker_machine_type, WORKER_MACHINE_TYPE)
self.assertEqual(dataproc_operator.worker_disk_size, WORKER_DISK_SIZE)
self.assertEqual(dataproc_operator.worker_disk_type, WORKER_DISK_TYPE)
self.assertEqual(dataproc_operator.num_preemptible_workers,
NUM_PREEMPTIBLE_WORKERS)
self.assertEqual(dataproc_operator.labels, self.labels[suffix])
self.assertEqual(dataproc_operator.service_account_scopes,
SERVICE_ACCOUNT_SCOPES)
self.assertEqual(dataproc_operator.idle_delete_ttl, IDLE_DELETE_TTL)
self.assertEqual(dataproc_operator.auto_delete_time, AUTO_DELETE_TIME)
self.assertEqual(dataproc_operator.auto_delete_ttl, AUTO_DELETE_TTL)
def test_get_init_action_timeout(self):
for suffix, dataproc_operator in enumerate(self.dataproc_operators):
timeout = dataproc_operator._get_init_action_timeout()
self.assertEqual(timeout, "600s")
def test_build_cluster_data(self):
for suffix, dataproc_operator in enumerate(self.dataproc_operators):
cluster_data = dataproc_operator._build_cluster_data()
self.assertEqual(cluster_data['clusterName'], CLUSTER_NAME)
self.assertEqual(cluster_data['projectId'], PROJECT_ID)
self.assertEqual(cluster_data['config']['softwareConfig'],
{'imageVersion': IMAGE_VERSION})
self.assertEqual(cluster_data['config']['configBucket'], STORAGE_BUCKET)
self.assertEqual(cluster_data['config']['workerConfig']['numInstances'],
NUM_WORKERS)
self.assertEqual(
cluster_data['config']['secondaryWorkerConfig']['numInstances'],
NUM_PREEMPTIBLE_WORKERS)
self.assertEqual(
cluster_data['config']['gceClusterConfig']['serviceAccountScopes'],
SERVICE_ACCOUNT_SCOPES)
self.assertEqual(cluster_data['config']['gceClusterConfig']['internalIpOnly'],
INTERNAL_IP_ONLY)
self.assertEqual(cluster_data['config']['gceClusterConfig']['subnetworkUri'],
SUBNETWORK_URI)
self.assertEqual(cluster_data['config']['gceClusterConfig']['networkUri'],
NETWORK_URI)
self.assertEqual(cluster_data['config']['gceClusterConfig']['tags'],
TAGS)
self.assertEqual(cluster_data['config']['lifecycleConfig']['idleDeleteTtl'],
"321s")
self.assertEqual(cluster_data['config']['lifecycleConfig']['autoDeleteTime'],
"2017-06-07T00:00:00.000000Z")
# test whether the default airflow-version label has been properly
# set to the dataproc operator.
merged_labels = {}
merged_labels.update(self.labels[suffix])
merged_labels.update({'airflow-version': 'v' + version.replace('.', '-').replace('+','-')})
self.assertTrue(re.match(r'[a-z]([-a-z0-9]*[a-z0-9])?',
cluster_data['labels']['airflow-version']))
self.assertEqual(cluster_data['labels'], merged_labels)
def test_build_cluster_data_with_autoDeleteTime(self):
dataproc_operator = DataprocClusterCreateOperator(
task_id=TASK_ID,
cluster_name=CLUSTER_NAME,
project_id=PROJECT_ID,
num_workers=NUM_WORKERS,
zone=ZONE,
dag=self.dag,
auto_delete_time=AUTO_DELETE_TIME,
)
cluster_data = dataproc_operator._build_cluster_data()
self.assertEqual(cluster_data['config']['lifecycleConfig']['autoDeleteTime'],
"2017-06-07T00:00:00.000000Z")
def test_build_cluster_data_with_autoDeleteTtl(self):
dataproc_operator = DataprocClusterCreateOperator(
task_id=TASK_ID,
cluster_name=CLUSTER_NAME,
project_id=PROJECT_ID,
num_workers=NUM_WORKERS,
zone=ZONE,
dag=self.dag,
auto_delete_ttl=AUTO_DELETE_TTL,
)
cluster_data = dataproc_operator._build_cluster_data()
self.assertEqual(cluster_data['config']['lifecycleConfig']['autoDeleteTtl'],
"654s")
def test_build_cluster_data_with_autoDeleteTime_and_autoDeleteTtl(self):
dataproc_operator = DataprocClusterCreateOperator(
task_id=TASK_ID,
cluster_name=CLUSTER_NAME,
project_id=PROJECT_ID,
num_workers=NUM_WORKERS,
zone=ZONE,
dag=self.dag,
auto_delete_time=AUTO_DELETE_TIME,
auto_delete_ttl=AUTO_DELETE_TTL,
)
cluster_data = dataproc_operator._build_cluster_data()
if 'autoDeleteTtl' in cluster_data['config']['lifecycleConfig']:
self.fail("If 'auto_delete_time' and 'auto_delete_ttl' is set, " +
"only `auto_delete_time` is used")
self.assertEqual(cluster_data['config']['lifecycleConfig']['autoDeleteTime'],
"2017-06-07T00:00:00.000000Z")
def test_cluster_name_log_no_sub(self):
with patch('airflow.contrib.operators.dataproc_operator.DataProcHook') \
as mock_hook:
mock_hook.return_value.get_conn = self.mock_conn
dataproc_task = DataprocClusterCreateOperator(
task_id=TASK_ID,
cluster_name=CLUSTER_NAME,
project_id=PROJECT_ID,
num_workers=NUM_WORKERS,
zone=ZONE,
dag=self.dag
)
with patch.object(dataproc_task.log, 'info') as mock_info:
with self.assertRaises(TypeError) as _:
dataproc_task.execute(None)
mock_info.assert_called_with('Creating cluster: %s', CLUSTER_NAME)
def test_cluster_name_log_sub(self):
with patch('airflow.contrib.operators.dataproc_operator.DataProcHook') \
as mock_hook:
mock_hook.return_value.get_conn = self.mock_conn
dataproc_task = DataprocClusterCreateOperator(
task_id=TASK_ID,
cluster_name='smoke-cluster-{{ ts_nodash }}',
project_id=PROJECT_ID,
num_workers=NUM_WORKERS,
zone=ZONE,
dag=self.dag
)
with patch.object(dataproc_task.log, 'info') as mock_info:
context = {'ts_nodash': 'testnodash'}
rendered = dataproc_task.render_template(
'cluster_name',
getattr(dataproc_task, 'cluster_name'), context)
setattr(dataproc_task, 'cluster_name', rendered)
with self.assertRaises(TypeError):
dataproc_task.execute(None)
mock_info.assert_called_with('Creating cluster: %s',
u'smoke-cluster-testnodash')
def test_build_cluster_data_internal_ip_only_without_subnetwork(self):
def create_cluster_with_invalid_internal_ip_only_setup():
# Given
create_cluster = DataprocClusterCreateOperator(
task_id=TASK_ID,
cluster_name=CLUSTER_NAME,
project_id=PROJECT_ID,
num_workers=NUM_WORKERS,
zone=ZONE,
dag=self.dag,
internal_ip_only=True)
# When
create_cluster._build_cluster_data()
# Then
with self.assertRaises(AirflowException) as cm:
create_cluster_with_invalid_internal_ip_only_setup()
self.assertEqual(str(cm.exception),
"Set internal_ip_only to true only when"
" you pass a subnetwork_uri.")
class DataprocClusterScaleOperatorTest(unittest.TestCase):
# Unit test for the DataprocClusterScaleOperator
def setUp(self):
self.mock_execute = Mock()
self.mock_execute.execute = Mock(return_value={'done': True})
self.mock_get = Mock()
self.mock_get.get = Mock(return_value=self.mock_execute)
self.mock_operations = Mock()
self.mock_operations.get = Mock(return_value=self.mock_get)
self.mock_regions = Mock()
self.mock_regions.operations = Mock(return_value=self.mock_operations)
self.mock_projects = Mock()
self.mock_projects.regions = Mock(return_value=self.mock_regions)
self.mock_conn = Mock()
self.mock_conn.projects = Mock(return_value=self.mock_projects)
self.dag = DAG(
'test_dag',
default_args={
'owner': 'airflow',
'start_date': DEFAULT_DATE,
'end_date': DEFAULT_DATE,
},
schedule_interval='@daily')
def test_cluster_name_log_no_sub(self):
with patch('airflow.contrib.hooks.gcp_dataproc_hook.DataProcHook') as mock_hook:
mock_hook.return_value.get_conn = self.mock_conn
dataproc_task = DataprocClusterScaleOperator(
task_id=TASK_ID,
cluster_name=CLUSTER_NAME,
project_id=PROJECT_ID,
num_workers=NUM_WORKERS,
num_preemptible_workers=NUM_PREEMPTIBLE_WORKERS,
dag=self.dag
)
with patch.object(dataproc_task.log, 'info') as mock_info:
with self.assertRaises(TypeError):
dataproc_task.execute(None)
mock_info.assert_called_with('Scaling cluster: %s', CLUSTER_NAME)
def test_cluster_name_log_sub(self):
with patch('airflow.contrib.operators.dataproc_operator.DataProcHook') \
as mock_hook:
mock_hook.return_value.get_conn = self.mock_conn
dataproc_task = DataprocClusterScaleOperator(
task_id=TASK_ID,
cluster_name='smoke-cluster-{{ ts_nodash }}',
project_id=PROJECT_ID,
num_workers=NUM_WORKERS,
num_preemptible_workers=NUM_PREEMPTIBLE_WORKERS,
dag=self.dag
)
with patch.object(dataproc_task.log, 'info') as mock_info:
context = {'ts_nodash': 'testnodash'}
rendered = dataproc_task.render_template(
'cluster_name',
getattr(dataproc_task, 'cluster_name'), context)
setattr(dataproc_task, 'cluster_name', rendered)
with self.assertRaises(TypeError):
dataproc_task.execute(None)
mock_info.assert_called_with('Scaling cluster: %s',
u'smoke-cluster-testnodash')
class DataprocClusterDeleteOperatorTest(unittest.TestCase):
# Unit test for the DataprocClusterDeleteOperator
def setUp(self):
self.mock_execute = Mock()
self.mock_execute.execute = Mock(return_value={'done' : True})
self.mock_get = Mock()
self.mock_get.get = Mock(return_value=self.mock_execute)
self.mock_operations = Mock()
self.mock_operations.get = Mock(return_value=self.mock_get)
self.mock_regions = Mock()
self.mock_regions.operations = Mock(return_value=self.mock_operations)
self.mock_projects=Mock()
self.mock_projects.regions = Mock(return_value=self.mock_regions)
self.mock_conn = Mock()
self.mock_conn.projects = Mock(return_value=self.mock_projects)
self.dag = DAG(
'test_dag',
default_args={
'owner': 'airflow',
'start_date': DEFAULT_DATE,
'end_date': DEFAULT_DATE,
},
schedule_interval='@daily')
def test_cluster_name_log_no_sub(self):
with patch('airflow.contrib.hooks.gcp_dataproc_hook.DataProcHook') as mock_hook:
mock_hook.return_value.get_conn = self.mock_conn
dataproc_task = DataprocClusterDeleteOperator(
task_id=TASK_ID,
cluster_name=CLUSTER_NAME,
project_id=PROJECT_ID,
dag=self.dag
)
with patch.object(dataproc_task.log, 'info') as mock_info:
with self.assertRaises(TypeError) as _:
dataproc_task.execute(None)
mock_info.assert_called_with('Deleting cluster: %s', CLUSTER_NAME)
def test_cluster_name_log_sub(self):
with patch('airflow.contrib.operators.dataproc_operator.DataProcHook') as mock_hook:
mock_hook.return_value.get_conn = self.mock_conn
dataproc_task = DataprocClusterDeleteOperator(
task_id=TASK_ID,
cluster_name='smoke-cluster-{{ ts_nodash }}',
project_id=PROJECT_ID,
dag=self.dag
)
with patch.object(dataproc_task.log, 'info') as mock_info:
context = {'ts_nodash': 'testnodash'}
rendered = dataproc_task.render_template(
'cluster_name',
getattr(dataproc_task, 'cluster_name'), context)
setattr(dataproc_task, 'cluster_name', rendered)
with self.assertRaises(TypeError):
dataproc_task.execute(None)
mock_info.assert_called_with('Deleting cluster: %s',
u'smoke-cluster-testnodash')
class DataProcHadoopOperatorTest(unittest.TestCase):
# Unit test for the DataProcHadoopOperator
@staticmethod
def test_hook_correct_region():
with patch(HOOK) as mock_hook:
dataproc_task = DataProcHadoopOperator(
task_id=TASK_ID,
region=REGION
)
dataproc_task.execute(None)
mock_hook.return_value.submit.assert_called_once_with(mock.ANY, mock.ANY,
REGION)
@staticmethod
def test_dataproc_job_id_is_set():
with patch(HOOK) as mock_hook:
dataproc_task = DataProcHadoopOperator(
task_id=TASK_ID
)
_assert_dataproc_job_id(mock_hook, dataproc_task)
class DataProcHiveOperatorTest(unittest.TestCase):
# Unit test for the DataProcHiveOperator
@staticmethod
def test_hook_correct_region():
with patch(HOOK) as mock_hook:
dataproc_task = DataProcHiveOperator(
task_id=TASK_ID,
region=REGION
)
dataproc_task.execute(None)
mock_hook.return_value.submit.assert_called_once_with(mock.ANY, mock.ANY,
REGION)
@staticmethod
def test_dataproc_job_id_is_set():
with patch(HOOK) as mock_hook:
dataproc_task = DataProcHiveOperator(
task_id=TASK_ID
)
_assert_dataproc_job_id(mock_hook, dataproc_task)
class DataProcPySparkOperatorTest(unittest.TestCase):
# Unit test for the DataProcPySparkOperator
@staticmethod
def test_hook_correct_region():
with patch(HOOK) as mock_hook:
dataproc_task = DataProcPySparkOperator(
task_id=TASK_ID,
main=MAIN_URI,
region=REGION
)
dataproc_task.execute(None)
mock_hook.return_value.submit.assert_called_once_with(mock.ANY, mock.ANY,
REGION)
@staticmethod
def test_dataproc_job_id_is_set():
with patch(HOOK) as mock_hook:
dataproc_task = DataProcPySparkOperator(
task_id=TASK_ID,
main=MAIN_URI
)
_assert_dataproc_job_id(mock_hook, dataproc_task)
class DataProcSparkOperatorTest(unittest.TestCase):
# Unit test for the DataProcSparkOperator
@staticmethod
def test_hook_correct_region():
with patch(HOOK) as mock_hook:
dataproc_task = DataProcSparkOperator(
task_id=TASK_ID,
region=REGION
)
dataproc_task.execute(None)
mock_hook.return_value.submit.assert_called_once_with(mock.ANY, mock.ANY,
REGION)
@staticmethod
def test_dataproc_job_id_is_set():
with patch(HOOK) as mock_hook:
dataproc_task = DataProcSparkOperator(
task_id=TASK_ID
)
_assert_dataproc_job_id(mock_hook, dataproc_task)
class DataprocWorkflowTemplateInstantiateOperatorTest(unittest.TestCase):
def setUp(self):
# Setup service.projects().regions().workflowTemplates().instantiate().execute()
self.operation = {'name': 'operation', 'done': True}
self.mock_execute = Mock()
self.mock_execute.execute.return_value = self.operation
self.mock_workflows = Mock()
self.mock_workflows.instantiate.return_value = self.mock_execute
self.mock_regions = Mock()
self.mock_regions.workflowTemplates.return_value = self.mock_workflows
self.mock_projects = Mock()
self.mock_projects.regions.return_value = self.mock_regions
self.mock_conn = Mock()
self.mock_conn.projects.return_value = self.mock_projects
self.dag = DAG(
'test_dag',
default_args={
'owner': 'airflow',
'start_date': DEFAULT_DATE,
'end_date': DEFAULT_DATE,
},
schedule_interval='@daily')
def test_workflow(self):
with patch(HOOK) as MockHook:
hook = MockHook()
hook.get_conn.return_value = self.mock_conn
hook.wait.return_value = None
dataproc_task = DataprocWorkflowTemplateInstantiateOperator(
task_id=TASK_ID,
project_id=PROJECT_ID,
region=REGION,
template_id=TEMPLATE_ID,
dag=self.dag
)
dataproc_task.execute(None)
template_name = (
'projects/test-project-id/regions/test-region/'
'workflowTemplates/template-id')
self.mock_workflows.instantiate.assert_called_once_with(
name=template_name,
body=mock.ANY)
hook.wait.assert_called_once_with(self.operation)
class DataprocWorkflowTemplateInstantiateInlineOperatorTest(unittest.TestCase):
def setUp(self):
# Setup service.projects().regions().workflowTemplates().instantiateInline()
# .execute()
self.operation = {'name': 'operation', 'done': True}
self.mock_execute = Mock()
self.mock_execute.execute.return_value = self.operation
self.mock_workflows = Mock()
self.mock_workflows.instantiateInline.return_value = self.mock_execute
self.mock_regions = Mock()
self.mock_regions.workflowTemplates.return_value = self.mock_workflows
self.mock_projects = Mock()
self.mock_projects.regions.return_value = self.mock_regions
self.mock_conn = Mock()
self.mock_conn.projects.return_value = self.mock_projects
self.dag = DAG(
'test_dag',
default_args={
'owner': 'airflow',
'start_date': DEFAULT_DATE,
'end_date': DEFAULT_DATE,
},
schedule_interval='@daily')
    def test_inline_workflow(self):
with patch(HOOK) as MockHook:
hook = MockHook()
hook.get_conn.return_value = self.mock_conn
hook.wait.return_value = None
template = {
"placement": {
"managed_cluster": {
"cluster_name": CLUSTER_NAME,
"config": {
"gce_cluster_config": {
"zone_uri": ZONE,
}
}
}
},
"jobs": [
{
"step_id": "say-hello",
"pig_job": {
"query": "sh echo hello"
}
}],
}
dataproc_task = DataprocWorkflowTemplateInstantiateInlineOperator(
task_id=TASK_ID,
project_id=PROJECT_ID,
region=REGION,
template=template,
dag=self.dag
)
dataproc_task.execute(None)
self.mock_workflows.instantiateInline.assert_called_once_with(
parent='projects/test-project-id/regions/test-region',
instanceId=mock.ANY,
body=template)
hook.wait.assert_called_once_with(self.operation)
|
|
import logging
import libvirt
from vnc.models import Vnc
from .tasks import start_websock
from os import kill
from xml.dom.minidom import parseString
from multiprocessing import Process
class LibvirtBackend(object):
def __init__(self):
self.logger = logging.getLogger(__name__)
try:
self.conn = libvirt.open('qemu:///system')
        except libvirt.libvirtError as e:
            self.logger.error('Error when initializing qemu connection: %s' % e)
def _get_domain(self, name):
'''
Return a domain object associated with the host
'''
try:
domain = self.conn.lookupByName(name)
return domain
except libvirt.libvirtError as e:
            self.logger.error('libvirt lookup failed for host %s with:' % name)
            self.logger.error('%s: %s' % (e.get_error_code(), e.get_error_message()))
def _terminate_vnc(self, host):
'''
Terminates the VNC Websocket process attached to a Host object
'''
if hasattr(host, 'vnc'):
# let the receiver handle the websocket
host.vnc.delete()
else:
            self.logger.info('tried deleting a nonexistent vnc database entry')
def set_name(self, host, old_name):
#TODO make critical error less critical
'''
renames a libvirt domain by redefining it
:returns: 1 if no change was required, 0 if successful, -1 if there was an error
'''
if old_name == host.name:
            self.logger.warning('unnecessary set_name for %s' % host.name)
return 1
old_domain = self._get_domain(old_name)
if old_domain.isActive():
self.logger.error('cannot change the name of a running domain')
host.name = old_name
host.save()
return -1
else:
            try:
                # grab the XML definition before undefining, then rename and redefine it
                xml = old_domain.XMLDesc(0)
                old_domain.undefine()
                xml = xml.replace('<name>' + old_name + '</name>',
                                  '<name>' + host.name + '</name>')
                self.conn.defineXML(xml)
except libvirt.libvirtError as e:
self.logger.critical('set_name %s (old: %s) failed' % (host.name, old_name))
self.logger.critical("check the domain definition's integrity on the hypervisor")
self.logger.critical(e.get_error_message())
return -1
return 0 # all is fine
def set_state(self, host):
'''
        Adapt a domain's status to the is_on value in the database
:returns: 1 if no change was required, 0 if successful, -1 if there was an error
'''
domain = self._get_domain(host.name)
if host.is_on and not domain.isActive():
return self.start(host)
elif not host.is_on and domain.isActive():
return self.shutdown(host)
else:
            self.logger.warning('unnecessary set_state for %s' % host.name)
return 1
def set_vcpus(self, host):
pass
def set_memory(self, host):
'''
sets the amount of memory in kibibyte available for the specified host
:returns: 1 if there is no change, 0 if done, -1 if there was an error
'''
domain = self._get_domain(host.name)
if domain.maxMemory() == host.memory:
            self.logger.warning('unnecessary set_memory for %s' % host.name)
return 1
else:
try:
domain.setMaxMemory(host.memory)
except libvirt.libvirtError as e:
self.logger.error('setting memory failed for %s with:' % host.name)
self.logger.error(e.get_error_message())
return -1
self.logger.info('set_memory run for %s' % host.name)
return 0 # all is fine
def set_autostart(self, host):
'''
sets the autostart for the specified host
:returns: 1 if there is no change, 0 if done, -1 if there was an error
'''
domain = self._get_domain(host.name)
if domain.autostart() == host.autostart:
            self.logger.warning('unnecessary set_autostart for %s' % host.name)
return 1
else:
try:
domain.setAutostart(host.autostart)
except libvirt.libvirtError as e:
self.logger.error('setting autostart failed for %s with:' % host.name)
self.logger.error(e.get_error_message())
return -1
self.logger.info('set_autostart run for %s' % host.name)
return 0 # all is fine
def set_persistent(self, host):
pass
def start(self, host):
'''
Boots a domain
:returns: 1 if the domain is already running, 0 if successful, -1 if there was an error
'''
domain = self._get_domain(host.name)
if domain.isActive():
            self.logger.warning('unnecessary start for %s' % host.name)
return 1
else:
try:
domain.create()
except libvirt.libvirtError as e:
self.logger.error('start failed for %s with:' % host.name)
self.logger.error(e.get_error_message())
return -1
self.logger.info('start run for %s' % host.name)
return 0 # all is fine
def reboot(self, host):
'''
Reboots a domain
:returns: 1 if the domain is not running, 0 if the reboot was successful, -1 if there was an error
'''
domain = self._get_domain(host.name)
if not domain.isActive():
            self.logger.warning('unnecessary reboot for %s' % host.name)
return 1
else:
try:
domain.reboot(0)
except libvirt.libvirtError as e:
self.logger.error('reboot failed for %s with:' % host.name)
self.logger.error(e.get_error_message())
return -1
            self._terminate_vnc(host)
self.logger.info('reboot run for %s' % host.name)
return 0 # all is fine
def shutdown(self, host):
'''
shuts a domain down
:returns: 1 if the domain is already stopped, 0 if successful, -1 if there was an error
'''
domain = self._get_domain(host.name)
if not domain.isActive():
            self.logger.warning('unnecessary shutdown for %s' % host.name)
return 1
else:
try:
domain.shutdown()
except libvirt.libvirtError as e:
self.logger.error('shutdown failed for %s with:' % host.name)
self.logger.error(e.get_error_message())
return -1
            self._terminate_vnc(host)
self.logger.info('shutdown run for %s' % host.name)
return 0 # all is fine
def destroy(self, host):
'''
Destroys a domain matched by its hostname
:returns: 1 if it was not running, 0 if the domain has been destroyed, -1 if there was an error
'''
domain = self._get_domain(host.name)
if not domain.isActive():
            self.logger.warning('unnecessary destroy for %s' % host.name)
return 1
else:
try:
domain.destroy()
except libvirt.libvirtError as e:
self.logger.error('destroy failed for %s with:' % host.name)
self.logger.error(e.get_error_message())
return -1
            self._terminate_vnc(host)
self.logger.info('destroy run for %s' % host.name)
return 0 # all is fine
def attach_or_create_websock(self, user, host):
'''
Creates a websocket for the specified host
        :returns: 2 if the domain does not exist in libvirt, 1 if the host is not
                  running, 0 if the websocket is running, -1 if there is no VNC
                  support for this domain
'''
domain = self._get_domain(host.name)
if domain is None:
self.logger.warning('vnc requested for the host %s, which does not exist in libvirt (yet)' % host.name)
return 2
elif not domain.isActive():
self.logger.warning('vnc requested for the shutdown host %s' % host.name)
return 1
elif hasattr(host, 'vnc'):
try:
kill(int(host.vnc.pid), 0)
except OSError:
host.vnc.delete()
self._create_websock(user, host)
else:
self._create_websock(user, host)
host.vnc.users.add(user)
return 0
def _create_websock(self, user, host):
domain = self._get_domain(host.name)
self.logger.info('initializing vnc for %s' % host.name)
doc = parseString(domain.XMLDesc(0))
domain_node = doc.getElementsByTagName('domain')[0]
        graphics_nodes = domain_node.getElementsByTagName('graphics')
        if not graphics_nodes or graphics_nodes[0].getAttribute('type') != u'vnc':
            # vm does not support vnc
            return -1
        graphics_node = graphics_nodes[0]
target_port = graphics_node.getAttribute('port')
port = int(graphics_node.getAttribute('port')) + 10000
p = Process(target=start_websock, args=(str(target_port), str(port)))
p.start()
        vnc = Vnc.objects.create(host=host, port=port, pid=p.pid)
        self.logger.info('websocket started for %s on port %d' % (host.name, vnc.port))
return 0 # all is fine
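# Illustrative usage sketch (not part of the original module). ``Host`` stands
# for the Django model whose ``name``, ``is_on``, ``memory`` and ``autostart``
# fields the methods above read, and ``request.user`` for any authenticated
# Django user; both are assumptions for the example.
#
#     backend = LibvirtBackend()
#     host = Host.objects.get(name='example-vm')
#     if backend.set_state(host) != -1:
#         backend.attach_or_create_websock(request.user, host)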
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Object representations of the elements of a HOT template.
These objects were created against the HOT specification found at:
http://docs.openstack.org/developer/heat/template_guide/hot_spec.html
"""
from os import path
from tuskar.templates import namespace as ns_utils
DEFAULT_VERSION = '2014-10-16'
class Template(object):
def __init__(self, version=DEFAULT_VERSION, description=None):
super(Template, self).__init__()
self.version = version
self.description = description
self._parameter_groups = [] # list of ParameterGroup
self._parameters = [] # list of Parameter
self._resources = [] # list of Resource
self._outputs = [] # list of Output
def __str__(self):
msg = ('Template: version=%(ver)s, description=%(desc)s, '
'parameter_count=%(param)s, output_count=%(out)s')
data = {
'ver': self.version,
'desc': _safe_strip(self.description),
'param': len(self.parameters),
'out': len(self.outputs)
}
return msg % data
@property
def parameter_groups(self):
return tuple(self._parameter_groups)
@property
def parameters(self):
return tuple(self._parameters)
@property
def resources(self):
return tuple(self._resources)
@property
def outputs(self):
return tuple(self._outputs)
def add_parameter(self, parameter):
"""Adds a parameter to the template.
:type parameter: tuskar.templates.heat.Parameter
"""
self._parameters.append(parameter)
def remove_parameter(self, parameter):
"""Removes a parameter from the template.
:type parameter: tuskar.templates.heat.Parameter
:raise ValueError: if the parameter is not in the template
"""
self._parameters.remove(parameter)
def find_parameter_by_name(self, name):
"""Returns the parameter from the template with the given name or
None if no matching parameter is found.
:type name: str
:rtype: tuskar.templates.heat.Parameter or None
"""
for p in self._parameters:
if p.name == name:
return p
return None
def remove_parameters_by_namespace(self, namespace):
"""Removes all parameters in the given namespace.
:type namespace: str
"""
self._parameters = [
p for p in self.parameters
if not ns_utils.matches_template_namespace(namespace, p.name)]
def add_parameter_group(self, parameter_group):
"""Adds a parameter group to the template.
:type parameter_group: tuskar.templates.heat.ParameterGroup
"""
self._parameter_groups.append(parameter_group)
def remove_parameter_group(self, parameter_group):
"""Removes a parameter group from the template.
:type parameter_group: tuskar.templates.heat.ParameterGroup
:raise ValueError: if the parameter group is not in the template
"""
self._parameter_groups.remove(parameter_group)
def find_parameter_group_by_label(self, label):
"""Returns the parameter group with the given label or None if no
matching group is found.
:type label: str
:rtype: tuskar.templates.heat.ParameterGroup or None
"""
for pg in self._parameter_groups:
if pg.label == label:
return pg
return None
def add_resource(self, resource):
"""Adds a resource to the template.
:type resource: tuskar.templates.heat.Resource
"""
self._resources.append(resource)
def remove_resource(self, resource):
"""Removes a resource from the template.
:type resource: tuskar.templates.heat.Resource
:raise ValueError: if the resource is not in the template
"""
self._resources.remove(resource)
def remove_resource_by_id(self, resource_id):
"""Removes a resource from the template if found.
:type resource_id: str
"""
self._resources = [r for r in self._resources
if r.resource_id != resource_id]
def find_resource_by_id(self, resource_id):
"""Returns the resource with the given ID or None if no matching
resource is found.
:type resource_id: str
:rtype: tuskar.templates.heat.Resource or None
"""
for r in self._resources:
if r.resource_id == resource_id:
return r
return None
def add_output(self, output):
"""Adds an output to the template.
:type output: tuskar.templates.heat.Output
"""
self._outputs.append(output)
def remove_output(self, output):
"""Removes an output from the template.
:type output: tuskar.templates.heat.Output
:raise ValueError: if the output is not in the template
"""
self._outputs.remove(output)
def remove_outputs_by_namespace(self, namespace):
"""Removes all outputs in the given namespace from the template.
:type namespace: str
"""
self._outputs = [
o for o in self.outputs
if not ns_utils.matches_template_namespace(namespace, o.name)]
def find_output_by_name(self, name):
"""Returns the output with the given name or None if no matching
output is found.
:type name: str
:rtype: tuskar.templates.heat.Output or None
"""
for o in self._outputs:
if o.name == name:
return o
return None
class ParameterGroup(object):
def __init__(self, label, description=None):
super(ParameterGroup, self).__init__()
self.label = label
self.description = description
self._parameter_names = set()
def __str__(self):
msg = ('ParameterGroup: label=%(label)s, description=%(desc)s '
'parameter_names=%(names)s')
data = {
'label': self.label,
'desc': self.description,
'names': ','.join(self.parameter_names),
}
return msg % data
@property
def parameter_names(self):
return tuple(self._parameter_names)
def add_parameter_name(self, *names):
"""Adds one or more parameters to the group.
:type names: str
"""
for n in names:
self._parameter_names.add(n)
def remove_parameter_name(self, name):
"""Removes a parameter from the group if it is present.
:type name: str
"""
self._parameter_names.discard(name)
class Parameter(object):
def __init__(self, name, param_type,
description=None, label=None, default=None, hidden=None):
super(Parameter, self).__init__()
self.name = name
self.param_type = param_type
self.description = description
self.label = label
self.default = default
self.hidden = hidden
self._constraints = []
def __str__(self):
msg = ('Parameter: name=%(name)s, type=%(type)s, '
'description=%(desc)s, label=%(label)s, '
'default=%(def)s, hidden=%(hidden)s')
data = {
'name': self.name,
'type': self.param_type,
'desc': self.description,
'label': self.label,
'def': self.default,
'hidden': self.hidden,
}
return msg % data
@property
def constraints(self):
return tuple(self._constraints)
def add_constraint(self, constraint):
"""Adds a constraint to the parameter.
:type constraint: tuskar.templates.heat.ParameterConstraint
"""
self._constraints.append(constraint)
def remove_constraint(self, constraint):
"""Removes a constraint from the template.
:type constraint: tuskar.templates.heat.ParameterConstraint
:raise ValueError: if the given constraint isn't in the parameter
"""
self._constraints.remove(constraint)
class ParameterConstraint(object):
def __init__(self, constraint_type, definition, description=None):
super(ParameterConstraint, self).__init__()
self.constraint_type = constraint_type
self.definition = definition
self.description = description
def __str__(self):
msg = ('Constraint: type=%(type)s, definition=%(def)s, '
'description=%(desc)s')
data = {
'type': self.constraint_type,
'def': self.definition,
'desc': self.description,
}
return msg % data
class Resource(object):
def __init__(self, resource_id, resource_type,
metadata=None, depends_on=None,
update_policy=None, deletion_policy=None):
super(Resource, self).__init__()
self.resource_id = resource_id
self.resource_type = resource_type
self.metadata = metadata
self.depends_on = depends_on
self.update_policy = update_policy
self.deletion_policy = deletion_policy
self._properties = []
def __str__(self):
msg = 'Resource: id=%(id)s, resource_type=%(type)s'
data = {
'id': self.resource_id,
'type': self.resource_type,
}
return msg % data
@property
def properties(self):
return tuple(self._properties)
def add_property(self, resource_property):
"""Adds a property to the resource.
:type resource_property: tuskar.templates.heat.ResourceProperty
"""
self._properties.append(resource_property)
def remove_property(self, resource_property):
"""Removes a property from the template.
:type resource_property: tuskar.templates.heat.ResourceProperty
:raise ValueError: if the property isn't in the resource
"""
self._properties.remove(resource_property)
def find_property_by_name(self, name):
"""Returns the property with the given name or None if there is no
matching property.
:type name: str
:rtype: tuskar.templates.heat.ResourceProperty or None
"""
for p in self._properties:
if p.name == name:
return p
return None
class ResourceProperty(object):
def __init__(self, name, value):
super(ResourceProperty, self).__init__()
self.name = name
self.value = value
def __str__(self):
msg = 'ResourceProperty: name=%(name)s, value=%(value)s'
data = {
'name': self.name,
'value': self.value,
}
return msg % data
class Output(object):
def __init__(self, name, value, description=None):
super(Output, self).__init__()
self.name = name
self.value = value
self.description = description
def __str__(self):
msg = 'Output: name=%(name)s, value=%(value)s, description=%(desc)s'
data = {
'name': self.name,
'value': self.value,
'desc': _safe_strip(self.description)
}
return msg % data
class Environment(object):
def __init__(self):
super(Environment, self).__init__()
self._parameters = []
self._registry_entries = []
def __str__(self):
msg = ('Environment: parameter_count=%(p_count)s, '
'registry_count=%(r_count)s')
data = {
'p_count': len(self.parameters),
'r_count': len(self.registry_entries),
}
return msg % data
@property
def parameters(self):
return tuple(self._parameters)
@property
def registry_entries(self):
return tuple(self._registry_entries)
def add_parameter(self, parameter):
"""Adds a property to the environment.
:type parameter: tuskar.templates.heat.EnvironmentParameter
"""
self._parameters.append(parameter)
def remove_parameter(self, parameter):
"""Removes a parameter from the environment.
:type parameter: tuskar.templates.heat.EnvironmentParameter
:raise ValueError: if the parameter is not in the environment
"""
self._parameters.remove(parameter)
def remove_parameters_by_namespace(self, namespace):
"""Removes all parameters that match the given namespace.
:type namespace: str
"""
self._parameters = [
p for p in self._parameters
if not ns_utils.matches_template_namespace(namespace, p.name)]
def find_parameter_by_name(self, name):
"""Returns the parameter instance with the given name or None
if there is no matching parameter.
:type name: str
:rtype: tuskar.templates.heat.EnvironmentParameter or None
"""
for p in self._parameters:
if p.name == name:
return p
return None
def has_parameter_in_namespace(self, namespace):
"""Returns true if the environment has at least one parameter
in given namespace, false otherwise.
:type namespace: str
"""
for p in self._parameters:
if ns_utils.matches_template_namespace(namespace, p.name):
return True
return False
def add_registry_entry(self, entry, unique=False):
"""Adds a registry entry to the environment.
:type entry: tuskar.templates.heat.RegistryEntry
:param unique: toggles if registry is treated as a set
:type unique: boolean
"""
if unique:
setentries = set(self._registry_entries)
setentries.add(entry)
self._registry_entries = list(setentries)
else:
self._registry_entries.append(entry)
def remove_registry_entry(self, entry):
"""Removes a registry entry from the environment.
:type entry: tuskar.templates.heat.RegistryEntry
:raise ValueError: if the entry is not in the environment
"""
self._registry_entries.remove(entry)
def remove_registry_entry_by_alias(self, alias):
"""Removes a registry entry from the environment if it is found.
:type alias: str
"""
self._registry_entries = [e for e in self._registry_entries
if e.alias != alias]
class EnvironmentParameter(object):
def __init__(self, name, value):
super(EnvironmentParameter, self).__init__()
self.name = name
self.value = value
def __str__(self):
msg = 'EnvironmentParameter: name=%(name)s, value=%(value)s'
data = {
'name': self.name,
'value': self.value,
}
return msg % data
class RegistryEntry(object):
def __init__(self, alias, filename):
super(RegistryEntry, self).__init__()
self.alias = alias
self.filename = filename
# TODO(jpeeler) rename self.filename to mapping
def __str__(self):
msg = 'RegistryEntry: alias=%(alias)s, filename=%(f)s'
data = {
'alias': self.alias,
'f': self.filename,
}
return msg % data
def is_filename(self):
if ('::' in self.filename or
path.splitext(self.filename)[1] not in ('.yaml', '.yml')):
return False
return True
def _safe_strip(value):
"""Strips the value if it is not None.
:param value: text to be cleaned up
:type value: str or None
:return: clean value if one was specified; None otherwise
:rtype: str or None
"""
if value is not None:
return value.strip()
return None
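# Illustrative only: a minimal sketch of composing the classes above into a
# template/environment pair. The parameter name, resource type and registry
# mapping ("node_count", "OS::Tuskar::Compute", "puppet/compute.yaml") are
# made up for the example.
def _example_usage():
    template = Template(description='minimal example')
    count = Parameter('node_count', 'number', description='How many nodes')
    count.add_constraint(ParameterConstraint('range', {'min': 1}))
    template.add_parameter(count)
    template.add_resource(Resource('compute', 'OS::Tuskar::Compute'))
    template.add_output(Output('ids', {'get_attr': ['compute', 'refs']}))
    environment = Environment()
    environment.add_parameter(EnvironmentParameter('node_count', 3))
    environment.add_registry_entry(
        RegistryEntry('OS::Tuskar::Compute', 'puppet/compute.yaml'), unique=True)
    return template, environment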
|
|
#!/usr/bin/env python2
##
# autosign
# https://github.com/leosartaj/autosign.git
#
# copyright (c) 2014 sartaj singh
# licensed under the mit license.
##
import os
import re
import exce
"""
Main functions
"""
def getIndex(fName, options):
"""
returns the start and end of a signature in a file
returns None if no signature found
"""
    opt_start, opt_end, opt_line = options.start, options.end, options.line
    ls, le, ll = len(opt_start), len(opt_end), len(opt_line)
    start, end = None, None
    with open(fName) as handler:
        for index, line in enumerate(handler):
            if line[:ls] == opt_start and start == None:
                start = index
            elif line[:le] == opt_end and end == None:
                end = index
                break
            elif line[:ll] != opt_line and start != None:
                start, end = None, None
                break
    if start != None and end != None:
        return start, end
    return None, None
def isSign(fName, options):
"""
Checks if a file is already signed
"""
start, end = getIndex(fName, options)
if start != None and end != None:
return True
return False
def checkRe(exp, line):
"""
Checks a line if it follows a regular expression or not
"""
result = exp.match(line)
if result:
return True
return False
def hasInter(fName, allow=None):
"""
Checks if a file has a special line
matching allow
"""
if not allow:
return False
exp = re.compile(allow)
with open(fName, 'r') as handler:
lines = handler.readlines()
if len(lines) and checkRe(exp, lines[0]):
return True
return False
def removeInter(fName, allow=None):
"""
Checks if a file has a line with the passed re
if it has removes and returns the first matched line
else returns None
"""
inter = None
if not allow:
return inter
if not hasInter(fName, allow):
return inter
with open(fName, 'r') as handler:
lines = handler.readlines()
exp = re.compile(allow)
with open(fName, 'w') as handler:
for line in lines:
if not checkRe(exp, line):
handler.write(line)
else:
inter = line
return inter
def checkType(fName, ext='.py'):
"""
checks if file is of a given type
checks if file has ext extension
"""
name, extension = os.path.splitext(fName)
if extension == ext:
return True
return False
def checkTemplate(fName, options):
"""
checks if the file
is a proper template or not
file should only contain a single signature
if allow option is set, allows allowed line
extra lines are allowed before or after signature
"""
start, end = getIndex(fName, options)
if start == None or end == None:
return False
exp = None
if options.allow:
exp = re.compile(options.allow)
    with open(fName, 'r') as handler:
        lines = handler.readlines()
add = 0
for index, line in enumerate(lines):
if exp and checkRe(exp, line) and index < start:
add += 1
elif options.blank and line == os.linesep and (index < start or index > end):
add += 1
if len(lines) - 1 == end - start + add:
return True
return False
def checkFiles(fName, options, recursive=False):
"""
yields whether a file is signed or not
"""
ext = options.ext
if os.path.isfile(fName) and checkType(fName, ext):
yield fName, isSign(fName, options)
elif os.path.isdir(fName):
for filename in os.listdir(fName):
path = os.path.join(fName, filename)
if os.path.isdir(path) and recursive:
for filename, val in checkFiles(path, options, recursive):
yield filename, val
elif os.path.isfile(path) and checkType(path, ext):
yield path, isSign(path, options)
def sign(signFile, fName, options, force=False):
"""
Signs an unsigned file by default
if force is True also replaces sign of signed files
"""
if not checkTemplate(signFile, options):
raise exce.TemplateError('Incorrect Template')
with open(signFile, 'r') as sign: # sign to be added
sign_lines = sign.readlines()
temp_len = len(sign_lines)
allow = options.allow
if not isSign(fName, options):
inter_f = removeInter(fName, allow)
with open(fName, 'r') as handler:
lines = handler.readlines()
with open(fName, 'w') as handler:
if inter_f != None and not hasInter(signFile, allow):
handler.write(inter_f)
for line in sign_lines:
handler.write(line)
for line in lines:
handler.write(line)
return True
elif force:
inter_f = removeInter(fName, allow)
start, end = getIndex(fName, options)
with open(fName, 'r') as handler:
lines = handler.readlines()
with open(fName, 'w') as handler:
if inter_f != None and not hasInter(signFile, allow):
handler.write(inter_f)
for line in sign_lines:
handler.write(line)
for index, line in enumerate(lines):
if index > end:
handler.write(line)
return True
return False
def signFiles(signfile, fName, options, recursive=False, force=False):
"""
recursive implementation of main.sign
signs a file
signs all the files in a directory
"""
ext = options.ext
if os.path.isfile(fName) and checkType(fName, ext):
result = sign(signfile, fName, options, force)
yield fName, result
elif os.path.isdir(fName):
for filename in os.listdir(fName):
path = os.path.join(fName, filename)
if os.path.isdir(path) and recursive:
for filename, val in signFiles(signfile, path, options, recursive, force):
yield filename, val
elif os.path.isfile(path) and checkType(path, ext):
result = sign(signfile, path, options, force)
yield path, result
def removeSign(fName, options):
"""
Removes sign from a signed file
does not remove shebang line
does not remove extra lines that were added
after/before the signature when the file was signed
raises UnsignedError if file not signed
"""
if not isSign(fName, options):
raise exce.UnsignedError("File not signed")
with open(fName, 'r') as handler:
lines = handler.readlines()
start, end = getIndex(fName, options)
with open(fName, 'w') as handler:
for index in range(len(lines)):
if index < start or index > end:
handler.write(lines[index])
def removeSignFiles(fName, options, recursive=False):
"""
recursive implementation of main.removeSign
removes sign from a python file
removes signs from all the python files in a directory
"""
ext = options.ext
if os.path.isfile(fName) and isSign(fName, options) and checkType(fName, ext):
removeSign(fName, options)
yield fName
elif os.path.isdir(fName):
for filename in os.listdir(fName):
path = os.path.join(fName, filename)
if os.path.isdir(path) and recursive:
for filename in removeSignFiles(path, options, recursive):
yield path
elif os.path.isfile(path) and isSign(path, options) and checkType(path, ext):
removeSign(path, options)
yield path
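# Illustrative only: a sketch of driving the helpers above by hand. The real
# project builds ``options`` from its config; here an argparse Namespace with
# the attributes this module reads (start, end, line, ext, allow, blank) stands
# in for it, and the '##'/'#' marker strings are assumptions.
def _example_run(template_file, target):
    from argparse import Namespace
    options = Namespace(start='##', end='##', line='#',
                        ext='.py', allow=r'^#!', blank=False)
    for fname, signed in signFiles(template_file, target, options,
                                   recursive=True, force=False):
        print('%s: %s' % (fname, 'signed' if signed else 'skipped'))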
|
|
"""
To ensure compatibility from Python ``2.6`` - ``3.3``, a module has been
created. Clearly there is huge need to use conforming syntax.
"""
import sys
import imp
import os
import re
try:
import importlib
except ImportError:
pass
is_py3 = sys.version_info[0] >= 3
is_py33 = is_py3 and sys.version_info.minor >= 3
is_py34 = is_py3 and sys.version_info.minor >= 4
is_py35 = is_py3 and sys.version_info.minor >= 5
is_py26 = not is_py3 and sys.version_info[1] < 7
def find_module_py33(string, path=None):
loader = importlib.machinery.PathFinder.find_module(string, path)
if loader is None and path is None: # Fallback to find builtins
try:
loader = importlib.find_loader(string)
except ValueError as e:
# See #491. Importlib might raise a ValueError, to avoid this, we
# just raise an ImportError to fix the issue.
raise ImportError("Originally " + repr(e))
if loader is None:
raise ImportError("Couldn't find a loader for {0}".format(string))
try:
is_package = loader.is_package(string)
if is_package:
module_path = os.path.dirname(loader.path)
module_file = None
else:
module_path = loader.get_filename(string)
module_file = open(module_path, 'rb')
except AttributeError:
        # ExtensionLoader has no attribute get_filename; instead it has a
# path attribute that we can use to retrieve the module path
try:
module_path = loader.path
module_file = open(loader.path, 'rb')
except AttributeError:
module_path = string
module_file = None
finally:
is_package = False
return module_file, module_path, is_package
def find_module_pre_py33(string, path=None):
module_file, module_path, description = imp.find_module(string, path)
module_type = description[2]
return module_file, module_path, module_type is imp.PKG_DIRECTORY
find_module = find_module_py33 if is_py33 else find_module_pre_py33
find_module.__doc__ = """
Provides information about a module.
This function isolates the differences in importing libraries introduced with
python 3.3 on; it gets a module name and optionally a path. It will return a
tuple containing an open file for the module (if not builtin), the filename
or the name of the module if it is a builtin one and a boolean indicating
if the module is contained in a package.
"""
# unicode function
try:
unicode = unicode
except NameError:
unicode = str
if is_py3:
u = lambda s: s
else:
u = lambda s: s.decode('utf-8')
u.__doc__ = """
Decode a raw string into unicode object. Do nothing in Python 3.
"""
# exec function
if is_py3:
def exec_function(source, global_map):
exec(source, global_map)
else:
eval(compile("""def exec_function(source, global_map):
exec source in global_map """, 'blub', 'exec'))
# re-raise function
if is_py3:
def reraise(exception, traceback):
raise exception.with_traceback(traceback)
else:
eval(compile("""
def reraise(exception, traceback):
raise exception, None, traceback
""", 'blub', 'exec'))
reraise.__doc__ = """
Re-raise `exception` with a `traceback` object.
Usage::
reraise(Exception, sys.exc_info()[2])
"""
class Python3Method(object):
def __init__(self, func):
self.func = func
def __get__(self, obj, objtype):
if obj is None:
return lambda *args, **kwargs: self.func(*args, **kwargs)
else:
return lambda *args, **kwargs: self.func(obj, *args, **kwargs)
def use_metaclass(meta, *bases):
""" Create a class with a metaclass. """
if not bases:
bases = (object,)
return meta("HackClass", bases, {})
try:
encoding = sys.stdout.encoding
if encoding is None:
encoding = 'utf-8'
except AttributeError:
encoding = 'ascii'
def u(string):
"""Cast to unicode DAMMIT!
Written because Python2 repr always implicitly casts to a string, so we
    have to cast back to a unicode (and we know that we always deal with valid
unicode, because we check that in the beginning).
"""
if is_py3:
return str(string)
elif not isinstance(string, unicode):
return unicode(str(string), 'UTF-8')
return string
try:
import builtins # module name in python 3
except ImportError:
import __builtin__ as builtins
import ast
def literal_eval(string):
    # py3.0, py3.1 and py3.2 don't support unicode literals. Strip the prefix
    # here rather than maintaining two versions of the tokenizer.
if is_py3 and sys.version_info.minor < 3:
if re.match('[uU][\'"]', string):
string = string[1:]
return ast.literal_eval(string)
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest # Python 2
def no_unicode_pprint(dct):
"""
    Python 2/3 dict __repr__ may be different, because of unicode differences
(with or without a `u` prefix). Normally in doctests we could use `pprint`
to sort dicts and check for equality, but here we have to write a separate
function to do that.
"""
import pprint
s = pprint.pformat(dct)
print(re.sub("u'", "'", s))
def utf8_repr(func):
"""
``__repr__`` methods in Python 2 don't allow unicode objects to be
returned. Therefore cast them to utf-8 bytes in this decorator.
"""
def wrapper(self):
result = func(self)
if isinstance(result, unicode):
return result.encode('utf-8')
else:
return result
if is_py3:
return func
else:
return wrapper
|
|
# Natural Language Toolkit: Chunk format conversions
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# Steven Bird <stevenbird1@gmail.com> (minor additions)
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, unicode_literals, division
import re
from nltk.tree import Tree
from nltk.tag.mapping import map_tag
from nltk.tag.util import str2tuple
from nltk.compat import python_2_unicode_compatible
##//////////////////////////////////////////////////////
## EVALUATION
##//////////////////////////////////////////////////////
from nltk.metrics import accuracy as _accuracy
def accuracy(chunker, gold):
"""
Score the accuracy of the chunker against the gold standard.
Strip the chunk information from the gold standard and rechunk it using
the chunker, then compute the accuracy score.
:type chunker: ChunkParserI
:param chunker: The chunker being evaluated.
:type gold: tree
:param gold: The chunk structures to score the chunker on.
:rtype: float
"""
gold_tags = []
test_tags = []
for gold_tree in gold:
test_tree = chunker.parse(gold_tree.flatten())
gold_tags += tree2conlltags(gold_tree)
test_tags += tree2conlltags(test_tree)
# print 'GOLD:', gold_tags[:50]
# print 'TEST:', test_tags[:50]
return _accuracy(gold_tags, test_tags)
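# Illustrative usage sketch (assumes the conll2000 corpus is available and that
# the chunker implements ChunkParserI.parse(); the grammar below is made up):
#
#     from nltk.corpus import conll2000
#     from nltk.chunk import RegexpParser
#     gold = conll2000.chunked_sents('test.txt', chunk_types=['NP'])[:100]
#     chunker = RegexpParser(r"NP: {<DT>?<JJ>*<NN.*>+}")
#     score = accuracy(chunker, gold)   # fraction of tokens with matching IOB tags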
# Patched for increased performance by Yoav Goldberg <yoavg@cs.bgu.ac.il>, 2006-01-13
# -- statistics are evaluated only on demand, instead of at every sentence evaluation
#
# SB: use nltk.metrics for precision/recall scoring?
#
class ChunkScore(object):
"""
A utility class for scoring chunk parsers. ``ChunkScore`` can
evaluate a chunk parser's output, based on a number of statistics
    (precision, recall, f-measure, missed chunks, incorrect chunks).
It can also combine the scores from the parsing of multiple texts;
this makes it significantly easier to evaluate a chunk parser that
operates one sentence at a time.
Texts are evaluated with the ``score`` method. The results of
evaluation can be accessed via a number of accessor methods, such
as ``precision`` and ``f_measure``. A typical use of the
``ChunkScore`` class is::
>>> chunkscore = ChunkScore() # doctest: +SKIP
>>> for correct in correct_sentences: # doctest: +SKIP
... guess = chunkparser.parse(correct.leaves()) # doctest: +SKIP
... chunkscore.score(correct, guess) # doctest: +SKIP
>>> print('F Measure:', chunkscore.f_measure()) # doctest: +SKIP
F Measure: 0.823
:ivar kwargs: Keyword arguments:
        - max_tp_examples: The maximum number of actual examples of true
positives to record. This affects the ``correct`` member
function: ``correct`` will not return more than this number
of true positive examples. This does *not* affect any of
the numerical metrics (precision, recall, or f-measure)
        - max_fp_examples: The maximum number of actual examples of false
positives to record. This affects the ``incorrect`` member
function and the ``guessed`` member function: ``incorrect``
will not return more than this number of examples, and
``guessed`` will not return more than this number of true
positive examples. This does *not* affect any of the
numerical metrics (precision, recall, or f-measure)
- max_fn_examples: The maximum number of actual examples of false
negatives to record. This affects the ``missed`` member
function and the ``correct`` member function: ``missed``
will not return more than this number of examples, and
``correct`` will not return more than this number of true
negative examples. This does *not* affect any of the
numerical metrics (precision, recall, or f-measure)
- chunk_label: A regular expression indicating which chunks
should be compared. Defaults to ``'.*'`` (i.e., all chunks).
:type _tp: list(Token)
:ivar _tp: List of true positives
:type _fp: list(Token)
:ivar _fp: List of false positives
:type _fn: list(Token)
:ivar _fn: List of false negatives
:type _tp_num: int
:ivar _tp_num: Number of true positives
:type _fp_num: int
:ivar _fp_num: Number of false positives
:type _fn_num: int
:ivar _fn_num: Number of false negatives.
"""
def __init__(self, **kwargs):
self._correct = set()
self._guessed = set()
self._tp = set()
self._fp = set()
self._fn = set()
self._max_tp = kwargs.get('max_tp_examples', 100)
self._max_fp = kwargs.get('max_fp_examples', 100)
self._max_fn = kwargs.get('max_fn_examples', 100)
self._chunk_label = kwargs.get('chunk_label', '.*')
self._tp_num = 0
self._fp_num = 0
self._fn_num = 0
self._count = 0
self._tags_correct = 0.0
self._tags_total = 0.0
self._measuresNeedUpdate = False
def _updateMeasures(self):
if (self._measuresNeedUpdate):
self._tp = self._guessed & self._correct
self._fn = self._correct - self._guessed
self._fp = self._guessed - self._correct
self._tp_num = len(self._tp)
self._fp_num = len(self._fp)
self._fn_num = len(self._fn)
self._measuresNeedUpdate = False
def score(self, correct, guessed):
"""
Given a correctly chunked sentence, score another chunked
version of the same sentence.
:type correct: chunk structure
:param correct: The known-correct ("gold standard") chunked
sentence.
:type guessed: chunk structure
:param guessed: The chunked sentence to be scored.
"""
self._correct |= _chunksets(correct, self._count, self._chunk_label)
self._guessed |= _chunksets(guessed, self._count, self._chunk_label)
self._count += 1
self._measuresNeedUpdate = True
# Keep track of per-tag accuracy (if possible)
try:
correct_tags = tree2conlltags(correct)
guessed_tags = tree2conlltags(guessed)
except ValueError:
# This exception case is for nested chunk structures,
# where tree2conlltags will fail with a ValueError: "Tree
# is too deeply nested to be printed in CoNLL format."
correct_tags = guessed_tags = ()
self._tags_total += len(correct_tags)
self._tags_correct += sum(1 for (t,g) in zip(guessed_tags,
correct_tags)
if t==g)
def accuracy(self):
"""
Return the overall tag-based accuracy for all texts that have
been scored by this ``ChunkScore``, using the IOB (conll2000)
tag encoding.
:rtype: float
"""
if self._tags_total == 0: return 1
return self._tags_correct/self._tags_total
def precision(self):
"""
Return the overall precision for all texts that have been
scored by this ``ChunkScore``.
:rtype: float
"""
self._updateMeasures()
div = self._tp_num + self._fp_num
if div == 0: return 0
else: return self._tp_num / div
def recall(self):
"""
Return the overall recall for all texts that have been
scored by this ``ChunkScore``.
:rtype: float
"""
self._updateMeasures()
div = self._tp_num + self._fn_num
if div == 0: return 0
else: return self._tp_num / div
def f_measure(self, alpha=0.5):
"""
Return the overall F measure for all texts that have been
scored by this ``ChunkScore``.
:param alpha: the relative weighting of precision and recall.
Larger alpha biases the score towards the precision value,
while smaller alpha biases the score towards the recall
value. ``alpha`` should have a value in the range [0,1].
:type alpha: float
:rtype: float
"""
self._updateMeasures()
p = self.precision()
r = self.recall()
if p == 0 or r == 0: # what if alpha is 0 or 1?
return 0
return 1/(alpha/p + (1-alpha)/r)
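# Added note: f_measure() is the weighted harmonic mean 1/(alpha/p + (1-alpha)/r).
# Worked example with the default alpha=0.5: precision p=0.75 and recall r=0.60
# give F = 1/(0.5/0.75 + 0.5/0.60) = 1/1.5 = 0.666..., the same value as the
# balanced F1 = 2*p*r/(p+r) = 0.9/1.35.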
def missed(self):
"""
Return the chunks which were included in the
correct chunk structures, but not in the guessed chunk
structures, listed in input order.
:rtype: list of chunks
"""
self._updateMeasures()
chunks = list(self._fn)
return [c[1] for c in chunks] # discard position information
def incorrect(self):
"""
Return the chunks which were included in the guessed chunk structures,
but not in the correct chunk structures, listed in input order.
:rtype: list of chunks
"""
self._updateMeasures()
chunks = list(self._fp)
return [c[1] for c in chunks] # discard position information
def correct(self):
"""
Return the chunks which were included in the correct
chunk structures, listed in input order.
:rtype: list of chunks
"""
chunks = list(self._correct)
return [c[1] for c in chunks] # discard position information
def guessed(self):
"""
Return the chunks which were included in the guessed
chunk structures, listed in input order.
:rtype: list of chunks
"""
chunks = list(self._guessed)
return [c[1] for c in chunks] # discard position information
def __len__(self):
self._updateMeasures()
return self._tp_num + self._fn_num
def __repr__(self):
"""
Return a concise representation of this ``ChunkScore``.
:rtype: str
"""
return '<ChunkScore of '+repr(len(self))+' chunks>'
def __str__(self):
"""
Return a verbose representation of this ``ChunkScore``.
This representation includes the precision, recall, and
f-measure scores. For other information about the score,
use the accessor methods (e.g., ``missed()`` and ``incorrect()``).
:rtype: str
"""
return ("ChunkParse score:\n" +
(" IOB Accuracy: {:5.1f}%\n".format(self.accuracy()*100)) +
(" Precision: {:5.1f}%\n".format(self.precision()*100)) +
(" Recall: {:5.1f}%\n".format(self.recall()*100)) +
(" F-Measure: {:5.1f}%".format(self.f_measure()*100)))
# extract chunks, and assign unique id, the absolute position of
# the first word of the chunk
def _chunksets(t, count, chunk_label):
pos = 0
chunks = []
for child in t:
if isinstance(child, Tree):
if re.match(chunk_label, child.label()):
chunks.append(((count, pos), child.freeze()))
pos += len(child.leaves())
else:
pos += 1
return set(chunks)
def tagstr2tree(s, chunk_label="NP", root_label="S", sep='/',
source_tagset=None, target_tagset=None):
"""
Divide a string of bracketed tagged text into
chunks and unchunked tokens, and produce a Tree.
Chunks are marked by square brackets (``[...]``). Words are
delimited by whitespace, and each word should have the form
``text/tag``. Words that do not contain a slash are
assigned a ``tag`` of None.
:param s: The string to be converted
:type s: str
:param chunk_label: The label to use for chunk nodes
:type chunk_label: str
:param root_label: The label to use for the root of the tree
:type root_label: str
:rtype: Tree
"""
WORD_OR_BRACKET = re.compile(r'\[|\]|[^\[\]\s]+')
stack = [Tree(root_label, [])]
for match in WORD_OR_BRACKET.finditer(s):
text = match.group()
if text[0] == '[':
if len(stack) != 1:
raise ValueError('Unexpected [ at char {:d}'.format(match.start()))
chunk = Tree(chunk_label, [])
stack[-1].append(chunk)
stack.append(chunk)
elif text[0] == ']':
if len(stack) != 2:
raise ValueError('Unexpected ] at char {:d}'.format(match.start()))
stack.pop()
else:
if sep is None:
stack[-1].append(text)
else:
word, tag = str2tuple(text, sep)
if source_tagset and target_tagset:
tag = map_tag(source_tagset, target_tagset, tag)
stack[-1].append((word, tag))
if len(stack) != 1:
raise ValueError('Expected ] at char {:d}'.format(len(s)))
return stack[0]
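# Illustrative example (added; see also demo() at the bottom of this module):
# tagstr2tree("[ the/DT dog/NN ] barked/VBD", chunk_label='NP') produces a
# tree equivalent to
#   Tree('S', [Tree('NP', [('the', 'DT'), ('dog', 'NN')]), ('barked', 'VBD')])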
### CONLL
_LINE_RE = re.compile(r'(\S+)\s+(\S+)\s+([IOB])-?(\S+)?')
def conllstr2tree(s, chunk_types=('NP', 'PP', 'VP'), root_label="S"):
"""
Return a chunk structure for a single sentence
encoded in the given CONLL 2000 style string.
This function converts a CoNLL IOB string into a tree.
It uses the specified chunk types
(defaults to NP, PP and VP), and creates a tree rooted at a node
labeled S (by default).
:param s: The CoNLL string to be converted.
:type s: str
:param chunk_types: The chunk types to be converted.
:type chunk_types: tuple
:param root_label: The node label to use for the root.
:type root_label: str
:rtype: Tree
"""
stack = [Tree(root_label, [])]
for lineno, line in enumerate(s.split('\n')):
if not line.strip(): continue
# Decode the line.
match = _LINE_RE.match(line)
if match is None:
raise ValueError('Error on line {:d}'.format(lineno))
(word, tag, state, chunk_type) = match.groups()
# If it's a chunk type we don't care about, treat it as O.
if (chunk_types is not None and
chunk_type not in chunk_types):
state = 'O'
# For "Begin"/"Outside", finish any completed chunks -
# also do so for "Inside" which don't match the previous token.
mismatch_I = state == 'I' and chunk_type != stack[-1].label()
if state in 'BO' or mismatch_I:
if len(stack) == 2: stack.pop()
# For "Begin", start a new chunk.
if state == 'B' or mismatch_I:
chunk = Tree(chunk_type, [])
stack[-1].append(chunk)
stack.append(chunk)
# Add the new word token.
stack[-1].append((word, tag))
return stack[0]
def tree2conlltags(t):
"""
Return a list of 3-tuples containing ``(word, tag, IOB-tag)``.
Convert a tree to the CoNLL IOB tag format.
:param t: The tree to be converted.
:type t: Tree
:rtype: list(tuple)
"""
tags = []
for child in t:
try:
category = child.label()
prefix = "B-"
for contents in child:
if isinstance(contents, Tree):
raise ValueError("Tree is too deeply nested to be printed in CoNLL format")
tags.append((contents[0], contents[1], prefix+category))
prefix = "I-"
except AttributeError:
tags.append((child[0], child[1], "O"))
return tags
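# Illustrative example (added): a flat chunk tree maps to B-/I-/O tags, e.g.
#   t = Tree('S', [Tree('NP', [('the', 'DT'), ('dog', 'NN')]), ('barked', 'VBD')])
#   tree2conlltags(t)
#   -> [('the', 'DT', 'B-NP'), ('dog', 'NN', 'I-NP'), ('barked', 'VBD', 'O')]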
def conlltags2tree(sentence, chunk_types=('NP','PP','VP'),
root_label='S', strict=False):
"""
Convert the CoNLL IOB format to a tree.
"""
tree = Tree(root_label, [])
for (word, postag, chunktag) in sentence:
if chunktag is None:
if strict:
raise ValueError("Bad conll tag sequence")
else:
# Treat as O
tree.append((word,postag))
elif chunktag.startswith('B-'):
tree.append(Tree(chunktag[2:], [(word,postag)]))
elif chunktag.startswith('I-'):
if (len(tree)==0 or not isinstance(tree[-1], Tree) or
tree[-1].label() != chunktag[2:]):
if strict:
raise ValueError("Bad conll tag sequence")
else:
# Treat as B-*
tree.append(Tree(chunktag[2:], [(word,postag)]))
else:
tree[-1].append((word,postag))
elif chunktag == 'O':
tree.append((word,postag))
else:
raise ValueError("Bad conll tag {0!r}".format(chunktag))
return tree
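# Illustrative round trip (added): conlltags2tree() inverts tree2conlltags(),
# so for the example above
#   conlltags2tree([('the', 'DT', 'B-NP'), ('dog', 'NN', 'I-NP'),
#                   ('barked', 'VBD', 'O')])
# rebuilds Tree('S', [Tree('NP', [('the', 'DT'), ('dog', 'NN')]), ('barked', 'VBD')]).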
def tree2conllstr(t):
"""
Return a multiline string where each line contains a word, tag and IOB tag.
Convert a tree to the CoNLL IOB string format
:param t: The tree to be converted.
:type t: Tree
:rtype: str
"""
lines = [" ".join(token) for token in tree2conlltags(t)]
return '\n'.join(lines)
### IEER
_IEER_DOC_RE = re.compile(r'<DOC>\s*'
r'(<DOCNO>\s*(?P<docno>.+?)\s*</DOCNO>\s*)?'
r'(<DOCTYPE>\s*(?P<doctype>.+?)\s*</DOCTYPE>\s*)?'
r'(<DATE_TIME>\s*(?P<date_time>.+?)\s*</DATE_TIME>\s*)?'
r'<BODY>\s*'
r'(<HEADLINE>\s*(?P<headline>.+?)\s*</HEADLINE>\s*)?'
r'<TEXT>(?P<text>.*?)</TEXT>\s*'
r'</BODY>\s*</DOC>\s*', re.DOTALL)
_IEER_TYPE_RE = re.compile(r'<b_\w+\s+[^>]*?type="(?P<type>\w+)"')
def _ieer_read_text(s, root_label):
stack = [Tree(root_label, [])]
# s will be None if there is no headline in the text
# return the empty list in place of a Tree
if s is None:
return []
for piece_m in re.finditer(r'<[^>]+>|[^\s<]+', s):
piece = piece_m.group()
try:
if piece.startswith('<b_'):
m = _IEER_TYPE_RE.match(piece)
if m is None: print('XXXX', piece)
chunk = Tree(m.group('type'), [])
stack[-1].append(chunk)
stack.append(chunk)
elif piece.startswith('<e_'):
stack.pop()
# elif piece.startswith('<'):
# print "ERROR:", piece
# raise ValueError # Unexpected HTML
else:
stack[-1].append(piece)
except (IndexError, ValueError):
raise ValueError('Bad IEER string (error at character {:d})'.format(piece_m.start()))
if len(stack) != 1:
raise ValueError('Bad IEER string')
return stack[0]
def ieerstr2tree(s, chunk_types = ['LOCATION', 'ORGANIZATION', 'PERSON', 'DURATION',
'DATE', 'CARDINAL', 'PERCENT', 'MONEY', 'MEASURE'], root_label="S"):
"""
Return a chunk structure containing the chunked tagged text that is
encoded in the given IEER style string.
Convert a string of chunked tagged text in the IEER named
entity format into a chunk structure. Chunks are of several
types, LOCATION, ORGANIZATION, PERSON, DURATION, DATE, CARDINAL,
PERCENT, MONEY, and MEASURE.
:rtype: Tree
"""
# Try looking for a single document. If that doesn't work, then just
# treat everything as if it was within the <TEXT>...</TEXT>.
m = _IEER_DOC_RE.match(s)
if m:
return {
'text': _ieer_read_text(m.group('text'), root_label),
'docno': m.group('docno'),
'doctype': m.group('doctype'),
'date_time': m.group('date_time'),
#'headline': m.group('headline')
# we want to capture NEs in the headline too!
'headline': _ieer_read_text(m.group('headline'), root_label),
}
else:
return _ieer_read_text(s, root_label)
def demo():
s = "[ Pierre/NNP Vinken/NNP ] ,/, [ 61/CD years/NNS ] old/JJ ,/, will/MD join/VB [ the/DT board/NN ] ./."
import nltk
t = nltk.chunk.tagstr2tree(s, chunk_label='NP')
t.pprint()
print()
s = """
These DT B-NP
research NN I-NP
protocols NNS I-NP
offer VBP B-VP
to TO B-PP
the DT B-NP
patient NN I-NP
not RB O
only RB O
the DT B-NP
very RB I-NP
best JJS I-NP
therapy NN I-NP
which WDT B-NP
we PRP B-NP
have VBP B-VP
established VBN I-VP
today NN B-NP
but CC B-NP
also RB I-NP
the DT B-NP
hope NN I-NP
of IN B-PP
something NN B-NP
still RB B-ADJP
better JJR I-ADJP
. . O
"""
conll_tree = conllstr2tree(s, chunk_types=('NP', 'PP'))
conll_tree.pprint()
# Demonstrate CoNLL output
print("CoNLL output:")
print(nltk.chunk.tree2conllstr(conll_tree))
print()
if __name__ == '__main__':
demo()
|
|
import os
import wx
import wx.lib.agw.multidirdialog as MDD
from abc import ABCMeta, abstractmethod
from gooey.gui.lang import i18n
from gooey.gui.util.filedrop import FileDrop
from gooey.gui.widgets.calender_dialog import CalendarDlg
class WidgetPack(object):
"""
Interface specifying the contract to which
all `WidgetPack`s will adhere
"""
__metaclass__ = ABCMeta
@abstractmethod
def build(self, parent, data, choices=None):
pass
def onResize(self, evt):
pass
@staticmethod
def get_command(data):
return ''
@staticmethod
def disable_quoting(data):
nargs = data.get('nargs', None)
if not nargs:
return False
return nargs not in (1, '?')
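# Added note: disable_quoting() looks only at argparse-style nargs values, e.g.
# (illustrative) disable_quoting({'nargs': '*'}) is True, while
# disable_quoting({}) and disable_quoting({'nargs': '?'}) are False, so only
# multi-value arguments skip shell quoting.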
class BaseChooser(WidgetPack):
def __init__(self):
self.button_text = i18n._('browse')
self.parent = None
self.widget = None
self.button = None
def build(self, parent, data, choices=None):
self.parent = parent
self.widget = wx.TextCtrl(self.parent)
self.widget.AppendText('')
self.widget.SetMinSize((0, -1))
dt = FileDrop(self.widget)
self.widget.SetDropTarget(dt)
self.button = wx.Button(self.parent, label=self.button_text, size=(73, 23))
widget_sizer = wx.BoxSizer(wx.HORIZONTAL)
widget_sizer.Add(self.widget, 1, wx.EXPAND)
widget_sizer.AddSpacer(10)
widget_sizer.Add(self.button, 0, wx.ALIGN_CENTER_VERTICAL)
parent.Bind(wx.EVT_BUTTON, self.on_button, self.button)
return widget_sizer
def get_value(self):
return self.widget.GetValue()
def __repr__(self):
return self.__class__.__name__
class BaseFileChooser(BaseChooser):
dialog = None
def __init__(self):
BaseChooser.__init__(self)
def on_button(self, evt):
dlg = self.dialog(self.parent)
result = (self.get_path(dlg)
if dlg.ShowModal() == wx.ID_OK
else None)
if result:
self.widget.SetValue(result)
def get_path(self, dlg):
return dlg.GetPath()
class BaseMultiFileChooser(BaseFileChooser):
def __init__(self, dialog):
BaseFileChooser.__init__(self)
self.dialog = dialog
def get_path(self, dlg):
return os.pathsep.join(dlg.GetPaths())
class MultiFileSaverPayload(BaseMultiFileChooser):
def __init__(self, *args, **kwargs):
BaseMultiFileChooser.__init__(self, build_dialog(wx.FD_MULTIPLE, False))
class MultiDirChooserPayload(BaseMultiFileChooser):
class MyMultiDirChooser(MDD.MultiDirDialog):
def __init__(self, *args, **kwargs):
kwargs.update({
'title': "Select Directories",
'defaultPath': os.getcwd(),
'agwStyle': MDD.DD_MULTIPLE|MDD.DD_DIR_MUST_EXIST
})
MDD.MultiDirDialog.__init__(self, *args, **kwargs)
def GetPaths(self):
return self.dirCtrl.GetPaths()
def __init__(self, *args, **kwargs):
BaseMultiFileChooser.__init__(self, MultiDirChooserPayload.MyMultiDirChooser)
class TextInputPayload(WidgetPack):
def __init__(self, no_quoting=False):
self.widget = None
self.option_string = None
self.no_quoting = no_quoting
def build(self, parent, data, choices=None):
self.widget = wx.TextCtrl(parent)
dt = FileDrop(self.widget)
self.widget.SetDropTarget(dt)
self.widget.SetMinSize((0, -1))
self.widget.SetDoubleBuffered(True)
self.widget.AppendText('')
return self.widget
def get_value(self):
return self.widget.GetValue()
class TextAreaPayload(WidgetPack):
def __init__(self, no_quoting=False):
self.widget = None
self.option_string = None
self.no_quoting = no_quoting
def build(self, parent, data, choices=None):
self.widget = wx.TextCtrl(parent, style=wx.TE_MULTILINE)
dt = FileDrop(self.widget)
self.widget.SetDropTarget(dt)
self.widget.SetMinSize((0, -1))
self.widget.SetDoubleBuffered(True)
self.widget.AppendText('')
return self.widget
def get_value(self):
return self.widget.GetValue()
class DropdownPayload(WidgetPack):
default_value = 'Select Option'
def __init__(self, no_quoting=False):
self.widget = None
self.option_string = None
self.no_quoting = no_quoting
def build(self, parent, data, choices=None):
self.widget = wx.ComboBox(
parent=parent,
id=-1,
value=self.default_value,
choices=[self.default_value] + choices,
style=wx.CB_DROPDOWN
)
return self.widget
def get_value(self):
return self.widget.GetValue()
def set_value(self, text):
self.widget.SetValue(text)
class CounterPayload(WidgetPack):
def __init__(self):
self.widget = None
def build(self, parent, data, choices=None):
self.widget = wx.ComboBox(
parent=parent,
id=-1,
value='',
choices=list(map(str, range(1, 11))),  # list() so the choices also work on Python 3
style=wx.CB_DROPDOWN
)
return self.widget
def get_value(self):
return self.widget.GetValue()
class DirDialog(wx.DirDialog):
def __init__(self, parent, *args, **kwargs):
wx.DirDialog.__init__(self, parent, 'Select Directory', style=wx.DD_DEFAULT_STYLE)
def safe_default(data, default):
return ''
def build_dialog(style, exist_constraint=True, **kwargs):
if exist_constraint:
return lambda panel: wx.FileDialog(panel, style=style | wx.FD_FILE_MUST_EXIST, **kwargs)
else:
return lambda panel: wx.FileDialog(panel, style=style, **kwargs)
def build_subclass(subclass, dialog):
return type(subclass, (BaseFileChooser,), {'dialog': dialog})
FileSaverPayload = build_subclass('FileSaverPayload', staticmethod(build_dialog(wx.FD_SAVE, False, defaultFile="Enter Filename")))
FileChooserPayload = build_subclass('FileChooserPayload', staticmethod(build_dialog(wx.FD_OPEN)))
DirChooserPayload = build_subclass('DirChooserPayload', DirDialog)
DateChooserPayload = build_subclass('DateChooserPayload', CalendarDlg)
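# Added note: build_subclass('FileChooserPayload', dialog) above is shorthand
# for declaring
#
#   class FileChooserPayload(BaseFileChooser):
#       dialog = dialog
#
# so each generated *Payload is simply a BaseFileChooser bound to a specific
# wx dialog (or dialog factory).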
|
|
from operator import (
and_,
ge,
gt,
le,
lt,
methodcaller,
ne,
or_,
)
from unittest import TestCase
import numpy
from numpy import (
arange,
eye,
full,
isnan,
zeros,
)
from pandas import (
DataFrame,
date_range,
Int64Index,
)
from zipline.modelling.expression import (
NumericalExpression,
NUMEXPR_MATH_FUNCS,
)
from zipline.modelling.factor import Factor
from zipline.utils.test_utils import check_arrays
class F(Factor):
inputs = ()
window_length = 0
class G(Factor):
inputs = ()
window_length = 0
class H(Factor):
inputs = ()
window_length = 0
class NumericalExpressionTestCase(TestCase):
def setUp(self):
self.dates = date_range('2014-01-01', periods=5, freq='D')
self.assets = Int64Index(range(5))
self.f = F()
self.g = G()
self.h = H()
self.fake_raw_data = {
self.f: full((5, 5), 3),
self.g: full((5, 5), 2),
self.h: full((5, 5), 1),
}
self.mask = DataFrame(True, index=self.dates, columns=self.assets)
def check_output(self, expr, expected):
result = expr._compute(
[self.fake_raw_data[input_] for input_ in expr.inputs],
self.mask.index,
self.mask.columns,
self.mask.values,
)
check_arrays(result, expected)
def check_constant_output(self, expr, expected):
self.assertFalse(isnan(expected))
return self.check_output(expr, full((5, 5), expected))
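# (Added note) check_constant_output() asserts that an expression built from
# the constant factors above (f == 3, g == 2, h == 1) evaluates to a uniform
# 5x5 array of `expected`; the arithmetic tests below all reduce to such
# constants.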
def test_validate_good(self):
f = self.f
g = self.g
NumericalExpression("x_0", (f,))
NumericalExpression("x_0 ", (f,))
NumericalExpression("x_0 + x_0", (f,))
NumericalExpression("x_0 + 2", (f,))
NumericalExpression("2 * x_0", (f,))
NumericalExpression("x_0 + x_1", (f, g))
NumericalExpression("x_0 + x_1 + x_0", (f, g))
NumericalExpression("x_0 + 1 + x_1", (f, g))
def test_validate_bad(self):
f, g, h = F(), G(), H()
# Too few inputs.
with self.assertRaises(ValueError):
NumericalExpression("x_0", ())
with self.assertRaises(ValueError):
NumericalExpression("x_0 + x_1", (f,))
# Too many inputs.
with self.assertRaises(ValueError):
NumericalExpression("x_0", (f, g))
with self.assertRaises(ValueError):
NumericalExpression("x_0 + x_1", (f, g, h))
# Invalid variable name.
with self.assertRaises(ValueError):
NumericalExpression("x_0x_1", (f,))
with self.assertRaises(ValueError):
NumericalExpression("x_0x_1", (f, g))
# Variable index must start at 0.
with self.assertRaises(ValueError):
NumericalExpression("x_1", (f,))
# Scalar operands must be numeric.
with self.assertRaises(TypeError):
"2" + f
with self.assertRaises(TypeError):
f + "2"
with self.assertRaises(TypeError):
f > "2"
# Boolean binary operators must be between filters.
with self.assertRaises(TypeError):
f + (f > 2)
with self.assertRaises(TypeError):
(f > f) > f
def test_negate(self):
f, g = self.f, self.g
self.check_constant_output(-f, -3.0)
self.check_constant_output(--f, 3.0)
self.check_constant_output(---f, -3.0)
self.check_constant_output(-(f + f), -6.0)
self.check_constant_output(-f + -f, -6.0)
self.check_constant_output(-(-f + -f), 6.0)
self.check_constant_output(f + -g, 1.0)
self.check_constant_output(f - -g, 5.0)
self.check_constant_output(-(f + g) + (f + g), 0.0)
self.check_constant_output((f + g) + -(f + g), 0.0)
self.check_constant_output(-(f + g) + -(f + g), -10.0)
def test_add(self):
f, g = self.f, self.g
self.check_constant_output(f + g, 5.0)
self.check_constant_output((1 + f) + g, 6.0)
self.check_constant_output(1 + (f + g), 6.0)
self.check_constant_output((f + 1) + g, 6.0)
self.check_constant_output(f + (1 + g), 6.0)
self.check_constant_output((f + g) + 1, 6.0)
self.check_constant_output(f + (g + 1), 6.0)
self.check_constant_output((f + f) + f, 9.0)
self.check_constant_output(f + (f + f), 9.0)
self.check_constant_output((f + g) + f, 8.0)
self.check_constant_output(f + (g + f), 8.0)
self.check_constant_output((f + g) + (f + g), 10.0)
self.check_constant_output((f + g) + (g + f), 10.0)
self.check_constant_output((g + f) + (f + g), 10.0)
self.check_constant_output((g + f) + (g + f), 10.0)
def test_subtract(self):
f, g = self.f, self.g
self.check_constant_output(f - g, 1.0) # 3 - 2
self.check_constant_output((1 - f) - g, -4.) # (1 - 3) - 2
self.check_constant_output(1 - (f - g), 0.0) # 1 - (3 - 2)
self.check_constant_output((f - 1) - g, 0.0) # (3 - 1) - 2
self.check_constant_output(f - (1 - g), 4.0) # 3 - (1 - 2)
self.check_constant_output((f - g) - 1, 0.0) # (3 - 2) - 1
self.check_constant_output(f - (g - 1), 2.0) # 3 - (2 - 1)
self.check_constant_output((f - f) - f, -3.) # (3 - 3) - 3
self.check_constant_output(f - (f - f), 3.0) # 3 - (3 - 3)
self.check_constant_output((f - g) - f, -2.) # (3 - 2) - 3
self.check_constant_output(f - (g - f), 4.0) # 3 - (2 - 3)
self.check_constant_output((f - g) - (f - g), 0.0) # (3 - 2) - (3 - 2)
self.check_constant_output((f - g) - (g - f), 2.0) # (3 - 2) - (2 - 3)
self.check_constant_output((g - f) - (f - g), -2.) # (2 - 3) - (3 - 2)
self.check_constant_output((g - f) - (g - f), 0.0) # (2 - 3) - (2 - 3)
def test_multiply(self):
f, g = self.f, self.g
self.check_constant_output(f * g, 6.0)
self.check_constant_output((2 * f) * g, 12.0)
self.check_constant_output(2 * (f * g), 12.0)
self.check_constant_output((f * 2) * g, 12.0)
self.check_constant_output(f * (2 * g), 12.0)
self.check_constant_output((f * g) * 2, 12.0)
self.check_constant_output(f * (g * 2), 12.0)
self.check_constant_output((f * f) * f, 27.0)
self.check_constant_output(f * (f * f), 27.0)
self.check_constant_output((f * g) * f, 18.0)
self.check_constant_output(f * (g * f), 18.0)
self.check_constant_output((f * g) * (f * g), 36.0)
self.check_constant_output((f * g) * (g * f), 36.0)
self.check_constant_output((g * f) * (f * g), 36.0)
self.check_constant_output((g * f) * (g * f), 36.0)
self.check_constant_output(f * f * f * 0 * f * f, 0.0)
def test_divide(self):
f, g = self.f, self.g
self.check_constant_output(f / g, 3.0 / 2.0)
self.check_constant_output(
(2 / f) / g,
(2 / 3.0) / 2.0
)
self.check_constant_output(
2 / (f / g),
2 / (3.0 / 2.0),
)
self.check_constant_output(
(f / 2) / g,
(3.0 / 2) / 2.0,
)
self.check_constant_output(
f / (2 / g),
3.0 / (2 / 2.0),
)
self.check_constant_output(
(f / g) / 2,
(3.0 / 2.0) / 2,
)
self.check_constant_output(
f / (g / 2),
3.0 / (2.0 / 2),
)
self.check_constant_output(
(f / f) / f,
(3.0 / 3.0) / 3.0
)
self.check_constant_output(
f / (f / f),
3.0 / (3.0 / 3.0),
)
self.check_constant_output(
(f / g) / f,
(3.0 / 2.0) / 3.0,
)
self.check_constant_output(
f / (g / f),
3.0 / (2.0 / 3.0),
)
self.check_constant_output(
(f / g) / (f / g),
(3.0 / 2.0) / (3.0 / 2.0),
)
self.check_constant_output(
(f / g) / (g / f),
(3.0 / 2.0) / (2.0 / 3.0),
)
self.check_constant_output(
(g / f) / (f / g),
(2.0 / 3.0) / (3.0 / 2.0),
)
self.check_constant_output(
(g / f) / (g / f),
(2.0 / 3.0) / (2.0 / 3.0),
)
def test_pow(self):
f, g = self.f, self.g
self.check_constant_output(f ** g, 3.0 ** 2)
self.check_constant_output(2 ** f, 2.0 ** 3)
self.check_constant_output(f ** 2, 3.0 ** 2)
self.check_constant_output((f + g) ** 2, (3.0 + 2.0) ** 2)
self.check_constant_output(2 ** (f + g), 2 ** (3.0 + 2.0))
self.check_constant_output(f ** (f ** g), 3.0 ** (3.0 ** 2.0))
self.check_constant_output((f ** f) ** g, (3.0 ** 3.0) ** 2.0)
self.check_constant_output((f ** g) ** (f ** g), 9.0 ** 9.0)
self.check_constant_output((f ** g) ** (g ** f), 9.0 ** 8.0)
self.check_constant_output((g ** f) ** (f ** g), 8.0 ** 9.0)
self.check_constant_output((g ** f) ** (g ** f), 8.0 ** 8.0)
def test_mod(self):
f, g = self.f, self.g
self.check_constant_output(f % g, 3.0 % 2.0)
self.check_constant_output(f % 2.0, 3.0 % 2.0)
self.check_constant_output(g % f, 2.0 % 3.0)
self.check_constant_output((f + g) % 2, (3.0 + 2.0) % 2)
self.check_constant_output(2 % (f + g), 2 % (3.0 + 2.0))
self.check_constant_output(f % (f % g), 3.0 % (3.0 % 2.0))
self.check_constant_output((f % f) % g, (3.0 % 3.0) % 2.0)
self.check_constant_output((f + g) % (f * g), 5.0 % 6.0)
def test_math_functions(self):
f, g = self.f, self.g
fake_raw_data = self.fake_raw_data
alt_fake_raw_data = {
self.f: full((5, 5), .5),
self.g: full((5, 5), -.5),
}
for funcname in NUMEXPR_MATH_FUNCS:
method = methodcaller(funcname)
func = getattr(numpy, funcname)
# These methods have restricted domains (within [-1, 1]), so we need
# alternate inputs that are in the domain.
if funcname in ('arcsin', 'arccos', 'arctanh'):
self.fake_raw_data = alt_fake_raw_data
else:
self.fake_raw_data = fake_raw_data
f_val = self.fake_raw_data[f][0, 0]
g_val = self.fake_raw_data[g][0, 0]
self.check_constant_output(method(f), func(f_val))
self.check_constant_output(method(g), func(g_val))
self.check_constant_output(method(f) + 1, func(f_val) + 1)
self.check_constant_output(1 + method(f), 1 + func(f_val))
self.check_constant_output(method(f + .25), func(f_val + .25))
self.check_constant_output(method(.25 + f), func(.25 + f_val))
self.check_constant_output(
method(f) + method(g),
func(f_val) + func(g_val),
)
self.check_constant_output(
method(f + g),
func(f_val + g_val),
)
def test_comparisons(self):
f, g, h = self.f, self.g, self.h
self.fake_raw_data = {
f: arange(25).reshape(5, 5),
g: arange(25).reshape(5, 5) - eye(5),
h: full((5, 5), 5),
}
f_data = self.fake_raw_data[f]
g_data = self.fake_raw_data[g]
cases = [
# Sanity Check with hand-computed values.
(f, g, eye(5), zeros((5, 5))),
(f, 10, f_data, 10),
(10, f, 10, f_data),
(f, f, f_data, f_data),
(f + 1, f, f_data + 1, f_data),
(1 + f, f, 1 + f_data, f_data),
(f, g, f_data, g_data),
(f + 1, g, f_data + 1, g_data),
(f, g + 1, f_data, g_data + 1),
(f + 1, g + 1, f_data + 1, g_data + 1),
((f + g) / 2, f ** 2, (f_data + g_data) / 2, f_data ** 2),
]
for op in (gt, ge, lt, le, ne):
for expr_lhs, expr_rhs, expected_lhs, expected_rhs in cases:
self.check_output(
op(expr_lhs, expr_rhs),
op(expected_lhs, expected_rhs),
)
def test_boolean_binops(self):
f, g, h = self.f, self.g, self.h
self.fake_raw_data = {
f: arange(25).reshape(5, 5),
g: arange(25).reshape(5, 5) - eye(5),
h: full((5, 5), 5),
}
# Should be True on the diagonal.
eye_filter = f > g
# Should be True in the first row only.
first_row_filter = f < h
eye_mask = eye(5, dtype=bool)
first_row_mask = zeros((5, 5), dtype=bool)
first_row_mask[0] = 1
self.check_output(eye_filter, eye_mask)
self.check_output(first_row_filter, first_row_mask)
for op in (and_, or_): # NumExpr doesn't support xor.
self.check_output(
op(eye_filter, first_row_filter),
op(eye_mask, first_row_mask),
)
|
|
"""Miscellaneous operations for traditional philology and simple
statistics."""
__author__ = ['Kyle P. Johnson <kyle@kyle-p-johnson.com>',
'Steven Bird <stevenbird1@gmail.com>', # original author of NLTK's ConcordanceIndex()
'Edward Loper <edloper@gmail.com>'] # original author of NLTK's ConcordanceIndex()
__license__ = 'MIT License. See LICENSE.'
from cltk.utils.cltk_logger import logger
from collections import defaultdict
from nltk.text import ConcordanceIndex
from nltk.tokenize.punkt import PunktLanguageVars
import os
class Philology:
"""Class for Philological and simple statistics."""
def __init__(self):
"""Misc. contructors.
TODO: add a language here
"""
pass
def _read_file(self, filepath):
"""Read a file and return it as a string"""
filepath = os.path.expanduser(filepath) #? Check this is ok if absolute paths passed in
with open(filepath) as opened_file:
read_file = opened_file.read()
return read_file
def _build_concordance(self, text_str):
"""
Inherit or mimic the logic of ConcordanceIndex() at http://www.nltk.org/_modules/nltk/text.html
and/or ConcordanceSearchView() & SearchCorpus() at https://github.com/nltk/nltk/blob/develop/nltk/app/concordance_app.py
:param text_str: Text to be turned into a concordance
:type text_str: str
:return: list
"""
p = PunktLanguageVars()
orig_tokens = p.word_tokenize(text_str)
c = ConcordanceIndex(orig_tokens)
#! rm dupes after index, before loop
tokens = set(orig_tokens)
tokens = [x for x in tokens if x not in [',', '.', ';', ':', '"', "'", '[', ']']] # this needs to be changed or rm'ed
return c.return_concordance_all(tokens)
def write_concordance_from_file(self, filepaths, name):
"""This calls my modified ConcordanceIndex, taken and modified from
the NLTK, and writes to disk a file named 'concordance_' + name at
'~/cltk_data/user_data/'.
TODO: Add language (here or in class), lowercase option, stemming/
lemmatization, else?
:type filepaths: str or list
:param filepaths: Filepath of text(s) to be used in concordance.
:rtype: None
"""
assert isinstance(filepaths, (str, list))
if isinstance(filepaths, str):
filepath = filepaths
text = self._read_file(filepath)
elif isinstance(filepaths, list):
text = ''
for filepath in filepaths:
text += self._read_file(filepath)
list_of_lists = self._build_concordance(text)
user_data_rel = '~/cltk_data/user_data'
user_data = os.path.expanduser(user_data_rel)
if not os.path.isdir(user_data):
os.makedirs(user_data)
file_path = os.path.join(user_data, 'concordance_' + name + '.txt')
concordance_output = ''
for word_list in list_of_lists:
for line in word_list:
concordance_output += line + '\n'
try:
with open(file_path, 'w') as open_file:
open_file.write(concordance_output)
logger.info("Wrote concordance to '%s'." % file_path)
except IOError as io_error:
logger.error("Failed to write concordance to '%s': %s" % (file_path, io_error))
def write_concordance_from_string(self, text, name):
"""A reworkinng of write_concordance_from_file(). Refactor these."""
list_of_lists = self._build_concordance(text)
user_data_rel = '~/cltk_data/user_data'
user_data = os.path.expanduser(user_data_rel)
if not os.path.isdir(user_data):
os.makedirs(user_data)
file_path = os.path.join(user_data, 'concordance_' + name + '.txt')
concordance_output = ''
for word_list in list_of_lists:
for line in word_list:
concordance_output += line + '\n'
try:
with open(file_path, 'w') as open_file:
open_file.write(concordance_output)
logger.info("Wrote concordance to '%s'." % file_path)
except IOError as io_error:
logger.error("Failed to write concordance to '%s': %s" % (file_path, io_error))
class ConcordanceIndex(object):
"""
An index that can be used to look up the offset locations at which
a given word occurs in a document. This is a helper class not
intended for direct use. Repurposed from the NLTK:
https://github.com/nltk/nltk/blob/7ba46b9d52ed0c03bf806193f38d8c0e9bd8a9b4/nltk/text.py
"""
def __init__(self, tokens, key=lambda x:x):
"""
Construct a new concordance index.
:param tokens: The document (list of tokens) that this
concordance index was created from. This list can be used
to access the context of a given word occurrence.
:param key: A function that maps each token to a normalized
version that will be used as a key in the index. E.g., if
you use ``key=lambda s:s.lower()``, then the index will be
case-insensitive.
"""
self._tokens = tokens
"""The document (list of tokens) that this concordance index
was created from."""
self._key = key
"""Function mapping each token to an index key (or None)."""
self._offsets = defaultdict(list)
"""Dictionary mapping words (or keys) to lists of offset
indices."""
# Initialize the index (self._offsets)
for index, word in enumerate(tokens):
word = self._key(word)
self._offsets[word].append(index)
def tokens(self):
"""
:rtype: list(str)
:return: The document that this concordance index was
created from.
"""
return self._tokens
def offsets(self, word):
"""
:rtype: list(int)
:return: A list of the offset positions at which the given
word occurs. If a key function was specified for the
index, then the given word's key will be looked up.
"""
word = self._key(word)
return self._offsets[word]
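# Illustrative example (added): offsets() returns every position of a token,
# e.g. ConcordanceIndex(['the', 'dog', 'saw', 'the', 'cat']).offsets('the')
# evaluates to [0, 3].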
def __repr__(self):
return '<ConcordanceIndex for %d tokens (%d types)>' % (
len(self._tokens), len(self._offsets))
def return_concordance_word(self, word, width=150, lines=1000000):
"""
Makes concordance for ``word`` with the specified context window.
Returns a list of concordance lines for the given input word.
:param word: The target word
:type word: str
:param width: The width of each line, in characters (default=150)
:type width: int
:param lines: The maximum number of concordance lines to return (default=1000000)
:type lines: int
"""
return_list = []
half_width = (width - len(word) - 2) // 2
context = width // 4 # approx number of words of context
offsets = self.offsets(word)
if offsets:
lines = min(lines, len(offsets))
for i in offsets[:lines]:  # return at most 'lines' matches
left = (' ' * half_width +
' '.join(self._tokens[i-context:i]))
right = ' '.join(self._tokens[i+1:i+context])
left = left[-half_width:]
right = right[:half_width]
#print(left, '*', self._tokens[i], '*', right)
line_str = left + ' ' + self._tokens[i] + ' ' + right
return_list.append(line_str)
return return_list
def return_concordance_all(self, tokens):
"""Take a list of tokens, iteratively run each word through
return_concordance_word and build a list of all. This returns a list
of lists.
"""
tokens = sorted(tokens) #! is the list order preserved?
concordance_list = []
for token in tokens:
x = None
x = self.return_concordance_word(token)
concordance_list.append(x)
return concordance_list
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import six
class ClientException(Exception):
"""The base exception class for all exceptions this library raises."""
message = 'Unknown Error'
def __init__(self, code=None, message=None, request_id=None,
url=None, method=None):
self.code = code
self.message = message or self.__class__.message
self.request_id = request_id
self.url = url
self.method = method
def __str__(self):
formatted_string = "%s" % self.message
if self.code:
formatted_string += " (HTTP %s)" % self.code
if self.request_id:
formatted_string += " (Request-ID: %s)" % self.request_id
return formatted_string
class RetryAfterException(ClientException):
"""The base exception for ClientExceptions that use Retry-After header."""
def __init__(self, *args, **kwargs):
try:
self.retry_after = int(kwargs.pop('retry_after'))
except (KeyError, ValueError):
self.retry_after = 0
super(RetryAfterException, self).__init__(*args, **kwargs)
class ConnectionFailure(ClientException):
"""Connection failure."""
class ConnectionTimeout(ClientException):
"""Connection timeout."""
class UnknownConnectionError(ClientException):
"""Unknown connection error."""
class SSLError(ClientException):
"""SSL connection error."""
class BadRequest(ClientException):
"""HTTP 400 - Bad request: you sent some malformed data."""
http_status = 400
message = "Bad request"
class Unauthorized(ClientException):
"""HTTP 401 - Unauthorized: bad credentials."""
http_status = 401
message = "Unauthorized"
class Forbidden(ClientException):
"""HTTP 403 - Forbidden.
Your credentials don't give you access to this resource.
"""
http_status = 403
message = "Forbidden"
class NotFound(ClientException):
"""HTTP 404 - Not found."""
http_status = 404
message = "Not found"
class MetricNotFound(NotFound):
message = "Metric not found"
match = re.compile("Metric .* does not exist")
class ResourceNotFound(NotFound):
message = "Resource not found"
match = re.compile("Resource .* does not exist")
class ResourceTypeNotFound(NotFound):
message = "Resource type not found"
match = re.compile("Resource type .* does not exist")
class ArchivePolicyNotFound(NotFound):
message = "Archive policy not found"
match = re.compile("Archive policy .* does not exist")
class ArchivePolicyRuleNotFound(NotFound):
message = "Archive policy rule not found"
match = re.compile("Archive policy rule .* does not exist")
class MethodNotAllowed(ClientException):
"""HTTP 405 - Method Not Allowed."""
http_status = 405
message = "Method Not Allowed"
class NotAcceptable(ClientException):
"""HTTP 406 - Not Acceptable."""
http_status = 406
message = "Not Acceptable"
class Conflict(ClientException):
"""HTTP 409 - Conflict."""
http_status = 409
message = "Conflict"
class NamedMetricAlreadyExists(Conflict):
message = "Named metric already exists"
match = re.compile("Named metric .* already exist")
class ResourceTypeAlreadyExists(Conflict):
message = "Resource type already exists"
match = re.compile("Resource type .* already exists")
class ResourceAlreadyExists(Conflict):
message = "Resource already exists"
match = re.compile("Resource .* already exists")
class ArchivePolicyAlreadyExists(Conflict):
message = "Archive policy already exists"
match = re.compile("Archive policy .* already exists")
class ArchivePolicyRuleAlreadyExists(Conflict):
message = "Archive policy rule already exists"
match = re.compile("Archive policy rule .* already exists")
class OverLimit(RetryAfterException):
"""HTTP 413 - Over limit.
You're over the API limits for this time period.
"""
http_status = 413
message = "Over limit"
class RateLimit(RetryAfterException):
"""HTTP 429 - Rate limit.
You've sent too many requests for this time period.
"""
http_status = 429
message = "Rate limit"
class NotImplemented(ClientException): # noqa
"""HTTP 501 - Not Implemented.
The server does not support this operation.
"""
http_status = 501
message = "Not Implemented"
_error_classes = [BadRequest, Unauthorized, Forbidden, NotFound,
MethodNotAllowed, NotAcceptable, Conflict, OverLimit,
RateLimit, NotImplemented]
_error_classes_enhanced = {
NotFound: [MetricNotFound, ResourceTypeNotFound, ResourceNotFound,
ArchivePolicyRuleNotFound, ArchivePolicyNotFound],
Conflict: [NamedMetricAlreadyExists, ResourceTypeAlreadyExists,
ResourceAlreadyExists,
ArchivePolicyAlreadyExists,
ArchivePolicyRuleAlreadyExists]
}
_code_map = dict(
(c.http_status, (c, _error_classes_enhanced.get(c, [])))
for c in _error_classes)
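# For illustration (added): _code_map maps an HTTP status code to the generic
# exception class plus its message-matching refinements, e.g.
#   _code_map[404] == (NotFound, [MetricNotFound, ResourceTypeNotFound,
#                                 ResourceNotFound, ArchivePolicyRuleNotFound,
#                                 ArchivePolicyNotFound])
# from_response() below upgrades to a refinement when its `match` regex matches
# the error description returned by the server.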
def from_response(response, method=None):
"""Return an instance of one of the ClientException on an requests response.
Usage::
resp, body = requests.request(...)
if resp.status_code != 200:
raise from_response(resp)
"""
if response.status_code:
cls, enhanced_classes = _code_map.get(response.status_code,
(ClientException, []))
req_id = response.headers.get("x-openstack-request-id")
content_type = response.headers.get("Content-Type", "").split(";")[0]
kwargs = {
'code': response.status_code,
'method': method,
'url': response.url,
'request_id': req_id,
}
if "retry-after" in response.headers:
kwargs['retry_after'] = response.headers.get('retry-after')
if content_type == "application/json":
try:
body = response.json()
except ValueError:
pass
else:
if 'description' in body:
# Gnocchi json
desc = body.get('description')
if desc and isinstance(desc, six.text_type):
for enhanced_cls in enhanced_classes:
if enhanced_cls.match.match(desc):
cls = enhanced_cls
break
kwargs['message'] = desc
elif isinstance(body, dict) and isinstance(body.get("error"),
dict):
# Keystone json
kwargs['message'] = body["error"]["message"]
else:
kwargs['message'] = response.text
elif content_type.startswith("text/"):
kwargs['message'] = response.text
if not kwargs['message']:
del kwargs['message']
return cls(**kwargs)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RNN cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
# TODO(ebrevdo): Remove once _linear is fully deprecated.
# pylint: disable=protected-access
from tensorflow.contrib import rnn as contrib_rnn
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
# pylint: enable=protected-access
linear = rnn_cell_impl._linear
class RNNCellTest(test.TestCase):
def testLinear(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(1.0)):
x = array_ops.zeros([1, 2])
l = linear([x], 2, False)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([l], {x.name: np.array([[1., 2.]])})
self.assertAllClose(res[0], [[3.0, 3.0]])
# Checks prevent you from accidentally creating a shared function.
with self.assertRaises(ValueError):
l1 = linear([x], 2, False)
# But you can create a new one in a new scope and share the variables.
with variable_scope.variable_scope("l1") as new_scope:
l1 = linear([x], 2, False)
with variable_scope.variable_scope(new_scope, reuse=True):
linear([l1], 2, False)
self.assertEqual(len(variables_lib.trainable_variables()), 2)
def testBasicRNNCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
cell = rnn_cell_impl.BasicRNNCell(2)
g, _ = cell(x, m)
self.assertEqual([
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._BIAS_VARIABLE_NAME
], [v.name for v in cell.trainable_variables])
self.assertFalse(cell.non_trainable_variables)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g], {x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])})
self.assertEqual(res[0].shape, (1, 2))
def testBasicRNNCellNotTrainable(self):
with self.test_session() as sess:
def not_trainable_getter(getter, *args, **kwargs):
kwargs["trainable"] = False
return getter(*args, **kwargs)
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5),
custom_getter=not_trainable_getter):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
cell = rnn_cell_impl.BasicRNNCell(2)
g, _ = cell(x, m)
self.assertFalse(cell.trainable_variables)
self.assertEqual([
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/basic_rnn_cell/%s:0" % rnn_cell_impl._BIAS_VARIABLE_NAME
], [v.name for v in cell.non_trainable_variables])
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g], {x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])})
self.assertEqual(res[0].shape, (1, 2))
def testGRUCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
g, _ = rnn_cell_impl.GRUCell(2)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g], {x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])})
# Smoke test
self.assertAllClose(res[0], [[0.175991, 0.175991]])
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros(
[1, 3]) # Test GRUCell with input_size != num_units.
m = array_ops.zeros([1, 2])
g, _ = rnn_cell_impl.GRUCell(2)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g],
{x.name: np.array([[1., 1., 1.]]),
m.name: np.array([[0.1, 0.1]])})
# Smoke test
self.assertAllClose(res[0], [[0.156736, 0.156736]])
def testBasicLSTMCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 8])
cell = rnn_cell_impl.MultiRNNCell(
[
rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)
for _ in range(2)
],
state_is_tuple=False)
g, out_m = cell(x, m)
expected_variable_names = [
"root/multi_rnn_cell/cell_0/basic_lstm_cell/%s:0" %
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_0/basic_lstm_cell/%s:0" %
rnn_cell_impl._BIAS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_1/basic_lstm_cell/%s:0" %
rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
"root/multi_rnn_cell/cell_1/basic_lstm_cell/%s:0" %
rnn_cell_impl._BIAS_VARIABLE_NAME
]
self.assertEqual(
expected_variable_names, [v.name for v in cell.trainable_variables])
self.assertFalse(cell.non_trainable_variables)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, out_m],
{x.name: np.array([[1., 1.]]),
m.name: 0.1 * np.ones([1, 8])})
self.assertEqual(len(res), 2)
variables = variables_lib.global_variables()
self.assertEqual(expected_variable_names, [v.name for v in variables])
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
expected_mem = np.array([[
0.68967271, 0.68967271, 0.44848421, 0.44848421, 0.39897051,
0.39897051, 0.24024698, 0.24024698
]])
self.assertAllClose(res[1], expected_mem)
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros(
[1, 3]) # Test BasicLSTMCell with input_size != num_units.
m = array_ops.zeros([1, 4])
g, out_m = rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, out_m],
{x.name: np.array([[1., 1., 1.]]),
m.name: 0.1 * np.ones([1, 4])})
self.assertEqual(len(res), 2)
def testBasicLSTMCellDimension0Error(self):
"""Tests that dimension 0 in both(x and m) shape must be equal."""
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
num_units = 2
state_size = num_units * 2
batch_size = 3
input_size = 4
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size - 1, state_size])
with self.assertRaises(ValueError):
g, out_m = rnn_cell_impl.BasicLSTMCell(
num_units, state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
sess.run([g, out_m],
{x.name: 1 * np.ones([batch_size, input_size]),
m.name: 0.1 * np.ones([batch_size - 1, state_size])})
def testBasicLSTMCellStateSizeError(self):
"""Tests that state_size must be num_units * 2."""
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
num_units = 2
state_size = num_units * 3 # state_size must be num_units * 2
batch_size = 3
input_size = 4
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size])
with self.assertRaises(ValueError):
g, out_m = rnn_cell_impl.BasicLSTMCell(
num_units, state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
sess.run([g, out_m],
{x.name: 1 * np.ones([batch_size, input_size]),
m.name: 0.1 * np.ones([batch_size, state_size])})
def testBasicLSTMCellStateTupleType(self):
with self.test_session():
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m0 = (array_ops.zeros([1, 2]),) * 2
m1 = (array_ops.zeros([1, 2]),) * 2
cell = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.BasicLSTMCell(2) for _ in range(2)],
state_is_tuple=True)
self.assertTrue(isinstance(cell.state_size, tuple))
self.assertTrue(
isinstance(cell.state_size[0], rnn_cell_impl.LSTMStateTuple))
self.assertTrue(
isinstance(cell.state_size[1], rnn_cell_impl.LSTMStateTuple))
# Pass in regular tuples
_, (out_m0, out_m1) = cell(x, (m0, m1))
self.assertTrue(isinstance(out_m0, rnn_cell_impl.LSTMStateTuple))
self.assertTrue(isinstance(out_m1, rnn_cell_impl.LSTMStateTuple))
# Pass in LSTMStateTuples
variable_scope.get_variable_scope().reuse_variables()
zero_state = cell.zero_state(1, dtypes.float32)
self.assertTrue(isinstance(zero_state, tuple))
self.assertTrue(isinstance(zero_state[0], rnn_cell_impl.LSTMStateTuple))
self.assertTrue(isinstance(zero_state[1], rnn_cell_impl.LSTMStateTuple))
_, (out_m0, out_m1) = cell(x, zero_state)
self.assertTrue(isinstance(out_m0, rnn_cell_impl.LSTMStateTuple))
self.assertTrue(isinstance(out_m1, rnn_cell_impl.LSTMStateTuple))
def testBasicLSTMCellWithStateTuple(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m0 = array_ops.zeros([1, 4])
m1 = array_ops.zeros([1, 4])
cell = rnn_cell_impl.MultiRNNCell(
[
rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)
for _ in range(2)
],
state_is_tuple=True)
g, (out_m0, out_m1) = cell(x, (m0, m1))
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g, out_m0, out_m1], {
x.name: np.array([[1., 1.]]),
m0.name: 0.1 * np.ones([1, 4]),
m1.name: 0.1 * np.ones([1, 4])
})
self.assertEqual(len(res), 3)
# The numbers in results were not calculated, this is just a smoke test.
# Note, however, these values should match the original
# version having state_is_tuple=False.
self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
expected_mem0 = np.array(
[[0.68967271, 0.68967271, 0.44848421, 0.44848421]])
expected_mem1 = np.array(
[[0.39897051, 0.39897051, 0.24024698, 0.24024698]])
self.assertAllClose(res[1], expected_mem0)
self.assertAllClose(res[2], expected_mem1)
def testLSTMCell(self):
with self.test_session() as sess:
num_units = 8
num_proj = 6
state_size = num_units + num_proj
batch_size = 3
input_size = 2
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size])
cell = rnn_cell_impl.LSTMCell(
num_units=num_units,
num_proj=num_proj,
forget_bias=1.0,
state_is_tuple=False)
output, state = cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([output, state], {
x.name: np.array([[1., 1.], [2., 2.], [3., 3.]]),
m.name: 0.1 * np.ones((batch_size, state_size))
})
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape, (batch_size, num_proj))
self.assertEqual(res[1].shape, (batch_size, state_size))
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
self.assertTrue(
float(np.linalg.norm((res[1][0, :] - res[1][i, :]))) > 1e-6)
def testLSTMCellVariables(self):
with self.test_session():
num_units = 8
num_proj = 6
state_size = num_units + num_proj
batch_size = 3
input_size = 2
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size])
cell = rnn_cell_impl.LSTMCell(
num_units=num_units,
num_proj=num_proj,
forget_bias=1.0,
state_is_tuple=False)
cell(x, m) # Execute to create variables
variables = variables_lib.global_variables()
self.assertEquals(variables[0].op.name, "root/lstm_cell/kernel")
self.assertEquals(variables[1].op.name, "root/lstm_cell/bias")
self.assertEquals(variables[2].op.name,
"root/lstm_cell/projection/kernel")
def testOutputProjectionWrapper(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
cell = contrib_rnn.OutputProjectionWrapper(rnn_cell_impl.GRUCell(3), 2)
g, new_m = cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g, new_m], {
x.name: np.array([[1., 1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1]])
})
self.assertEqual(res[1].shape, (1, 3))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.231907, 0.231907]])
def testInputProjectionWrapper(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 3])
cell = contrib_rnn.InputProjectionWrapper(
rnn_cell_impl.GRUCell(3), num_proj=3)
g, new_m = cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, new_m],
{x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1]])})
self.assertEqual(res[1].shape, (1, 3))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.154605, 0.154605, 0.154605]])
def testResidualWrapper(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
base_cell = rnn_cell_impl.GRUCell(3)
g, m_new = base_cell(x, m)
variable_scope.get_variable_scope().reuse_variables()
g_res, m_new_res = rnn_cell_impl.ResidualWrapper(base_cell)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([g, g_res, m_new, m_new_res], {
x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.1, 0.1]])
})
# Residual connections
self.assertAllClose(res[1], res[0] + [1., 1., 1.])
# States are left untouched
self.assertAllClose(res[2], res[3])
def testResidualWrapperWithSlice(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 5])
m = array_ops.zeros([1, 3])
base_cell = rnn_cell_impl.GRUCell(3)
g, m_new = base_cell(x, m)
variable_scope.get_variable_scope().reuse_variables()
def residual_with_slice_fn(inp, out):
inp_sliced = array_ops.slice(inp, [0, 0], [-1, 3])
return inp_sliced + out
g_res, m_new_res = rnn_cell_impl.ResidualWrapper(
base_cell, residual_with_slice_fn)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res_g, res_g_res, res_m_new, res_m_new_res = sess.run(
[g, g_res, m_new, m_new_res], {
x: np.array([[1., 1., 1., 1., 1.]]),
m: np.array([[0.1, 0.1, 0.1]])
})
# Residual connections
self.assertAllClose(res_g_res, res_g + [1., 1., 1.])
# States are left untouched
self.assertAllClose(res_m_new, res_m_new_res)
def testDeviceWrapper(self):
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
cell = rnn_cell_impl.DeviceWrapper(rnn_cell_impl.GRUCell(3), "/cpu:14159")
outputs, _ = cell(x, m)
self.assertTrue("cpu:14159" in outputs.device.lower())
def testDeviceWrapperDynamicExecutionNodesAreAllProperlyLocated(self):
if not test.is_gpu_available():
# Can't perform this test w/o a GPU
return
with self.test_session(use_gpu=True) as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 1, 3])
cell = rnn_cell_impl.DeviceWrapper(rnn_cell_impl.GRUCell(3), "/gpu:0")
with ops.device("/cpu:0"):
outputs, _ = rnn.dynamic_rnn(
cell=cell, inputs=x, dtype=dtypes.float32)
run_metadata = config_pb2.RunMetadata()
opts = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
sess.run([variables_lib.global_variables_initializer()])
_ = sess.run(outputs, options=opts, run_metadata=run_metadata)
step_stats = run_metadata.step_stats
ix = 0 if "gpu" in step_stats.dev_stats[0].device else 1
gpu_stats = step_stats.dev_stats[ix].node_stats
cpu_stats = step_stats.dev_stats[1 - ix].node_stats
self.assertFalse([s for s in cpu_stats if "gru_cell" in s.node_name])
self.assertTrue([s for s in gpu_stats if "gru_cell" in s.node_name])
def testEmbeddingWrapper(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 1], dtype=dtypes.int32)
m = array_ops.zeros([1, 2])
embedding_cell = contrib_rnn.EmbeddingWrapper(
rnn_cell_impl.GRUCell(2), embedding_classes=3, embedding_size=2)
self.assertEqual(embedding_cell.output_size, 2)
g, new_m = embedding_cell(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g, new_m],
{x.name: np.array([[1]]),
m.name: np.array([[0.1, 0.1]])})
self.assertEqual(res[1].shape, (1, 2))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.17139, 0.17139]])
def testEmbeddingWrapperWithDynamicRnn(self):
with self.test_session() as sess:
with variable_scope.variable_scope("root"):
inputs = ops.convert_to_tensor([[[0], [0]]], dtype=dtypes.int64)
input_lengths = ops.convert_to_tensor([2], dtype=dtypes.int64)
embedding_cell = contrib_rnn.EmbeddingWrapper(
rnn_cell_impl.BasicLSTMCell(1, state_is_tuple=True),
embedding_classes=1,
embedding_size=2)
outputs, _ = rnn.dynamic_rnn(
cell=embedding_cell,
inputs=inputs,
sequence_length=input_lengths,
dtype=dtypes.float32)
sess.run([variables_lib.global_variables_initializer()])
# This will fail if output's dtype is inferred from input's.
sess.run(outputs)
def testMultiRNNCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 4])
_, ml = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.GRUCell(2)
for _ in range(2)], state_is_tuple=False)(x, m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(ml, {
x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1, 0.1]])
})
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res, [[0.175991, 0.175991, 0.13248, 0.13248]])
def testMultiRNNCellWithStateTuple(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m_bad = array_ops.zeros([1, 4])
m_good = (array_ops.zeros([1, 2]), array_ops.zeros([1, 2]))
# Test incorrectness of state
        with self.assertRaisesRegex(ValueError, "Expected state .* a tuple"):
rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.GRUCell(2)
for _ in range(2)], state_is_tuple=True)(x, m_bad)
_, ml = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.GRUCell(2)
for _ in range(2)], state_is_tuple=True)(x, m_good)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(ml, {
x.name: np.array([[1., 1.]]),
m_good[0].name: np.array([[0.1, 0.1]]),
m_good[1].name: np.array([[0.1, 0.1]])
})
# The numbers in results were not calculated, this is just a
# smoke test. However, these numbers should match those of
# the test testMultiRNNCell.
self.assertAllClose(res[0], [[0.175991, 0.175991]])
self.assertAllClose(res[1], [[0.13248, 0.13248]])
class DropoutWrapperTest(test.TestCase):
def _testDropoutWrapper(self, batch_size=None, time_steps=None,
parallel_iterations=None, **kwargs):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
if batch_size is None and time_steps is None:
# 2 time steps, batch size 1, depth 3
batch_size = 1
time_steps = 2
x = constant_op.constant(
[[[2., 2., 2.]], [[1., 1., 1.]]], dtype=dtypes.float32)
m = rnn_cell_impl.LSTMStateTuple(
*[constant_op.constant([[0.1, 0.1, 0.1]], dtype=dtypes.float32)
] * 2)
else:
x = constant_op.constant(
np.random.randn(time_steps, batch_size, 3).astype(np.float32))
m = rnn_cell_impl.LSTMStateTuple(*[
constant_op.constant(
[[0.1, 0.1, 0.1]] * batch_size, dtype=dtypes.float32)
] * 2)
outputs, final_state = rnn.dynamic_rnn(
cell=rnn_cell_impl.DropoutWrapper(
rnn_cell_impl.LSTMCell(3), dtype=x.dtype, **kwargs),
time_major=True,
parallel_iterations=parallel_iterations,
inputs=x,
initial_state=m)
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([outputs, final_state])
self.assertEqual(res[0].shape, (time_steps, batch_size, 3))
self.assertEqual(res[1].c.shape, (batch_size, 3))
self.assertEqual(res[1].h.shape, (batch_size, 3))
return res
def testDropoutWrapperKeepAllConstantInput(self):
keep = array_ops.ones([])
res = self._testDropoutWrapper(
input_keep_prob=keep, output_keep_prob=keep, state_keep_prob=keep)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]],
[[0.895509, 0.895509, 0.895509]]], dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
self.assertAllClose(true_full_output, res[0])
self.assertAllClose(true_full_output[1], res[1].h)
self.assertAllClose(true_full_final_c, res[1].c)
def testDropoutWrapperKeepAll(self):
keep = variable_scope.get_variable("all", initializer=1.0)
res = self._testDropoutWrapper(
input_keep_prob=keep, output_keep_prob=keep, state_keep_prob=keep)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]],
[[0.895509, 0.895509, 0.895509]]], dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
self.assertAllClose(true_full_output, res[0])
self.assertAllClose(true_full_output[1], res[1].h)
self.assertAllClose(true_full_final_c, res[1].c)
def testDropoutWrapperWithSeed(self):
keep_some = 0.5
random_seed.set_random_seed(2)
## Use parallel_iterations = 1 in both calls to
## _testDropoutWrapper to ensure the (per-time step) dropout is
## consistent across both calls. Otherwise the seed may not end
## up being munged consistently across both graphs.
res_standard_1 = self._testDropoutWrapper(
input_keep_prob=keep_some, output_keep_prob=keep_some,
state_keep_prob=keep_some, seed=10,
parallel_iterations=1)
# Clear away the graph and the test session (which keeps variables around)
ops.reset_default_graph()
self._ClearCachedSession()
random_seed.set_random_seed(2)
res_standard_2 = self._testDropoutWrapper(
input_keep_prob=keep_some, output_keep_prob=keep_some,
state_keep_prob=keep_some, seed=10,
parallel_iterations=1)
self.assertAllClose(res_standard_1[0], res_standard_2[0])
self.assertAllClose(res_standard_1[1].c, res_standard_2[1].c)
self.assertAllClose(res_standard_1[1].h, res_standard_2[1].h)
def testDropoutWrapperKeepNoOutput(self):
keep_all = variable_scope.get_variable("all", initializer=1.0)
keep_none = variable_scope.get_variable("none", initializer=1e-10)
res = self._testDropoutWrapper(
input_keep_prob=keep_all, output_keep_prob=keep_none,
state_keep_prob=keep_all)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]],
[[0.895509, 0.895509, 0.895509]]], dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
self.assertAllClose(np.zeros(res[0].shape), res[0])
self.assertAllClose(true_full_output[1], res[1].h)
self.assertAllClose(true_full_final_c, res[1].c)
def testDropoutWrapperKeepNoState(self):
keep_all = variable_scope.get_variable("all", initializer=1.0)
keep_none = variable_scope.get_variable("none", initializer=1e-10)
res = self._testDropoutWrapper(
input_keep_prob=keep_all, output_keep_prob=keep_all,
state_keep_prob=keep_none)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]],
[[0.895509, 0.895509, 0.895509]]], dtype=np.float32)
self.assertAllClose(true_full_output[0], res[0][0])
# Second output is modified by zero input state
self.assertGreater(np.linalg.norm(true_full_output[1] - res[0][1]), 1e-4)
self.assertAllClose(np.zeros(res[1].h.shape), res[1].h)
self.assertAllClose(np.zeros(res[1].c.shape), res[1].c)
def testDropoutWrapperKeepNoInput(self):
keep_all = variable_scope.get_variable("all", initializer=1.0)
keep_none = variable_scope.get_variable("none", initializer=1e-10)
true_full_output = np.array(
[[[0.751109, 0.751109, 0.751109]],
[[0.895509, 0.895509, 0.895509]]], dtype=np.float32)
true_full_final_c = np.array(
[[1.949385, 1.949385, 1.949385]], dtype=np.float32)
# All outputs are different because inputs are zeroed out
res = self._testDropoutWrapper(
input_keep_prob=keep_none, output_keep_prob=keep_all,
state_keep_prob=keep_all)
self.assertGreater(np.linalg.norm(res[0] - true_full_output), 1e-4)
self.assertGreater(np.linalg.norm(res[1].h - true_full_output[1]), 1e-4)
self.assertGreater(np.linalg.norm(res[1].c - true_full_final_c), 1e-4)
def testDropoutWrapperRecurrentOutput(self):
keep_some = 0.8
keep_all = variable_scope.get_variable("all", initializer=1.0)
res = self._testDropoutWrapper(
input_keep_prob=keep_all, output_keep_prob=keep_some,
state_keep_prob=keep_all, variational_recurrent=True,
input_size=3, batch_size=5, time_steps=7)
# Ensure the same dropout pattern for all time steps
output_mask = np.abs(res[0]) > 1e-6
for m in output_mask[1:]:
self.assertAllClose(output_mask[0], m)
def testDropoutWrapperRecurrentStateInputAndOutput(self):
keep_some = 0.9
res = self._testDropoutWrapper(
input_keep_prob=keep_some, output_keep_prob=keep_some,
state_keep_prob=keep_some, variational_recurrent=True,
input_size=3, batch_size=5, time_steps=7)
# Smoke test for the state/input masks.
output_mask = np.abs(res[0]) > 1e-6
for time_step in output_mask:
# Ensure the same dropout output pattern for all time steps
self.assertAllClose(output_mask[0], time_step)
for batch_entry in time_step:
# Assert all batch entries get the same mask
self.assertAllClose(batch_entry, time_step[0])
# For state, ensure all batch entries have the same mask
state_c_mask = np.abs(res[1].c) > 1e-6
state_h_mask = np.abs(res[1].h) > 1e-6
for batch_entry in state_c_mask:
self.assertAllClose(batch_entry, state_c_mask[0])
for batch_entry in state_h_mask:
self.assertAllClose(batch_entry, state_h_mask[0])
def testDropoutWrapperRecurrentStateInputAndOutputWithSeed(self):
keep_some = 0.9
random_seed.set_random_seed(2347)
np.random.seed(23487)
res0 = self._testDropoutWrapper(
input_keep_prob=keep_some, output_keep_prob=keep_some,
state_keep_prob=keep_some, variational_recurrent=True,
input_size=3, batch_size=5, time_steps=7, seed=-234987)
ops.reset_default_graph()
self._ClearCachedSession()
random_seed.set_random_seed(2347)
np.random.seed(23487)
res1 = self._testDropoutWrapper(
input_keep_prob=keep_some, output_keep_prob=keep_some,
state_keep_prob=keep_some, variational_recurrent=True,
input_size=3, batch_size=5, time_steps=7, seed=-234987)
output_mask = np.abs(res0[0]) > 1e-6
for time_step in output_mask:
# Ensure the same dropout output pattern for all time steps
self.assertAllClose(output_mask[0], time_step)
for batch_entry in time_step:
# Assert all batch entries get the same mask
self.assertAllClose(batch_entry, time_step[0])
# For state, ensure all batch entries have the same mask
state_c_mask = np.abs(res0[1].c) > 1e-6
state_h_mask = np.abs(res0[1].h) > 1e-6
for batch_entry in state_c_mask:
self.assertAllClose(batch_entry, state_c_mask[0])
for batch_entry in state_h_mask:
self.assertAllClose(batch_entry, state_h_mask[0])
# Ensure seeded calculation is identical.
self.assertAllClose(res0[0], res1[0])
self.assertAllClose(res0[1].c, res1[1].c)
self.assertAllClose(res0[1].h, res1[1].h)
class SlimRNNCellTest(test.TestCase):
def testBasicRNNCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m = array_ops.zeros([1, 2])
my_cell = functools.partial(basic_rnn_cell, num_units=2)
# pylint: disable=protected-access
g, _ = rnn_cell_impl._SlimRNNCell(my_cell)(x, m)
# pylint: enable=protected-access
sess.run([variables_lib.global_variables_initializer()])
res = sess.run(
[g], {x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])})
self.assertEqual(res[0].shape, (1, 2))
def testBasicRNNCellMatch(self):
batch_size = 32
input_size = 100
num_units = 10
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
inputs = random_ops.random_uniform((batch_size, input_size))
_, initial_state = basic_rnn_cell(inputs, None, num_units)
rnn_cell = rnn_cell_impl.BasicRNNCell(num_units)
outputs, state = rnn_cell(inputs, initial_state)
variable_scope.get_variable_scope().reuse_variables()
my_cell = functools.partial(basic_rnn_cell, num_units=num_units)
# pylint: disable=protected-access
slim_cell = rnn_cell_impl._SlimRNNCell(my_cell)
# pylint: enable=protected-access
slim_outputs, slim_state = slim_cell(inputs, initial_state)
self.assertEqual(slim_outputs.get_shape(), outputs.get_shape())
self.assertEqual(slim_state.get_shape(), state.get_shape())
sess.run([variables_lib.global_variables_initializer()])
res = sess.run([slim_outputs, slim_state, outputs, state])
self.assertAllClose(res[0], res[2])
self.assertAllClose(res[1], res[3])
def basic_rnn_cell(inputs, state, num_units, scope=None):
if state is None:
if inputs is not None:
batch_size = inputs.get_shape()[0]
dtype = inputs.dtype
else:
batch_size = 0
dtype = dtypes.float32
init_output = array_ops.zeros(
array_ops.stack([batch_size, num_units]), dtype=dtype)
init_state = array_ops.zeros(
array_ops.stack([batch_size, num_units]), dtype=dtype)
init_output.set_shape([batch_size, num_units])
init_state.set_shape([batch_size, num_units])
return init_output, init_state
else:
with variable_scope.variable_scope(scope, "basic_rnn_cell",
[inputs, state]):
output = math_ops.tanh(linear([inputs, state], num_units, True))
return output, output
if __name__ == "__main__":
test.main()
|
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Defines interface for DB access.
The underlying driver is loaded as a :class:`LazyPluggable`.
Functions in this module are imported into the rally.common.db namespace.
Call these functions from rally.common.db namespace, not the
rally.common.db.api namespace.
All functions in this module return objects that implement a dictionary-like
interface. Currently, many of these objects are sqlalchemy objects that
implement a dictionary interface. However, a future goal is to have all of
these objects be simple dictionaries.
**Related Flags**
:backend: string to lookup in the list of LazyPluggable backends.
`sqlalchemy` is the only supported backend right now.
:connection: string specifying the sqlalchemy connection to use, like:
`sqlite:///var/lib/cinder/cinder.sqlite`.
:enable_new_services: when adding a new service to the database, is it in the
pool of available hardware (Default: True)
"""
import datetime as dt
import functools
import tempfile
import time
from oslo_db import exception as db_exc
from oslo_db import options as db_options
from oslo_db.sqlalchemy import session as db_session
import sqlalchemy as sa
import sqlalchemy.orm # noqa
from rally.common import cfg
from rally.common.db import models
from rally import consts
from rally import exceptions
from rally.task.processing import charts
CONF = cfg.CONF
db_options.set_defaults(
CONF, connection="sqlite:///%s/rally.sqlite" % tempfile.gettempdir())
_FACADE = None
_SESSION_MAKER = None
def _create_facade_lazily():
global _FACADE
if _FACADE is None:
_FACADE = db_session.EngineFacade.from_config(CONF)
return _FACADE
def get_engine():
facade = _create_facade_lazily()
return facade.get_engine()
def get_session():
global _SESSION_MAKER
if not _SESSION_MAKER:
_SESSION_MAKER = sa.orm.sessionmaker()
_SESSION_MAKER.configure(bind=get_engine())
return _SESSION_MAKER()
def engine_reset():
global _FACADE, _SESSION_MAKER
_FACADE = None
_SESSION_MAKER = None
def serialize(data):
if data is None:
return None
if isinstance(data, (int,
str,
dt.date,
dt.time,
float,
)):
return data
if isinstance(data, models.RallyBase):
result = data.as_dict()
for k in result:
result[k] = serialize(result[k])
return result
if isinstance(data, (list, tuple)):
return [serialize(d) for d in data]
if isinstance(data, dict):
result = {}
for k in data:
result[k] = serialize(data[k])
return result
raise ValueError("Failed to serialize %r data type." % type(data).__name__)
def with_session(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
session = get_session()
session.expire_on_commit = False
try:
result = f(session, *args, **kwargs)
session.commit()
except Exception:
session.rollback()
raise
finally:
session.close()
return serialize(result)
return wrapper
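# A minimal usage sketch (the helper below is hypothetical, not part of the
# Rally API): ``with_session`` injects a fresh SQLAlchemy session as the first
# argument, commits on success, rolls back on error, closes the session and
# serializes the result into plain Python objects before returning it.
@with_session
def _example_count_tasks(session, status=None):
    query = session.query(models.Task)
    if status is not None:
        query = query.filter_by(status=status)
    return query.count()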
@with_session
def tags_get(session, uuid, tag_type):
query = session.query(models.Tag.tag).filter_by(uuid=uuid, type=tag_type)
return [t.tag for t in query.distinct().all()]
def _uuids_by_tags_get(session, tag_type, tags):
tags = (session.query(models.Tag.uuid)
.filter(models.Tag.type == tag_type,
models.Tag.tag.in_(tags)).distinct())
return [t.uuid for t in tags.all()]
def _task_workload_data_get_all(session, workload_uuid):
results = (session.query(models.WorkloadData)
.filter_by(workload_uuid=workload_uuid)
.order_by(models.WorkloadData.chunk_order.asc()))
return sorted([raw for workload_data in results
for raw in workload_data.chunk_data["raw"]],
key=lambda x: x["timestamp"])
def _subtasks_get_all_by_task_uuid(session, task_uuid):
result = session.query(models.Subtask).filter_by(task_uuid=task_uuid).all()
subtasks = []
for subtask in result:
subtask = subtask.as_dict()
subtask["workloads"] = []
workloads = (session.query(models.Workload).
filter_by(subtask_uuid=subtask["uuid"]).all())
for workload in workloads:
workload = workload.as_dict()
workload["data"] = _task_workload_data_get_all(
session, workload["uuid"])
subtask["workloads"].append(workload)
subtasks.append(subtask)
return subtasks
@with_session
def task_get(session, uuid=None, detailed=False):
task = session.query(models.Task).filter_by(uuid=uuid).first()
if not task:
raise exceptions.DBRecordNotFound(
criteria="uuid: %s" % uuid, table="tasks")
task = task.as_dict()
task["tags"] = sorted(tags_get(uuid, consts.TagType.TASK))
if detailed:
task["subtasks"] = _subtasks_get_all_by_task_uuid(session, uuid)
return task
@with_session
def task_get_status(session, uuid=None):
task = (session.query(models.Task)
.options(sa.orm.load_only("status"))
.filter_by(uuid=uuid).first())
if not task:
raise exceptions.DBRecordNotFound(
criteria="uuid: %s" % uuid, table="tasks")
return task.status
@with_session
def task_create(session, values):
tags = values.pop("tags", [])
# TODO(ikhudoshyn): currently 'input_task'
# does not come in 'values'
# After completely switching to the new
# DB schema in API we should reconstruct
# input_task's from associated workloads
# the same is true for 'pass_sla',
# 'task_duration', 'validation_result'
# and 'validation_duration'
task = models.Task(**values)
session.add(task)
session.commit()
task = task.as_dict()
if tags:
session.bulk_save_objects(
[models.Tag(uuid=task["uuid"], tag=t,
type=consts.TagType.TASK)
for t in set(tags)])
task["tags"] = tags
return task
@with_session
def task_update(session, uuid, values):
values.pop("uuid", None)
tags = values.pop("tags", None)
task = session.query(models.Task).filter_by(uuid=uuid).first()
if not task:
raise exceptions.DBRecordNotFound(
criteria="uuid: %s" % uuid, table="tasks")
task.update(values)
task = task.as_dict()
if tags is not None:
# TODO(boris-42): create separate method for tags editing
tags_in_db = session.query(models.Tag.tag).filter_by(
uuid=uuid, type=consts.TagType.TASK).distinct()
new_tags = set(tags) - set(tags_in_db)
removed_tags = set(tags_in_db) - set(tags)
(session.query(models.Tag)
.filter_by(uuid=uuid, type=consts.TagType.TASK)
.filter(models.Tag.tag.in_(removed_tags))
.delete(synchronize_session=False))
if new_tags:
session.bulk_save_objects(
[models.Tag(uuid=uuid, tag=t, type=consts.TagType.TASK)
for t in set(new_tags)])
task["tags"] = tags
else:
task["tags"] = []
return task
@with_session
def task_update_status(session, uuid, status, allowed_statuses):
result = (session.query(models.Task)
.filter(models.Task.uuid == uuid,
models.Task.status.in_(allowed_statuses))
.update({"status": status}, synchronize_session=False))
if not result:
raise exceptions.DBRecordNotFound(
criteria="uuid=%(uuid)s and status in [%(statuses)s]"
% {"uuid": uuid, "statuses": ", ".join(allowed_statuses)},
table="tasks")
return result
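# A minimal usage sketch (hypothetical helper; the particular status
# transition is an assumption, not something this module defines): because the
# UPDATE is filtered by ``allowed_statuses``, the transition is atomic and a
# concurrent change surfaces as ``DBRecordNotFound`` instead of being silently
# overwritten.
def _example_mark_task_running(task_uuid):
    return task_update_status(task_uuid, consts.TaskStatus.RUNNING,
                              allowed_statuses=[consts.TaskStatus.INIT])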
@with_session
def task_list(session, status=None, env=None, tags=None, uuids_only=False):
tasks = []
query = session.query(models.Task)
filters = {}
if status is not None:
filters["status"] = status
if env is not None:
filters["env_uuid"] = env_get(env)["uuid"]
if filters:
query = query.filter_by(**filters)
if tags:
uuids = _uuids_by_tags_get(session, consts.TagType.TASK, tags)
if not uuids:
return []
query = query.filter(models.Task.uuid.in_(uuids))
if uuids_only:
query = query.options(sa.orm.load_only("uuid"))
for task in query.all():
task = task.as_dict()
if not uuids_only:
task["tags"] = sorted(tags_get(task["uuid"], consts.TagType.TASK))
tasks.append(task)
return tasks
@with_session
def task_delete(session, uuid, status=None):
(session.query(models.WorkloadData).filter_by(task_uuid=uuid).
delete(synchronize_session=False))
(session.query(models.Workload).filter_by(task_uuid=uuid).
delete(synchronize_session=False))
(session.query(models.Subtask).filter_by(task_uuid=uuid).
delete(synchronize_session=False))
(session.query(models.Tag).filter_by(
uuid=uuid, type=consts.TagType.TASK).
delete(synchronize_session=False))
query = session.query(models.Task).filter_by(uuid=uuid)
if status:
count = query.filter_by(status=status).delete(
synchronize_session="fetch")
else:
count = query.delete(synchronize_session="fetch")
if not count:
if status is not None:
task = query.first()
if task:
raise exceptions.DBConflict(
"Task `%(uuid)s` in `%(actual)s` status but "
"`%(require)s` is required."
% {"uuid": uuid,
"require": status, "actual": task.status})
raise exceptions.DBRecordNotFound(
criteria="uuid: %s" % uuid, table="tasks")
@with_session
def subtask_create(session, task_uuid, title, description=None, contexts=None):
subtask = models.Subtask(task_uuid=task_uuid,
title=title,
description=description or "",
contexts=contexts or {})
session.add(subtask)
return subtask
@with_session
def subtask_update(session, subtask_uuid, values):
subtask = session.query(models.Subtask).filter_by(
uuid=subtask_uuid).first()
subtask.update(values)
return subtask
@with_session
def workload_get(session, workload_uuid):
return session.query(models.Workload).filter_by(uuid=workload_uuid).first()
@with_session
def workload_create(session, task_uuid, subtask_uuid, name, description,
position, runner, runner_type, hooks, contexts, sla, args):
workload = models.Workload(task_uuid=task_uuid,
subtask_uuid=subtask_uuid,
name=name,
description=description,
position=position,
runner=runner,
runner_type=runner_type,
hooks=hooks,
contexts=contexts or {},
sla=sla,
args=args)
session.add(workload)
return workload
@with_session
def workload_data_create(session, task_uuid, workload_uuid, chunk_order, data):
workload_data = models.WorkloadData(task_uuid=task_uuid,
workload_uuid=workload_uuid)
raw_data = data.get("raw", [])
iter_count = len(raw_data)
failed_iter_count = 0
started_at = float("inf")
finished_at = 0
for d in raw_data:
if d.get("error"):
failed_iter_count += 1
timestamp = d["timestamp"]
duration = d["duration"]
finished = timestamp + duration
if timestamp < started_at:
started_at = timestamp
if finished > finished_at:
finished_at = finished
now = time.time()
if started_at == float("inf"):
started_at = now
if finished_at == 0:
finished_at = now
workload_data.update({
"task_uuid": task_uuid,
"workload_uuid": workload_uuid,
"chunk_order": chunk_order,
"iteration_count": iter_count,
"failed_iteration_count": failed_iter_count,
"chunk_data": {"raw": raw_data},
# TODO(ikhudoshyn)
"chunk_size": 0,
"compressed_chunk_size": 0,
"started_at": dt.datetime.fromtimestamp(started_at),
"finished_at": dt.datetime.fromtimestamp(finished_at)
})
session.add(workload_data)
return workload_data
@with_session
def workload_set_results(session, workload_uuid, subtask_uuid, task_uuid,
load_duration, full_duration, start_time,
sla_results, contexts_results, hooks_results=None):
workload_results = _task_workload_data_get_all(session, workload_uuid)
iter_count = len(workload_results)
failed_iter_count = 0
max_duration = None
min_duration = None
for d in workload_results:
if d.get("error"):
failed_iter_count += 1
duration = d.get("duration", 0)
if max_duration is None or duration > max_duration:
max_duration = duration
if min_duration is None or min_duration > duration:
min_duration = duration
durations_stat = charts.MainStatsTable(
{"total_iteration_count": iter_count})
for itr in workload_results:
durations_stat.add_iteration(itr)
sla = sla_results or []
# NOTE(ikhudoshyn): we call it 'pass_sla'
# for the sake of consistency with other models
# so if no SLAs were specified, then we assume pass_sla == True
success = all([s.get("success") for s in sla])
session.query(models.Workload).filter_by(
uuid=workload_uuid).update(
{
"sla_results": {"sla": sla},
"contexts_results": contexts_results,
"hooks": hooks_results or [],
"load_duration": load_duration,
"full_duration": full_duration,
"min_duration": min_duration,
"max_duration": max_duration,
"total_iteration_count": iter_count,
"failed_iteration_count": failed_iter_count,
"start_time": start_time,
"statistics": {"durations": durations_stat.to_dict()},
"pass_sla": success}
)
task_values = {
"task_duration": models.Task.task_duration + load_duration}
if not success:
task_values["pass_sla"] = False
subtask_values = {
"duration": models.Subtask.duration + load_duration}
if not success:
subtask_values["pass_sla"] = False
session.query(models.Task).filter_by(uuid=task_uuid).update(
task_values)
session.query(models.Subtask).filter_by(uuid=subtask_uuid).update(
subtask_values)
@with_session
def env_get(session, uuid_or_name):
env = (session.query(models.Env)
.filter(sa.or_(models.Env.uuid == uuid_or_name,
models.Env.name == uuid_or_name))
.first())
if not env:
raise exceptions.DBRecordNotFound(
criteria="uuid or name is %s" % uuid_or_name, table="envs")
return env
@with_session
def env_get_status(session, uuid):
resp = (session.query(models.Env)
.filter_by(uuid=uuid)
.options(sa.orm.load_only("status"))
.first())
if not resp:
raise exceptions.DBRecordNotFound(
criteria="uuid: %s" % uuid, table="envs")
return resp.status
@with_session
def env_list(session, status=None):
query = session.query(models.Env)
if status:
query = query.filter_by(status=status)
return query.all()
@with_session
def env_create(session, name, status, description, extras, config,
spec, platforms):
try:
env_uuid = models.UUID()
for p in platforms:
p["env_uuid"] = env_uuid
env = models.Env(
name=name, uuid=env_uuid,
status=status, description=description,
extras=extras, config=config, spec=spec
)
session.add(env)
session.commit()
session.bulk_save_objects(
[models.Platform(**p) for p in platforms])
except db_exc.DBDuplicateEntry:
raise exceptions.DBRecordExists(
field="name", value=name, table="envs")
return env
@with_session
def env_rename(session, uuid, old_name, new_name):
try:
return bool(session.query(models.Env)
.filter_by(uuid=uuid, name=old_name)
.update({"name": new_name}))
except db_exc.DBDuplicateEntry:
raise exceptions.DBRecordExists(
field="name", value=new_name, table="envs")
@with_session
def env_update(session, uuid, description=None, extras=None, config=None):
values = {}
if description is not None:
values["description"] = description
if extras is not None:
values["extras"] = extras
if config is not None:
values["config"] = config
if not values:
return True
return bool(session.query(models.Env).filter_by(uuid=uuid).update(values))
@with_session
def env_set_status(session, uuid, old_status, new_status):
count = (session.query(models.Env)
.filter_by(uuid=uuid, status=old_status)
.update({"status": new_status}))
if count:
return True
raise exceptions.DBConflict("Env %s should be in status %s actual %s"
% (uuid, old_status, env_get_status(uuid)))
@with_session
def env_delete_cascade(session, uuid):
for model in [models.Task, models.Verification, models.Platform]:
session.query(model).filter_by(env_uuid=uuid).delete()
session.query(models.Env).filter_by(uuid=uuid).delete()
@with_session
def platforms_list(session, env_uuid):
return session.query(models.Platform).filter_by(env_uuid=env_uuid).all()
@with_session
def platform_get(session, uuid):
p = session.query(models.Platform).filter_by(uuid=uuid).first()
if not p:
raise exceptions.DBRecordNotFound(
criteria="uuid = %s" % uuid, table="platforms")
return p
@with_session
def platform_set_status(session, uuid, old_status, new_status):
count = (session.query(models.Platform)
.filter_by(uuid=uuid, status=old_status)
.update({"status": new_status}))
if count:
return True
platform = platform_get(uuid)
raise exceptions.DBConflict(
"Platform %s should be in status %s actual %s"
% (uuid, old_status, platform["status"]))
@with_session
def platform_set_data(session, uuid, platform_data=None, plugin_data=None):
values = {}
if platform_data is not None:
values["platform_data"] = platform_data
if plugin_data is not None:
values["plugin_data"] = plugin_data
if not values:
return True
return bool(
session.query(models.Platform).filter_by(uuid=uuid).update(values))
@with_session
def verifier_create(session, name, vtype, platform, source, version,
system_wide, extra_settings=None):
verifier = models.Verifier(name=name, type=vtype, platform=platform,
source=source, extra_settings=extra_settings,
version=version, system_wide=system_wide)
session.add(verifier)
return verifier
@with_session
def verifier_get(session, verifier_id):
return _verifier_get(session, verifier_id)
def _verifier_get(session, verifier_id):
verifier = (session.query(models.Verifier)
.filter(sa.or_(models.Verifier.name == verifier_id,
models.Verifier.uuid == verifier_id))
.first())
if not verifier:
raise exceptions.DBRecordNotFound(
criteria="name or uuid is %s" % verifier_id, table="verifiers")
return verifier
@with_session
def verifier_list(session, status=None):
query = session.query(models.Verifier)
if status:
query = query.filter_by(status=status)
return query.all()
@with_session
def verifier_delete(session, verifier_id):
count = (session.query(models.Verifier)
.filter(sa.or_(models.Verifier.name == verifier_id,
models.Verifier.uuid == verifier_id))
.delete(synchronize_session=False))
if not count:
raise exceptions.DBRecordNotFound(
criteria="name or uuid is %s" % verifier_id, table="verifiers")
@with_session
def verifier_update(session, verifier_id, **properties):
verifier = _verifier_get(session, verifier_id)
verifier.update(properties)
return verifier
@with_session
def verification_create(session, verifier_id, env, tags=None, run_args=None):
verifier = _verifier_get(session, verifier_id)
env = env_get(env)
verification = models.Verification(verifier_uuid=verifier.uuid,
env_uuid=env["uuid"],
run_args=run_args)
session.add(verification)
session.commit()
if tags:
session.bulk_save_objects(
[models.Tag(uuid=verification.uuid, tag=t,
type=consts.TagType.VERIFICATION)
for t in set(tags)]
)
return verification
@with_session
def verification_get(session, verification_uuid):
verification = _verification_get(session, verification_uuid)
verification.tags = sorted(tags_get(verification.uuid,
consts.TagType.VERIFICATION))
return verification
def _verification_get(session, verification_uuid):
verification = session.query(models.Verification).filter_by(
uuid=verification_uuid).first()
if not verification:
raise exceptions.DBRecordNotFound(
criteria="uuid: %s" % verification_uuid, table="verifications")
return verification
@with_session
def verification_list(session,
verifier_id=None, env=None, tags=None, status=None):
filter_by = {}
if verifier_id:
verifier = _verifier_get(session, verifier_id)
filter_by["verifier_uuid"] = verifier.uuid
if env:
env = env_get(env)
filter_by["env_uuid"] = env["uuid"]
if status:
filter_by["status"] = status
query = session.query(models.Verification)
if filter_by:
query = query.filter_by(**filter_by)
if tags:
uuids = _uuids_by_tags_get(session,
consts.TagType.VERIFICATION, tags)
query = query.filter(models.Verification.uuid.in_(uuids))
verifications = [verification.as_dict() for verification in query.all()]
for verification in verifications:
verification["tags"] = sorted(tags_get(verification["uuid"],
consts.TagType.VERIFICATION))
return verifications
@with_session
def verification_delete(session, uuid):
count = session.query(models.Verification).filter_by(uuid=uuid).delete()
if not count:
raise exceptions.DBRecordNotFound(criteria="uuid: %s" % uuid,
table="verifications")
@with_session
def verification_update(session, verification_uuid, **properties):
verification = _verification_get(session, verification_uuid)
verification.update(properties)
return verification
|
|
"""Device, context and memory management on CuPy.
.. note::
The package ``chainer.cuda`` has been renamed to
:mod:`chainer.backends.cuda` as of v4.0.0, but the previous module path
``chainer.cuda`` is also available.
Chainer uses `CuPy <https://cupy.chainer.org/>`_ (through a very thin wrapper)
to exploit the speed of GPU computation. The following modules and classes
defined in CuPy are imported into the :mod:`chainer.backends.cuda` module for
convenience (refer to this table when reading Chainer's source code).
===================================== =================================
imported name original name
===================================== =================================
``chainer.backends.cuda.cupy`` :mod:`cupy`
``chainer.backends.cuda.cupyx`` :mod:`cupyx`
``chainer.backends.cuda.ndarray`` :class:`cupy.ndarray`
``chainer.backends.cuda.cupy.cuda`` :mod:`cupy.cuda`
``chainer.backends.cuda.Device`` :class:`cupy.cuda.Device`
``chainer.backends.cuda.Event`` :class:`cupy.cuda.Event`
``chainer.backends.cuda.Stream`` :class:`cupy.cuda.Stream`
===================================== =================================
Chainer replaces the default allocator of CuPy with its memory pool
implementation, which makes it possible to reuse device memory across multiple
forward/backward computations and to reuse temporary arrays across consecutive
elementwise operations.
"""
import binascii
import functools
import itertools
import os
import threading
import time
import warnings
import numpy
import six
import chainer
from chainer import _backend
from chainer.backends import _cpu
from chainer.backends import intel64
from chainer.configuration import config
import chainerx
available = False
cudnn_enabled = False
try:
import cupy
from cupy import cuda # NOQA
from cupy.cuda import cublas # NOQA
import cupyx # NOQA
import cupyx.scipy.linalg # NOQA
import cupyx.scipy.special # NOQA
from cupy import ndarray # NOQA
from cupy.cuda import Device # NOQA
from cupy.cuda import Event # NOQA
from cupy.cuda import Stream # NOQA
available = True
except Exception as e:
_resolution_error = e
class ndarray(object):
pass # for type testing
# for `xp is cuda.cupy` to always work
cupy = object()
if available:
_cudnn_disabled_by_user = int(os.environ.get('CHAINER_CUDNN', '1')) == 0
try:
import cupy.cudnn
cudnn = cupy.cudnn
cudnn_enabled = not _cudnn_disabled_by_user
except Exception as e:
_resolution_error = e
def check_cuda_available():
"""Checks if CUDA is available.
When CUDA is correctly set up, nothing happens.
Otherwise it raises ``RuntimeError``.
"""
if not available:
msg = ('CUDA environment is not correctly set up\n'
'(see https://github.com/chainer/chainer#installation).')
msg += str(_resolution_error)
raise RuntimeError(msg)
if (not cudnn_enabled and
not _cudnn_disabled_by_user and
not getattr(check_cuda_available, '_already_warned', False)):
warnings.warn(
'cuDNN is not enabled.\n'
'Please reinstall CuPy after you install cudnn\n'
'(see https://docs-cupy.chainer.org/en/stable/install.html'
'#install-cudnn).')
check_cuda_available._already_warned = True
class DummyDeviceType(object):
"""Dummy device class that does nothing with cupy.cuda.Device interface.
This class is used to represent CPU device.
"""
id = -1
def __int__(self):
return -1
def __enter__(self):
return self
def __exit__(self, *args):
pass
def use(self):
pass
def synchronize(self):
pass
def __eq__(self, other):
return isinstance(other, DummyDeviceType)
def __ne__(self, other):
return not (self == other)
DummyDevice = DummyDeviceType()
# ------------------------------------------------------------------------------
# Global states
# ------------------------------------------------------------------------------
if available:
# This is for backward compatibility
memory_pool = cupy.get_default_memory_pool()
pinned_memory_pool = cupy.get_default_pinned_memory_pool()
_integer_types = six.integer_types + (numpy.integer,)
# ------------------------------------------------------------------------------
# Device
# ------------------------------------------------------------------------------
class GpuDevice(_backend.Device):
def __init__(self, device):
check_cuda_available()
assert isinstance(device, Device)
super(GpuDevice, self).__init__()
self.device = device
@staticmethod
def from_device_id(device_id):
check_cuda_available()
if not (isinstance(device_id, _integer_types) and device_id >= 0):
raise ValueError('Invalid CUDA device ID: {}'.format(device_id))
return GpuDevice(Device(device_id))
@staticmethod
def from_array(array):
if isinstance(array, ndarray) and array.device is not None:
return GpuDevice(array.device)
return None
def __eq__(self, other):
return isinstance(other, GpuDevice) and other.device == self.device
def __repr__(self):
return '<{} (cupy):{}>'.format(
self.__class__.__name__, self.device.id)
@property
def xp(self):
return cupy
def create_context(self):
# Creates a new cuda.Device instance because a single cuda.Device
# instance cannot be used across threads.
return Device(self.device.id)
def send_array(self, array):
return _array_to_gpu(array, self.device, None)
def use(self):
self.device.use()
def _get_device(device_spec):
if not available:
return None
if isinstance(device_spec, Device):
return GpuDevice(device_spec)
if (isinstance(device_spec, tuple) and len(device_spec) == 2
and device_spec[0] is cupy
and isinstance(device_spec[1], _integer_types)):
return GpuDevice.from_device_id(device_spec[1])
return None
# ------------------------------------------------------------------------------
# Global states
# ------------------------------------------------------------------------------
def get_device_from_id(device_id):
"""Gets the device from an ID integer.
Args:
device_id (int or None): The ID of the device which this function
returns.
"""
if device_id is not None:
if device_id >= 0:
check_cuda_available()
return Device(int(device_id))
return DummyDevice
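# A minimal usage sketch (hypothetical helper, for illustration only): the
# returned ``cupy.cuda.Device`` acts as a context manager, while ``None`` or a
# negative ID maps to ``DummyDevice``, so the same code also runs on CPU-only
# setups.
def _example_zeros_on_device(shape, device_id=None):
    with get_device_from_id(device_id):
        xp = cupy if (device_id is not None and device_id >= 0) else numpy
        return xp.zeros(shape, dtype=numpy.float32)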
def get_device_from_array(*arrays):
"""Gets the device from a list of CuPy array or a single CuPy array.
.. deprecated:: v6.0.0
This API is deprecated. Please use
:func:`~chainer.backend.get_device_from_array` instead.
    The device on which the given CuPy array resides is returned.
.. note::
This method only recognizes :class:`cupy.ndarray`\\ s in arguments.
Especially note that, unlike :func:`get_array_module`, this method
does not recognize :class:`~chainer.Variable` objects.
If you need to get device from the :class:`~chainer.Variable` instance
``v``, you need to use ``get_device_from_array(v.array)``.
Args:
arrays (:class:`cupy.ndarray` or list of :class:`cupy.ndarray`):
A CuPy array which this function returns the device corresponding
to. If a list of :class:`cupy.ndarray`\\ s are given, it returns
the first device object of an array in the list.
"""
for array in arrays:
if isinstance(array, ndarray) and array.device is not None:
return array.device
return DummyDevice
def get_device(*args):
"""Gets the device from a device object, an ID integer or an array object.
.. note::
This API is deprecated since v3.0.0. Please use
:func:`~chainer.backends.cuda.get_device_from_id`
or :func:`~chainer.backends.cuda.get_device_from_array` instead.
This is a convenient utility to select a correct device if the type of
``arg`` is unknown (i.e., one can use this function on arrays that may be
on CPU or GPU). The returned device object supports the context management
protocol of Python for the *with* statement.
Args:
args: Values to specify a GPU device. The first device object, integer
or :class:`cupy.ndarray` object is used to select a device.
If it is a device object, it is returned. If it is an integer,
the corresponding device is returned. If it is a CuPy array,
            the device on which this array resides is returned. If any
arguments are neither integers nor CuPy arrays, a dummy device
object representing CPU is returned.
Returns:
Device object specified by given ``args``.
.. seealso::
See :class:`cupy.cuda.Device` for the device selection not by arrays.
"""
warnings.warn('get_device is deprecated. Please use get_device_from_id or'
' get_device_from_array instead.', DeprecationWarning)
return _get_cuda_device(*args)
def _get_cuda_device(*args):
# Returns cuda.Device or DummyDevice.
for arg in args:
if type(arg) is not bool and isinstance(arg, _integer_types):
check_cuda_available()
return Device(arg)
if isinstance(arg, ndarray):
if arg.device is None:
continue
return arg.device
if available and isinstance(arg, Device):
return arg
# NOTE: This function returns DummyDevice for both NumPy and ChainerX
return DummyDevice
def _get_device_or_current(device):
# Returns cuda.Device.
# - If cuda.Device instance, it's returned intact.
# - If None, the current device is returned.
# - If non-negative integer, cuda.Device is returned.
# - Otherwise: error.
if device is None:
return cuda.Device()
if isinstance(device, Device):
return device
if not (isinstance(device, _integer_types) and device >= 0):
raise ValueError('Invalid CUDA device specifier: {}'.format(device))
return cuda.Device(int(device))
# ------------------------------------------------------------------------------
# cupy.ndarray allocation and copy
# ------------------------------------------------------------------------------
def to_gpu(array, device=None, stream=None):
"""Copies the given CPU array to the specified device.
Args:
array (*array*, None, list or tuple):
Array or arrays to be sent to GPU.
device: CUDA device specifier. If ``None`` or :data:`cuda.DummyDevice`,
the arrays will be copied to the current CUDA device.
stream (~cupy.cuda.Stream): *(deprecated since v3.0.0)*
CUDA stream. If not ``None``, the copy runs asynchronously.
Returns:
cupy.ndarray, list or tuple: Array or arrays on GPU.
If some of the arrays are already on GPU, then this function just
returns those arrays without performing any copy.
        If the input arrays include `None`, those entries are returned unchanged.
"""
if stream is not None:
warnings.warn(
'The stream option is deprecated in chainer.backends.cuda.to_gpu. '
'Please remove it.', DeprecationWarning)
check_cuda_available()
if device is DummyDevice:
device = cuda.Device()
else:
device = _get_device_or_current(device)
return _backend._convert_arrays(
array, lambda arr: _array_to_gpu(arr, device, stream))
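# A minimal usage sketch (hypothetical helper, for illustration only; assumes
# a CUDA device is available when a non-negative ``device_id`` is given):
# ``to_gpu`` also accepts lists/tuples of arrays and passes ``None`` entries
# through unchanged, so a whole minibatch can be sent in one call.
def _example_send_minibatch(arrays, device_id=0):
    return to_gpu(arrays, device=device_id)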
def _array_to_gpu(array, device, stream):
if array is None:
return None
if isinstance(array, chainerx.ndarray):
# TODO(niboshi): Update this logic once both CuPy and ChainerX support
# the array interface.
if array.device.backend.name == 'cuda':
# Convert to cupy.ndarray on the same device as source array
array = cupy.ndarray(
array.shape,
array.dtype,
cupy.cuda.MemoryPointer(
cupy.cuda.UnownedMemory(
array.data_ptr + array.offset,
array.data_size,
array,
array.device.index),
0),
strides=array.strides)
else:
array = chainerx.to_numpy(array)
elif isinstance(array, (numpy.number, numpy.bool_)):
array = numpy.asarray(array)
elif isinstance(array, intel64.mdarray):
array = numpy.asarray(array)
if isinstance(array, ndarray):
if array.device == device:
return array
is_numpy = False
elif isinstance(array, numpy.ndarray):
is_numpy = True
else:
raise TypeError(
'The array sent to gpu must be an array or a NumPy scalar.'
'\nActual type: {0}.'.format(type(array)))
if stream is not None and stream.ptr != 0:
ret = cupy.empty_like(array)
if is_numpy:
# cpu to gpu
mem = cupy.cuda.alloc_pinned_memory(array.nbytes)
src = numpy.frombuffer(
mem, array.dtype, array.size).reshape(array.shape)
src[...] = array
ret.set(src, stream)
cupy.cuda.pinned_memory._add_to_watch_list(
stream.record(), mem)
else:
# gpu to gpu
with array.device:
src = array.copy()
event = Stream.null.record()
stream.wait_event(event)
ret.data.copy_from_device_async(
src.data, src.nbytes, stream)
# to hold a reference until the end of the asynchronous
# memcpy
stream.add_callback(lambda *x: None, (src, ret))
return ret
with device:
if is_numpy:
return cupy.asarray(array)
# Need to make a copy when an array is copied to another device
return cupy.array(array, copy=True)
def to_cpu(array, stream=None):
"""Copies the given GPU array to host CPU.
Args:
array (*array*, None, list or tuple):
Array or arrays to be sent to CPU.
stream (cupy.cuda.Stream): CUDA stream.
Returns:
numpy.ndarray, list or tuple: Array on CPU.
If some of the arrays are already on CPU, then this function just
returns those arrays without performing any copy.
        If the input arrays include `None`, those entries are returned unchanged.
"""
return _backend._convert_arrays(
array, lambda arr: _array_to_cpu(arr, stream))
def _array_to_cpu(array, stream):
if array is None:
return None
if isinstance(array, ndarray):
check_cuda_available()
with get_device_from_array(array):
return array.get(stream)
return _cpu._array_to_cpu(array)
def copy(array, out=None, out_device=None, stream=None):
"""Copies a :class:`cupy.ndarray` object using the default stream.
This function can copy the device array to the destination array on another
device.
Args:
array (cupy.ndarray): Array to be copied.
out (cupy.ndarray): Destination array.
If it is not ``None``, then ``out_device`` argument is ignored.
out_device: Destination device specifier. Actual device object is
obtained by passing this value to :func:`get_device`.
stream (cupy.cuda.Stream): CUDA stream.
Returns:
cupy.ndarray: Copied array.
If ``out`` is not specified, then the array is allocated on the device
specified by ``out_device`` argument.
"""
# TODO(niboshi): Update docstring not to mention deprecated `get_device`
check_cuda_available()
assert stream is None # TODO(beam2d): FIX IT
if out is None:
if out_device is None:
out_device = array
with _get_device(out_device):
out = cupy.empty_like(array)
with get_device_from_array(array):
cupy.copyto(out, array)
return out
# ------------------------------------------------------------------------------
# Function result memoization
# ------------------------------------------------------------------------------
def memoize(for_each_device=False):
"""Makes a function memoizing the result for each argument and device.
    This is a variant of :func:`cupy.memoize`. The difference is that
    this function can be used in the global scope even if CUDA is not
    available. In such a case, this function does nothing.
.. note::
This decorator acts as a dummy if CUDA is not available. It cannot be
used for general purpose memoization even if ``for_each_device`` is set
to False.
"""
if available:
return cupy.memoize(for_each_device)
def dummy_decorator(f):
@functools.wraps(f)
def ret(*args, **kwargs):
return f(*args, **kwargs)
return ret
return dummy_decorator
def clear_memo():
"""Clears the memoized results for all functions decorated by memoize.
This function works like :func:`cupy.clear_memo` as a counterpart for
:func:`chainer.backends.cuda.memoize`. It can be used even if CUDA is
not available. In such a case, this function does nothing.
"""
if available:
cupy.clear_memo()
# ------------------------------------------------------------------------------
# Kernel definition utility
# ------------------------------------------------------------------------------
@memoize()
def elementwise(in_params, out_params, operation, name, **kwargs):
"""Creates an elementwise kernel function.
This function uses :func:`~chainer.backends.cuda.memoize` to cache the
kernel object, i.e. the resulting kernel object is cached for each argument
combination and CUDA device.
The arguments are the same as those for
:class:`cupy.ElementwiseKernel`, except that the ``name`` argument is
mandatory.
"""
check_cuda_available()
return cupy.ElementwiseKernel(
in_params, out_params, operation, name, **kwargs)
@memoize()
def reduce(in_params, out_params, map_expr, reduce_expr, post_map_expr,
identity, name, **kwargs):
"""Creates a global reduction kernel function.
This function uses :func:`~chainer.backends.cuda.memoize` to cache the
resulting kernel object, i.e. the resulting kernel object is cached for
each argument combination and CUDA device.
The arguments are the same as those for
:class:`cupy.ReductionKernel`, except that the ``name`` argument is
mandatory.
"""
check_cuda_available()
return cupy.ReductionKernel(
in_params, out_params, map_expr, reduce_expr, post_map_expr,
identity, name, **kwargs)
@memoize()
def raw(code, name, *args, **kwargs):
"""Creates a raw kernel function.
This function uses :func:`~chainer.backends.cuda.memoize` to cache the
resulting kernel object, i.e. the resulting kernel object is cached for
each argument combination and CUDA device.
The arguments are the same as those for :class:`cupy.RawKernel`.
"""
check_cuda_available()
return cupy.RawKernel(code, name, *args, **kwargs)
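# A minimal usage sketch (hypothetical helper and kernel name, for
# illustration only; assumes CUDA is available and ``x``/``y`` are CuPy arrays
# when called): a scaled add written with ``elementwise``. Thanks to
# ``memoize``, the kernel is compiled only once per argument combination and
# device.
def _example_scaled_add(x, y, alpha=2.0):
    kernel = elementwise(
        'T x, T y, T alpha', 'T z',
        'z = alpha * x + y', 'example_scaled_add')
    return kernel(x, y, x.dtype.type(alpha))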
# ------------------------------------------------------------------------------
# numpy/cupy compatible coding
# ------------------------------------------------------------------------------
def get_array_module(*args):
"""Gets an appropriate one from :mod:`numpy` or :mod:`cupy`.
This is almost equivalent to :func:`cupy.get_array_module`. The differences
    are that this function can be used even if CUDA is not available and that,
    for :class:`~chainer.Variable` arguments, it returns the array module of
    their underlying data arrays.
.. deprecated:: v5.0.0
This API is deprecated. Please use
:func:`~chainer.backend.get_array_module` instead.
Args:
args: Values to determine whether NumPy or CuPy should be used.
Returns:
module: :mod:`cupy` or :mod:`numpy` is returned based on the types of
the arguments.
"""
return chainer.backend.get_array_module(*args)
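# A minimal usage sketch (hypothetical helper, for illustration only): the
# usual pattern for backend-agnostic code; the same function works for both
# NumPy and CuPy inputs.
def _example_sigmoid(x):
    xp = get_array_module(x)
    return 1.0 / (1.0 + xp.exp(-x))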
def get_max_workspace_size():
"""Gets the workspace size for cuDNN.
Check "cuDNN Library User Guide" for detail.
Returns:
int: The workspace size for cuDNN.
"""
# To avoid error on no cuDNN environment
if cudnn_enabled:
return cudnn.get_max_workspace_size()
return 0
def set_max_workspace_size(size):
"""Sets the workspace size for cuDNN.
Check "cuDNN Library User Guide" for detail.
Args:
size: The workspace size for cuDNN.
"""
# To avoid error on no cuDNN environment
if cudnn_enabled:
cudnn.set_max_workspace_size(size)
def fuse(*args, **kwargs):
"""Function fusing decorator.
It calls :func:`cupy.fuse` when CuPy is available to make fused function
and does nothing otherwise.
.. seealso::
:func:`cupy.fuse`
"""
if available:
return cupy.fuse(*args, **kwargs)
elif len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
return args[0]
else:
return lambda f: f
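# A minimal usage sketch (hypothetical helper, for illustration only): with
# CuPy installed the elementwise operations below are fused into a single
# kernel; without CuPy the decorator is a no-op and the plain NumPy
# implementation is used.
@fuse()
def _example_fused_interpolate(x, y, w):
    return w * x + (1 - w) * y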
# ------------------------------------------------------------------------------
# cuDNN
# ------------------------------------------------------------------------------
_SHOULD_USE_CUDNN = {
'==always': {'always': True, 'auto': False, 'never': False},
'>=auto': {'always': True, 'auto': True, 'never': False},
}
_cudnn_version = cuda.cudnn.getVersion() if cudnn_enabled else -1
def should_use_cudnn(level, lowest_version=0):
"""Determines if we should use cuDNN.
This function checks ``chainer.config.use_cudnn``,
``chainer.backends.cuda.cudnn_enabled``, and the cuDNN version. Note that
``cudnn_enabled`` flag is fixed at loading of :mod:`chainer` module.
Args:
level (str): cuDNN use level. It must be either ``'==always'`` or
``'>=auto'``. ``'==always'`` indicates that the ``use_cudnn``
config must be ``'always'`` to use cuDNN.
lowest_version (int): Required lowest cuDNN version. It must be
non-negative.
Returns:
bool: ``True`` if the caller should use cuDNN.
"""
if _cudnn_version < lowest_version:
return False
if level not in _SHOULD_USE_CUDNN:
raise ValueError('invalid cuDNN use level: %s '
'(must be either of "==always" or ">=auto")' %
repr(level))
flags = _SHOULD_USE_CUDNN[level]
use_cudnn = config.use_cudnn
if use_cudnn not in flags:
raise ValueError('invalid use_cudnn configuration: %s '
'(must be either of "always", "auto", or "never")' %
repr(use_cudnn))
return flags[use_cudnn]
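# A minimal usage sketch (hypothetical helper and version requirement, for
# illustration only): the typical call-site pattern; honour the
# ``chainer.config.use_cudnn`` setting and require a minimum cuDNN version
# before taking the cuDNN code path.
def _example_select_backend(min_cudnn_version=5000):
    if should_use_cudnn('>=auto', min_cudnn_version):
        return 'cudnn'
    return 'generic'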
_tensor_core_flag = {'always': True, 'auto': None, 'never': False}
def should_use_cudnn_tensor_core(dtype):
"""Determines if Tensor Core should be used.
Args:
dtype (numpy.dtype): data type of input tensor.
Returns:
bool: ``True`` if Tensor Core should be used.
"""
use_cudnn_tensor_core = config.use_cudnn_tensor_core
if use_cudnn_tensor_core not in _tensor_core_flag:
raise ValueError('invalid use_cudnn_tensor_core configuration: %s '
'(must be either of "always", "auto", or "never")' %
repr(use_cudnn_tensor_core))
use_tensor_core = _tensor_core_flag[use_cudnn_tensor_core]
if use_tensor_core is None:
use_tensor_core = cudnn.is_tensor_core_available(dtype)
return use_tensor_core
# ------------------------------------------------------------------------------
# cupy.cudnn utility
# ------------------------------------------------------------------------------
def get_cudnn_dropout_states():
if not cudnn_enabled:
raise RuntimeError('cuDNN is not enabled.')
thread_id = threading.current_thread().ident
return get_cudnn_dropout_states_core(thread_id)
_dropout_states_count = itertools.count()
@memoize(for_each_device=True)
def get_cudnn_dropout_states_core(thread_id):
states_id = next(_dropout_states_count)
seed = os.getenv('CHAINER_SEED')
if seed is None:
try:
seed_str = binascii.hexlify(os.urandom(8))
seed = numpy.uint64(int(seed_str, 16))
except NotImplementedError:
seed = numpy.uint64(time.clock() * 1000000)
else:
seed = numpy.uint64(seed)
seed += numpy.uint64(states_id)
return cudnn.DropoutStates(None, seed)
|
|
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 13 18:36:41 2014
@author: favi
"""
from PyQt4 import QtCore, QtGui
from FsmGraphicHandle import FsmGraphicHandle
class FsmTransition(QtGui.QGraphicsPathItem):
#class FsmTransition(QtGui.QGraphicsLineItem):
def __init__(self, startItem, endItem, parent=None, scene=None):
super(FsmTransition, self).__init__(parent, scene)
self.arrowHead = QtGui.QPainterPath()
#self.path = QtGui.QPainterPath()
self.myStartItem = startItem
self.myEndItem = endItem
self.intermediatePoints = []
self.updatePosition()
self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable, True)
#self.myColor = QtCore.Qt.black
#self.setPen(QtGui.QPen(self.myColor, 1, QtCore.Qt.SolidLine,
# QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin))
def toStore(self):
return "FsmTransition(startItemName='{0}', endItemName='{1}', scene=self)\n".format(self.startItem().stateName, self.endItem().stateName)
def setColor(self, color):
self.myColor = color
def startItem(self):
return self.myStartItem
def endItem(self):
return self.myEndItem
def addIntermediatePoint(self, point):
#print "addIntermediatePoint @({},{})".format(point.x(), point.y())
p = FsmGraphicHandle(parent=self, scene=self.scene())
p.setPos(point) #using setPos instead of constructor position
#because it didn't get the right coordinates
self.intermediatePoints.append(p)
self.updatePosition()
def removeIntermediatePoint(self, p):
if p in self.intermediatePoints:
self.intermediatePoints.remove(p)
def popIntermediatePoint(self):
if len(self.intermediatePoints):
p = self.intermediatePoints.pop()
p.setParentItem(None)
self.scene().removeItem(p)
self.updatePosition()
def getNbOfIntermediatePoints(self):
return len(self.intermediatePoints)
def addEndItem(self, endItem):
self.myEndItem = endItem
self.updatePosition()
def keyPressEvent(self, event):
print "FsmTransition received keyPressEvent: ", event
# Paint related section
def shape(self):
ps = QtGui.QPainterPathStroker()
ps.setWidth(5)
path = self.path()
path.addPath(self.arrowHead)
shapepath = ps.createStroke(path)
return shapepath
def boundingRect(self):
return self.shape().boundingRect()
def updatePosition(self):
def computeControlPoints(K):
'''compute cubicTo control points according to
https://www.particleincell.com/2012/bezier-splines/
input K should be a list of x(or y) coordinates of the
knots
returns two lists of control point x(or y) coordinates of
length=(len(K)-1 )
'''
n=len(K)
#this is the tridiagonal matrix A
a = [1]*(n-3) + [2]
b = [2] + [4]*(n-3) + [7]
c = [1]*(n-2)
#this is rhs
d = [K[0]+2*K[1]]
d +=[4*K[i]+2*K[i+1] for i in range(1, n-2)]
d +=[8*K[n-2]+K[n-1]]
#solve Ax=d with the Thomas algorithm
#TODO optimize it with np
def TDMAsolve(a,b,c,d):
n = len(d)
for i in xrange(n-1):
d[i+1] -= 1. * d[i] * a[i] / b[i]
b[i+1] -= 1. * c[i] * a[i] / b[i]
for i in reversed(xrange(n-1)):
d[i] -= d[i+1] * c[i] / b[i+1]
return [d[i] / b[i] for i in xrange(n)]
p1 = TDMAsolve(a,b,c,d)
p2 = [2*K[i+1]-p1[i+1] for i in range(n-2)] + \
[0.5*(K[n-1]+p1[n-2])]
return (p1,p2)
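        # Worked example (added comment, not in the original): for knots
        # K = [0, 10, 40] the tridiagonal system above yields
        #   p1 ~= [1.67, 16.67] and p2 ~= [3.33, 28.33],
        # i.e. one (c1, c2) control-point pair per cubic segment between
        # consecutive knots.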
#start the path
path = QtGui.QPainterPath(self.myStartItem.pos())
#print self.intermediatePoints, self.myEndItem
        #if the path is at its beginning or it is a straight line to another state...
if (self.myEndItem and len(self.intermediatePoints)==0) or \
(not self.myEndItem and len(self.intermediatePoints)<2):
if self.myEndItem:
path.lineTo(self.myEndItem.pos())
elif self.intermediatePoints:
path.lineTo(self.intermediatePoints[0].pos())
self.lastSubPath = path
else:
itemList = [self.myStartItem]
itemList += self.intermediatePoints
if self.myEndItem:
itemList += [self.myEndItem]
k = [p.scenePos() for p in itemList]
kx = [p.x() for p in k]
ky = [p.y() for p in k]
c1x,c2x = computeControlPoints(kx)
c1y,c2y = computeControlPoints(ky)
c1 = tuple(QtCore.QPointF(x,y) for x,y in zip(c1x,c1y))
c2 = tuple(QtCore.QPointF(x,y) for x,y in zip(c2x,c2y))
for cc1,cc2,kk in zip(c1,c2,k[1:]):
path.cubicTo(cc1,cc2,kk)
# for cc1 in k[1:-1]: #temporary showing knot points -> moved to FsmGraphicHandle
# path.addEllipse(cc1, 2,2)
self.lastSubPath = QtGui.QPainterPath(k[-2])
self.lastSubPath.cubicTo(c1[-1],c2[-1],k[-1])
self.setPath(path)
self.update(self.boundingRect())
def paint(self, painter, option, widget=None):
#display shape for debug
#painter.fillPath(self.shape(), QtCore.Qt.cyan)
if self.isSelected():
            c = QtGui.QColor() #TODO: highlight color should be taken from preferences
c.setHsv(30, 255, 255)
else:
c = QtCore.Qt.black
painter.setPen(c)
painter.setBrush(QtCore.Qt.NoBrush)
path = self.path()
painter.drawPath(path)
#draw arrow
painter.setBrush(c)
#arrow computation should be moved elsewhere
arrowSize = 20
arrowAperture = 20 #degrees
angle = self.lastSubPath.angleAtPercent(1.)+180
if self.myEndItem:
arrowTip = QtCore.QLineF.fromPolar(self.myEndItem.diameter/2, angle).translated(self.myEndItem.pos()).p2()
else:
arrowTip = self.intermediatePoints[-1].pos()
arrowP1 = QtCore.QLineF.fromPolar(arrowSize,angle+arrowAperture).translated(arrowTip).p2()
arrowC1 = QtCore.QLineF.fromPolar(arrowSize/2,angle).translated(arrowTip).p2()
arrowP2 = QtCore.QLineF.fromPolar(arrowSize,angle-arrowAperture).translated(arrowTip).p2()
self.arrowHead = QtGui.QPainterPath()
self.arrowHead.moveTo(arrowTip)
for point in (arrowP1, arrowC1, arrowP2):
self.arrowHead.lineTo(point)
self.arrowHead.closeSubpath()
painter.drawPath(self.arrowHead)
if __name__ == '__main__':
import sys
from MainWindow import MainWindow
from PyQt4.QtTest import QTest
from PyQt4.QtCore import Qt
app = QtGui.QApplication(sys.argv)
mainWindow = MainWindow()
mainWindow.setGeometry(100, 100, 800, 500)
mainWindow.show()
QTest.mouseClick(mainWindow.addStateButton, Qt.LeftButton)
QTest.mouseClick(mainWindow.view.viewport(), Qt.LeftButton, Qt.NoModifier, QtCore.QPoint(400,200))
QTest.mouseClick(mainWindow.view.viewport(), Qt.LeftButton, Qt.NoModifier, QtCore.QPoint(100,250))
QTest.mouseClick(mainWindow.addTransitionButton, Qt.LeftButton)
QTest.mousePress(mainWindow.view.viewport(), Qt.LeftButton, Qt.NoModifier, QtCore.QPoint(400,200))
QTest.mouseMove(mainWindow.view.viewport(), QtCore.QPoint(100,250))
QTest.mouseRelease(mainWindow.view.viewport(), Qt.LeftButton, Qt.NoModifier, QtCore.QPoint(100,250))
sys.exit(app.exec_())
|
|
"""
Define various message strings generated in the code.
As far as possible, user display strings referenced directly by
source code are isolated here to facilitate editing and translation.
"""
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
SITE_NAME_DEFAULT = "Annalist linked data notebook"
ACTION_COMPLETED = "Action completed"
NO_ACTION_PERFORMED = "No action performed"
NO_SELECTION = "(No '%(id)s' selected)"
INPUT_ERROR = "Problem with input"
SYSTEM_ERROR = "System error"
DATA_ERROR = "Problem with data"
UNEXPECTED_FORM_DATA = "Unexpected form data: %r"
MISSING_COLLECTION_ID = "Missing identifier for new collection"
INVALID_COLLECTION_ID = "Invalid identifier for new collection: '%(coll_id)s'"
CREATED_COLLECTION_ID = "Created new collection: '%(coll_id)s'"
NO_COLLECTION_METADATA = "Metadata not found for collection '%(id)s'"
CONFIRM_REQUESTED_ACTION = "Confirm requested action"
ARE_YOU_SURE = "Are you sure?"
CONFIRM_OR_CANCEL = '''Click "Confirm" to continue, or "Cancel" to abort operation'''
ACTION_COMPLETED = "Action completed"
TURTLE_SERIALIZE_ERROR = "Problem generating Turtle serialization from data"
TURTLE_SERIALIZE_REASON = "Internal description of error"
JSONLD_PARSE_ERROR = "Problem parsing JSON-LD data (maybe JSON-LD context)"
JSONLD_PARSE_REASON = "Internal description of error"
INVALID_OPERATION_ATTEMPTED = "Attempt to perform an invalid operation"
INVALID_TYPE_CHANGE = "Change of entity type to or from '_type' is not supported"
INVALID_TYPE_RENAME = "Renaming of Annalist built-in types is not supported"
CREATE_ENTITY_FAILED = "Problem creating/updating entity %s/%s (see log for more info)"
RENAME_ENTITY_FAILED = "Problem renaming entity %s/%s to %s/%s (see log for more info)"
COPY_ENTITY_FAILED = "Problem copying entity %s/%s to %s/%s (see log for more info)"
RENAME_TYPE_FAILED = "Problem renaming type %s to %s (see log for more info)"
IMPORT_ERROR = "Resource import error"
IMPORT_ERROR_REASON = ("Failed to import resource %(import_url)s as %(import_name)s"+
" for %(type_id)s/%(id)s: %(import_exc)s")
IMPORT_DONE = "Resource imported"
IMPORT_DONE_DETAIL = ("Imported <%(resource_url)s>"+
" as %(import_name)s"+
" for entity %(type_id)s/%(id)s")
UPLOAD_ERROR = "File upload error"
UPLOAD_ERROR_REASON = ("Failed to upload file %(uploaded_file)s as %(upload_name)s"+
" for %(type_id)s/%(id)s: %(import_exc)s")
UPLOAD_DONE = "File uploaded"
UPLOAD_DONE_DETAIL = ("Uploaded <%(uploaded_file)s>"+
" as %(upload_name)s"+
" for entity %(type_id)s/%(id)s")
NO_COLLECTION_VIEW = "No collection selected for viewing"
MANY_COLLECTIONS_VIEW = "Too many collections selected for viewing: %(ids)s"
NO_COLLECTION_EDIT = "No collection selected for editing"
MANY_COLLECTIONS_EDIT = "Too many collections selected for editing: %(ids)s"
NO_COLLECTIONS_REMOVE = "No collections selected for removal"
REMOVE_COLLECTIONS = "Remove collection(s): %(ids)s"
MIGRATE_COLLECTION_ERROR = "Error(s) occurred while migrating collection data for %(id)s"
MIGRATED_COLLECTION_DATA = "Migrated data for collection %(id)s"
TOO_MANY_ENTITIES_SEL = "Too many items selected"
NO_ENTITY_FOR_COPY = "No entity selected to copy"
NO_ENTITY_FOR_EDIT = "No entity selected to edit"
NO_ENTITY_FOR_DELETE = "No entity selected to delete"
CANNOT_DELETE_ENTITY = "Entity %(id)s of type %(type_id)s not found or cannot be deleted"
SITE_ENTITY_FOR_DELETE = "Cannot remove site built-in entity %(id)s of type %(type_id)s, or entity not found"
TYPE_VALUES_FOR_DELETE = "Cannot remove type %(id)s with existing values"
REMOVE_ENTITY_DATA = "Remove entity %(id)s of type %(type_id)s in collection %(coll_id)s"
NO_TYPE_FOR_COPY = "No entity type selected to copy"
NO_TYPE_FOR_EDIT = "No entity type selected to edit"
NO_TYPE_FOR_DELETE = "No entity type selected to delete"
NO_VIEW_FOR_COPY = "No entity view selected to copy"
NO_VIEW_FOR_EDIT = "No entity view selected to edit"
NO_VIEW_FOR_DELETE = "No entity view selected to delete"
NO_LIST_FOR_COPY = "No list view selected to copy"
NO_LIST_FOR_EDIT = "No list view selected to edit"
NO_LIST_FOR_DELETE = "No list view selected to delete"
ENTITY_MESSAGE_LABEL = "%(type_id)s/%(entity_id)s in collection %(coll_id)s"
ENTITY_DEFAULT_LABEL = "" # "Entity %(type_id)s/%(entity_id)s in collection %(coll_id)s"
ENTITY_DEFAULT_COMMENT = "" # "Entity %(type_id)s/%(entity_id)s in collection %(coll_id)s"
ENTITY_DOES_NOT_EXIST = "Entity %(type_id)s/%(id)s (%(label)s) does not exist"
ENTITY_COPY_FILE_ERROR = "Failed to copy file %(file)s while copying entity %(id)s to %(src_id)s"
RESOURCE_DOES_NOT_EXIST = "Resource %(ref)s for entity %(id)s does not exist"
RESOURCE_NOT_DEFINED = "Resource %(ref)s is not present for entity %(id)s"
REMOVE_RECORD_TYPE = "Remove entity type %(id)s in collection %(coll_id)s"
REMOVE_RECORD_VIEW = "Remove entity view %(id)s in collection %(coll_id)s"
REMOVE_RECORD_LIST = "Remove list %(id)s in collection %(coll_id)s"
LIST_NOT_DEFINED = "List %(list_id)s/%(list_ref)s is not present for entity type %(type_id)s"
LIST_NOT_ACCESSED = "List %(list_id)s/%(list_ref)s not accessed for entity type %(type_id)s"
SITE_RESOURCE_NOT_DEFINED = "Resource %(ref)s is not recognized for site"
SITE_RESOURCE_NOT_EXIST = "Site resource %(ref)s does not exist"
COLLECTION_ID = "Problem with collection identifier"
COLLECTION_ID_INVALID = "The collection identifier is missing or not a valid identifier"
COLLECTION_LABEL = "Collection %(id)s"
COLLECTION_EXISTS = "Collection %(save_id)s already exists"
COLLECTION_NOT_EXISTS = "Collection %(id)s does not exist"
COLLECTION_REMOVED = "The following collections were removed: %(ids)s"
COLLECTION_NEWER_VERSION = ("Cannot access collection %(id)s, "+
"which was created by software version %(ver)s. "+
"(Update Annalist server software to use this collection)")
COLL_PARENT_NOT_EXIST = "Collection %(id)s references non-existent parent %(parent_id)s"
COLL_RESOURCE_NOT_DEFINED = "Resource %(ref)s is not recognized for collection %(id)s"
COLL_RESOURCE_NOT_EXIST = "Resource %(ref)s for collection %(id)s does not exist"
COLL_MIGRATE_DIR_FAILED = "Collection %(id)s migration %(old_path)s -> %(new_path)s failed. (%(exc)s)"
ANNALIST_USER_ID = "Problem with user identifier"
ANNALIST_USER_ID_INVALID = "The user identifier is missing or not a valid identifier"
ANNALIST_USER_LABEL = "User %(id)s in collection %(coll_id)s"
ANNALIST_USER_EXISTS = "User %(save_id)s in collection %(save_coll)s already exists"
ANNALIST_USER_NOT_EXISTS = "User %(id)s in collection %(coll_id)s does not exist"
ANNALIST_USER_REMOVED = "User %(id)s in collection %(coll_id)s was removed"
RECORD_TYPE_ID = "Problem with entity type identifier"
RECORD_TYPE_ID_INVALID = "The entity type identifier is missing or not a valid identifier"
RECORD_TYPE_LABEL = "Entity type %(id)s in collection %(coll_id)s"
RECORD_TYPE_EXISTS = "Entity type %(save_id)s in collection %(save_coll)s already exists"
RECORD_TYPE_NOT_EXISTS = "Entity type %(id)s in collection %(coll_id)s does not exist"
RECORD_TYPE_REMOVED = "Entity type %(id)s in collection %(coll_id)s was removed"
RECORD_VIEW_ID = "Problem with entity view identifier"
RECORD_VIEW_ID_INVALID = "The entity view identifier is missing or not a valid identifier"
RECORD_VIEW_LABEL = "Entity view %(id)s in collection %(coll_id)s"
RECORD_VIEW_EXISTS = "Entity view %(save_id)s in collection %(save_coll)s already exists"
RECORD_VIEW_NOT_EXISTS = "Entity view %(id)s in collection %(coll_id)s does not exist"
RECORD_VIEW_REMOVED = "Entity view %(id)s in collection %(coll_id)s was removed"
RECORD_VIEW_LOAD_ERROR = "Error loading view '%(id)s', file %(file)s: %(message)s"
DISPLAY_ALTERNATIVE_VIEW = "Displaying alternative view '%(id)s'"
RECORD_LIST_ID = "Problem with list identifier"
RECORD_LIST_ID_INVALID = "The list identifier is missing or not a valid identifier"
RECORD_LIST_LABEL = "List %(id)s in collection %(coll_id)s"
RECORD_LIST_EXISTS = "List %(save_id)s in collection %(save_coll)s already exists"
RECORD_LIST_NOT_EXISTS = "List %(id)s in collection %(coll_id)s does not exist"
RECORD_LIST_REMOVED = "List %(id)s in collection %(coll_id)s was removed"
RECORD_LIST_LOAD_ERROR = "Error loading list '%(id)s', file %(file)s: %(message)s"
DISPLAY_ALTERNATIVE_LIST = "Displaying alternative list '%(id)s'"
RECORD_GROUP_ID = "Problem with field group identifier"
RECORD_GROUP_ID_INVALID = "The field group identifier is missing or not a valid identifier"
RECORD_GROUP_LABEL = "Field group %(id)s in collection %(coll_id)s"
RECORD_GROUP_EXISTS = "Field group %(save_id)s in collection %(save_coll)s already exists"
RECORD_GROUP_NOT_EXISTS = "Field group %(id)s in collection %(coll_id)s does not exist"
RECORD_GROUP_REMOVED = "Field group %(id)s in collection %(coll_id)s was removed"
RECORD_FIELD_ID = "Problem with view field identifier"
RECORD_FIELD_ID_INVALID = "The view field identifier is missing or not a valid identifier"
RECORD_FIELD_LABEL = "View field %(id)s in collection %(coll_id)s"
RECORD_FIELD_EXISTS = "View field %(save_id)s in collection %(save_coll)s already exists"
RECORD_FIELD_NOT_EXISTS = "View field %(id)s in collection %(coll_id)s does not exist"
RECORD_FIELD_REMOVED = "View field %(id)s in collection %(coll_id)s was removed"
RECORD_VOCAB_ID = "Problem with vocabulary identifier"
RECORD_VOCAB_ID_INVALID = "The vocabulary namespace identifier is missing or not a valid identifier"
RECORD_VOCAB_LABEL = "Vocabulary %(id)s in collection %(coll_id)s"
RECORD_VOCAB_EXISTS = "Vocabulary %(save_id)s in collection %(save_coll)s already exists"
RECORD_VOCAB_NOT_EXISTS = "Vocabulary %(id)s in collection %(coll_id)s does not exist"
RECORD_VOCAB_REMOVED = "Vocabulary %(id)s in collection %(coll_id)s was removed"
RECORD_INFO_ID = "Problem with general information record identifier"
RECORD_INFO_ID_INVALID = "General information record identifier is missing or not a valid identifier"
RECORD_INFO_LABEL = "General information record %(id)s in collection %(coll_id)s"
RECORD_INFO_EXISTS = "General information record %(save_id)s in collection %(save_coll)s already exists"
RECORD_INFO_NOT_EXISTS = "General information record %(id)s in collection %(coll_id)s does not exist"
RECORD_INFO_REMOVED = "General information record %(id)s in collection %(coll_id)s was removed"
RECORD_ENUM_ID = "Problem with enumeration type identifier"
RECORD_ENUM_ID_INVALID = "The enumeration type identifier is missing or not a valid identifier"
RECORD_ENUM_LABEL = "Enumeration type %(id)s in collection %(coll_id)s"
RECORD_ENUM_EXISTS = "Enumeration type %(save_id)s in collection %(save_coll)s already exists"
RECORD_ENUM_NOT_EXISTS = "Enumeration type %(id)s in collection %(coll_id)s does not exist"
RECORD_ENUM_REMOVED = "Enumeration type %(id)s in collection %(coll_id)s was removed"
ENTITY_DATA_ID = "Problem with entity identifier"
ENTITY_DATA_ID_INVALID = "The entity identifier is missing, too long, or not a valid identifier"
ENTITY_DATA_LABEL = "Entity %(id)s of type %(type_id)s in collection %(coll_id)s"
ENTITY_DATA_EXISTS = "Entity %(save_id)s of type %(save_type)s in collection %(save_coll)s already exists"
ENTITY_DATA_NOT_EXISTS = "Entity %(id)s of type %(type_id)s in collection %(coll_id)s does not exist"
ENTITY_DATA_REMOVED = "Entity %(id)s of type %(type_id)s in collection %(coll_id)s was removed"
ENTITY_TYPE_ID = "Problem with entity type identifier"
ENTITY_TYPE_ID_INVALID = "The entity type identifier is missing, too long, or not a valid identifier (%(type_id)s)"
ENTITY_LOAD_ERROR = "Error loading '%(id)s', file %(file)s: %(message)s"
DEFAULT_LIST_UPDATED = "Default list view for collection %(coll_id)s changed to %(list_id)s"
DEFAULT_VIEW_UPDATED = "Default view for collection %(coll_id)s changed to %(view_id)s/%(type_id)s/%(entity_id)s"
REMOVE_FIELD_ERROR = "Problem with remove field(s) request"
MOVE_FIELD_ERROR = "Problem with move field up/down request"
NO_FIELD_SELECTED = "No field(s) selected"
CREATE_FIELD_ENTITY_ERROR = "Create new entity error"
NO_REFER_TO_TYPE = "Field '%(field_label)s' does not specify a valid 'Refer to type'"
MISSING_FIELD_LABEL = "(field missing: '%(id)s')"
VIEW_DESCRIPTION_HEADING = "Problem with view description"
VIEW_PROPERTY_DUPLICATE = "Field %(field_id)s repeats use of property %(property_uri)s in view"
UNKNOWN_TASK_ID = "Unknown task Id in form response: %(task_id)s"
NO_VIEW_OR_LIST_SELECTED = "Please select an existing view and/or list as a basis for creating new ones"
TASK_CREATE_VIEW_LIST = "Created new view and/or list for type %(id)s (%(label)s)"
TASK_CREATE_SUBTYPE = "Created subtype %(id)s (%(label)s)"
TASK_CREATE_SUBFIELD = "Created field %(id)s (%(label)s) using subproperty of %(base_uri)s."
TASK_CREATE_MANY_VALUE_FIELD = "Created repeating value field '%(field_id)s' for '%(label)s' (check subfield 'Entity type' is blank, or matches repeat field 'Value type')"
TASK_CREATE_LIST_VALUE_FIELD = "Created sequence of values field '%(field_id)s' for '%(label)s' (check subfield 'Entity type' is blank, or matches repeat field 'Value type')"
TASK_CREATE_REFERENCE_FIELD = "Created reference to field '%(field_id)s'. (Select value for 'Refer to type' on current display, and re-save. Also check subfield 'Entity type' is blank, or matches referring field 'Value type')"
# Strings for data generated by task buttons
# TYPE_COMMENT = (
# "# %(type_label)s\n\n"+
# "Entity type [%(type_label)s]($BASE:_type/%(type_id)s)."
# )
SUBTYPE_COMMENT = (
"# %(type_label)s\n\n"+
"Entity type [%(type_label)s]($BASE:_type/%(type_id)s), "+
"subtype of [%(base_type_label)s]($BASE:_type/%(base_type_id)s)."
)
SUBFIELD_LABEL = (
"@@ Subfield of %(base_field_label)s (%(base_field_id)s)@@"
)
SUBFIELD_COMMENT = (
"# %(field_label)s\n\n"+
"Field [%(field_label)s]($BASE:_field/%(field_id)s), "+
"using property uri %(field_prop_uri)s, "+
"subproperty of [%(base_field_label)s]($BASE:_field/%(base_field_id)s)."
)
TYPE_VIEW_LABEL = "%(type_label)s view"
TYPE_VIEW_COMMENT = (
"# %(type_label)s view\n\n"+
"View entity of type [%(type_label)s]($BASE:_type/%(type_id)s)."
)
TYPE_LIST_LABEL = "%(type_label)s list"
TYPE_LIST_COMMENT = (
"# %(type_label)s list\n\n"+
"List entities of type [%(type_label)s]($BASE:_type/%(type_id)s)."
)
MANY_FIELD_LABEL = "%(field_label)s (repeating)"
MANY_FIELD_COMMENT = (
"# %(field_label)s (repeating)\n\n"+
"Zero, one or more instances of [%(field_label)s]($BASE:_field/%(field_id)s)."
)
MANY_FIELD_PLACEHOLDER = "(Zero, one or more %(field_label)s fields)"
MANY_FIELD_ADD = "Add %(field_label)s"
MANY_FIELD_DELETE = "Remove %(field_label)s"
LIST_FIELD_LABEL = "%(field_label)s (sequence)"
LIST_FIELD_COMMENT = (
"# %(field_label)s (sequence)\n\n"+
"List of [%(field_label)s]($BASE:_field/%(field_id)s) fields."
)
LIST_FIELD_PLACEHOLDER = "(Sequence of %(field_label)s fields)"
LIST_FIELD_ADD = "Add %(field_label)s"
LIST_FIELD_DELETE = "Remove %(field_label)s"
FIELD_REF_LABEL = "%(field_label)s (ref)"
FIELD_REF_COMMENT = "%(field_label)s (ref)"
FIELD_REF_PLACEHOLDER = "(Reference to %(field_label)s field)"
# Other strings
COLL_README_HEAD = (
"# %(label)s\n\r"+
"\n\r"+
""
)
COLL_README = (
"# Annalist collection `%(id)s`\n\r"+
"\n\r"+
"This directory contains an [Annalist](http://annalist.net) data collection.\n\r"+
"\n\r"+
"%(heading)s"+
"%(comment)s"+
"\n\r"+
# "\n\r"+
"")
# End.
|
|
#!/usr/bin/env python
#
# Electrum - lightweight ParkByte client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from collections import namedtuple
import traceback
import sys
import os
import imp
import pkgutil
import time
from util import *
from i18n import _
from util import profiler, PrintError, DaemonThread, UserCancelled
class Plugins(DaemonThread):
@profiler
def __init__(self, config, is_local, gui_name):
DaemonThread.__init__(self)
if is_local:
find = imp.find_module('plugins')
plugins = imp.load_module('electrum_plugins', *find)
else:
plugins = __import__('electrum_plugins')
self.pkgpath = os.path.dirname(plugins.__file__)
self.config = config
self.hw_wallets = {}
self.plugins = {}
self.gui_name = gui_name
self.descriptions = {}
self.device_manager = DeviceMgr(config)
self.load_plugins()
self.add_jobs(self.device_manager.thread_jobs())
self.start()
def load_plugins(self):
for loader, name, ispkg in pkgutil.iter_modules([self.pkgpath]):
m = loader.find_module(name).load_module(name)
d = m.__dict__
gui_good = self.gui_name in d.get('available_for', [])
# We register wallet types even if the GUI isn't provided
# otherwise the user gets a misleading message like
# "Unknown wallet type: 2fa"
details = d.get('registers_wallet_type')
if details:
self.register_plugin_wallet(name, gui_good, details)
if not gui_good:
continue
self.descriptions[name] = d
if not d.get('requires_wallet_type') and self.config.get('use_' + name):
try:
self.load_plugin(name)
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.print_error("cannot initialize plugin %s:" % name,
str(e))
def get(self, name):
return self.plugins.get(name)
def count(self):
return len(self.plugins)
def load_plugin(self, name):
full_name = 'electrum_plugins.' + name + '.' + self.gui_name
loader = pkgutil.find_loader(full_name)
if not loader:
raise RuntimeError("%s implementation for %s plugin not found"
% (self.gui_name, name))
p = loader.load_module(full_name)
plugin = p.Plugin(self, self.config, name)
self.add_jobs(plugin.thread_jobs())
self.plugins[name] = plugin
self.print_error("loaded", name)
return plugin
def close_plugin(self, plugin):
self.remove_jobs(plugin.thread_jobs())
def enable(self, name):
self.config.set_key('use_' + name, True, True)
p = self.get(name)
if p:
return p
return self.load_plugin(name)
def disable(self, name):
self.config.set_key('use_' + name, False, True)
p = self.get(name)
if not p:
return
self.plugins.pop(name)
p.close()
self.print_error("closed", name)
def toggle(self, name):
p = self.get(name)
return self.disable(name) if p else self.enable(name)
def is_available(self, name, w):
d = self.descriptions.get(name)
if not d:
return False
deps = d.get('requires', [])
for dep, s in deps:
try:
__import__(dep)
except ImportError:
return False
requires = d.get('requires_wallet_type', [])
return not requires or w.wallet_type in requires
def hardware_wallets(self, action):
wallet_types, descs = [], []
for name, (gui_good, details) in self.hw_wallets.items():
if gui_good:
try:
p = self.wallet_plugin_loader(name)
if action == 'restore' or p.is_enabled():
wallet_types.append(details[1])
descs.append(details[2])
except:
traceback.print_exc()
self.print_error("cannot load plugin for:", name)
return wallet_types, descs
def register_plugin_wallet(self, name, gui_good, details):
from wallet import Wallet
def dynamic_constructor(storage):
return self.wallet_plugin_loader(name).wallet_class(storage)
if details[0] == 'hardware':
self.hw_wallets[name] = (gui_good, details)
self.print_error("registering wallet %s: %s" %(name, details))
Wallet.register_plugin_wallet(details[0], details[1],
dynamic_constructor)
def wallet_plugin_loader(self, name):
        if name not in self.plugins:
self.load_plugin(name)
return self.plugins[name]
def run(self):
while self.is_running():
time.sleep(0.1)
self.run_jobs()
self.print_error("stopped")
hook_names = set()
hooks = {}
def hook(func):
hook_names.add(func.func_name)
return func
def run_hook(name, *args):
results = []
f_list = hooks.get(name, [])
for p, f in f_list:
if p.is_enabled():
try:
r = f(*args)
except Exception:
print_error("Plugin error")
traceback.print_exc(file=sys.stdout)
r = False
if r:
results.append(r)
if results:
assert len(results) == 1, results
return results[0]
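# Illustrative sketch (added comment): @hook only records the method name; the
# actual (plugin, bound method) registration happens in BasePlugin.__init__
# below. A plugin hook is therefore declared and fired roughly as
#
#     class MyPlugin(BasePlugin):
#         @hook
#         def load_wallet(self, wallet, window):   # hypothetical hook name
#             ...
#
#     run_hook('load_wallet', wallet, window)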
class BasePlugin(PrintError):
def __init__(self, parent, config, name):
self.parent = parent # The plugins object
self.name = name
self.config = config
self.wallet = None
# add self to hooks
for k in dir(self):
if k in hook_names:
l = hooks.get(k, [])
l.append((self, getattr(self, k)))
hooks[k] = l
def diagnostic_name(self):
return self.name
def __str__(self):
return self.name
def close(self):
# remove self from hooks
for k in dir(self):
if k in hook_names:
l = hooks.get(k, [])
l.remove((self, getattr(self, k)))
hooks[k] = l
self.parent.close_plugin(self)
self.on_close()
def on_close(self):
pass
def requires_settings(self):
return False
def thread_jobs(self):
return []
def is_enabled(self):
return self.is_available() and self.config.get('use_'+self.name) is True
def is_available(self):
return True
def settings_dialog(self):
pass
class DeviceNotFoundError(Exception):
pass
class DeviceUnpairableError(Exception):
pass
Device = namedtuple("Device", "path interface_number id_ product_key")
DeviceInfo = namedtuple("DeviceInfo", "device description initialized")
class DeviceMgr(ThreadJob, PrintError):
'''Manages hardware clients. A client communicates over a hardware
channel with the device.
In addition to tracking device HID IDs, the device manager tracks
hardware wallets and manages wallet pairing. A HID ID may be
paired with a wallet when it is confirmed that the hardware device
matches the wallet, i.e. they have the same master public key. A
HID ID can be unpaired if e.g. it is wiped.
Because of hotplugging, a wallet must request its client
dynamically each time it is required, rather than caching it
itself.
The device manager is shared across plugins, so just one place
does hardware scans when needed. By tracking HID IDs, if a device
is plugged into a different port the wallet is automatically
re-paired.
    Wallets are informed of connect / disconnect events, and must
    implement connected() and disconnected() callbacks. Being connected
implies a pairing. Callbacks can happen in any thread context,
and we do them without holding the lock.
Confusingly, the HID ID (serial number) reported by the HID system
doesn't match the device ID reported by the device itself. We use
the HID IDs.
    This class is thread-safe. Currently only devices supported by
hidapi are implemented.'''
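    # Typical flow (added comment, not in the original): a hardware-wallet
    # plugin calls register_devices([(vendor_id, product_id), ...]) when it is
    # loaded, and later obtains a client for a wallet via
    # client_for_wallet(plugin, wallet, force_pair=True); pairing happens the
    # first time by comparing the device's first address with the wallet's.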
def __init__(self, config):
super(DeviceMgr, self).__init__()
# Keyed by wallet. The value is the device id if the wallet
# has been paired, and None otherwise.
self.wallets = {}
# A list of clients. The key is the client, the value is
# a (path, id_) pair.
self.clients = {}
# What we recognise. Each entry is a (vendor_id, product_id)
# pair.
self.recognised_hardware = set()
# For synchronization
self.lock = threading.RLock()
self.config = config
def thread_jobs(self):
# Thread job to handle device timeouts
return [self]
def run(self):
'''Handle device timeouts. Runs in the context of the Plugins
thread.'''
with self.lock:
clients = list(self.clients.keys())
cutoff = time.time() - self.config.get_session_timeout()
for client in clients:
client.timeout(cutoff)
def register_devices(self, device_pairs):
for pair in device_pairs:
self.recognised_hardware.add(pair)
def create_client(self, device, handler, plugin):
# Get from cache first
client = self.client_lookup(device.id_)
if client:
return client
client = plugin.create_client(device, handler)
if client:
self.print_error("Registering", client)
with self.lock:
self.clients[client] = (device.path, device.id_)
return client
def wallet_id(self, wallet):
with self.lock:
return self.wallets.get(wallet)
def wallet_by_id(self, id_):
with self.lock:
for wallet, wallet_id in self.wallets.items():
if wallet_id == id_:
return wallet
return None
def unpair_wallet(self, wallet):
with self.lock:
            if wallet not in self.wallets:
return
wallet_id = self.wallets.pop(wallet)
client = self.client_lookup(wallet_id)
self.clients.pop(client, None)
wallet.unpaired()
if client:
client.close()
def unpair_id(self, id_):
with self.lock:
wallet = self.wallet_by_id(id_)
if wallet:
self.unpair_wallet(wallet)
def pair_wallet(self, wallet, id_):
with self.lock:
self.wallets[wallet] = id_
wallet.paired()
def client_lookup(self, id_):
with self.lock:
for client, (path, client_id) in self.clients.items():
if client_id == id_:
return client
return None
def client_by_id(self, id_, handler):
'''Returns a client for the device ID if one is registered. If
a device is wiped or in bootloader mode pairing is impossible;
in such cases we communicate by device ID and not wallet.'''
self.scan_devices(handler)
return self.client_lookup(id_)
def client_for_wallet(self, plugin, wallet, force_pair):
assert wallet.handler
devices = self.scan_devices(wallet.handler)
wallet_id = self.wallet_id(wallet)
client = self.client_lookup(wallet_id)
if client:
# An unpaired client might have another wallet's handler
# from a prior scan. Replace to fix dialog parenting.
client.handler = wallet.handler
return client
for device in devices:
if device.id_ == wallet_id:
return self.create_client(device, wallet.handler, plugin)
if force_pair:
return self.force_pair_wallet(plugin, wallet, devices)
return None
def force_pair_wallet(self, plugin, wallet, devices):
first_address, derivation = wallet.first_address()
assert first_address
# The wallet has not been previously paired, so let the user
# choose an unpaired device and compare its first address.
info = self.select_device(wallet, plugin, devices)
client = self.client_lookup(info.device.id_)
if client and client.is_pairable():
# See comment above for same code
client.handler = wallet.handler
# This will trigger a PIN/passphrase entry request
try:
client_first_address = client.first_address(derivation)
except (UserCancelled, RuntimeError):
# Bad / cancelled PIN / passphrase
client_first_address = None
if client_first_address == first_address:
self.pair_wallet(wallet, info.device.id_)
return client
# The user input has wrong PIN or passphrase, or cancelled input,
# or it is not pairable
raise DeviceUnpairableError(
_('Electrum cannot pair with your %s.\n\n'
'Before you request parkbytes to be sent to addresses in this '
'wallet, ensure you can pair with your device, or that you have '
'its seed (and passphrase, if any). Otherwise all parkbytes you '
'receive will be unspendable.') % plugin.device)
def unpaired_device_infos(self, handler, plugin, devices=None):
'''Returns a list of DeviceInfo objects: one for each connected,
unpaired device accepted by the plugin.'''
if devices is None:
devices = self.scan_devices(handler)
devices = [dev for dev in devices if not self.wallet_by_id(dev.id_)]
states = [_("wiped"), _("initialized")]
infos = []
for device in devices:
            if device.product_key not in plugin.DEVICE_IDS:
continue
client = self.create_client(device, handler, plugin)
if not client:
continue
state = states[client.is_initialized()]
label = client.label() or _("An unnamed %s") % plugin.device
descr = "%s (%s)" % (label, state)
infos.append(DeviceInfo(device, descr, client.is_initialized()))
return infos
def select_device(self, wallet, plugin, devices=None):
'''Ask the user to select a device to use if there is more than one,
and return the DeviceInfo for the device.'''
while True:
infos = self.unpaired_device_infos(wallet.handler, plugin, devices)
if infos:
break
msg = _('Could not connect to your %s. Verify the cable is '
'connected and that no other application is using it.\n\n'
'Try to connect again?') % plugin.device
if not wallet.handler.yes_no_question(msg):
raise UserCancelled()
devices = None
if len(infos) == 1:
return infos[0]
msg = _("Please select which %s device to use:") % plugin.device
descriptions = [info.description for info in infos]
return infos[wallet.handler.query_choice(msg, descriptions)]
def scan_devices(self, handler):
# All currently supported hardware libraries use hid, so we
# assume it here. This can be easily abstracted if necessary.
# Note this import must be local so those without hardware
# wallet libraries are not affected.
import hid
self.print_error("scanning devices...")
# First see what's connected that we know about
devices = []
for d in hid.enumerate(0, 0):
product_key = (d['vendor_id'], d['product_id'])
if product_key in self.recognised_hardware:
# Older versions of hid don't provide interface_number
interface_number = d.get('interface_number', 0)
devices.append(Device(d['path'], interface_number,
d['serial_number'], product_key))
# Now find out what was disconnected
pairs = [(dev.path, dev.id_) for dev in devices]
disconnected_ids = []
with self.lock:
connected = {}
for client, pair in self.clients.items():
if pair in pairs:
connected[client] = pair
else:
disconnected_ids.append(pair[1])
self.clients = connected
# Unpair disconnected devices
for id_ in disconnected_ids:
self.unpair_id(id_)
return devices
|
|
import os
import numpy as np
from os.path import join as pjoin
from dipy.viz import actor, window, widget, fvtk
from dipy.data import DATA_DIR
from dipy.data import fetch_viz_icons, read_viz_icons
import numpy.testing as npt
from dipy.testing.decorators import xvfb_it
use_xvfb = os.environ.get('TEST_WITH_XVFB', False)
if use_xvfb == 'skip':
skip_it = True
else:
skip_it = False
@npt.dec.skipif(not actor.have_vtk or not actor.have_vtk_colors or skip_it)
@xvfb_it
def test_button_and_slider_widgets():
recording = False
filename = "test_button_and_slider_widgets.log.gz"
recording_filename = pjoin(DATA_DIR, filename)
renderer = window.Renderer()
# create some minimalistic streamlines
lines = [np.array([[-1, 0, 0.], [1, 0, 0.]]),
np.array([[-1, 1, 0.], [1, 1, 0.]])]
colors = np.array([[1., 0., 0.], [0.3, 0.7, 0.]])
stream_actor = actor.streamtube(lines, colors)
states = {'camera_button_count': 0,
'plus_button_count': 0,
'minus_button_count': 0,
'slider_moved_count': 0,
}
renderer.add(stream_actor)
    # the show manager allows breaking the rendering process into steps
    # so that the widgets can be added properly
show_manager = window.ShowManager(renderer, size=(800, 800))
if recording:
show_manager.initialize()
show_manager.render()
def button_callback(obj, event):
print('Camera pressed')
states['camera_button_count'] += 1
def button_plus_callback(obj, event):
print('+ pressed')
states['plus_button_count'] += 1
def button_minus_callback(obj, event):
print('- pressed')
states['minus_button_count'] += 1
fetch_viz_icons()
button_png = read_viz_icons(fname='camera.png')
button = widget.button(show_manager.iren,
show_manager.ren,
button_callback,
button_png, (.98, 1.), (80, 50))
button_png_plus = read_viz_icons(fname='plus.png')
button_plus = widget.button(show_manager.iren,
show_manager.ren,
button_plus_callback,
button_png_plus, (.98, .9), (120, 50))
button_png_minus = read_viz_icons(fname='minus.png')
button_minus = widget.button(show_manager.iren,
show_manager.ren,
button_minus_callback,
button_png_minus, (.98, .9), (50, 50))
def print_status(obj, event):
rep = obj.GetRepresentation()
stream_actor.SetPosition((rep.GetValue(), 0, 0))
states['slider_moved_count'] += 1
slider = widget.slider(show_manager.iren, show_manager.ren,
callback=print_status,
min_value=-1,
max_value=1,
value=0.,
label="X",
right_normalized_pos=(.98, 0.6),
size=(120, 0), label_format="%0.2lf")
# This callback is used to update the buttons/sliders' position
# so they can stay on the right side of the window when the window
# is being resized.
global size
size = renderer.GetSize()
def win_callback(obj, event):
global size
if size != obj.GetSize():
button.place(renderer)
button_plus.place(renderer)
button_minus.place(renderer)
slider.place(renderer)
size = obj.GetSize()
if recording:
# show_manager.add_window_callback(win_callback)
# you can also register any callback in a vtk way like this
# show_manager.window.AddObserver(vtk.vtkCommand.ModifiedEvent,
# win_callback)
show_manager.record_events_to_file(recording_filename)
print(states)
else:
show_manager.play_events_from_file(recording_filename)
npt.assert_equal(states["camera_button_count"], 7)
npt.assert_equal(states["plus_button_count"], 3)
npt.assert_equal(states["minus_button_count"], 4)
npt.assert_equal(states["slider_moved_count"], 116)
if not recording:
button.Off()
slider.Off()
# Uncomment below to test the slider and button with analyze
# button.place(renderer)
# slider.place(renderer)
arr = window.snapshot(renderer, size=(800, 800))
report = window.analyze_snapshot(arr)
# import pylab as plt
# plt.imshow(report.labels, origin='lower')
# plt.show()
npt.assert_equal(report.objects, 4)
report = window.analyze_renderer(renderer)
npt.assert_equal(report.actors, 1)
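# Note (added comment): the test above replays pre-recorded GUI events from
# DATA_DIR; to regenerate the recording, set ``recording = True`` at the top of
# the test, interact with the window, and update the expected button/slider
# counts to match the new log file.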
@npt.dec.skipif(not actor.have_vtk or not actor.have_vtk_colors or skip_it)
@xvfb_it
def test_text_widget():
interactive = False
renderer = window.Renderer()
axes = fvtk.axes()
window.add(renderer, axes)
renderer.ResetCamera()
show_manager = window.ShowManager(renderer, size=(900, 900))
if interactive:
show_manager.initialize()
show_manager.render()
fetch_viz_icons()
button_png = read_viz_icons(fname='home3.png')
def button_callback(obj, event):
print('Button Pressed')
button = widget.button(show_manager.iren,
show_manager.ren,
button_callback,
button_png, (.8, 1.2), (100, 100))
global rulez
rulez = True
def text_callback(obj, event):
global rulez
print('Text selected')
if rulez:
obj.GetTextActor().SetInput("Diffusion Imaging Rulez!!")
rulez = False
else:
obj.GetTextActor().SetInput("Diffusion Imaging in Python")
rulez = True
show_manager.render()
text = widget.text(show_manager.iren,
show_manager.ren,
text_callback,
message="Diffusion Imaging in Python",
left_down_pos=(0., 0.),
right_top_pos=(0.4, 0.05),
opacity=1.,
border=False)
if not interactive:
button.Off()
text.Off()
pass
if interactive:
show_manager.render()
show_manager.start()
arr = window.snapshot(renderer, size=(900, 900))
report = window.analyze_snapshot(arr)
npt.assert_equal(report.objects, 3)
# If you want to see the segmented objects after the analysis is finished
# you can use imshow(report.labels, origin='lower')
if __name__ == '__main__':
npt.run_module_suite()
|
|
#!/usr/bin/env python3
# Minimal install
# apt-get install python3-regex
# apt-get install python3-pandas
import sys
import re
from optparse import OptionParser
import BQhelper as bq
import pandas as pd
# Infer subexpressions to access indicator columns
# Column Matcher
Patterns={
    'TestDate':[ # NB: Row dates are converted to TIMESTAMP to facilitate comparisons, etc
('[dD]ate', 'TIMESTAMP({c})'),
('[tT]est.*[dD]ate', 'TIMESTAMP({c})'),
('[pP]artition.*[dD]ate','TIMESTAMP({c})'),
('log_time', 'TIMESTAMP({c})')
],
'fileName':[
('Machine', 'CONCAT(server.Machine,"-",server.Site)'),
('[fF]ile[nN]ame', '{c}'),
('.','"No Server Name"'), # default to an unreasonable name
],
'parseTime':[
('[pP]arse.*[tT]ime', '{c}'),
('.', "TIMESTAMP('1970-01-01')") # default to an unreasonable time
],
'UUID':[
('id', '{c}'),
'a.UUID',
'result.Control.UUID',
('UUID', '{c}'),
'test_id',
('.', '"ERROR_DISCOVERING_UUID"') # default to an errored UUID
]
}
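# Illustrative example (added comment): for a table whose only column is
# 'partition_date', columnMatcher(Patterns, ['partition_date']) would map
#   TestDate  -> TIMESTAMP(partition_date)
#   fileName  -> "No Server Name"
#   parseTime -> TIMESTAMP('1970-01-01')
#   UUID      -> "ERROR_DISCOVERING_UUID"
# i.e. the date column is recognised and every other indicator falls back to
# its deliberately unreasonable default.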
def columnMatcher(patterns, cols, needed=None, verbose=False):
"""Infer BQ expressions to extract required columns
"""
def matchHelper(var, matches, cols, verbose):
for m in matches:
try:
r, v = m
except ValueError: # shortcut, single item match
if verbose:
print('Simple:', m)
if m in cols:
return {var:m}
continue
if verbose:
print ("Re:", r, v)
for c in cols:
if re.search(r, c):
return {var:v.format(c=c)}
print("Warning no mapping for", var)
return {var:None}
res={}
for var, matches in patterns.items():
if needed and var not in needed:
continue
if verbose:
print ('VAR:', var, matches)
res.update(matchHelper(var, matches, cols, verbose))
return res
def UnitTestColumnMatcher():
tests=['test__date', 'TestDate', 'PartitionDate', 'ParseInfo.TaskFileName' ]
for t in tests:
print (" Test", t)
print ('=== Result:', t, columnMatcher(Patterns, [t]))
def inferColumns(table, needed=None):
return columnMatcher(Patterns, bq.getColumns(table), needed)
mainQ="""
WITH
canonic AS (
SELECT
{TestDate} AS TestDate,
REGEXP_EXTRACT({fileName}, 'mlab[1-4]-[a-z][a-z][a-z][0-9][0-9t]') AS ShortName,
{parseTime} AS ParseTime,
{UUID} AS UUID,
FROM `{fullTableName}`
),
HotUUIDs AS (
SELECT UUID, count(*) AS cnt
FROM canonic
WHERE UUID NOT IN ( 'ERROR_DISCOVERING_UUID', '' )
GROUP BY UUID
ORDER BY cnt desc
limit 1
),
ServerSet AS (
SELECT ShortName AS UniqueName FROM canonic GROUP BY ShortName
),
ServerDays AS (
SELECT
ShortName,
TIMESTAMP_DIFF(MAX(TestDate), MIN(TestDate), DAY)+1 AS cnt,
TIMESTAMP_DIFF(MAX(TestDate), MIN(TestDate), DAY)+1 - COUNT( DISTINCT TestDate) AS missing,
MAX(TestDate) AS EndDate,
FROM canonic
GROUP BY ShortName
),
# Since some messages need to be strings, we force all to be strings
RawReport AS (
SELECT 10 AS seq, "Total Days" AS name, CAST(TIMESTAMP_DIFF(MAX(TestDate), MIN(TestDate), DAY)+1 AS STRING) AS val FROM canonic UNION ALL
SELECT 12, "First Day", CAST(MIN(TestDate) AS STRING) FROM canonic UNION ALL
SELECT 13, "Last Day", CAST(MAX(TestDate) AS STRING) FROM canonic UNION ALL
SELECT 20, "Total Rows", CAST(COUNT (*) AS STRING) FROM canonic UNION ALL
SELECT 30, "Total Servers", CAST(COUNT ( DISTINCT ShortName ) AS STRING) FROM canonic UNION ALL
SELECT 50, "Oldest Parse Time", CAST(MIN(ParseTime) AS STRING) FROM canonic UNION ALL
SELECT 51, "Newest Parse Time", CAST(MAX(parseTime) AS STRING) FROM canonic UNION ALL
{expandedReport}
SELECT 99, "End-of-Report", ""
)
select * FROM RawReport ORDER BY seq
"""
defaultQ="""
SELECT 11, "Missing Days", CAST(TIMESTAMP_DIFF(MAX(TestDate), MIN(TestDate), DAY)+1 - COUNT( DISTINCT TestDate) AS STRING) FROM canonic UNION ALL
SELECT 21, "Missing UUIDs (ERROR_DISCOVERING_UUID)", CAST(COUNTIF (UUID is Null OR UUID = 'ERROR_DISCOVERING_UUID') AS STRING) FROM canonic UNION ALL
SELECT 22, "Duplicated UUIDs",CAST( COUNTIF(UUID IS NOT NULL AND UUID != 'ERROR_DISCOVERING_UUID') - COUNT( DISTINCT UUID ) AS STRING) FROM canonic UNION ALL
SELECT 24, "Total unique UUIDs", CAST(COUNT( DISTINCT UUID ) AS STRING) FROM canonic UNION ALL
SELECT 31, "Rows Missing Servers", CAST(COUNTIF ( ShortName IS Null ) AS STRING) FROM canonic UNION ALL
SELECT 32, "Test Servers (0t, 1t)", CAST(COUNTIF ( UniqueName like '%t' ) AS STRING) FROM ServerSet UNION ALL
SELECT 33, "Mlab4's", CAST(COUNTIF ( UniqueName like 'mlab4%' ) AS STRING) FROM ServerSet UNION ALL
SELECT 52, "Span of Parse dates", CAST(TIMESTAMP_DIFF(MAX(parseTime), MIN(ParseTime), day) AS STRING) FROM canonic UNION ALL"""
extendedQ="""
SELECT 23, CONCAT("Top dup:", UUID), CAST(cnt AS STRING) FROM HotUUIDs UNION ALL
SELECT 40, "Currently Active Servers", CAST(COUNTIF ( TIMESTAMP_DIFF(CURRENT_TIMESTAMP(), EndDate, DAY) < 4) AS STRING) FROM ServerDays UNION ALL
SELECT 41, "Total Server-days", CAST(SUM(cnt) AS STRING) FROM ServerDays UNION ALL
SELECT 42, "Missing Server-days", CAST(SUM(missing) AS STRING) FROM ServerDays UNION ALL
"""
def resourceReport(rows=0):
print('Resource consumption')
fmt="%20s: %s"
print (fmt%('Rows (M)', rows/1000000))
print (fmt%('slot_milli', bq.jobInfo.slot_millis))
if rows>0 and bq.jobInfo.slot_millis>0:
print (fmt%('slot_milli/row', bq.jobInfo.slot_millis/rows))
print (fmt%('bytes processed', bq.jobInfo.total_bytes_processed))
if rows>0 and bq.jobInfo.total_bytes_processed>0:
print (fmt%('bytes processed/row', bq.jobInfo.total_bytes_processed/rows))
def inventoryTable(fullTableName, **args):
print ('Inferred Column Mappings')
expansion=inferColumns(fullTableName)
for v, e in expansion.items():
print ("%40s AS %s"%(e, v))
if args.get('quick'):
expandedReport = ''
elif args.get('extended'):
expandedReport = defaultQ + extendedQ
else:
expandedReport = defaultQ
print ('Data Statistics')
res=bq.IndexedDataFrameQuery(mainQ, fullTableName=fullTableName, expandedReport=expandedReport, index='seq', **expansion, **args)
for i in res['seq']:
print ("%50s %s"%(res['name'][i], res['val'][i]))
totalRows=int(res.loc[20]['val']) # seq of "Total Rows"
resourceReport(totalRows)
return
def UnitTestInventoryTable():
inventoryTable('measurement-lab.ndt.ndt5')
def inventoryDataSet(dataSet, **args):
tables=bq.getTables(dataSet)
for t in tables:
table = dataSet+'.'+t
print('')
print('==================================================')
print ('Table Statistics for', table)
try:
inventoryTable(table, **args)
except Exception as e:
print (" Crashed: ", type(e))
def inspectDataSetMappings(dataSet, needed=None):
tables=bq.getTables(dataSet)
for t in tables:
table = dataSet+'.'+t
print('')
print ('Table column mappings for', table)
expansion=inferColumns(table, needed)
for v, e in expansion.items():
print ("%40s AS %s"%(e, v))
def UnitTestAll():
UnitTestColumnMatcher()
UnitTestInventoryTable()
if __name__ == "__main__":
try:
arg = sys.argv[1]
except IndexError:
print ('Requires a table or dataset name')
exit(-2)
if len(arg.split('.')) == 3:
inventoryTable(arg)
elif len(arg.split('.')) == 2:
inventoryDataSet(arg)
else:
print ("Must be either project.dataset or project.dataset.table")
|
|
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""cros pinchrome: Pin chrome to an earlier version."""
from __future__ import print_function
import fnmatch
import glob
import os
import re
import shutil
import tempfile
from chromite.cbuildbot import constants
from chromite.cbuildbot import repository
from chromite.lib import cros_build_lib
from chromite.lib import git
from chromite.lib import osutils
from chromite.lib import portage_util
from chromite.scripts import cros_mark_as_stable
from chromite import cros
class UprevNotFound(Exception):
"""Exception to throw when no Chrome Uprev CL is found."""
# git utility functions.
def CloneWorkingRepo(dest, url, reference, branch):
"""Clone a git repository with an existing local copy as a reference.
Also copy the hooks into the new repository.
Args:
    dest: The directory to clone into.
url: The URL of the repository to clone.
reference: Local checkout to draw objects from.
branch: The branch to clone.
"""
repository.CloneGitRepo(dest, url, reference=reference,
single_branch=True, branch=branch)
for name in glob.glob(os.path.join(reference, '.git', 'hooks', '*')):
newname = os.path.join(dest, '.git', 'hooks', os.path.basename(name))
shutil.copyfile(name, newname)
shutil.copystat(name, newname)
# Portage utilities.
def UpdateManifest(ebuild):
"""Update the manifest for an ebuild.
Args:
ebuild: Path to the ebuild to update the manifest for.
"""
ebuild = cros_build_lib.ToChrootPath(os.path.realpath(ebuild))
cros_build_lib.RunCommand(['ebuild', ebuild, 'manifest'], quiet=True,
enter_chroot=True)
def SplitPVPath(path):
"""Utility function to run both SplitEbuildPath and SplitPV.
Args:
path: Ebuild path to run those functions on.
Returns:
The output of SplitPV.
"""
return portage_util.SplitPV(portage_util.SplitEbuildPath(path)[2])
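# Illustrative sketch (added comment; the path is hypothetical): for an ebuild
# path such as
#   .../chromeos-base/chromeos-chrome/chromeos-chrome-40.0.2214.0_rc-r1.ebuild
# the returned object's .package is 'chromeos-chrome' and .version carries the
# version part of the filename; a .version of '9999' marks the live ebuild,
# which the revert helpers below deliberately skip.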
def RevertStableEBuild(dirname, rev):
"""Revert the stable ebuilds for a package back to a particular revision.
Also add/remove the files in git.
Args:
dirname: Path to the ebuild directory.
rev: Revision to revert back to.
Returns:
The name of the ebuild reverted to.
"""
package = os.path.basename(dirname.rstrip(os.sep))
pattern = '%s-*.ebuild' % package
# Get rid of existing stable ebuilds.
ebuilds = glob.glob(os.path.join(dirname, pattern))
for ebuild in ebuilds:
parts = SplitPVPath(ebuild)
if parts.version != '9999':
git.RmPath(ebuild)
# Bring back the old stable ebuild.
names = git.GetObjectAtRev(dirname, './', rev).split()
names = fnmatch.filter(names, pattern)
names = [name for name in names
if SplitPVPath(os.path.join(dirname, name)).version != '9999']
if not names:
return None
assert len(names) == 1
name = names[0]
git.RevertPath(dirname, name, rev)
# Update the manifest.
UpdateManifest(os.path.join(dirname, name))
manifest_path = os.path.join(dirname, 'Manifest')
if os.path.exists(manifest_path):
git.AddPath(manifest_path)
return os.path.join(dirname, name)
def RevertBinhostConf(overlay, conf_files, rev):
"""Revert binhost config files back to a particular revision.
Args:
overlay: The overlay holding the binhost config files.
conf_files: A list of config file names.
rev: The revision to revert back to.
"""
binhost_dir = os.path.join(overlay, 'chromeos', 'binhost')
for conf_file in conf_files:
try:
git.RevertPath(os.path.join(binhost_dir, 'target'), conf_file, rev)
except Exception as e1:
try:
git.RevertPath(os.path.join(binhost_dir, 'host'), conf_file, rev)
except Exception as e2:
raise Exception(str(e1) + '\n' + str(e2))
def MaskNewerPackages(overlay, ebuilds):
"""Mask ebuild versions newer than the ones passed in.
This creates a new mask file called chromepin which masks ebuilds newer than
the ones passed in. To undo the masking, just delete that file. The
mask file is added with git.
Args:
overlay: The overlay that will hold the mask file.
ebuilds: List of ebuilds to set up masks for.
"""
content = '# Pin chrome by masking more recent versions.\n'
for ebuild in ebuilds:
parts = portage_util.SplitEbuildPath(ebuild)
content += '>%s\n' % os.path.join(parts[0], parts[2])
mask_file = os.path.join(overlay, MASK_FILE)
osutils.WriteFile(mask_file, content)
git.AddPath(mask_file)
# Tools to pick the point right before an uprev to pin chrome to and get
# information about it.
CONF_RE = re.compile(
r'^\s*(?P<conf>[^:\n]+): updating LATEST_RELEASE_CHROME_BINHOST',
flags=re.MULTILINE)
# Interesting paths.
OVERLAY = os.path.join(constants.SOURCE_ROOT,
constants.CHROMIUMOS_OVERLAY_DIR)
OVERLAY_URL = (constants.EXTERNAL_GOB_URL +
'/chromiumos/overlays/chromiumos-overlay')
PRIV_OVERLAY = os.path.join(constants.SOURCE_ROOT, 'src',
'private-overlays',
'chromeos-partner-overlay')
PRIV_OVERLAY_URL = (constants.INTERNAL_GOB_URL +
'/chromeos/overlays/chromeos-partner-overlay')
MASK_FILE = os.path.join('profiles', 'default', 'linux',
'package.mask', 'chromepin')
class ChromeUprev(object):
"""A class to represent Chrome uprev CLs in the public overlay."""
def __init__(self, ebuild_dir, before=None):
"""Construct a Chrome uprev object
Args:
ebuild_dir: Path to the directory with the chrome ebuild in it.
before: CL to work backwards from.
"""
# Format includes the hash, commit body including subject, and author date.
cmd = ['log', '-n', '1', '--author', 'chrome-bot', '--grep',
cros_mark_as_stable.GIT_COMMIT_SUBJECT,
'--format=format:%H%n%aD%n%B']
if before:
cmd.append(str(before) + '~')
cmd.append('.')
log = git.RunGit(ebuild_dir, cmd).output
if not log.strip():
raise UprevNotFound('No uprev CL was found')
self.sha, _, log = log.partition('\n')
self.date, _, message = log.partition('\n')
self.conf_files = [m.group('conf') for m in CONF_RE.finditer(message)]
entries = git.RawDiff(ebuild_dir, '%s^!' % self.sha)
for entry in entries:
if entry.status != 'R':
continue
from_path = entry.src_file
to_path = entry.dst_file
if (os.path.splitext(from_path)[1] != '.ebuild' or
os.path.splitext(to_path)[1] != '.ebuild'):
continue
self.from_parts = SplitPVPath(from_path)
self.to_parts = SplitPVPath(to_path)
if (self.from_parts.package != 'chromeos-chrome' or
self.to_parts.package != 'chromeos-chrome'):
continue
break
else:
raise Exception('Failed to find chromeos-chrome uprev in CL %s' %
self.sha)
class UprevList(object):
"""A generator which returns chrome uprev CLs in a particular repository.
It also keeps track of what CLs have been presented so the one the user
chose can be retrieved.
"""
def __init__(self, chrome_path):
"""Initialize the class.
Args:
chrome_path: The path to the repository to search.
"""
self.uprevs = []
self.last = None
self.chrome_path = chrome_path
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
before = self.last.sha if self.last else None
try:
self.last = ChromeUprev(self.chrome_path, before=before)
except UprevNotFound:
raise StopIteration()
ver = self.last.from_parts.version + ' (%s)' % self.last.date
self.uprevs.append(self.last)
return ver
# Tools to find the binhost updates in the private overlay which go with the
# ones in the public overlay.
class BinHostUprev(object):
"""Class which represents an uprev CL for the private binhost configs."""
def __init__(self, sha, log):
self.sha = sha
self.conf_files = [m.group('conf') for m in CONF_RE.finditer(log)]
def FindPrivateConfCL(overlay, pkg_dir):
"""Find the private binhost uprev CL which goes with the public one.
Args:
overlay: Path to the private overlay.
pkg_dir: What the package directory should contain to be considered a
match.
Returns:
A BinHostUprev object representing the CL.
"""
binhost_dir = os.path.join(overlay, 'chromeos', 'binhost')
before = None
plus_package_re = re.compile(r'^\+.*%s' % re.escape(pkg_dir),
flags=re.MULTILINE)
while True:
cmd = ['log', '-n', '1', '--grep', 'LATEST_RELEASE_CHROME_BINHOST',
'--format=format:%H']
if before:
cmd.append('%s~' % before)
cmd.append('.')
sha = git.RunGit(binhost_dir, cmd).output.strip()
if not sha:
return None
cl = git.RunGit(overlay, ['show', '-M', sha]).output
if plus_package_re.search(cl):
return BinHostUprev(sha, cl)
before = sha
# The main attraction.
@cros.CommandDecorator('pinchrome')
class PinchromeCommand(cros.CrosCommand):
"""Pin chrome to an earlier revision."""
def __init__(self, options):
super(PinchromeCommand, self).__init__(options)
# Make up a branch name which is unlikely to collide.
self.branch_name = 'chrome_pin_' + cros_build_lib.GetRandomString()
@classmethod
def AddParser(cls, parser):
super(cls, PinchromeCommand).AddParser(parser)
parser.add_argument('--unpin', help='Unpin chrome.', default=False,
action='store_true')
parser.add_argument('--bug', help='Used in the "BUG" field of CLs.',
required=True)
parser.add_argument('--branch', default='master',
help='The branch to pin chrome on (default master).')
parser.add_argument('--nowipe', help='Preserve the working directory',
default=True, dest='wipe', action='store_false')
parser.add_argument('--dryrun', action='store_true',
help='Prepare pinning CLs but don\'t upload them')
def CommitMessage(self, subject, cq_depend=None, change_id=None):
"""Generate a commit message
Args:
subject: The subject of the message.
cq_depend: An optional CQ-DEPEND target.
change_id: An optional change ID.
Returns:
The commit message.
"""
message = [
'%s' % subject,
'',
'BUG=%s' % self.options.bug,
'TEST=None',
]
if cq_depend:
message += ['CQ-DEPEND=%s' % cq_depend]
if change_id:
message += [
'',
'Change-Id: %s' % change_id,
]
return '\n'.join(message)
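  # For illustration (hypothetical values), CommitMessage(
  #     'Chrome: Unpin chrome', cq_depend='*12345', change_id='I0123abcd')
  # renders as:
  #
  #   Chrome: Unpin chrome
  #
  #   BUG=<value of --bug>
  #   TEST=None
  #   CQ-DEPEND=*12345
  #
  #   Change-Id: I0123abcd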
def unpin(self, work_dir):
"""Unpin chrome."""
overlay = os.path.join(work_dir, 'overlay')
print('Setting up working directory...')
CloneWorkingRepo(overlay, OVERLAY_URL, OVERLAY, self.options.branch)
print('Done')
mask_file = os.path.join(overlay, MASK_FILE)
if not os.path.exists(mask_file):
raise Exception('Mask file not found. Is Chrome pinned?')
git.CreateBranch(overlay, self.branch_name, track=True,
branch_point='origin/%s' % self.options.branch)
git.RmPath(mask_file)
git.Commit(overlay, self.CommitMessage('Chrome: Unpin chrome'))
git.UploadCL(overlay, OVERLAY_URL, self.options.branch,
dryrun=self.options.dryrun)
def pin(self, work_dir):
"""Pin chrome."""
overlay = os.path.join(work_dir, 'overlay')
priv_overlay = os.path.join(work_dir, 'priv_overlay')
print('Setting up working directory...')
CloneWorkingRepo(overlay, OVERLAY_URL, OVERLAY, self.options.branch)
CloneWorkingRepo(priv_overlay, PRIV_OVERLAY_URL, PRIV_OVERLAY,
self.options.branch)
print('Done')
# Interesting paths.
chrome_dir = os.path.join(overlay, constants.CHROME_CP)
other_dirs = [os.path.join(overlay, pkg) for pkg in
constants.OTHER_CHROME_PACKAGES]
# Let the user pick what version to pin chrome to.
uprev_list = UprevList(chrome_dir)
choice = cros_build_lib.GetChoice('Versions of chrome to pin to:',
uprev_list, group_size=5)
pin_version = uprev_list.uprevs[choice]
commit_subject = ('Chrome: Pin to version %s' %
pin_version.from_parts.version)
# Public branch.
git.CreateBranch(overlay, self.branch_name, track=True,
branch_point='origin/%s' % self.options.branch)
target_sha = pin_version.sha + '~'
ebs = [RevertStableEBuild(chrome_dir, target_sha)]
for pkg_dir in other_dirs:
ebs.append(RevertStableEBuild(pkg_dir, target_sha))
RevertBinhostConf(overlay, pin_version.conf_files, target_sha)
MaskNewerPackages(overlay, (eb for eb in ebs if eb))
pub_cid = git.Commit(overlay, 'Public overlay commit')
if not pub_cid:
raise Exception('Don\'t know the commit ID of the public overlay CL.')
# Find out what package directory the binhost configs should point to.
binhost_dir = os.path.join(overlay, 'chromeos', 'binhost')
target_file = os.path.join(binhost_dir, 'target', pin_version.conf_files[0])
host_file = os.path.join(binhost_dir, 'host', pin_version.conf_files[0])
conf_file = target_file if os.path.exists(target_file) else host_file
conf_content = osutils.ReadFile(conf_file)
match = re.search('/(?P<package_dir>[^/\n]*)/packages', conf_content)
if not match:
raise Exception('Failed to parse binhost conf %s' % conf_content.strip())
pkg_dir = match.group('package_dir')
# Private branch.
git.CreateBranch(priv_overlay, self.branch_name, track=True,
branch_point='origin/%s' % self.options.branch)
binhost_uprev = FindPrivateConfCL(priv_overlay, pkg_dir)
if not binhost_uprev:
raise Exception('Failed to find private binhost uprev.')
target_sha = binhost_uprev.sha
RevertBinhostConf(priv_overlay, binhost_uprev.conf_files, target_sha)
commit_message = self.CommitMessage(commit_subject, pub_cid)
priv_cid = git.Commit(priv_overlay, commit_message)
if not priv_cid:
raise Exception('Don\'t know the commit ID of the private overlay CL.')
# Update the commit message on the public overlay CL.
commit_message = self.CommitMessage(commit_subject, '*' + priv_cid, pub_cid)
git.Commit(overlay, commit_message, amend=True)
# Upload the CLs.
git.UploadCL(overlay, OVERLAY_URL, self.options.branch,
dryrun=self.options.dryrun)
git.UploadCL(priv_overlay, PRIV_OVERLAY_URL, self.options.branch,
dryrun=self.options.dryrun)
def Run(self):
"""Run cros pinchrome."""
self.options.Freeze()
chroot_tmp = os.path.join(constants.SOURCE_ROOT,
constants.DEFAULT_CHROOT_DIR, 'tmp')
tmp_override = None if cros_build_lib.IsInsideChroot() else chroot_tmp
work_dir = tempfile.mkdtemp(prefix='pinchrome_', dir=tmp_override)
try:
if self.options.unpin:
self.unpin(work_dir)
else:
self.pin(work_dir)
finally:
if self.options.wipe:
osutils.RmDir(work_dir)
else:
print('Leaving working directory at %s.' % work_dir)
|
|
import json
import uuid
from unittest.mock import patch
import django_rq
from django.http import HttpResponse
from django.urls import reverse
from requests import Session
from rest_framework import status
from extras.models import Webhook
from extras.webhooks import enqueue_object, flush_webhooks
from extras.workers import generate_signature, process_webhook
from peering.models import AutonomousSystem
from utils.enums import ObjectChangeAction
from utils.models import Tag
from utils.testing import APITestCase
class WebhookTest(APITestCase):
def setUp(self):
super().setUp()
# Make sure the queue is empty before testing
self.queue = django_rq.get_queue("default")
self.queue.empty()
@classmethod
def setUpTestData(cls):
TEST_URL = "http://localhost/"
TEST_SECRET = "thisisaverystrongsecret"
webhooks = Webhook.objects.bulk_create(
[
Webhook(
name="Create Webhook",
type_create=True,
url=TEST_URL,
secret=TEST_SECRET,
),
Webhook(
name="Update Webhook",
type_update=True,
url=TEST_URL,
secret=TEST_SECRET,
),
Webhook(
name="Delete Webhook",
type_delete=True,
url=TEST_URL,
secret=TEST_SECRET,
),
]
)
Tag.objects.bulk_create(
(
Tag(name="Foo", slug="foo"),
Tag(name="Bar", slug="bar"),
Tag(name="Baz", slug="baz"),
)
)
def test_enqueue_webhook_create(self):
data = {
"asn": 64500,
"name": "AS 1",
"tags": [
{"name": "Foo"},
{"name": "Bar"},
],
}
url = reverse("peering-api:autonomoussystem-list")
response = self.client.post(url, data, format="json", **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(AutonomousSystem.objects.count(), 1)
self.assertEqual(AutonomousSystem.objects.first().tags.count(), 2)
# Verify that a job was queued for the object creation webhook
self.assertEqual(self.queue.count, 1)
job = self.queue.jobs[0]
self.assertEqual(job.kwargs["webhook"], Webhook.objects.get(type_create=True))
self.assertEqual(job.kwargs["event"], ObjectChangeAction.CREATE)
self.assertEqual(job.kwargs["model_name"], "autonomoussystem")
self.assertEqual(job.kwargs["data"]["id"], response.data["id"])
self.assertEqual(len(job.kwargs["data"]["tags"]), len(response.data["tags"]))
self.assertEqual(job.kwargs["snapshots"]["postchange"]["name"], "AS 1")
self.assertEqual(
sorted(job.kwargs["snapshots"]["postchange"]["tags"]), ["Bar", "Foo"]
)
def test_enqueue_webhook_bulk_create(self):
# Create multiple objects via the REST API
data = [
{
"asn": 64500,
"name": "AS 1",
"tags": [
{"name": "Foo"},
{"name": "Bar"},
],
},
{
"asn": 64501,
"name": "AS 2",
"tags": [
{"name": "Foo"},
{"name": "Bar"},
],
},
{
"asn": 64502,
"name": "AS 3",
"tags": [
{"name": "Foo"},
{"name": "Bar"},
],
},
]
url = reverse("peering-api:autonomoussystem-list")
response = self.client.post(url, data, format="json", **self.header)
self.assertHttpStatus(response, status.HTTP_201_CREATED)
self.assertEqual(AutonomousSystem.objects.count(), 3)
self.assertEqual(AutonomousSystem.objects.first().tags.count(), 2)
# Verify that a webhook was queued for each object
self.assertEqual(self.queue.count, 3)
for i, job in enumerate(self.queue.jobs):
self.assertEqual(
job.kwargs["webhook"], Webhook.objects.get(type_create=True)
)
self.assertEqual(job.kwargs["event"], ObjectChangeAction.CREATE)
self.assertEqual(job.kwargs["model_name"], "autonomoussystem")
self.assertEqual(job.kwargs["data"]["id"], response.data[i]["id"])
self.assertEqual(
len(job.kwargs["data"]["tags"]), len(response.data[i]["tags"])
)
self.assertEqual(
job.kwargs["snapshots"]["postchange"]["name"], response.data[i]["name"]
)
self.assertEqual(
sorted(job.kwargs["snapshots"]["postchange"]["tags"]), ["Bar", "Foo"]
)
def test_enqueue_webhook_update(self):
asn = AutonomousSystem.objects.create(asn=64500, name="AS 1")
asn.tags.set(Tag.objects.filter(name__in=["Foo", "Bar"]))
# Update an object via the REST API
data = {
"name": "My AS",
"comments": "Updated the AS",
"tags": [{"name": "Baz"}],
}
url = reverse("peering-api:autonomoussystem-detail", kwargs={"pk": asn.pk})
response = self.client.patch(url, data, format="json", **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
# Verify that a job was queued for the object update webhook
self.assertEqual(self.queue.count, 1)
job = self.queue.jobs[0]
self.assertEqual(job.kwargs["webhook"], Webhook.objects.get(type_update=True))
self.assertEqual(job.kwargs["event"], ObjectChangeAction.UPDATE)
self.assertEqual(job.kwargs["model_name"], "autonomoussystem")
self.assertEqual(job.kwargs["data"]["id"], asn.pk)
self.assertEqual(len(job.kwargs["data"]["tags"]), len(response.data["tags"]))
self.assertEqual(job.kwargs["snapshots"]["prechange"]["name"], "AS 1")
self.assertEqual(
sorted(job.kwargs["snapshots"]["prechange"]["tags"]), ["Bar", "Foo"]
)
self.assertEqual(job.kwargs["snapshots"]["postchange"]["name"], "My AS")
self.assertEqual(sorted(job.kwargs["snapshots"]["postchange"]["tags"]), ["Baz"])
def test_enqueue_webhook_bulk_update(self):
asns = (
AutonomousSystem(asn=64500, name="AS 1"),
AutonomousSystem(asn=64501, name="AS 2"),
AutonomousSystem(asn=64502, name="AS 3"),
)
AutonomousSystem.objects.bulk_create(asns)
for asn in asns:
asn.tags.set(Tag.objects.filter(name__in=["Foo", "Bar"]))
# Update three objects via the REST API
data = [
{"id": asns[0].pk, "name": "ASN 1", "tags": [{"name": "Baz"}]},
{"id": asns[1].pk, "name": "ASN 2", "tags": [{"name": "Baz"}]},
{"id": asns[2].pk, "name": "ASN 3", "tags": [{"name": "Baz"}]},
]
url = reverse("peering-api:autonomoussystem-list")
response = self.client.patch(url, data, format="json", **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
# Verify that a job was queued for the object update webhook
self.assertEqual(self.queue.count, 3)
for i, job in enumerate(self.queue.jobs):
self.assertEqual(
job.kwargs["webhook"], Webhook.objects.get(type_update=True)
)
self.assertEqual(job.kwargs["event"], ObjectChangeAction.UPDATE)
self.assertEqual(job.kwargs["model_name"], "autonomoussystem")
self.assertEqual(job.kwargs["data"]["id"], data[i]["id"])
self.assertEqual(
len(job.kwargs["data"]["tags"]), len(response.data[i]["tags"])
)
self.assertEqual(job.kwargs["snapshots"]["prechange"]["name"], asns[i].name)
self.assertEqual(
sorted(job.kwargs["snapshots"]["prechange"]["tags"]), ["Bar", "Foo"]
)
self.assertEqual(
job.kwargs["snapshots"]["postchange"]["name"], response.data[i]["name"]
)
self.assertEqual(
sorted(job.kwargs["snapshots"]["postchange"]["tags"]), ["Baz"]
)
def test_enqueue_webhook_delete(self):
asn = AutonomousSystem.objects.create(asn=64500, name="AS 1")
asn.tags.set(Tag.objects.filter(name__in=["Foo", "Bar"]))
# Delete an object via the REST API
url = reverse("peering-api:autonomoussystem-detail", kwargs={"pk": asn.pk})
response = self.client.delete(url, **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
# Verify that a job was queued for the object update webhook
self.assertEqual(self.queue.count, 1)
job = self.queue.jobs[0]
self.assertEqual(job.kwargs["webhook"], Webhook.objects.get(type_delete=True))
self.assertEqual(job.kwargs["event"], ObjectChangeAction.DELETE)
self.assertEqual(job.kwargs["model_name"], "autonomoussystem")
self.assertEqual(job.kwargs["data"]["id"], asn.pk)
self.assertEqual(job.kwargs["snapshots"]["prechange"]["name"], "AS 1")
self.assertEqual(
sorted(job.kwargs["snapshots"]["prechange"]["tags"]), ["Bar", "Foo"]
)
def test_enqueue_webhook_bulk_delete(self):
asns = (
AutonomousSystem(asn=64500, name="AS 1"),
AutonomousSystem(asn=64501, name="AS 2"),
AutonomousSystem(asn=64502, name="AS 3"),
)
AutonomousSystem.objects.bulk_create(asns)
for asn in asns:
asn.tags.set(Tag.objects.filter(name__in=["Foo", "Bar"]))
# Delete three objects via the REST API
data = [{"id": asn.pk} for asn in asns]
url = reverse("peering-api:autonomoussystem-list")
response = self.client.delete(url, data, format="json", **self.header)
self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
# Verify that a job was queued for the object update webhook
self.assertEqual(self.queue.count, 3)
for i, job in enumerate(self.queue.jobs):
self.assertEqual(
job.kwargs["webhook"], Webhook.objects.get(type_delete=True)
)
self.assertEqual(job.kwargs["event"], ObjectChangeAction.DELETE)
self.assertEqual(job.kwargs["model_name"], "autonomoussystem")
self.assertEqual(job.kwargs["data"]["id"], asns[i].pk)
self.assertEqual(job.kwargs["snapshots"]["prechange"]["name"], asns[i].name)
self.assertEqual(
sorted(job.kwargs["snapshots"]["prechange"]["tags"]), ["Bar", "Foo"]
)
def test_worker(self):
request_id = uuid.uuid4()
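        # mock_send() closes over `job`, which is assigned below once the
        # webhook has been enqueued; it is only invoked when process_webhook()
        # runs, so the reference resolves by then.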
def mock_send(_, request, **kwargs):
"""
Mocked implementation of Session.send() that always returns a 200 HTTP
status code.
"""
webhook = Webhook.objects.get(type_create=True)
signature = generate_signature(request.body, webhook.secret)
# Validate the outgoing request headers
self.assertEqual(request.headers["Content-Type"], webhook.http_content_type)
self.assertEqual(request.headers["X-Hook-Signature"], signature)
# Validate the outgoing request body
body = json.loads(request.body)
self.assertEqual(body["event"], ObjectChangeAction.CREATE)
self.assertEqual(body["timestamp"], job.kwargs["timestamp"])
self.assertEqual(body["model"], "autonomoussystem")
self.assertEqual(body["username"], "testuser")
self.assertEqual(body["request_id"], str(request_id))
self.assertEqual(body["data"]["name"], "AS 1")
return HttpResponse()
# Enqueue a webhook for processing
webhooks_queue = []
asn = AutonomousSystem.objects.create(asn=64500, name="AS 1")
enqueue_object(
webhooks_queue,
instance=asn,
user=self.user,
request_id=request_id,
action=ObjectChangeAction.CREATE,
)
flush_webhooks(webhooks_queue)
# Retrieve the job from queue
job = self.queue.jobs[0]
        # Patch the Session object with our mock_send() method, then process the webhook for sending
with patch.object(Session, "send", mock_send) as mock_send:
process_webhook(**job.kwargs)
|
|
from __future__ import absolute_import
import re
from cStringIO import StringIO
from datetime import datetime, date, timedelta
from psycopg2.extensions import QuotedString, Binary, AsIs
class PostgresWriter(object):
"""Base class for :py:class:`mysql2pgsql.lib.postgres_file_writer.PostgresFileWriter`
and :py:class:`mysql2pgsql.lib.postgres_db_writer.PostgresDbWriter`.
"""
def __init__(self):
self.column_types = {}
def column_description(self, column):
return '"%s" %s' % (column['name'], self.column_type_info(column))
def column_type(self, column):
hash_key = hash(frozenset(column.items()))
self.column_types[hash_key] = self.column_type_info(column).split(" ")[0]
return self.column_types[hash_key]
def column_type_info(self, column):
"""
"""
if column.get('auto_increment', None):
return 'integer DEFAULT nextval(\'%s_%s_seq\'::regclass) NOT NULL' % (
column['table_name'], column['name'])
null = "" if column['null'] else " NOT NULL"
def get_type(column):
"""This in conjunction with :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader._convert_type`
determines the PostgreSQL data type. In my opinion this is way too fugly, will need
to refactor one day.
"""
            t = lambda v: v is not None
default = (' DEFAULT %s' % QuotedString(column['default']).getquoted()) if t(column['default']) else None
if column['type'] == 'char':
default = ('%s::char' % default) if t(default) else None
return default, 'character(%s)' % column['length']
elif column['type'] == 'varchar':
default = ('%s::character varying' % default) if t(default) else None
return default, 'character varying(%s)' % column['length']
elif column['type'] == 'integer':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'integer'
elif column['type'] == 'bigint':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'bigint'
elif column['type'] == 'tinyint':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'smallint'
elif column['type'] == 'boolean':
default = (" DEFAULT %s" % ('true' if int(column['default']) == 1 else 'false')) if t(default) else None
return default, 'boolean'
elif column['type'] == 'float':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'real'
elif column['type'] == 'float unsigned':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'real'
elif column['type'] in ('numeric', 'decimal'):
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'numeric(%s, %s)' % (column['length'] or 20, column['decimals'] or 0)
elif column['type'] == 'double precision':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'double precision'
elif column['type'] == 'datetime':
default = None
return default, 'timestamp without time zone'
elif column['type'] == 'date':
default = None
return default, 'date'
            elif column['type'] == 'timestamp':
                # Guard against NULL defaults before doing substring checks.
                if column['default'] is None:
                    default = None
                elif "CURRENT_TIMESTAMP" in column['default']:
                    default = ' DEFAULT CURRENT_TIMESTAMP'
                elif "0000-00-00 00:00:00" in column['default']:
                    default = " DEFAULT '1970-01-01 00:00:00'"
                elif "0000-00-00 00:00" in column['default']:
                    default = " DEFAULT '1970-01-01 00:00'"
                return default, 'timestamp without time zone'
elif column['type'] == 'time':
default = " DEFAULT NOW()" if t(default) else None
return default, 'time without time zone'
elif 'blob' in column['type'] or 'binary' in column['type']:
return default, 'bytea'
elif column['type'] in ('tinytext', 'mediumtext', 'longtext', 'text'):
return default, 'text'
elif re.search(r'^enum', column['type']):
default = (' %s::character varying' % default) if t(default) else None
enum = re.sub(r'enum|\(|\)', '', column['type'])
max_enum_size = max([(len(e) - 2) for e in enum.split(',')])
return default, ' character varying(%s) check(%s in (%s))' % (max_enum_size, column['name'], enum)
elif 'bit(' in column['type']:
return ' DEFAULT %s' % column['default'].upper() if column['default'] else column['default'], 'varbit(%s)' % re.search(r'\((\d+)\)', column['type']).group(1)
elif 'set(' in column['type']:
if default:
default = ' DEFAULT ARRAY[%s]::text[]' % ','.join(QuotedString(v).getquoted() for v in re.search(r"'(.*)'", default).group(1).split(','))
return default, 'text[]'
else:
raise Exception('unknown %s' % column['type'])
default, column_type = get_type(column)
        return '%s%s%s' % (column_type, (default if default is not None else ''), null)
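    # Illustrative example (comment only, not executed): a column description
    # such as
    #   {'name': 'status', 'table_name': 'orders', 'type': 'varchar',
    #    'length': 32, 'null': False, 'default': 'new', 'auto_increment': None}
    # is rendered by column_type_info() as
    #   character varying(32) DEFAULT 'new'::character varying NOT NULL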
def process_row(self, table, row):
"""Examines row data from MySQL and alters
the values when necessary to be compatible with
sending to PostgreSQL via the copy command
"""
for index, column in enumerate(table.columns):
hash_key = hash(frozenset(column.items()))
column_type = self.column_types[hash_key] if hash_key in self.column_types else self.column_type(column)
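            # Untyped NULLs become the literal \N marker that PostgreSQL's
            # COPY understands; NULL timestamps in columns with a default
            # fall back to the epoch below.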
            if row[index] is None and ('timestamp' not in column_type or not column['default']):
                row[index] = r'\N'
            elif row[index] is None and column['default']:
row[index] = '1970-01-01 00:00:00'
elif 'bit' in column_type:
row[index] = bin(ord(row[index]))[2:]
            elif isinstance(row[index], basestring):
if column_type == 'bytea':
row[index] = Binary(row[index]).getquoted()[1:-8] if row[index] else row[index]
elif 'text[' in column_type:
row[index] = '{%s}' % ','.join('"%s"' % v.replace('"', r'\"') for v in row[index].split(','))
else:
row[index] = row[index].replace('\\', r'\\').replace('\n', r'\n').replace('\t', r'\t').replace('\r', r'\r').replace('\0', '')
elif column_type == 'boolean':
# We got here because you used a tinyint(1), if you didn't want a bool, don't use that type
row[index] = 't' if row[index] not in (None, 0) else 'f' if row[index] == 0 else row[index]
elif isinstance(row[index], (date, datetime)):
row[index] = row[index].isoformat()
elif isinstance(row[index], timedelta):
row[index] = datetime.utcfromtimestamp(row[index].total_seconds()).time().isoformat()
else:
row[index] = AsIs(row[index]).getquoted()
def table_attributes(self, table):
primary_keys = []
serial_key = None
maxval = None
columns = StringIO()
for column in table.columns:
if column['auto_increment']:
serial_key = column['name']
maxval = 1 if column['maxval'] < 1 else column['maxval'] + 1
if column['primary_key']:
primary_keys.append(column['name'])
columns.write(' %s,\n' % self.column_description(column))
return primary_keys, serial_key, maxval, columns.getvalue()[:-2]
def truncate(self, table):
serial_key = None
maxval = None
for column in table.columns:
if column['auto_increment']:
serial_key = column['name']
maxval = 1 if column['maxval'] < 1 else column['maxval'] + 1
truncate_sql = 'TRUNCATE "%s" CASCADE;' % table.name
serial_key_sql = None
if serial_key:
serial_key_sql = "SELECT pg_catalog.setval(pg_get_serial_sequence(%(table_name)s, %(serial_key)s), %(maxval)s, true);" % {
'table_name': QuotedString(table.name).getquoted(),
'serial_key': QuotedString(serial_key).getquoted(),
'maxval': maxval}
return (truncate_sql, serial_key_sql)
def write_table(self, table):
primary_keys, serial_key, maxval, columns = self.table_attributes(table)
serial_key_sql = []
table_sql = []
if serial_key:
serial_key_seq = '%s_%s_seq' % (table.name, serial_key)
serial_key_sql.append('DROP SEQUENCE IF EXISTS %s CASCADE;' % serial_key_seq)
serial_key_sql.append("""CREATE SEQUENCE %s INCREMENT BY 1
NO MAXVALUE NO MINVALUE CACHE 1;""" % serial_key_seq)
serial_key_sql.append('SELECT pg_catalog.setval(%s, %s, true);' % (QuotedString(serial_key_seq).getquoted(), maxval))
table_sql.append('DROP TABLE IF EXISTS "%s" CASCADE;' % table.name)
table_sql.append('CREATE TABLE "%s" (\n%s\n)\nWITHOUT OIDS;' % (table.name, columns))
return (table_sql, serial_key_sql)
def write_indexes(self, table):
index_sql = []
primary_index = [idx for idx in table.indexes if idx.get('primary', None)]
if primary_index:
index_sql.append('ALTER TABLE "%(table_name)s" ADD CONSTRAINT "%(index_name)s_pkey" PRIMARY KEY(%(column_names)s);' % {
'table_name': table.name,
'index_name': '%s_%s' % (table.name, '_'.join(re.sub('[\W]+', '', c) for c in primary_index[0]['columns'])),
'column_names': ', '.join('%s' % col for col in primary_index[0]['columns']),
})
for index in table.indexes:
if 'primary' in index:
continue
unique = 'UNIQUE ' if index.get('unique', None) else ''
index_name = '%s_%s' % (table.name, '_'.join(index['columns']))
index_sql.append('DROP INDEX IF EXISTS "%s" CASCADE;' % index_name)
index_sql.append('CREATE %(unique)sINDEX "%(index_name)s" ON "%(table_name)s" (%(column_names)s);' % {
'unique': unique,
'index_name': index_name,
'table_name': table.name,
'column_names': ', '.join('"%s"' % col for col in index['columns']),
})
return index_sql
def write_constraints(self, table):
constraint_sql = []
for key in table.foreign_keys:
constraint_sql.append("""ALTER TABLE "%(table_name)s" ADD FOREIGN KEY ("%(column_name)s")
REFERENCES "%(ref_table_name)s"(%(ref_column_name)s);""" % {
'table_name': table.name,
'column_name': key['column'],
'ref_table_name': key['ref_table'],
'ref_column_name': key['ref_column']})
return constraint_sql
def close(self):
raise NotImplementedError
def write_contents(self, table, reader):
raise NotImplementedError
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.graph_util."""
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# Utility device function to use for testing
def TestDeviceFuncPinVariableToCpu(op):
if op.device:
return op.device
return "/cpu:0" if op.node_def.op in ["Variable", "VariableV2"] else op.device
class GraphUtilTest(test.TestCase):
def testTwoDeviceFunctions(self):
with ops.Graph().as_default() as g:
var_0 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_0",
container="",
shared_name="")
with g.device(TestDeviceFuncPinVariableToCpu):
var_1 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_1",
container="",
shared_name="")
var_2 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_2",
container="",
shared_name="")
var_3 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_3",
container="",
shared_name="")
with g.device(TestDeviceFuncPinVariableToCpu):
var_4 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_4",
container="",
shared_name="")
with g.device("/device:GPU:0"):
var_5 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_5",
container="",
shared_name="")
var_6 = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="var_6",
container="",
shared_name="")
self.assertDeviceEqual(var_0.device, None)
self.assertDeviceEqual(var_1.device, "/device:CPU:0")
self.assertDeviceEqual(var_2.device, None)
self.assertDeviceEqual(var_3.device, None)
self.assertDeviceEqual(var_4.device, "/device:CPU:0")
self.assertDeviceEqual(var_5.device, "/device:GPU:0")
self.assertDeviceEqual(var_6.device, "/device:CPU:0")
@test_util.run_v1_only("b/120545219")
def testNestedDeviceFunctions(self):
with ops.Graph().as_default():
var_0 = variables.VariableV1(0)
with ops.device(TestDeviceFuncPinVariableToCpu):
var_1 = variables.VariableV1(1)
with ops.device(lambda op: "/device:GPU:0"):
var_2 = variables.VariableV1(2)
with ops.device("/device:GPU:0"): # Implicit merging device function.
var_3 = variables.VariableV1(3)
self.assertDeviceEqual(var_0.device, None)
self.assertDeviceEqual(var_1.device, "/device:CPU:0")
self.assertDeviceEqual(var_2.device, "/device:GPU:0")
self.assertDeviceEqual(var_3.device, "/device:GPU:0")
def testExplicitDevice(self):
with ops.Graph().as_default() as g:
const_0 = constant_op.constant(5.0)
with g.device("/device:GPU:0"):
const_1 = constant_op.constant(5.0)
with g.device("/device:GPU:1"):
const_2 = constant_op.constant(5.0)
with g.device("/device:CPU:0"):
const_3 = constant_op.constant(5.0)
with g.device("/device:CPU:1"):
const_4 = constant_op.constant(5.0)
with g.device("/job:ps"):
const_5 = constant_op.constant(5.0)
self.assertDeviceEqual(const_0.device, None)
self.assertDeviceEqual(const_1.device, "/device:GPU:0")
self.assertDeviceEqual(const_2.device, "/device:GPU:1")
self.assertDeviceEqual(const_3.device, "/device:CPU:0")
self.assertDeviceEqual(const_4.device, "/device:CPU:1")
self.assertDeviceEqual(const_5.device, "/job:ps")
def testDefaultDevice(self):
with ops.Graph().as_default() as g, g.device(
TestDeviceFuncPinVariableToCpu):
with g.device("/job:ps"):
const_0 = constant_op.constant(5.0)
with g.device("/device:GPU:0"):
const_1 = constant_op.constant(5.0)
with g.device("/device:GPU:1"):
const_2 = constant_op.constant(5.0)
with g.device("/device:CPU:0"):
const_3 = constant_op.constant(5.0)
with g.device("/device:CPU:1"):
const_4 = constant_op.constant(5.0)
with g.device("/replica:0"):
const_5 = constant_op.constant(5.0)
self.assertDeviceEqual(const_0.device, "/job:ps")
self.assertDeviceEqual(const_1.device, "/device:GPU:0")
self.assertDeviceEqual(const_2.device, "/device:GPU:1")
self.assertDeviceEqual(const_3.device, "/device:CPU:0")
self.assertDeviceEqual(const_4.device, "/device:CPU:1")
self.assertDeviceEqual(const_5.device, "/replica:0")
def testExtractSubGraph(self):
graph_def = graph_pb2.GraphDef()
n1 = graph_def.node.add()
n1.name = "n1"
n1.input.extend(["n5"])
n2 = graph_def.node.add()
n2.name = "n2"
# Take the first output of the n1 node as the input.
n2.input.extend(["n1:0"])
n3 = graph_def.node.add()
n3.name = "n3"
# Add a control input (which isn't really needed by the kernel, but
# rather to enforce execution order between nodes).
n3.input.extend(["^n2"])
n4 = graph_def.node.add()
n4.name = "n4"
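    # n3 does not depend on n4 (directly or transitively), so n4 should be
    # pruned from the extracted sub-graph (see the assertions below).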
    # It is fine to have loops in the graph as well.
n5 = graph_def.node.add()
n5.name = "n5"
n5.input.extend(["n1"])
sub_graph = graph_util.extract_sub_graph(graph_def, ["n3"])
self.assertEqual("n1", sub_graph.node[0].name)
self.assertEqual("n2", sub_graph.node[1].name)
self.assertEqual("n3", sub_graph.node[2].name)
self.assertEqual("n5", sub_graph.node[3].name)
def testExtractSubGraphWithInvalidDestNodes(self):
graph_def = graph_pb2.GraphDef()
n1 = graph_def.node.add()
n1.name = "n1"
with self.assertRaisesRegex(TypeError, "must be an iterable"):
graph_util.extract_sub_graph(graph_def, "n1")
def create_node_def(self, op, name, inputs):
new_node = node_def_pb2.NodeDef()
new_node.op = op
new_node.name = name
new_node.input.extend(inputs)
return new_node
def create_constant_node_def(self,
name,
value,
dtype,
shape=None,
inputs=None):
node = self.create_node_def("Const", name, inputs or [])
self.set_attr_dtype(node, "dtype", dtype)
self.set_attr_tensor(node, "value", value, dtype, shape)
return node
def set_attr_dtype(self, node, key, value):
node.attr[key].CopyFrom(
attr_value_pb2.AttrValue(type=value.as_datatype_enum))
def set_attr_tensor(self, node, key, value, dtype, shape=None):
node.attr[key].CopyFrom(
attr_value_pb2.AttrValue(
tensor=tensor_util.make_tensor_proto(
value, dtype=dtype, shape=shape)))
def testRemoveTrainingNodes(self):
a_constant_name = "a_constant"
b_constant_name = "b_constant"
a_check_name = "a_check"
b_check_name = "b_check"
a_identity_name = "a_identity"
b_identity_name = "b_identity"
add_name = "add"
graph_def = graph_pb2.GraphDef()
a_constant = self.create_constant_node_def(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant])
a_check_node = self.create_node_def("CheckNumerics", a_check_name,
[a_constant_name])
graph_def.node.extend([a_check_node])
a_identity_node = self.create_node_def(
"Identity", a_identity_name, [a_constant_name, "^" + a_check_name])
graph_def.node.extend([a_identity_node])
b_constant = self.create_constant_node_def(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant])
b_check_node = self.create_node_def("CheckNumerics", b_check_name,
[b_constant_name])
graph_def.node.extend([b_check_node])
b_identity_node = self.create_node_def(
"Identity", b_identity_name, [b_constant_name, "^" + b_check_name])
graph_def.node.extend([b_identity_node])
add_node = self.create_node_def("Add", add_name,
[a_identity_name, b_identity_name])
self.set_attr_dtype(add_node, "T", dtypes.float32)
graph_def.node.extend([add_node])
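    # remove_training_nodes() should drop the CheckNumerics nodes and bypass
    # the Identity nodes, so the expected graph feeds the constants directly
    # into the Add op.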
expected_output = graph_pb2.GraphDef()
a_constant = self.create_constant_node_def(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant])
b_constant = self.create_constant_node_def(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant])
add_node = self.create_node_def("Add", add_name,
[a_constant_name, b_constant_name])
self.set_attr_dtype(add_node, "T", dtypes.float32)
expected_output.node.extend([add_node])
output = graph_util.remove_training_nodes(graph_def)
self.assertProtoEquals(expected_output, output)
def testRemoveIdentityChains(self):
"""Check that chains of Identity nodes are correctly pruned.
Create a chain of four nodes, A, B, C, and D where A inputs B, B inputs C,
and C inputs D. Nodes B and C are "Identity" and should be pruned, resulting
in the nodes A and D, where A inputs D.
"""
graph_def = graph_pb2.GraphDef()
graph_def.node.extend([
self.create_node_def("Aop", "A", ["B"]),
self.create_node_def("Identity", "B", ["C"]),
self.create_node_def("Identity", "C", ["D"]),
self.create_node_def("Dop", "D", [])
])
expected_graph_def = graph_pb2.GraphDef()
expected_graph_def.node.extend([
self.create_node_def("Aop", "A", ["D"]),
self.create_node_def("Dop", "D", [])
])
self.assertProtoEquals(expected_graph_def,
graph_util.remove_training_nodes(graph_def))
def testRemoveIdentityUsedAsControlInputInConst(self):
"""Check that Identity nodes used as control inputs are not removed."""
graph_def = graph_pb2.GraphDef()
graph_def.node.extend([
self.create_constant_node_def("C", 1, dtypes.float32, inputs=["^I"]),
self.create_node_def("Identity", "I", ["Base"]),
self.create_node_def("BaseOp", "Base", [])
])
self.assertProtoEquals(graph_def,
graph_util.remove_training_nodes(graph_def))
def testSimpleGraphdefsCompareEqual(self):
graph_def1 = graph_pb2.GraphDef()
graph_def1.node.extend([
self.create_constant_node_def("C", 1, dtypes.float32, inputs=["^I"]),
self.create_node_def("Identity", "I", ["Base"]),
self.create_node_def("BaseOp", "Base", [])
])
graph_def2 = graph_pb2.GraphDef()
graph_def2.node.extend([
self.create_constant_node_def("C", 1, dtypes.float32, inputs=["^I"]),
self.create_node_def("Identity", "I", ["Base"]),
self.create_node_def("BaseOp", "Base", [])
])
self.assertTrue(graph_util.graph_defs_equal(graph_def1, graph_def2))
def testNodeDefsInDifferentOrderCompareEqual(self):
graph_def1 = graph_pb2.GraphDef()
graph_def1.node.extend([
self.create_node_def("Identity", "I", ["Base"]),
self.create_node_def("BaseOp", "Base", []),
self.create_constant_node_def("C", 1, dtypes.float32, inputs=["^I"]),
])
graph_def2 = graph_pb2.GraphDef()
graph_def2.node.extend([
self.create_constant_node_def("C", 1, dtypes.float32, inputs=["^I"]),
self.create_node_def("Identity", "I", ["Base"]),
self.create_node_def("BaseOp", "Base", [])
])
self.assertTrue(graph_util.graph_defs_equal(graph_def1, graph_def2))
def testDifferentGraphDefsCompareNotEqual(self):
graph_def1 = graph_pb2.GraphDef()
graph_def1.node.extend([
self.create_constant_node_def("C", 1, dtypes.float32, inputs=["^I"]),
self.create_node_def("Identity", "I", ["Base"]),
self.create_node_def("BaseOp", "Base", [])
])
graph_def2 = graph_pb2.GraphDef()
graph_def2.node.extend([
self.create_constant_node_def("C", 2, dtypes.float32, inputs=["^I"]),
self.create_node_def("Identity", "I", ["Base"]),
self.create_node_def("BaseOp", "Base", [])
])
self.assertFalse(graph_util.graph_defs_equal(graph_def1, graph_def2))
def testGraphdefsWithNanCompareNonEqual(self):
graph_def1 = graph_pb2.GraphDef()
graph_def1.node.extend([
self.create_constant_node_def(
"C", float("nan"), dtypes.float32, inputs=["^I"]),
self.create_node_def("Identity", "I", ["Base"]),
self.create_node_def("BaseOp", "Base", [])
])
graph_def2 = graph_pb2.GraphDef()
graph_def2.node.extend([
self.create_constant_node_def(
"C", float("nan"), dtypes.float32, inputs=["^I"]),
self.create_node_def("Identity", "I", ["Base"]),
self.create_node_def("BaseOp", "Base", [])
])
self.assertFalse(graph_util.graph_defs_equal(graph_def1, graph_def2))
def testSimpleGraphdefEqualityWithNansEqual(self):
graph_def1 = graph_pb2.GraphDef()
graph_def1.node.extend([
self.create_constant_node_def(
"C", float("nan"), dtypes.float32, inputs=["^I"]),
self.create_node_def("Identity", "I", ["Base"]),
self.create_node_def("BaseOp", "Base", [])
])
graph_def2 = graph_pb2.GraphDef()
graph_def2.node.extend([
self.create_constant_node_def(
"C", float("nan"), dtypes.float32, inputs=["^I"]),
self.create_node_def("Identity", "I", ["Base"]),
self.create_node_def("BaseOp", "Base", [])
])
self.assertTrue(
graph_util.graph_defs_equal(
graph_def1, graph_def2, treat_nan_as_equal=True))
def testGraphDefsWithFunctionLibsCompareEqual(self):
@function.Defun(dtypes.float32)
def F1(x):
return math_ops.exp(x) - math_ops.exp(-x)
library = function_pb2.FunctionDefLibrary()
library.function.extend([F1.definition])
graph_def1 = graph_pb2.GraphDef()
graph_def1.library.CopyFrom(library)
graph_def2 = graph_pb2.GraphDef()
graph_def2.library.CopyFrom(library)
self.assertTrue(graph_util.graph_defs_equal(graph_def1, graph_def2))
def testGraphDefsWithPermutedFunctionsCompareEqual(self):
@function.Defun(dtypes.float32)
def F1(x):
return math_ops.exp(x) - math_ops.exp(-x)
@function.Defun(dtypes.float32)
def F2(x):
return math_ops.exp(x)
definition_1 = F1.definition
definition_2 = F2.definition
library = function_pb2.FunctionDefLibrary()
library.function.extend([definition_1, definition_2])
graph_def1 = graph_pb2.GraphDef()
graph_def1.library.CopyFrom(library)
reversed_library = function_pb2.FunctionDefLibrary()
reversed_library.function.extend([definition_2, definition_1])
graph_def2 = graph_pb2.GraphDef()
graph_def2.library.CopyFrom(reversed_library)
self.assertTrue(graph_util.graph_defs_equal(graph_def1, graph_def2))
def testGraphDefsWithPermutedNodesInFunctionsCompareEqual(self):
@function.Defun(dtypes.float32)
def F1(x):
return math_ops.exp(x) - math_ops.exp(-x)
f1_def = F1.definition
library = function_pb2.FunctionDefLibrary()
library.function.extend([f1_def])
graph_def1 = graph_pb2.GraphDef()
graph_def1.library.CopyFrom(library)
reversed_function = function_pb2.FunctionDef()
reversed_function.CopyFrom(f1_def)
# Clear the node_def attribute.
del reversed_function.node_def[:]
reversed_function.node_def.extend(reversed(f1_def.node_def))
reversed_library = function_pb2.FunctionDefLibrary()
reversed_library.function.extend([reversed_function])
graph_def2 = graph_pb2.GraphDef()
graph_def2.library.CopyFrom(reversed_library)
self.assertTrue(graph_util.graph_defs_equal(graph_def1, graph_def2))
if __name__ == "__main__":
test.main()
|
|
# -*- coding: utf-8 -*-
# Copyright 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import flask
import json
import logging
import re
import urllib
import rfc822
from google.appengine.api import mail
from google.appengine.api import urlfetch
from google.appengine.ext.webapp.mail_handlers import BounceNotification
import settings
app = flask.Flask(__name__)
# Parsing very large messages could cause out-of-memory errors.
MAX_BODY_SIZE = 100 * 1024
def require_task_header():
"""Abort if this is not a Google Cloud Tasks request."""
if settings.UNIT_TEST_MODE or settings.DEV_MODE:
return
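  # App Engine strips X-AppEngine-* headers from external requests, so the
  # presence of X-AppEngine-QueueName authenticates the request as coming
  # from Cloud Tasks.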
if 'X-AppEngine-QueueName' not in flask.request.headers:
flask.abort(403, msg='Lacking X-AppEngine-QueueName header')
def get_param(request, name, required=True):
"""Get the specified JSON parameter."""
json_body = request.get_json(force=True)
val = json_body.get(name)
if required and not val:
flask.abort(400, msg='Missing parameter %r' % name)
return val
@app.route('/py2')
def py2_health_check():
"""Prove that this GAE module is responding."""
return {'message': 'OK py2'}
@app.route('/tasks/outbound-email', methods=['POST'])
def handle_outbound_mail_task():
"""Task to send a notification email to one recipient."""
require_task_header()
to = get_param(flask.request, 'to')
from_user = get_param(flask.request, 'from_user', required=False)
subject = get_param(flask.request, 'subject')
email_html = get_param(flask.request, 'html')
references = get_param(flask.request, 'references', required=False)
if settings.SEND_ALL_EMAIL_TO and to != settings.REVIEW_COMMENT_MAILING_LIST:
to_user, to_domain = to.split('@')
to = settings.SEND_ALL_EMAIL_TO % {'user': to_user, 'domain': to_domain}
sender = 'Chromestatus <admin@%s.appspotmail.com>' % settings.APP_ID
if from_user:
sender = '%s via Chromestatus <admin+%s@%s.appspotmail.com>' % (
from_user, from_user, settings.APP_ID)
message = mail.EmailMessage(
sender=sender, to=to, subject=subject, html=email_html)
message.check_initialized()
if references:
message.headers = {
'References': references,
'In-Reply-To': references,
}
logging.info('Will send the following email:\n')
logging.info('Sender: %s', message.sender)
logging.info('To: %s', message.to)
logging.info('Subject: %s', message.subject)
logging.info('References: %s', references or '(not included)')
logging.info('In-Reply-To: %s', references or '(not included)')
logging.info('Body:\n%s', message.html[:settings.MAX_LOG_LINE])
if settings.SEND_EMAIL:
message.send()
logging.info('Email sent')
else:
logging.info('Email not sent because of settings.SEND_EMAIL')
return {'message': 'Done'}
BAD_WRAP_RE = re.compile('=\r\n')
BAD_EQ_RE = re.compile('=3D')
IS_INTERNAL_HANDLER = True
# For docs on AppEngine's bounce email handling, see:
# https://cloud.google.com/appengine/docs/python/mail/bounce
# Source code is in file:
# google_appengine/google/appengine/ext/webapp/mail_handlers.py
@app.route('/_ah/bounce', methods=['POST'])
def handle_bounce():
"""Handler to notice when email to given user is bouncing."""
receive(BounceNotification(flask.request.form))
return {'message': 'Done'}
def receive(bounce_message):
email_addr = bounce_message.original.get('to')
subject = 'Mail to %r bounced' % email_addr
logging.info(subject)
# TODO(jrobbins): Re-implement this without depending on models.
# Instead create a task and then have that processed in py3.
# pref_list = models.UserPref.get_prefs_for_emails([email_addr])
# user_pref = pref_list[0]
# user_pref.bounced = True
# user_pref.put()
# Escalate to someone who might do something about it, e.g.
# find a new owner for a component.
body = ('The following message bounced.\n'
'=================\n'
'From: {from}\n'
'To: {to}\n'
'Subject: {subject}\n\n'
'{text}\n'.format(**bounce_message.original))
logging.info(body)
message = mail.EmailMessage(
sender='Chromestatus <admin@%s.appspotmail.com>' % settings.APP_ID,
to=settings.BOUNCE_ESCALATION_ADDR, subject=subject, body=body)
message.check_initialized()
if settings.SEND_EMAIL:
message.send()
def _extract_addrs(header_value):
"""Given a message header value, return email address found there."""
friendly_addr_pairs = list(rfc822.AddressList(header_value))
return [addr for _friendly, addr in friendly_addr_pairs]
def call_py3_task_handler(handler_path, task_dict):
"""Request that our py3 code handle the rest of the work."""
handler_host = 'http://localhost:8080'
if settings.APP_ID == 'cr-status':
handler_host = 'https://cr-status.appspot.com'
if settings.APP_ID == 'cr-status-staging':
handler_host = 'https://cr-status-staging.appspot.com'
handler_url = handler_host + handler_path
request_body = json.dumps(task_dict).encode()
logging.info('task_dict is %r', task_dict)
# AppEngine automatically sets header X-Appengine-Inbound-Appid,
# and that header is stripped from external requests. So,
# require_task_header() can check for it to authenticate.
handler_response = urlfetch.fetch(
url=handler_url, payload=request_body, method=urlfetch.POST,
follow_redirects=False)
logging.info('request_response is %r:\n%r',
handler_response.status_code, handler_response.content)
return handler_response
def get_incoming_message():
"""Get an email message object from the request data."""
data = flask.request.get_data(as_text=True)
msg = mail.InboundEmailMessage(data).original
return msg
@app.route('/_ah/mail/<string:addr>', methods=['POST'])
def handle_incoming_mail(addr=None):
"""Handle an incoming email by making a task to examine it.
This code checks some basic properties of the incoming message
to make sure that it is worth examining. Then it puts all the
  relevant fields into a dict and makes a new Cloud Task which
  is further processed in Python 3 code.
"""
logging.info('Request Headers: %r', flask.request.headers)
logging.info('\n\n\nPOST for InboundEmail and addr is %r', addr)
if addr != settings.INBOUND_EMAIL_ADDR:
logging.info('Message not sent directly to our address')
return {'message': 'Wrong address'}
if flask.request.content_length > MAX_BODY_SIZE:
logging.info('Message too big, ignoring')
return {'message': 'Too big'}
msg = get_incoming_message()
precedence = msg.get('precedence', '')
if precedence.lower() in ['bulk', 'junk']:
logging.info('Precedence: %r indicates an autoresponder', precedence)
return {'message': 'Wrong precedence'}
from_addrs = (_extract_addrs(msg.get('x-original-from', '')) or
_extract_addrs(msg.get('from', '')))
if from_addrs:
from_addr = from_addrs[0]
else:
logging.info('could not parse from addr')
return {'message': 'Missing From'}
in_reply_to = msg.get('in-reply-to', '')
body = u''
for part in msg.walk():
# We only process plain text emails.
if part.get_content_type() == 'text/plain':
body = part.get_payload(decode=True)
if not isinstance(body, unicode):
body = body.decode('utf-8')
break # Only consider the first text part.
to_addr = urllib.unquote(addr)
subject = msg.get('subject', '')
task_dict = {
'to_addr': to_addr,
'from_addr': from_addr,
'subject': subject,
'in_reply_to': in_reply_to,
'body': body,
}
logging.info('task_dict is %r', task_dict)
response = call_py3_task_handler('/tasks/detect-intent', task_dict)
if response.status_code and response.status_code != 200:
logging.warning('Handoff to py3 failed.')
flask.abort(400)
return {'message': 'Done'}
|
|
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2013 Alessio Ababilov
# Copyright 2013 Grid Dynamics
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
OpenStack Client interface. Handles the REST calls and responses.
"""
# E0202: An attribute inherited from %s hide this method
# pylint: disable=E0202
import logging
import time
try:
import simplejson as json
except ImportError:
import json
import requests
from helloclient.openstack.common.apiclient import exceptions
from helloclient.openstack.common import importutils
_logger = logging.getLogger(__name__)
class HTTPClient(object):
"""This client handles sending HTTP requests to OpenStack servers.
Features:
- share authentication information between several clients to different
services (e.g., for compute and image clients);
- reissue authentication request for expired tokens;
- encode/decode JSON bodies;
- raise exceptions on HTTP errors;
- pluggable authentication;
- store authentication information in a keyring;
- store time spent for requests;
- register clients for particular services, so one can use
`http_client.identity` or `http_client.compute`;
- log requests and responses in a format that is easy to copy-and-paste
into terminal and send the same request with curl.
"""
user_agent = "helloclient.openstack.common.apiclient"
def __init__(self,
auth_plugin,
region_name=None,
endpoint_type="publicURL",
original_ip=None,
verify=True,
cert=None,
timeout=None,
timings=False,
keyring_saver=None,
debug=False,
user_agent=None,
http=None):
self.auth_plugin = auth_plugin
self.endpoint_type = endpoint_type
self.region_name = region_name
self.original_ip = original_ip
self.timeout = timeout
self.verify = verify
self.cert = cert
self.keyring_saver = keyring_saver
self.debug = debug
self.user_agent = user_agent or self.user_agent
self.times = [] # [("item", starttime, endtime), ...]
self.timings = timings
# requests within the same session can reuse TCP connections from pool
self.http = http or requests.Session()
self.cached_token = None
def _http_log_req(self, method, url, kwargs):
if not self.debug:
return
string_parts = [
"curl -i",
"-X '%s'" % method,
"'%s'" % url,
]
for element in kwargs['headers']:
header = "-H '%s: %s'" % (element, kwargs['headers'][element])
string_parts.append(header)
_logger.debug("REQ: %s" % " ".join(string_parts))
if 'data' in kwargs:
_logger.debug("REQ BODY: %s\n" % (kwargs['data']))
def _http_log_resp(self, resp):
if not self.debug:
return
_logger.debug(
"RESP: [%s] %s\n",
resp.status_code,
resp.headers)
if resp._content_consumed:
_logger.debug(
"RESP BODY: %s\n",
resp.text)
def serialize(self, kwargs):
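        """If a `json` kwarg is present, encode it into `data` in place,
        set the JSON Content-Type header, and drop the `json` key.
        """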
if kwargs.get('json') is not None:
kwargs['headers']['Content-Type'] = 'application/json'
kwargs['data'] = json.dumps(kwargs['json'])
try:
del kwargs['json']
except KeyError:
pass
def get_timings(self):
return self.times
def reset_timings(self):
self.times = []
def request(self, method, url, **kwargs):
"""Send an http request with the specified characteristics.
Wrapper around `requests.Session.request` to handle tasks such as
setting headers, JSON encoding/decoding, and error handling.
:param method: method of HTTP request
:param url: URL of HTTP request
:param kwargs: any other parameter that can be passed to
            requests.Session.request (such as `headers`) or `json`
that will be encoded as JSON and used as `data` argument
"""
kwargs.setdefault("headers", kwargs.get("headers", {}))
kwargs["headers"]["User-Agent"] = self.user_agent
if self.original_ip:
kwargs["headers"]["Forwarded"] = "for=%s;by=%s" % (
self.original_ip, self.user_agent)
if self.timeout is not None:
kwargs.setdefault("timeout", self.timeout)
kwargs.setdefault("verify", self.verify)
if self.cert is not None:
kwargs.setdefault("cert", self.cert)
self.serialize(kwargs)
self._http_log_req(method, url, kwargs)
if self.timings:
start_time = time.time()
resp = self.http.request(method, url, **kwargs)
if self.timings:
self.times.append(("%s %s" % (method, url),
start_time, time.time()))
self._http_log_resp(resp)
if resp.status_code >= 400:
_logger.debug(
"Request returned failure status: %s",
resp.status_code)
raise exceptions.from_response(resp, method, url)
return resp
@staticmethod
def concat_url(endpoint, url):
"""Concatenate endpoint and final URL.
E.g., "http://keystone/v2.0/" and "/tokens" are concatenated to
"http://keystone/v2.0/tokens".
:param endpoint: the base URL
:param url: the final URL
"""
return "%s/%s" % (endpoint.rstrip("/"), url.strip("/"))
def client_request(self, client, method, url, **kwargs):
"""Send an http request using `client`'s endpoint and specified `url`.
If request was rejected as unauthorized (possibly because the token is
expired), issue one authorization attempt and send the request once
again.
:param client: instance of BaseClient descendant
:param method: method of HTTP request
:param url: URL of HTTP request
:param kwargs: any other parameter that can be passed to
            `HTTPClient.request`
"""
filter_args = {
"endpoint_type": client.endpoint_type or self.endpoint_type,
"service_type": client.service_type,
}
token, endpoint = (self.cached_token, client.cached_endpoint)
just_authenticated = False
if not (token and endpoint):
try:
token, endpoint = self.auth_plugin.token_and_endpoint(
**filter_args)
except exceptions.EndpointException:
pass
if not (token and endpoint):
self.authenticate()
just_authenticated = True
token, endpoint = self.auth_plugin.token_and_endpoint(
**filter_args)
if not (token and endpoint):
raise exceptions.AuthorizationFailure(
"Cannot find endpoint or token for request")
old_token_endpoint = (token, endpoint)
kwargs.setdefault("headers", {})["X-Auth-Token"] = token
self.cached_token = token
client.cached_endpoint = endpoint
# Perform the request once. If we get Unauthorized, then it
# might be because the auth token expired, so try to
# re-authenticate and try again. If it still fails, bail.
try:
return self.request(
method, self.concat_url(endpoint, url), **kwargs)
except exceptions.Unauthorized as unauth_ex:
if just_authenticated:
raise
self.cached_token = None
client.cached_endpoint = None
self.authenticate()
try:
token, endpoint = self.auth_plugin.token_and_endpoint(
**filter_args)
except exceptions.EndpointException:
raise unauth_ex
if (not (token and endpoint) or
old_token_endpoint == (token, endpoint)):
raise unauth_ex
self.cached_token = token
client.cached_endpoint = endpoint
kwargs["headers"]["X-Auth-Token"] = token
return self.request(
method, self.concat_url(endpoint, url), **kwargs)
def add_client(self, base_client_instance):
"""Add a new instance of :class:`BaseClient` descendant.
`self` will store a reference to `base_client_instance`.
Example:
>>> def test_clients():
... from keystoneclient.auth import keystone
... from openstack.common.apiclient import client
... auth = keystone.KeystoneAuthPlugin(
... username="user", password="pass", tenant_name="tenant",
... auth_url="http://auth:5000/v2.0")
... openstack_client = client.HTTPClient(auth)
... # create nova client
... from novaclient.v1_1 import client
... client.Client(openstack_client)
... # create keystone client
... from keystoneclient.v2_0 import client
... client.Client(openstack_client)
... # use them
... openstack_client.identity.tenants.list()
... openstack_client.compute.servers.list()
"""
service_type = base_client_instance.service_type
if service_type and not hasattr(self, service_type):
setattr(self, service_type, base_client_instance)
def authenticate(self):
self.auth_plugin.authenticate(self)
# Store the authentication results in the keyring for later requests
if self.keyring_saver:
self.keyring_saver.save(self)
class BaseClient(object):
"""Top-level object to access the OpenStack API.
This client uses :class:`HTTPClient` to send requests. :class:`HTTPClient`
will handle a bunch of issues such as authentication.
"""
service_type = None
endpoint_type = None # "publicURL" will be used
cached_endpoint = None
def __init__(self, http_client, extensions=None):
self.http_client = http_client
http_client.add_client(self)
# Add in any extensions...
if extensions:
for extension in extensions:
if extension.manager_class:
setattr(self, extension.name,
extension.manager_class(self))
def client_request(self, method, url, **kwargs):
return self.http_client.client_request(
self, method, url, **kwargs)
def head(self, url, **kwargs):
return self.client_request("HEAD", url, **kwargs)
def get(self, url, **kwargs):
return self.client_request("GET", url, **kwargs)
def post(self, url, **kwargs):
return self.client_request("POST", url, **kwargs)
def put(self, url, **kwargs):
return self.client_request("PUT", url, **kwargs)
def delete(self, url, **kwargs):
return self.client_request("DELETE", url, **kwargs)
def patch(self, url, **kwargs):
return self.client_request("PATCH", url, **kwargs)
@staticmethod
def get_class(api_name, version, version_map):
"""Returns the client class for the requested API version
:param api_name: the name of the API, e.g. 'compute', 'image', etc
:param version: the requested API version
:param version_map: a dict of client classes keyed by version
:rtype: a client class for the requested API version
"""
try:
client_path = version_map[str(version)]
except (KeyError, ValueError):
msg = "Invalid %s client version '%s'. must be one of: %s" % (
(api_name, version, ', '.join(version_map.keys())))
raise exceptions.UnsupportedVersion(msg)
return importutils.import_class(client_path)
|
|
# -*- coding: utf-8 -*-
#
# general_models.py
#
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Graph Embedding Model
1. TransE
2. TransR
3. RESCAL
4. DistMult
5. ComplEx
6. RotatE
7. SimplE
"""
import os
import numpy as np
import math
import dgl.backend as F
import pdb
import torch
import torch.nn as nn
backend = os.environ.get('DGLBACKEND', 'pytorch')
from .pytorch.tensor_models import logsigmoid
from .pytorch.tensor_models import abs
from .pytorch.tensor_models import masked_select
from .pytorch.tensor_models import get_device, get_dev
from .pytorch.tensor_models import norm
from .pytorch.tensor_models import get_scalar
from .pytorch.tensor_models import reshape
from .pytorch.tensor_models import cuda
from .pytorch.tensor_models import ExternalEmbedding
from .pytorch.tensor_models import InferEmbedding
from .pytorch.score_fun import *
from .pytorch.loss import LossGenerator
DEFAULT_INFER_BATCHSIZE = 2048
EMB_INIT_EPS = 2.0
class InferModel(object):
def __init__(self, device, model_name, hidden_dim,
double_entity_emb=False, double_relation_emb=False,
gamma=0., batch_size=DEFAULT_INFER_BATCHSIZE):
super(InferModel, self).__init__()
self.device = device
self.model_name = model_name
entity_dim = 2 * hidden_dim if double_entity_emb else hidden_dim
relation_dim = 2 * hidden_dim if double_relation_emb else hidden_dim
self.entity_emb = InferEmbedding(device)
self.relation_emb = InferEmbedding(device)
self.batch_size = batch_size
if model_name == 'TransE' or model_name == 'TransE_l2':
self.score_func = TransEScore(gamma, 'l2')
elif model_name == 'TransE_l1':
self.score_func = TransEScore(gamma, 'l1')
elif model_name == 'TransR':
            assert False, 'Inference is not supported for the TransR model yet.'
elif model_name == 'DistMult':
self.score_func = DistMultScore()
elif model_name == 'ComplEx':
self.score_func = ComplExScore()
elif model_name == 'RESCAL':
self.score_func = RESCALScore(relation_dim, entity_dim)
elif model_name == 'RotatE':
emb_init = (gamma + EMB_INIT_EPS) / hidden_dim
self.score_func = RotatEScore(gamma, emb_init)
elif model_name == 'SimplE':
self.score_func = SimplEScore()
def load_emb(self, path, dataset):
"""Load the model.
Parameters
----------
path : str
Directory to load the model.
dataset : str
Dataset name as prefix to the saved embeddings.
"""
self.entity_emb.load(path, dataset+'_'+self.model_name+'_entity')
self.relation_emb.load(path, dataset+'_'+self.model_name+'_relation')
self.score_func.load(path, dataset+'_'+self.model_name)
def score(self, head, rel, tail, triplet_wise=False):
head_emb = self.entity_emb(head)
rel_emb = self.relation_emb(rel)
tail_emb = self.entity_emb(tail)
num_head = F.shape(head)[0]
num_rel = F.shape(rel)[0]
num_tail = F.shape(tail)[0]
batch_size = self.batch_size
score = []
if triplet_wise:
class FakeEdge(object):
def __init__(self, head_emb, rel_emb, tail_emb):
self._hobj = {}
self._robj = {}
self._tobj = {}
self._hobj['emb'] = head_emb
self._robj['emb'] = rel_emb
self._tobj['emb'] = tail_emb
@property
def src(self):
return self._hobj
@property
def dst(self):
return self._tobj
@property
def data(self):
return self._robj
for i in range((num_head + batch_size - 1) // batch_size):
                sh_emb = head_emb[i * batch_size : min((i + 1) * batch_size, num_head)]
                sr_emb = rel_emb[i * batch_size : min((i + 1) * batch_size, num_head)]
                st_emb = tail_emb[i * batch_size : min((i + 1) * batch_size, num_head)]
edata = FakeEdge(sh_emb, sr_emb, st_emb)
score.append(F.copy_to(self.score_func.edge_func(edata)['score'], F.cpu()))
score = F.cat(score, dim=0)
return score
else:
for i in range((num_head + batch_size - 1) // batch_size):
                sh_emb = head_emb[i * batch_size : min((i + 1) * batch_size, num_head)]
s_score = []
for j in range((num_tail + batch_size - 1) // batch_size):
                    st_emb = tail_emb[j * batch_size : min((j + 1) * batch_size, num_tail)]
s_score.append(F.copy_to(self.score_func.infer(sh_emb, rel_emb, st_emb), F.cpu()))
score.append(F.cat(s_score, dim=2))
score = F.cat(score, dim=0)
return F.reshape(score, (num_head * num_rel * num_tail,))
@property
def num_entity(self):
return self.entity_emb.emb.shape[0]
@property
def num_rel(self):
return self.relation_emb.emb.shape[0]
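
# Usage sketch (editor's addition, not part of the original module): how
# InferModel is typically driven after a training run has saved embeddings to
# disk. The checkpoint directory and dataset name below are hypothetical, and
# load_emb() expects files produced by a matching KEModel.save_emb() call; the
# device argument is simply passed through to InferEmbedding, so a CPU device
# is assumed here.
def _example_infer_model():
    model = InferModel(device=torch.device('cpu'), model_name='TransE_l2',
                       hidden_dim=400, gamma=12.0)
    model.load_emb('./ckpts', 'FB15k')  # hypothetical checkpoint dir / dataset
    head = torch.tensor([0, 1, 2])
    rel = torch.tensor([0, 0, 0])
    tail = torch.tensor([3, 4, 5])
    # triplet_wise=True scores the i-th (head, rel, tail) triple element-wise;
    # triplet_wise=False scores every head x rel x tail combination instead.
    return model.score(head, rel, tail, triplet_wise=True)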
class MLP(torch.nn.Module):
def __init__(self, input_entity_dim, entity_dim, input_relation_dim, relation_dim):
super(MLP, self).__init__()
self.transform_e_net = torch.nn.Linear(input_entity_dim, entity_dim)
self.transform_r_net = torch.nn.Linear(input_relation_dim, relation_dim)
self.reset_parameters()
def embed_entity(self, embeddings):
# print("embedding", embeddings.device)
# print("e_net", self.transform_e_net.weight.device)
return self.transform_e_net(embeddings)
def embed_relation(self, embeddings):
return self.transform_r_net(embeddings)
def reset_parameters(self):
nn.init.xavier_uniform_(self.transform_r_net.weight)
nn.init.xavier_uniform_(self.transform_e_net.weight)
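
# Editor's sketch of how the 'concat' encoder uses this MLP: frozen text
# features and learnable shallow embeddings are concatenated and projected
# back down to the score function's dimension. The dimensions here are
# illustrative only.
def _example_mlp_concat():
    ent_feat_dim, ent_emb_dim, entity_dim = 768, 200, 200
    rel_feat_dim, rel_emb_dim, relation_dim = 768, 200, 200
    mlp = MLP(ent_feat_dim + ent_emb_dim, entity_dim,
              rel_feat_dim + rel_emb_dim, relation_dim)
    feat = torch.randn(4, ent_feat_dim)   # e.g. RoBERTa entity features
    emb = torch.randn(4, ent_emb_dim)     # shallow entity embeddings
    return mlp.embed_entity(torch.cat([feat, emb], dim=-1))  # shape (4, entity_dim)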
class KEModel(object):
""" DGL Knowledge Embedding Model.
Parameters
----------
args:
Global configs.
model_name : str
Which KG model to use, including 'TransE_l1', 'TransE_l2', 'TransR',
'RESCAL', 'DistMult', 'ComplEx', 'RotatE', 'SimplE'
n_entities : int
Num of entities.
n_relations : int
Num of relations.
hidden_dim : int
Dimension size of embedding.
gamma : float
Gamma for score function.
double_entity_emb : bool
If True, entity embedding size will be 2 * hidden_dim.
Default: False
double_relation_emb : bool
If True, relation embedding size will be 2 * hidden_dim.
Default: False
"""
def __init__(self, args, model_name, n_entities, n_relations, hidden_dim, gamma,
double_entity_emb=False, double_relation_emb=False, ent_feat_dim=-1, rel_feat_dim=-1):
super(KEModel, self).__init__()
self.args = args
self.has_edge_importance = args.has_edge_importance
self.n_entities = n_entities
self.n_relations = n_relations
self.model_name = model_name
self.hidden_dim = hidden_dim
self.eps = EMB_INIT_EPS
self.emb_init = (gamma + self.eps) / hidden_dim
entity_dim = 2 * hidden_dim if double_entity_emb else hidden_dim
relation_dim = 2 * hidden_dim if double_relation_emb else hidden_dim
self.encoder_model_name = args.encoder_model_name
device = get_device(args)
self.loss_gen = LossGenerator(args, args.loss_genre, args.neg_adversarial_sampling,
args.adversarial_temperature, args.pairwise)
if self.encoder_model_name in ['shallow', 'concat']:
self.entity_emb = ExternalEmbedding(args, n_entities, entity_dim,
F.cpu() if args.mix_cpu_gpu else device)
if self.encoder_model_name in ['roberta', 'concat']:
assert ent_feat_dim != -1 and rel_feat_dim != -1
self.entity_feat = ExternalEmbedding(args, n_entities, ent_feat_dim,
F.cpu() if args.mix_cpu_gpu else device, is_feat=True)
# For RESCAL, relation_emb = relation_dim * entity_dim
if model_name == 'RESCAL':
rel_dim = relation_dim * entity_dim
else:
rel_dim = relation_dim
self.use_mlp = self.encoder_model_name in ['concat', 'roberta']
if self.encoder_model_name == 'concat':
self.transform_net = MLP(entity_dim+ent_feat_dim, entity_dim, relation_dim+rel_feat_dim, relation_dim)
# self.transform_e_net = torch.nn.Linear(entity_dim, entity_dim)
# self.transform_r_net = torch.nn.Linear(relation_dim, relation_dim)
elif self.encoder_model_name == 'roberta':
self.transform_net = MLP(ent_feat_dim, entity_dim, rel_feat_dim, relation_dim)
self.rel_dim = rel_dim
self.entity_dim = entity_dim
self.strict_rel_part = args.strict_rel_part
self.soft_rel_part = args.soft_rel_part
print(self.strict_rel_part, self.soft_rel_part)
assert not self.strict_rel_part and not self.soft_rel_part
if not self.strict_rel_part and not self.soft_rel_part:
if self.encoder_model_name in ['shallow', 'concat']:
self.relation_emb = ExternalEmbedding(args, n_relations, rel_dim,
F.cpu() if args.mix_cpu_gpu else device)
if self.encoder_model_name in ['roberta', 'concat']:
self.relation_feat = ExternalEmbedding(args, n_relations, rel_feat_dim,
F.cpu() if args.mix_cpu_gpu else device, is_feat=True)
else:
self.global_relation_emb = ExternalEmbedding(args, n_relations, rel_dim, F.cpu())
if model_name == 'TransE' or model_name == 'TransE_l2':
self.score_func = TransEScore(gamma, 'l2')
elif model_name == 'TransE_l1':
self.score_func = TransEScore(gamma, 'l1')
elif model_name == 'TransR':
projection_emb = ExternalEmbedding(args,
n_relations,
entity_dim * relation_dim,
F.cpu() if args.mix_cpu_gpu else device)
self.score_func = TransRScore(gamma, projection_emb, relation_dim, entity_dim)
elif model_name == 'DistMult':
self.score_func = DistMultScore()
elif model_name == 'ComplEx':
self.score_func = ComplExScore()
elif model_name == 'RESCAL':
self.score_func = RESCALScore(relation_dim, entity_dim)
elif model_name == 'RotatE':
self.score_func = RotatEScore(gamma, self.emb_init)
elif model_name == 'SimplE':
self.score_func = SimplEScore()
self.model_name = model_name
self.head_neg_score = self.score_func.create_neg(True)
self.tail_neg_score = self.score_func.create_neg(False)
self.head_neg_prepare = self.score_func.create_neg_prepare(True)
self.tail_neg_prepare = self.score_func.create_neg_prepare(False)
self.reset_parameters()
def share_memory(self):
"""Use torch.tensor.share_memory_() to allow cross process embeddings access.
"""
if self.encoder_model_name in ['concat', 'shallow']:
self.entity_emb.share_memory()
if self.encoder_model_name in ['concat', 'roberta']:
self.entity_feat.share_memory()
if self.strict_rel_part or self.soft_rel_part:
self.global_relation_emb.share_memory()
else:
if self.encoder_model_name in ['concat', 'shallow']:
self.relation_emb.share_memory()
if self.encoder_model_name in ['concat', 'roberta']:
self.relation_feat.share_memory()
if self.model_name == 'TransR':
self.score_func.share_memory()
if self.use_mlp:
self.transform_net.share_memory()
def save_emb(self, path, dataset):
"""Save the model.
Parameters
----------
path : str
Directory to save the model.
dataset : str
Dataset name as prefix to the saved embeddings.
"""
if self.encoder_model_name in ['shallow', 'concat']:
self.entity_emb.save(path, dataset+'_'+self.model_name+'_entity')
if self.encoder_model_name in ['roberta', 'concat']:
torch.save({'transform_state_dict': self.transform_net.state_dict()}, os.path.join(path, dataset+"_"+self.model_name+"_mlp"))
if self.strict_rel_part or self.soft_rel_part:
self.global_relation_emb.save(path, dataset+'_'+self.model_name+'_relation')
else:
if self.encoder_model_name in ['shallow', 'concat']:
self.relation_emb.save(path, dataset+'_'+self.model_name+'_relation')
self.score_func.save(path, dataset+'_'+self.model_name)
def load_emb(self, path, dataset):
"""Load the model.
Parameters
----------
path : str
Directory to load the model.
dataset : str
Dataset name as prefix to the saved embeddings.
"""
self.entity_emb.load(path, dataset+'_'+self.model_name+'_entity')
self.relation_emb.load(path, dataset+'_'+self.model_name+'_relation')
self.score_func.load(path, dataset+'_'+self.model_name)
def reset_parameters(self):
"""Re-initialize the model.
"""
if self.encoder_model_name in ['shallow', 'concat']:
self.entity_emb.init(self.emb_init)
self.score_func.reset_parameters()
if (not self.strict_rel_part) and (not self.soft_rel_part):
if self.encoder_model_name in ['shallow', 'concat']:
self.relation_emb.init(self.emb_init)
else:
self.global_relation_emb.init(self.emb_init)
if self.use_mlp:
self.transform_net.reset_parameters()
def predict_score(self, g):
"""Predict the positive score.
Parameters
----------
g : DGLGraph
Graph holding positive edges.
Returns
-------
tensor
The positive score
"""
self.score_func(g)
return g.edata['score']
def predict_neg_score(self, pos_g, neg_g, to_device=None, gpu_id=-1, trace=False,
neg_deg_sample=False):
"""Calculate the negative score.
Parameters
----------
pos_g : DGLGraph
Graph holding positive edges.
neg_g : DGLGraph
Graph holding negative edges.
to_device : func
Function to move data into device.
gpu_id : int
Which gpu to move data to.
trace : bool
If True, trace the computation. This is required in training.
If False, do not trace the computation.
Default: False
neg_deg_sample : bool
If True, we use the head and tail nodes of the positive edges to
construct negative edges.
Default: False
Returns
-------
tensor
The negative score
"""
num_chunks = neg_g.num_chunks
chunk_size = neg_g.chunk_size
neg_sample_size = neg_g.neg_sample_size
mask = F.ones((num_chunks, chunk_size * (neg_sample_size + chunk_size)),
dtype=F.float32, ctx=F.context(pos_g.ndata['emb']))
if neg_g.neg_head:
neg_head_ids = neg_g.ndata['id'][neg_g.head_nid]
if self.encoder_model_name == 'roberta':
neg_head = self.transform_net.embed_entity(self.entity_feat(neg_head_ids, gpu_id, False))
elif self.encoder_model_name == 'shallow':
neg_head = self.entity_emb(neg_head_ids, gpu_id, trace)
elif self.encoder_model_name == 'concat':
neg_head = self.transform_net.embed_entity(torch.cat([self.entity_feat(neg_head_ids, gpu_id, False), self.entity_emb(neg_head_ids, gpu_id, trace)], -1))
head_ids, tail_ids = pos_g.all_edges(order='eid')
if to_device is not None and gpu_id >= 0:
tail_ids = to_device(tail_ids, gpu_id)
tail = pos_g.ndata['emb'][tail_ids]
rel = pos_g.edata['emb']
# When we train a batch, we could use the head nodes of the positive edges to
# construct negative edges. We construct a negative edge between a positive head
# node and every positive tail node.
# When we construct negative edges like this, we know there is one positive
# edge for a positive head node among the negative edges. We need to mask
# them.
if neg_deg_sample:
head = pos_g.ndata['emb'][head_ids]
head = head.reshape(num_chunks, chunk_size, -1)
neg_head = neg_head.reshape(num_chunks, neg_sample_size, -1)
neg_head = F.cat([head, neg_head], 1)
neg_sample_size = chunk_size + neg_sample_size
mask[:,0::(neg_sample_size + 1)] = 0
neg_head = neg_head.reshape(num_chunks * neg_sample_size, -1)
neg_head, tail = self.head_neg_prepare(pos_g.edata['id'], num_chunks, neg_head, tail, gpu_id, trace)
neg_score = self.head_neg_score(neg_head, rel, tail,
num_chunks, chunk_size, neg_sample_size)
else:
neg_tail_ids = neg_g.ndata['id'][neg_g.tail_nid]
if self.encoder_model_name == 'roberta':
neg_tail = self.transform_net.embed_entity(self.entity_feat(neg_tail_ids, gpu_id, False))
elif self.encoder_model_name == 'shallow':
neg_tail = self.entity_emb(neg_tail_ids, gpu_id, trace)
elif self.encoder_model_name == 'concat':
neg_tail = self.transform_net.embed_entity(torch.cat([self.entity_feat(neg_tail_ids, gpu_id, False), self.entity_emb(neg_tail_ids, gpu_id, trace)], -1))
head_ids, tail_ids = pos_g.all_edges(order='eid')
if to_device is not None and gpu_id >= 0:
head_ids = to_device(head_ids, gpu_id)
head = pos_g.ndata['emb'][head_ids]
rel = pos_g.edata['emb']
# This is negative edge construction similar to the above.
if neg_deg_sample:
tail = pos_g.ndata['emb'][tail_ids]
tail = tail.reshape(num_chunks, chunk_size, -1)
neg_tail = neg_tail.reshape(num_chunks, neg_sample_size, -1)
neg_tail = F.cat([tail, neg_tail], 1)
neg_sample_size = chunk_size + neg_sample_size
mask[:,0::(neg_sample_size + 1)] = 0
neg_tail = neg_tail.reshape(num_chunks * neg_sample_size, -1)
head, neg_tail = self.tail_neg_prepare(pos_g.edata['id'], num_chunks, head, neg_tail, gpu_id, trace)
neg_score = self.tail_neg_score(head, rel, neg_tail,
num_chunks, chunk_size, neg_sample_size)
if neg_deg_sample:
neg_g.neg_sample_size = neg_sample_size
mask = mask.reshape(num_chunks, chunk_size, neg_sample_size)
return neg_score * mask
else:
return neg_score
def forward_test_wikikg(self, query, ans, candidate, mode, gpu_id=-1):
"""Do the forward and generate ranking results.
Parameters
----------
pos_g : DGLGraph
Graph holding positive edges.
neg_g : DGLGraph
Graph holding negative edges.
logs : List
Where to put results in.
gpu_id : int
Which gpu to accelerate the calculation. if -1 is provided, cpu is used.
"""
scores = self.predict_score_wikikg(query, candidate, mode, to_device=cuda, gpu_id=gpu_id, trace=False)
argsort = F.argsort(scores, dim=1, descending=True)
return argsort[:,:10]
def predict_score_wikikg(self, query, candidate, mode, to_device=None, gpu_id=-1, trace=False):
num_chunks = len(query)
chunk_size = 1
neg_sample_size = candidate.shape[1]
if mode == 'h,r->t':
if self.encoder_model_name == 'roberta':
neg_tail = self.transform_net.embed_entity(self.entity_feat(candidate.view(-1), gpu_id, False))
head = self.transform_net.embed_entity(self.entity_feat(query[:,0], gpu_id, False))
rel = self.transform_net.embed_relation(self.relation_feat(query[:,1], gpu_id, False))
elif self.encoder_model_name == 'shallow':
neg_tail = self.entity_emb(candidate.view(-1), gpu_id, False)
head = self.entity_emb(query[:,0], gpu_id, False)
rel = self.relation_emb(query[:,1], gpu_id, False)
elif self.encoder_model_name == 'concat':
neg_tail = self.transform_net.embed_entity(torch.cat([self.entity_feat(candidate.view(-1), gpu_id, False), self.entity_emb(candidate.view(-1), gpu_id, False)], -1))
head = self.transform_net.embed_entity(torch.cat([self.entity_feat(query[:,0], gpu_id, False), self.entity_emb(query[:,0], gpu_id, False)], -1))
rel = self.transform_net.embed_relation(torch.cat([self.relation_feat(query[:,1], gpu_id, False), self.relation_emb(query[:,1], gpu_id, False)], -1))
neg_score = self.tail_neg_score(head, rel, neg_tail,
num_chunks, chunk_size, neg_sample_size)
else:
assert False
return neg_score.squeeze(dim=1)
# @profile
def forward(self, pos_g, neg_g, gpu_id=-1):
"""Do the forward.
Parameters
----------
pos_g : DGLGraph
Graph holding positive edges.
neg_g : DGLGraph
Graph holding negative edges.
gpu_id : int
Which gpu to accelerate the calculation. if -1 is provided, cpu is used.
Returns
-------
tensor
loss value
dict
loss info
"""
# print(gpu_id, self.transform_net.transform_e_net.weight)
if self.encoder_model_name == 'roberta':
pos_g.ndata['emb'] = self.transform_net.embed_entity(self.entity_feat(pos_g.ndata['id'], gpu_id, False))
pos_g.edata['emb'] = self.transform_net.embed_relation(self.relation_feat(pos_g.edata['id'], gpu_id, False))
elif self.encoder_model_name == 'shallow':
pos_g.ndata['emb'] = self.entity_emb(pos_g.ndata['id'], gpu_id, True)
pos_g.edata['emb'] = self.relation_emb(pos_g.edata['id'], gpu_id, True)
elif self.encoder_model_name == 'concat':
pos_g.ndata['emb'] = self.transform_net.embed_entity(torch.cat([self.entity_feat(pos_g.ndata['id'], gpu_id, False), self.entity_emb(pos_g.ndata['id'], gpu_id, True)], -1))
pos_g.edata['emb'] = self.transform_net.embed_relation(torch.cat([self.relation_feat(pos_g.edata['id'], gpu_id, False), self.relation_emb(pos_g.edata['id'], gpu_id, True)], -1))
self.score_func.prepare(pos_g, gpu_id, True)
pos_score = self.predict_score(pos_g)
if gpu_id >= 0:
neg_score = self.predict_neg_score(pos_g, neg_g, to_device=cuda,
gpu_id=gpu_id, trace=True,
neg_deg_sample=self.args.neg_deg_sample)
else:
neg_score = self.predict_neg_score(pos_g, neg_g, trace=True,
neg_deg_sample=self.args.neg_deg_sample)
neg_score = reshape(neg_score, -1, neg_g.neg_sample_size)
# subsampling weight
# TODO: add subsampling to new sampler
#if self.args.non_uni_weight:
# subsampling_weight = pos_g.edata['weight']
# pos_score = (pos_score * subsampling_weight).sum() / subsampling_weight.sum()
# neg_score = (neg_score * subsampling_weight).sum() / subsampling_weight.sum()
#else:
edge_weight = F.copy_to(pos_g.edata['impts'], get_dev(gpu_id)) if self.has_edge_importance else None
loss, log = self.loss_gen.get_total_loss(pos_score, neg_score, edge_weight)
# regularization: TODO(zihao)
#TODO: only reg ent&rel embeddings. other params to be added.
if self.args.regularization_coef > 0.0 and self.args.regularization_norm > 0 and self.encoder_model_name in ['concat', 'shallow']:
coef, nm = self.args.regularization_coef, self.args.regularization_norm
reg = coef * (norm(self.entity_emb.curr_emb(), nm) + norm(self.relation_emb.curr_emb(), nm))
log['regularization'] = get_scalar(reg)
loss = loss + reg
return loss, log
def update(self, gpu_id=-1):
""" Update the embeddings in the model
gpu_id : int
Which gpu to accelerate the calculation. if -1 is provided, cpu is used.
"""
if self.encoder_model_name in ['shallow', 'concat']:
self.entity_emb.update(gpu_id)
self.relation_emb.update(gpu_id)
self.score_func.update(gpu_id)
def prepare_relation(self, device=None):
""" Prepare relation embeddings in multi-process multi-gpu training model.
device : th.device
Which device (GPU) to put relation embeddings in.
"""
print("prepare relation")
self.relation_emb = ExternalEmbedding(self.args, self.n_relations, self.rel_dim, device)
self.relation_emb.init(self.emb_init)
if self.model_name == 'TransR':
local_projection_emb = ExternalEmbedding(self.args, self.n_relations,
self.entity_dim * self.rel_dim, device)
self.score_func.prepare_local_emb(local_projection_emb)
self.score_func.reset_parameters()
def prepare_cross_rels(self, cross_rels):
self.relation_emb.setup_cross_rels(cross_rels, self.global_relation_emb)
if self.model_name == 'TransR':
self.score_func.prepare_cross_rels(cross_rels)
def writeback_relation(self, rank=0, rel_parts=None):
""" Writeback relation embeddings in a specific process to global relation embedding.
        Used in multi-process multi-gpu training mode.
rank : int
Process id.
rel_parts : List of tensor
            List of tensors storing the edge types of each partition.
"""
idx = rel_parts[rank]
if self.soft_rel_part:
idx = self.relation_emb.get_noncross_idx(idx)
self.global_relation_emb.emb[idx] = F.copy_to(self.relation_emb.emb, F.cpu())[idx]
if self.model_name == 'TransR':
self.score_func.writeback_local_emb(idx)
def load_relation(self, device=None):
""" Sync global relation embeddings into local relation embeddings.
        Used in multi-process multi-gpu training mode.
device : th.device
Which device (GPU) to put relation embeddings in.
"""
self.relation_emb = ExternalEmbedding(self.args, self.n_relations, self.rel_dim, device)
self.relation_emb.emb = F.copy_to(self.global_relation_emb.emb, device)
if self.model_name == 'TransR':
local_projection_emb = ExternalEmbedding(self.args, self.n_relations,
self.entity_dim * self.rel_dim, device)
self.score_func.load_local_emb(local_projection_emb)
def create_async_update(self):
"""Set up the async update for entity embedding.
"""
if self.encoder_model_name in ['shallow', 'concat']:
self.entity_emb.create_async_update()
def finish_async_update(self):
"""Terminate the async update for entity embedding.
"""
if self.encoder_model_name in ['shallow', 'concat']:
self.entity_emb.finish_async_update()
def pull_model(self, client, pos_g, neg_g):
        with torch.no_grad():
entity_id = F.cat(seq=[pos_g.ndata['id'], neg_g.ndata['id']], dim=0)
relation_id = pos_g.edata['id']
entity_id = F.tensor(np.unique(F.asnumpy(entity_id)))
relation_id = F.tensor(np.unique(F.asnumpy(relation_id)))
l2g = client.get_local2global()
global_entity_id = l2g[entity_id]
entity_data = client.pull(name='entity_emb', id_tensor=global_entity_id)
relation_data = client.pull(name='relation_emb', id_tensor=relation_id)
self.entity_emb.emb[entity_id] = entity_data
self.relation_emb.emb[relation_id] = relation_data
def push_gradient(self, client):
        with torch.no_grad():
l2g = client.get_local2global()
for entity_id, entity_data in self.entity_emb.trace:
grad = entity_data.grad.data
                global_entity_id = l2g[entity_id]
client.push(name='entity_emb', id_tensor=global_entity_id, data_tensor=grad)
for relation_id, relation_data in self.relation_emb.trace:
grad = relation_data.grad.data
client.push(name='relation_emb', id_tensor=relation_id, data_tensor=grad)
self.entity_emb.trace = []
self.relation_emb.trace = []
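
# Editor's sketch: the mask arithmetic used by KEModel.predict_neg_score() when
# neg_deg_sample is enabled. The chunk's own positive nodes are prepended to the
# negative candidates, so mask[:, 0::(neg_sample_size + 1)] = 0 zeroes exactly
# the diagonal entries, i.e. each positive edge scored against its own node.
def _example_neg_deg_sample_mask():
    num_chunks, chunk_size, base_neg = 1, 3, 2
    neg_sample_size = chunk_size + base_neg       # positives are prepended
    mask = torch.ones(num_chunks, chunk_size * neg_sample_size)
    mask[:, 0::(neg_sample_size + 1)] = 0
    # After reshaping, row j of each chunk has its single zero at column j.
    return mask.reshape(num_chunks, chunk_size, neg_sample_size)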
|
|
from lsst.sims.maf.db import trackingDb, resultsDb
import os
import json
class MetricObj(object):
"""
Save a metric as an object
"""
def __init__(self, metadata):
self.metadata = metadata
self.plots = {}
self.stats = []
def __repr__(self):
return json.dumps(self.metadata)
def __str__(self):
return json.dumps(self.metadata)
def to_json(self):
return json.dumps(self.metadata)
def info(self):
results = self.metadata.copy()
if len(self.plots.keys()) > 0:
results['plots'] = self.plots
if len(self.stats) > 0:
results['stats'] = self.stats
return results
class RunObj(object):
"""
Save a run as an object
"""
def __init__(self, metadata):
self.metadata = metadata
self.metrics = None
self.run_db = resultsDb.ResultsDb(resultsDbAddress='sqlite:///' + self.metadata['mafDir'] + '/resultsDb_sqlite.db')
# initialize dictionary
self.metric_objs = {}
self.load_metric_objs()
def load_metric_objs(self):
metadata_list = ['metricId', 'metricName', 'slicerName', 'metricMetadata', 'simDataName', 'metricDataFile',
'displayGroup', 'displaySubgroup', 'displayOrder', 'displayCaption']
# join metrics and displays
sql = 'SELECT A.metricId, ' + ', '.join(metadata_list[1:]) + ' FROM displays AS A, metrics AS B WHERE A.metricId = B.metricId'
metrics = self.run_db.session.execute(sql).fetchall()
for metric in metrics:
metadata = {}
for m in metadata_list:
metadata[m] = getattr(metric, m)
metric_obj = MetricObj(metadata)
self.metric_objs[metadata['metricId']] = metric_obj
metric_obj.run = self
metric_obj.metadata['mafRunId'] = self.metadata['mafRunId']
metric_obj.metadata['mafDir'] = os.path.relpath(self.metadata['mafDir'], ".")
# get all plots
plots = self.run_db.session.query(resultsDb.PlotRow).all()
for plot in plots:
self.metric_objs[plot.metricId].plots[plot.plotType] = plot.plotFile
# get all stats
stats = self.run_db.session.query(resultsDb.SummaryStatRow).all()
for stat in stats:
self.metric_objs[stat.metricId].stats.append({'summaryName': stat.summaryName,
'summaryValue': stat.summaryValue})
def __repr__(self):
return json.dumps(self.metadata)
class ShowMafDBController(object):
def __init__(self, tracking_db_address):
self.tracking_db = trackingDb.TrackingDb(trackingDbAddress=tracking_db_address)
self.run_objs = []
self.all_metrics = []
self.all_metrics_idx = {}
self.load_run_objs()
self.build_metric_index()
def load_run_objs(self):
self.run_objs = []
runs = self.tracking_db.session.query(trackingDb.RunRow).all()
metadata_list = ['mafRunId', 'opsimRun', 'opsimComment', 'mafComment', 'mafDir', 'opsimDate', 'mafDate']
for run in runs:
metadata = {}
for m in metadata_list:
metadata[m] = getattr(run, m)
run_obj = RunObj(metadata)
self.run_objs.append(run_obj)
def build_metric_index(self):
"""
        Build hash-table indexes for searching.
        Each metric is appended to the list in its corresponding bucket.
"""
self.all_metrics = []
self.all_metrics_idx['name'] = {}
self.all_metrics_idx['sim_data'] = {}
self.all_metrics_idx['slicer'] = {}
for run_obj in self.run_objs:
for idx in run_obj.metric_objs:
metric_obj = run_obj.metric_objs[idx]
self.all_metrics.append(metric_obj)
                # if the index does not exist yet, initialize its list
if metric_obj.metadata['metricName'] not in self.all_metrics_idx['name']:
self.all_metrics_idx['name'][metric_obj.metadata['metricName']] = []
self.all_metrics_idx['name'][metric_obj.metadata['metricName']].append(metric_obj)
                # if the index does not exist yet, initialize its list
if metric_obj.metadata['simDataName'] not in self.all_metrics_idx['sim_data']:
self.all_metrics_idx['sim_data'][metric_obj.metadata['simDataName']] = []
self.all_metrics_idx['sim_data'][metric_obj.metadata['simDataName']].append(metric_obj)
                # if the index does not exist yet, initialize its list
if metric_obj.metadata['slicerName'] not in self.all_metrics_idx['slicer']:
self.all_metrics_idx['slicer'][metric_obj.metadata['slicerName']] = []
self.all_metrics_idx['slicer'][metric_obj.metadata['slicerName']].append(metric_obj)
def get_all_metrics(self):
return map(lambda x: x.info(), self.all_metrics)
def get_all_sim_data(self):
return self.all_metrics_idx['sim_data'].keys()
def get_all_slicer(self):
return self.all_metrics_idx['slicer'].keys()
def search_metrics(self, keywords):
"""
        Given the search keywords, return the info dicts of the matching metrics.
:param keywords:
{
'name': ['metric_name_key_1', 'metric_name_key_2' ... ],
'sim_data': 'sim_data_name',
'slicer': 'slicer_name'
}
:return:
"""
results = None
if keywords.get('name'):
            # if this is the first search category, initialize the search results
if results is None:
results = []
# iterate through all the name index to find match metric name
for search_key in keywords.get('name'):
for name_idx in self.all_metrics_idx['name']:
if name_idx.find(search_key) >= 0:
results.extend(self.all_metrics_idx['name'][name_idx])
if keywords.get('sim_data'):
            # if this is the first search category, initialize the search results
if results is None:
results = []
for search_key in keywords.get('sim_data'):
if search_key in self.all_metrics_idx['sim_data']:
results.extend(self.all_metrics_idx['sim_data'][search_key])
else:
new_results = []
for metric in results:
for search_key in keywords.get('sim_data'):
if search_key == metric.metadata['simDataName']:
new_results.append(metric)
results = new_results
if keywords.get('slicer'):
            # if this is the first search category, initialize the search results
if results is None:
results = []
for search_key in keywords.get('slicer'):
if search_key in self.all_metrics_idx['slicer']:
results.extend(self.all_metrics_idx['slicer'][search_key])
else:
new_results = []
for metric in results:
for search_key in keywords.get('slicer'):
if search_key == metric.metadata['slicerName']:
new_results.append(metric)
results = new_results
return map(lambda x: x.info(), results)
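
# Usage sketch (editor's addition, not part of the original module). The
# tracking-database path and keyword values below are hypothetical; the path
# must point at a tracking DB produced by showMaf. Note that 'name' entries are
# substring-matched against metricName, while 'sim_data' and 'slicer' entries
# must match the indexed values exactly.
def _example_search_metrics():
    controller = ShowMafDBController('sqlite:///trackingDb_sqlite.db')
    keywords = {
        'name': ['CoaddM5'],
        'sim_data': ['baseline2018a'],
    }
    return list(controller.search_metrics(keywords))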
|
|
# Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Query capability built on skew metamodel
tags_spec -> s3, elb, rds
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import functools
import itertools
import json
import jmespath
import six
import os
from c7n.actions import ActionRegistry
from c7n.exceptions import ClientError, ResourceLimitExceeded, PolicyExecutionError
from c7n.filters import FilterRegistry, MetricsFilter
from c7n.manager import ResourceManager
from c7n.registry import PluginRegistry
from c7n.tags import register_ec2_tags, register_universal_tags
from c7n.utils import (
local_session, generate_arn, get_retry, chunks, camelResource)
try:
from botocore.paginate import PageIterator, Paginator
except ImportError:
# Likely using another provider in a serverless environment
class PageIterator(object):
pass
class Paginator(object):
pass
class ResourceQuery(object):
def __init__(self, session_factory):
self.session_factory = session_factory
@staticmethod
def resolve(resource_type):
if not isinstance(resource_type, type):
raise ValueError(resource_type)
else:
m = resource_type
return m
def _invoke_client_enum(self, client, enum_op, params, path, retry=None):
if client.can_paginate(enum_op):
p = client.get_paginator(enum_op)
if retry:
p.PAGE_ITERATOR_CLS = RetryPageIterator
results = p.paginate(**params)
data = results.build_full_result()
else:
op = getattr(client, enum_op)
data = op(**params)
if path:
path = jmespath.compile(path)
data = path.search(data)
return data
def filter(self, resource_manager, **params):
"""Query a set of resources."""
m = self.resolve(resource_manager.resource_type)
client = local_session(self.session_factory).client(
m.service, resource_manager.config.region)
enum_op, path, extra_args = m.enum_spec
if extra_args:
params.update(extra_args)
return self._invoke_client_enum(
client, enum_op, params, path,
getattr(resource_manager, 'retry', None)) or []
def get(self, resource_manager, identities):
"""Get resources by identities
"""
m = self.resolve(resource_manager.resource_type)
params = {}
client_filter = False
# Try to formulate server side query
if m.filter_name:
if m.filter_type == 'list':
params[m.filter_name] = identities
elif m.filter_type == 'scalar':
assert len(identities) == 1, "Scalar server side filter"
params[m.filter_name] = identities[0]
else:
client_filter = True
resources = self.filter(resource_manager, **params)
if client_filter:
# This logic was added to prevent the issue from:
# https://github.com/cloud-custodian/cloud-custodian/issues/1398
if all(map(lambda r: isinstance(r, six.string_types), resources)):
resources = [r for r in resources if r in identities]
else:
resources = [r for r in resources if r[m.id] in identities]
return resources
class ChildResourceQuery(ResourceQuery):
"""A resource query for resources that must be queried with parent information.
Several resource types can only be queried in the context of their
parents identifiers. ie. efs mount targets (parent efs), route53 resource
records (parent hosted zone), ecs services (ecs cluster).
"""
capture_parent_id = False
parent_key = 'c7n:parent-id'
def __init__(self, session_factory, manager):
self.session_factory = session_factory
self.manager = manager
def filter(self, resource_manager, **params):
"""Query a set of resources."""
m = self.resolve(resource_manager.resource_type)
client = local_session(self.session_factory).client(m.service)
enum_op, path, extra_args = m.enum_spec
if extra_args:
params.update(extra_args)
parent_type, parent_key, annotate_parent = m.parent_spec
parents = self.manager.get_resource_manager(parent_type)
parent_ids = [p[parents.resource_type.id] for p in parents.resources()]
# Bail out with no parent ids...
existing_param = parent_key in params
if not existing_param and len(parent_ids) == 0:
return []
# Handle a query with parent id
if existing_param:
return self._invoke_client_enum(client, enum_op, params, path)
# Have to query separately for each parent's children.
results = []
for parent_id in parent_ids:
merged_params = self.get_parent_parameters(params, parent_id, parent_key)
subset = self._invoke_client_enum(
client, enum_op, merged_params, path, retry=self.manager.retry)
if annotate_parent:
for r in subset:
r[self.parent_key] = parent_id
if subset and self.capture_parent_id:
results.extend([(parent_id, s) for s in subset])
elif subset:
results.extend(subset)
return results
def get_parent_parameters(self, params, parent_id, parent_key):
return dict(params, **{parent_key: parent_id})
class QueryMeta(type):
def __new__(cls, name, parents, attrs):
if 'resource_type' not in attrs:
return super(QueryMeta, cls).__new__(cls, name, parents, attrs)
if 'filter_registry' not in attrs:
attrs['filter_registry'] = FilterRegistry(
'%s.filters' % name.lower())
if 'action_registry' not in attrs:
attrs['action_registry'] = ActionRegistry(
'%s.actions' % name.lower())
if attrs['resource_type']:
m = ResourceQuery.resolve(attrs['resource_type'])
# Generic cloud watch metrics support
if m.dimension:
attrs['filter_registry'].register('metrics', MetricsFilter)
# EC2 Service boilerplate ...
if m.service == 'ec2':
# Generic ec2 resource tag support
if getattr(m, 'taggable', True):
register_ec2_tags(
attrs['filter_registry'], attrs['action_registry'])
if getattr(m, 'universal_taggable', False):
                compatibility = isinstance(m.universal_taggable, bool)
register_universal_tags(
attrs['filter_registry'], attrs['action_registry'],
compatibility=compatibility)
return super(QueryMeta, cls).__new__(cls, name, parents, attrs)
def _napi(op_name):
return op_name.title().replace('_', '')
sources = PluginRegistry('sources')
@sources.register('describe')
class DescribeSource(object):
resource_query_factory = ResourceQuery
def __init__(self, manager):
self.manager = manager
self.query = self.get_query()
def get_resources(self, ids, cache=True):
return self.query.get(self.manager, ids)
def resources(self, query):
return self.query.filter(self.manager, **query)
def get_query(self):
return self.resource_query_factory(self.manager.session_factory)
def get_query_params(self, query_params):
return query_params
def get_permissions(self):
m = self.manager.get_model()
perms = ['%s:%s' % (m.service, _napi(m.enum_spec[0]))]
if getattr(m, 'detail_spec', None):
perms.append("%s:%s" % (m.service, _napi(m.detail_spec[0])))
if getattr(m, 'batch_detail_spec', None):
perms.append("%s:%s" % (m.service, _napi(m.batch_detail_spec[0])))
return perms
def augment(self, resources):
model = self.manager.get_model()
if getattr(model, 'detail_spec', None):
detail_spec = getattr(model, 'detail_spec', None)
_augment = _scalar_augment
elif getattr(model, 'batch_detail_spec', None):
detail_spec = getattr(model, 'batch_detail_spec', None)
_augment = _batch_augment
else:
return resources
_augment = functools.partial(
_augment, self.manager, model, detail_spec)
with self.manager.executor_factory(
max_workers=self.manager.max_workers) as w:
results = list(w.map(
_augment, chunks(resources, self.manager.chunk_size)))
return list(itertools.chain(*results))
@sources.register('describe-child')
class ChildDescribeSource(DescribeSource):
resource_query_factory = ChildResourceQuery
def get_query(self):
return self.resource_query_factory(
self.manager.session_factory, self.manager)
@sources.register('config')
class ConfigSource(object):
retry = staticmethod(get_retry(('ThrottlingException',)))
def __init__(self, manager):
self.manager = manager
def get_permissions(self):
return ["config:GetResourceConfigHistory",
"config:ListDiscoveredResources"]
def get_resources(self, ids, cache=True):
client = local_session(self.manager.session_factory).client('config')
results = []
m = self.manager.get_model()
for i in ids:
revisions = self.retry(
client.get_resource_config_history,
resourceId=i,
resourceType=m.config_type,
limit=1).get('configurationItems')
if not revisions:
continue
results.append(self.load_resource(revisions[0]))
return list(filter(None, results))
def get_query_params(self, query):
"""Parse config select expression from policy and parameter.
On policy config supports a full statement being given, or
a clause that will be added to the where expression.
If no query is specified, a default query is utilized.
A valid query should at minimum select fields
for configuration, supplementaryConfiguration and
must have resourceType qualifier.
"""
if query and not isinstance(query, dict):
raise PolicyExecutionError("invalid config source query %s" % (query,))
if query is None and 'query' in self.manager.data:
_q = [q for q in self.manager.data['query'] if 'expr' in q]
if _q:
query = _q.pop()
if query is None and 'query' in self.manager.data:
_c = [q['clause'] for q in self.manager.data['query'] if 'clause' in q]
if _c:
_c = _c.pop()
elif query:
return query
else:
_c = None
s = "select configuration, supplementaryConfiguration where resourceType = '{}'".format(
self.manager.resource_type.config_type)
if _c:
s += "AND {}".format(_c)
return {'expr': s}
def load_resource(self, item):
if isinstance(item['configuration'], six.string_types):
item_config = json.loads(item['configuration'])
else:
item_config = item['configuration']
return camelResource(item_config)
def resources(self, query=None):
client = local_session(self.manager.session_factory).client('config')
query = self.get_query_params(query)
pager = Paginator(
client.select_resource_config,
{'input_token': 'NextToken', 'output_token': 'NextToken',
'result_key': 'Results'},
client.meta.service_model.operation_model('SelectResourceConfig'))
pager.PAGE_ITERATOR_CLS = RetryPageIterator
results = []
for page in pager.paginate(Expression=query['expr']):
results.extend([
self.load_resource(json.loads(r)) for r in page['Results']])
return results
def augment(self, resources):
return resources
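
# Editor's sketch of the select expression ConfigSource.get_query_params()
# builds for the AWS Config advanced query API. The resource type and extra
# where-clause below are illustrative only.
def _example_config_select_expr():
    config_type = 'AWS::EC2::Volume'
    clause = "configuration.state.value = 'in-use'"
    expr = ("select configuration, supplementaryConfiguration "
            "where resourceType = '{}'".format(config_type))
    if clause:
        expr += " AND {}".format(clause)
    return {'expr': expr}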
@six.add_metaclass(QueryMeta)
class QueryResourceManager(ResourceManager):
resource_type = ""
# TODO Check if we can move to describe source
max_workers = 3
chunk_size = 20
permissions = ()
_generate_arn = None
retry = staticmethod(
get_retry((
'ThrottlingException',
'RequestLimitExceeded',
'Throttled',
'Throttling',
'Client.RequestLimitExceeded')))
def __init__(self, data, options):
super(QueryResourceManager, self).__init__(data, options)
self.source = self.get_source(self.source_type)
@property
def source_type(self):
return self.data.get('source', 'describe')
def get_source(self, source_type):
return sources.get(source_type)(self)
@classmethod
def has_arn(cls):
if cls.resource_type.arn is not None:
return bool(cls.resource_type.arn)
elif getattr(cls.resource_type, 'arn_type', None) is not None:
return True
elif cls.__dict__.get('get_arns'):
return True
return False
@classmethod
def get_model(cls):
return ResourceQuery.resolve(cls.resource_type)
@classmethod
def match_ids(cls, ids):
"""return ids that match this resource type's id format."""
id_prefix = getattr(cls.get_model(), 'id_prefix', None)
if id_prefix is not None:
return [i for i in ids if i.startswith(id_prefix)]
return ids
def get_permissions(self):
perms = self.source.get_permissions()
if getattr(self, 'permissions', None):
perms.extend(self.permissions)
return perms
def get_cache_key(self, query):
return {
'account': self.account_id,
'region': self.config.region,
'resource': str(self.__class__.__name__),
'source': self.source_type,
'q': query
}
def resources(self, query=None):
query = self.source.get_query_params(query)
cache_key = self.get_cache_key(query)
resources = None
if self._cache.load():
resources = self._cache.get(cache_key)
if resources is not None:
self.log.debug("Using cached %s: %d" % (
"%s.%s" % (self.__class__.__module__,
self.__class__.__name__),
len(resources)))
if resources is None:
if query is None:
query = {}
with self.ctx.tracer.subsegment('resource-fetch'):
resources = self.source.resources(query)
with self.ctx.tracer.subsegment('resource-augment'):
resources = self.augment(resources)
self._cache.save(cache_key, resources)
resource_count = len(resources)
with self.ctx.tracer.subsegment('filter'):
resources = self.filter_resources(resources)
# Check if we're out of a policies execution limits.
if self.data == self.ctx.policy.data:
self.check_resource_limit(len(resources), resource_count)
return resources
def check_resource_limit(self, selection_count, population_count):
"""Check if policy's execution affects more resources then its limit.
Ideally this would be at a higher level but we've hidden
filtering behind the resource manager facade for default usage.
"""
p = self.ctx.policy
max_resource_limits = MaxResourceLimit(p, selection_count, population_count)
return max_resource_limits.check_resource_limits()
def _get_cached_resources(self, ids):
key = self.get_cache_key(None)
if self._cache.load():
resources = self._cache.get(key)
if resources is not None:
self.log.debug("Using cached results for get_resources")
m = self.get_model()
id_set = set(ids)
return [r for r in resources if r[m.id] in id_set]
return None
def get_resources(self, ids, cache=True, augment=True):
if cache:
resources = self._get_cached_resources(ids)
if resources is not None:
return resources
try:
resources = self.source.get_resources(ids)
if augment:
resources = self.augment(resources)
return resources
except ClientError as e:
self.log.warning("event ids not resolved: %s error:%s" % (ids, e))
return []
def augment(self, resources):
"""subclasses may want to augment resources with additional information.
ie. we want tags by default (rds, elb), and policy, location, acl for
s3 buckets.
"""
return self.source.augment(resources)
@property
def account_id(self):
""" Return the current account ID.
This should now be passed in using the --account-id flag, but for a
period of time we will support the old behavior of inferring this from
IAM.
"""
return self.config.account_id
@property
def region(self):
""" Return the current region.
"""
return self.config.region
def get_arns(self, resources):
arns = []
m = self.get_model()
arn_key = getattr(m, 'arn', None)
if arn_key is False:
raise ValueError("%s do not have arns" % self.type)
id_key = m.id
for r in resources:
_id = r[id_key]
if arn_key:
arns.append(r[arn_key])
elif 'arn' in _id[:3]:
arns.append(_id)
else:
arns.append(self.generate_arn(_id))
return arns
@property
def generate_arn(self):
""" Generates generic arn if ID is not already arn format.
"""
if self._generate_arn is None:
self._generate_arn = functools.partial(
generate_arn,
self.resource_type.arn_service or self.resource_type.service,
region=not self.resource_type.global_resource and self.config.region or "",
account_id=self.account_id,
resource_type=self.resource_type.arn_type,
separator=self.resource_type.arn_separator)
return self._generate_arn
class MaxResourceLimit(object):
C7N_MAXRES_OP = os.environ.get("C7N_MAXRES_OP", 'or')
def __init__(self, policy, selection_count, population_count):
self.p = policy
self.op = MaxResourceLimit.C7N_MAXRES_OP
self.selection_count = selection_count
self.population_count = population_count
self.amount = None
self.percentage_amount = None
self.percent = None
self._parse_policy()
def _parse_policy(self,):
if isinstance(self.p.max_resources, dict):
self.op = self.p.max_resources.get("op", MaxResourceLimit.C7N_MAXRES_OP).lower()
self.percent = self.p.max_resources.get("percent")
self.amount = self.p.max_resources.get("amount")
if isinstance(self.p.max_resources, int):
self.amount = self.p.max_resources
if isinstance(self.p.max_resources_percent, (int, float)):
self.percent = self.p.max_resources_percent
if self.percent:
self.percentage_amount = self.population_count * (self.percent / 100.0)
def check_resource_limits(self):
if self.percentage_amount and self.amount:
if (self.selection_count > self.amount and
self.selection_count > self.percentage_amount and self.op == "and"):
raise ResourceLimitExceeded(
("policy:%s exceeded resource-limit:{limit} and percentage-limit:%s%% "
"found:{selection_count} total:{population_count}")
% (self.p.name, self.percent), "max-resource and max-percent",
self.amount, self.selection_count, self.population_count)
if self.amount:
if self.selection_count > self.amount and self.op != "and":
raise ResourceLimitExceeded(
("policy:%s exceeded resource-limit:{limit} "
"found:{selection_count} total: {population_count}") % self.p.name,
"max-resource", self.amount, self.selection_count, self.population_count)
if self.percentage_amount:
if self.selection_count > self.percentage_amount and self.op != "and":
raise ResourceLimitExceeded(
("policy:%s exceeded resource-limit:{limit}%% "
"found:{selection_count} total:{population_count}") % self.p.name,
"max-percent", self.percent, self.selection_count, self.population_count)
class ChildResourceManager(QueryResourceManager):
child_source = 'describe-child'
@property
def source_type(self):
source = self.data.get('source', self.child_source)
if source == 'describe':
source = self.child_source
return source
def get_parent_manager(self):
return self.get_resource_manager(self.resource_type.parent_spec[0])
def _batch_augment(manager, model, detail_spec, resource_set):
detail_op, param_name, param_key, detail_path, detail_args = detail_spec
client = local_session(manager.session_factory).client(
model.service, region_name=manager.config.region)
op = getattr(client, detail_op)
if manager.retry:
args = (op,)
op = manager.retry
else:
args = ()
kw = {param_name: [param_key and r[param_key] or r for r in resource_set]}
if detail_args:
kw.update(detail_args)
response = op(*args, **kw)
return response[detail_path]
def _scalar_augment(manager, model, detail_spec, resource_set):
detail_op, param_name, param_key, detail_path = detail_spec
client = local_session(manager.session_factory).client(
model.service, region_name=manager.config.region)
op = getattr(client, detail_op)
if manager.retry:
args = (op,)
op = manager.retry
else:
args = ()
results = []
for r in resource_set:
kw = {param_name: param_key and r[param_key] or r}
response = op(*args, **kw)
if detail_path:
response = response[detail_path]
else:
response.pop('ResponseMetadata')
if param_key is None:
response[model.id] = r
r = response
else:
r.update(response)
results.append(r)
return results
class RetryPageIterator(PageIterator):
retry = staticmethod(QueryResourceManager.retry)
def _make_request(self, current_kwargs):
return self.retry(self._method, **current_kwargs)
class TypeMeta(type):
def __repr__(cls):
identifier = None
if cls.config_type:
identifier = cls.config_type
elif cls.arn_type:
identifier = "AWS::%s::%s" % (cls.service.title(), cls.arn_type.title())
elif cls.enum_spec:
identifier = "AWS::%s::%s" % (cls.service.title(), cls.enum_spec[1])
else:
identifier = "AWS::%s::%s" % (cls.service.title(), cls.id)
return "<TypeInfo %s>" % identifier
@six.add_metaclass(TypeMeta)
class TypeInfo(object):
"""Resource Type Metadata"""
###########
# Required
# id field, should be the identifier used for apis
id = None
# name field, used for display
name = None
# which aws service (per sdk) has the api for this resource.
service = None
# used to query the resource by describe-sources
enum_spec = None
###########
# Optional
###########
# Arn handling / generation metadata
# arn resource attribute, when describe format has arn
arn = None
# type, used for arn construction, also required for universal tag augment
arn_type = None
# how arn type is separated from rest of arn
arn_separator = "/"
# for services that need custom labeling for arns
arn_service = None
##########
# Resource retrieval
# filter_name, when fetching a single resource via enum_spec
# technically optional, but effectively required for serverless
# event policies else we have to enumerate the population.
filter_name = None
# filter_type, scalar or list
filter_type = None
# used to enrich the resource descriptions returned by enum_spec
detail_spec = None
    # used when the api supports getting resource details en masse
batch_detail_spec = None
##########
# Misc
# used for reporting, array of fields
default_report_fields = ()
# date, latest date associated to resource, generally references
# either create date or modified date.
date = None
# dimension, defines that resource has cloud watch metrics and the
    # resource id can be passed as this value. Further customization
    # of dimensions requires subclassing the metrics filter.
dimension = None
# AWS Config Service resource type name
config_type = None
# Whether or not resource group tagging api can be used, in which
# case we'll automatically register tag actions/filters.
#
# Note values of True will register legacy tag filters/actions, values
# of object() will just register current standard tag/filters/actions.
universal_taggable = False
# Denotes if this resource exists across all regions (iam, cloudfront, r53)
global_resource = False
# Generally we utilize a service to namespace mapping in the metrics filter
    # however some resources have a type specific namespace (e.g. ebs)
metrics_namespace = None
# specific to ec2 service resources used to disambiguate a resource by its id
id_prefix = None
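
# Editor's sketch (all service/operation/field names below are hypothetical):
# a concrete resource is normally declared on top of QueryResourceManager by
# nesting a TypeInfo subclass. Real c7n resources additionally register
# themselves with the provider's resource registry, which lives outside this
# module.
class _ExampleWidget(QueryResourceManager):
    class resource_type(TypeInfo):
        service = 'widgets'                              # hypothetical boto3 service
        enum_spec = ('describe_widgets', 'Widgets[]', None)
        id = 'WidgetId'
        name = 'WidgetName'
        arn_type = 'widget'
        filter_name = 'WidgetIds'
        filter_type = 'list'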
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
class Order(resource.Resource):
"""A resource allowing for the generation secret material by Barbican.
The resource allows to generate some secret material. It can be, for
example, some key or certificate. The order encapsulates the workflow
and history for the creation of a secret. The time to generate a secret can
vary depending on the type of secret.
"""
support_status = support.SupportStatus(version='2014.2')
default_client_name = 'barbican'
entity = 'orders'
PROPERTIES = (
NAME, PAYLOAD_CONTENT_TYPE, MODE, EXPIRATION,
ALGORITHM, BIT_LENGTH, TYPE, REQUEST_TYPE, SUBJECT_DN,
SOURCE_CONTAINER_REF, CA_ID, PROFILE, REQUEST_DATA,
PASS_PHRASE
) = (
'name', 'payload_content_type', 'mode', 'expiration',
'algorithm', 'bit_length', 'type', 'request_type', 'subject_dn',
'source_container_ref', 'ca_id', 'profile', 'request_data',
'pass_phrase'
)
ATTRIBUTES = (
STATUS, ORDER_REF, SECRET_REF, PUBLIC_KEY, PRIVATE_KEY,
CERTIFICATE, INTERMEDIATES, CONTAINER_REF
) = (
'status', 'order_ref', 'secret_ref', 'public_key', 'private_key',
'certificate', 'intermediates', 'container_ref'
)
ORDER_TYPES = (
KEY, ASYMMETRIC, CERTIFICATE
) = (
'key', 'asymmetric', 'certificate'
)
# full-cmc is declared but not yet supported in barbican
REQUEST_TYPES = (
STORED_KEY, SIMPLE_CMC, CUSTOM
) = (
'stored-key', 'simple-cmc', 'custom'
)
ALLOWED_PROPERTIES_FOR_TYPE = {
KEY: [NAME, ALGORITHM, BIT_LENGTH, MODE, PAYLOAD_CONTENT_TYPE,
EXPIRATION],
ASYMMETRIC: [NAME, ALGORITHM, BIT_LENGTH, MODE, PASS_PHRASE,
PAYLOAD_CONTENT_TYPE, EXPIRATION],
CERTIFICATE: [NAME, REQUEST_TYPE, SUBJECT_DN, SOURCE_CONTAINER_REF,
CA_ID, PROFILE, REQUEST_DATA]
}
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Human readable name for the secret.'),
),
PAYLOAD_CONTENT_TYPE: properties.Schema(
properties.Schema.STRING,
_('The type/format the secret data is provided in.'),
),
EXPIRATION: properties.Schema(
properties.Schema.STRING,
_('The expiration date for the secret in ISO-8601 format.'),
constraints=[
constraints.CustomConstraint('iso_8601'),
],
),
ALGORITHM: properties.Schema(
properties.Schema.STRING,
_('The algorithm type used to generate the secret. '
'Required for key and asymmetric types of order.'),
),
BIT_LENGTH: properties.Schema(
properties.Schema.INTEGER,
_('The bit-length of the secret. Required for key and '
'asymmetric types of order.'),
),
MODE: properties.Schema(
properties.Schema.STRING,
_('The type/mode of the algorithm associated with the secret '
'information.'),
),
TYPE: properties.Schema(
properties.Schema.STRING,
_('The type of the order.'),
constraints=[
constraints.AllowedValues(ORDER_TYPES),
],
required=True,
support_status=support.SupportStatus(version='5.0.0'),
),
REQUEST_TYPE: properties.Schema(
properties.Schema.STRING,
_('The type of the certificate request.'),
support_status=support.SupportStatus(version='5.0.0'),
constraints=[constraints.AllowedValues(REQUEST_TYPES)]
),
SUBJECT_DN: properties.Schema(
properties.Schema.STRING,
_('The subject of the certificate request.'),
support_status=support.SupportStatus(version='5.0.0'),
),
SOURCE_CONTAINER_REF: properties.Schema(
properties.Schema.STRING,
            _('The source of the certificate request.'),
support_status=support.SupportStatus(version='5.0.0'),
constraints=[
constraints.CustomConstraint('barbican.container')
],
),
CA_ID: properties.Schema(
properties.Schema.STRING,
_('The identifier of the CA to use.'),
support_status=support.SupportStatus(version='5.0.0'),
),
PROFILE: properties.Schema(
properties.Schema.STRING,
_('The profile of certificate to use.'),
support_status=support.SupportStatus(version='5.0.0'),
),
REQUEST_DATA: properties.Schema(
properties.Schema.STRING,
_('The content of the CSR. Only for certificate orders.'),
support_status=support.SupportStatus(version='5.0.0'),
),
PASS_PHRASE: properties.Schema(
properties.Schema.STRING,
            _('The passphrase of the created key. Can be set only '
'for asymmetric type of order.'),
support_status=support.SupportStatus(version='5.0.0'),
),
}
attributes_schema = {
STATUS: attributes.Schema(
_('The status of the order.'),
type=attributes.Schema.STRING
),
ORDER_REF: attributes.Schema(
_('The URI to the order.'),
type=attributes.Schema.STRING
),
SECRET_REF: attributes.Schema(
_('The URI to the created secret.'),
type=attributes.Schema.STRING
),
CONTAINER_REF: attributes.Schema(
_('The URI to the created container.'),
support_status=support.SupportStatus(version='5.0.0'),
type=attributes.Schema.STRING
),
PUBLIC_KEY: attributes.Schema(
_('The payload of the created public key, if available.'),
support_status=support.SupportStatus(version='5.0.0'),
type=attributes.Schema.STRING
),
PRIVATE_KEY: attributes.Schema(
_('The payload of the created private key, if available.'),
support_status=support.SupportStatus(version='5.0.0'),
type=attributes.Schema.STRING
),
CERTIFICATE: attributes.Schema(
_('The payload of the created certificate, if available.'),
support_status=support.SupportStatus(version='5.0.0'),
type=attributes.Schema.STRING
),
INTERMEDIATES: attributes.Schema(
_('The payload of the created intermediates, if available.'),
support_status=support.SupportStatus(version='5.0.0'),
type=attributes.Schema.STRING
),
}
def handle_create(self):
info = dict((k, v) for k, v in self.properties.items()
if v is not None)
order = self.client().orders.create(**info)
order_ref = order.submit()
self.resource_id_set(order_ref)
        # NOTE(pshchelo): order_ref is a HATEOAS reference, i.e. a string,
        # and does not need to be fixed; see LP bug #1393268.
return order_ref
def validate(self):
super(Order, self).validate()
if self.properties[self.TYPE] != self.CERTIFICATE:
if (self.properties[self.ALGORITHM] is None
or self.properties[self.BIT_LENGTH] is None):
msg = _("Properties %(algorithm)s and %(bit_length)s are "
"required for %(type)s type of order.") % {
'algorithm': self.ALGORITHM,
'bit_length': self.BIT_LENGTH,
'type': self.properties[self.TYPE]}
raise exception.StackValidationFailed(message=msg)
else:
if (self.properties[self.PROFILE] and
not self.properties[self.CA_ID]):
raise exception.ResourcePropertyDependency(
prop1=self.PROFILE, prop2=self.CA_ID
)
declared_props = sorted([k for k, v in six.iteritems(
self.properties) if k != self.TYPE and v is not None])
allowed_props = sorted(self.ALLOWED_PROPERTIES_FOR_TYPE[
self.properties[self.TYPE]])
diff = sorted(set(declared_props) - set(allowed_props))
if diff:
msg = _("Unexpected properties: %(unexpected)s. Only these "
"properties are allowed for %(type)s type of order: "
"%(allowed)s.") % {
'unexpected': ', '.join(diff),
'type': self.properties[self.TYPE],
'allowed': ', '.join(allowed_props)}
raise exception.StackValidationFailed(message=msg)
def check_create_complete(self, order_href):
order = self.client().orders.get(order_href)
if order.status == 'ERROR':
reason = order.error_reason
code = order.error_status_code
msg = (_("Order '%(name)s' failed: %(code)s - %(reason)s")
% {'name': self.name, 'code': code, 'reason': reason})
raise exception.Error(msg)
return order.status == 'ACTIVE'
def _resolve_attribute(self, name):
client = self.client()
order = client.orders.get(self.resource_id)
if name in (
self.PUBLIC_KEY, self.PRIVATE_KEY, self.CERTIFICATE,
self.INTERMEDIATES):
container = client.containers.get(order.container_ref)
secret = getattr(container, name)
return secret.payload
return getattr(order, name)
def resource_mapping():
return {
'OS::Barbican::Order': Order,
}
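# Hedged usage sketch (not part of the original module): a minimal HOT
# template snippet for this resource. Property names follow the schema
# defined above; the concrete values are illustrative only.
#
#   resources:
#     app_key_order:
#       type: OS::Barbican::Order
#       properties:
#         name: app-key
#         type: key
#         algorithm: aes
#         bit_length: 256
#         mode: cbc
#
# Per validate() above, algorithm and bit_length are mandatory for the key
# and asymmetric order types, while certificate orders rely on the
# certificate-specific properties (profile, ca_id, request_data, ...).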
|
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
This script is meant as a simple way to reply to ical invitations from mutt.
See README for instructions and LICENSE for licensing information.
"""
from __future__ import with_statement
__author__="Martin Sander"
__license__="MIT"
import vobject
import tempfile, time
import os, sys
import warnings
from datetime import datetime
from subprocess import Popen, PIPE
from getopt import gnu_getopt as getopt
usage="""
usage:
%s [OPTIONS] -e your@email.address filename.ics
OPTIONS:
-i interactive
-a accept
-d decline
-t tentatively accept
(accept is default, last one wins)
""" % sys.argv[0]
def del_if_present(dic, key):
if dic.has_key(key):
del dic[key]
def set_accept_state(attendees, state):
for attendee in attendees:
attendee.params['PARTSTAT'] = [unicode(state)]
for i in ["RSVP","ROLE","X-NUM-GUESTS","CUTYPE"]:
del_if_present(attendee.params,i)
return attendees
def get_accept_decline():
while True:
sys.stdout.write("\nAccept Invitation? [Y/n/t]")
ans = sys.stdin.readline()
if ans.lower() == 'y\n' or ans == '\n':
return 'ACCEPTED'
elif ans.lower() == 'n\n':
return 'DECLINED'
elif ans.lower() =='t\n':
return 'TENTATIVE'
def get_answer(invitation):
# create
ans = vobject.newFromBehavior('vcalendar')
ans.add('method')
ans.method.value = "REPLY"
ans.add('vevent')
# just copy from invitation
#for i in ["uid", "summary", "dtstart", "dtend", "organizer"]:
# There's a problem serializing TZ info in Python, temp fix
for i in ["uid", "summary", "organizer"]:
if invitation.vevent.contents.has_key(i):
ans.vevent.add( invitation.vevent.contents[i][0] )
# new timestamp
ans.vevent.add('dtstamp')
ans.vevent.dtstamp.value = datetime.utcnow().replace(
tzinfo = invitation.vevent.dtstamp.value.tzinfo)
return ans
def write_to_tempfile(ical):
tempdir = tempfile.mkdtemp()
icsfile = tempdir+"/event-reply.ics"
with open(icsfile,"w") as f:
f.write(ical.serialize())
return icsfile, tempdir
def get_mutt_command(ical, email_address, accept_decline, icsfile):
accept_decline = accept_decline.capitalize()
if ical.vevent.contents.has_key('organizer'):
if hasattr(ical.vevent.organizer,'EMAIL_param'):
sender = ical.vevent.organizer.EMAIL_param
else:
sender = ical.vevent.organizer.value.split(':')[1] #workaround for MS
else:
sender = "NO SENDER"
summary = ical.vevent.contents['summary'][0].value.encode()
command = ["mutt", "-a", icsfile,
"-s", "'%s: %s'" % (accept_decline, summary), "--", sender]
#Uncomment the below line, and move it above the -s line to enable the wrapper
#"-e", 'set sendmail=\'ical_reply_sendmail_wrapper.sh\'',
return command
def execute(command, mailtext):
process = Popen(command, stdin=PIPE)
process.stdin.write(mailtext)
process.stdin.close()
result = None
while result is None:
result = process.poll()
time.sleep(.1)
if result != 0:
print "unable to send reply, subprocess exited with\
exit code %d\nPress return to continue" % result
sys.stdin.readline()
def openics(invitation_file):
with open(invitation_file) as f:
try:
with warnings.catch_warnings(): #vobject uses deprecated Exception stuff
warnings.simplefilter("ignore")
invitation = vobject.readOne(f, ignoreUnreadable=True)
except AttributeError:
invitation = vobject.readOne(f, ignoreUnreadable=True)
return invitation
def display(ical):
summary = ical.vevent.contents['summary'][0].value.encode()
if ical.vevent.contents.has_key('organizer'):
if hasattr(ical.vevent.organizer,'EMAIL_param'):
sender = ical.vevent.organizer.EMAIL_param
else:
sender = ical.vevent.organizer.value.split(':')[1] #workaround for MS
else:
sender = "NO SENDER"
if ical.vevent.contents.has_key('description'):
description = ical.vevent.contents['description'][0].value
else:
description = "NO DESCRIPTION"
if ical.vevent.contents.has_key('attendee'):
attendees = ical.vevent.contents['attendee']
else:
attendees = ""
sys.stdout.write("From:\t" + sender + "\n")
sys.stdout.write("Title:\t" + summary + "\n")
sys.stdout.write("To:\t")
for attendee in attendees:
if hasattr(attendee,'EMAIL_param'):
sys.stdout.write(attendee.CN_param + " <" + attendee.EMAIL_param + ">, ")
else:
sys.stdout.write(attendee.CN_param + " <" + attendee.value.split(':')[1] + ">, ") #workaround for MS
sys.stdout.write("\n\n")
sys.stdout.write(description + "\n")
if __name__=="__main__":
email_address = None
accept_decline = 'ACCEPTED'
opts, args=getopt(sys.argv[1:],"e:aidt")
if len(args) < 1:
sys.stderr.write(usage)
sys.exit(1)
invitation = openics(args[0])
#print(invitation)
display(invitation)
for opt,arg in opts:
if opt == '-e':
email_address = arg
if opt == '-i':
accept_decline = get_accept_decline()
if opt == '-a':
accept_decline = 'ACCEPTED'
if opt == '-d':
accept_decline = 'DECLINED'
if opt == '-t':
accept_decline = 'TENTATIVE'
ans = get_answer(invitation)
if invitation.vevent.contents.has_key('attendee'):
attendees = invitation.vevent.contents['attendee']
else:
attendees = ""
set_accept_state(attendees,accept_decline)
ans.vevent.add('attendee')
ans.vevent.attendee_list.pop()
flag = 1
for attendee in attendees:
if hasattr(attendee,'EMAIL_param'):
if attendee.EMAIL_param == email_address:
ans.vevent.attendee_list.append(attendee)
flag = 0
else:
if attendee.value.split(':')[1] == email_address:
ans.vevent.attendee_list.append(attendee)
flag = 0
if flag:
sys.stderr.write("Seems like you have not been invited to this event!\n")
sys.exit(1)
icsfile, tempdir = write_to_tempfile(ans)
mutt_command = get_mutt_command(ans, email_address, accept_decline, icsfile)
mailtext = "'%s has %s'" % (email_address, accept_decline.lower())
execute(mutt_command, mailtext)
os.remove(icsfile)
os.rmdir(tempdir)
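# Hedged example (not part of the original script): a typical invocation,
# assuming the script is saved as ical_reply.py somewhere on $PATH:
#
#   python ical_reply.py -i -e you@example.org invitation.ics
#
# This prints the invitation, prompts for Accept/Decline/Tentative, and then
# hands the generated reply to mutt roughly as:
#
#   mutt -a <tempdir>/event-reply.ics -s "'Accepted: <summary>'" -- <organizer>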
|
|
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from collections import OrderedDict
import itertools
import os
import re
from testrunner.local import statusfile
from testrunner.local import testsuite
from testrunner.objects import testcase
from testrunner.outproc import base as outproc
try:
basestring # Python 2
except NameError: # Python 3
basestring = str
FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
ENV_PATTERN = re.compile(r"//\s+Environment Variables:(.*)")
SELF_SCRIPT_PATTERN = re.compile(r"//\s+Env: TEST_FILE_NAME")
MODULE_PATTERN = re.compile(r"^// MODULE$", flags=re.MULTILINE)
NO_HARNESS_PATTERN = re.compile(r"^// NO HARNESS$", flags=re.MULTILINE)
# Flags known to misbehave when combining arbitrary mjsunit tests. Can also
# be compiled regular expressions.
COMBINE_TESTS_FLAGS_BLACKLIST = [
'--check-handle-count',
'--enable-tracing',
re.compile('--experimental.*'),
'--expose-trigger-failure',
re.compile('--harmony.*'),
'--mock-arraybuffer-allocator',
'--print-ast',
re.compile('--trace.*'),
'--wasm-lazy-compilation',
]
class TestLoader(testsuite.JSTestLoader):
@property
def excluded_files(self):
return {
"mjsunit.js",
"mjsunit_suppressions.js",
}
class TestSuite(testsuite.TestSuite):
def _test_loader_class(self):
return TestLoader
def _test_combiner_class(self):
return TestCombiner
def _test_class(self):
return TestCase
class TestCase(testcase.D8TestCase):
def __init__(self, *args, **kwargs):
super(TestCase, self).__init__(*args, **kwargs)
source = self.get_source()
files_list = [] # List of file names to append to command arguments.
    files_match = FILES_PATTERN.search(source)
# Accept several lines of 'Files:'.
while True:
if files_match:
files_list += files_match.group(1).strip().split()
files_match = FILES_PATTERN.search(source, files_match.end())
else:
break
files = [ os.path.normpath(os.path.join(self.suite.root, '..', '..', f))
for f in files_list ]
testfilename = os.path.join(self.suite.root,
self.path + self._get_suffix())
if SELF_SCRIPT_PATTERN.search(source):
files = (
["-e", "TEST_FILE_NAME=\"%s\"" % testfilename.replace("\\", "\\\\")] +
files)
if NO_HARNESS_PATTERN.search(source):
mjsunit_files = []
else:
mjsunit_files = [os.path.join(self.suite.root, "mjsunit.js")]
files_suffix = []
if MODULE_PATTERN.search(source):
files_suffix.append("--module")
files_suffix.append(testfilename)
self._source_files = files
self._source_flags = self._parse_source_flags(source)
self._mjsunit_files = mjsunit_files
self._files_suffix = files_suffix
self._env = self._parse_source_env(source)
def _parse_source_env(self, source):
env_match = ENV_PATTERN.search(source)
env = {}
if env_match:
for env_pair in env_match.group(1).strip().split():
var, value = env_pair.split('=')
env[var] = value
return env
def _get_source_flags(self):
return self._source_flags
def _get_files_params(self):
files = list(self._source_files)
if not self._test_config.no_harness:
files += self._mjsunit_files
files += self._files_suffix
if self._test_config.isolates:
files += ['--isolate'] + files
return files
def _get_cmd_env(self):
return self._env
def _get_source_path(self):
return os.path.join(self.suite.root, self.path + self._get_suffix())
class TestCombiner(testsuite.TestCombiner):
def get_group_key(self, test):
"""Combine tests with the same set of flags.
Ignore:
1. Some special cases where it's not obvious what to pass in the command.
2. Tests with flags that can cause failure even inside try-catch wrapper.
3. Tests that use async functions. Async functions can be scheduled after
exiting from try-catch wrapper and cause failure.
"""
if (len(test._files_suffix) > 1 or
test._env or
not test._mjsunit_files or
test._source_files):
return None
source_flags = test._get_source_flags()
if ('--expose-trigger-failure' in source_flags or
'--throws' in source_flags):
return None
source_code = test.get_source()
# Maybe we could just update the tests to await all async functions they
# call?
if 'async' in source_code:
return None
# TODO(machenbach): Remove grouping if combining tests in a flag-independent
# way works well.
return 1
def _combined_test_class(self):
return CombinedTest
class CombinedTest(testcase.D8TestCase):
"""Behaves like normal mjsunit tests except:
1. Expected outcome is always PASS
2. Instead of one file there is a try-catch wrapper with all combined tests
passed as arguments.
"""
def __init__(self, name, tests):
super(CombinedTest, self).__init__(tests[0].suite, '', name,
tests[0]._test_config)
self._tests = tests
def _prepare_outcomes(self, force_update=True):
self._statusfile_outcomes = outproc.OUTCOMES_PASS_OR_TIMEOUT
self.expected_outcomes = outproc.OUTCOMES_PASS_OR_TIMEOUT
def _get_shell_flags(self):
"""In addition to standard set of shell flags it appends:
--disable-abortjs: %AbortJS can abort the test even inside
trycatch-wrapper, so we disable it.
--es-staging: We blacklist all harmony flags due to false positives,
but always pass the staging flag to cover the mature features.
--omit-quit: Calling quit() in JS would otherwise early terminate.
--quiet-load: suppress any stdout from load() function used by
trycatch-wrapper.
"""
return [
'--test',
'--disable-abortjs',
'--es-staging',
'--omit-quit',
'--quiet-load',
]
def _get_cmd_params(self):
return (
super(CombinedTest, self)._get_cmd_params() +
['tools/testrunner/trycatch_loader.js', '--'] +
self._tests[0]._mjsunit_files +
['--'] +
[t._files_suffix[0] for t in self._tests]
)
def _merge_flags(self, flags):
"""Merges flags from a list of flags.
    Flag values not starting with '-' are merged with the preceding flag,
e.g. --foo 1 will become --foo=1. All other flags remain the same.
Returns: A generator of flags.
"""
if not flags:
return
# Iterate over flag pairs. ['-'] is a sentinel value for the last iteration.
    for flag1, flag2 in zip(flags, flags[1:] + ['-']):
if not flag2.startswith('-'):
assert '=' not in flag1
yield flag1 + '=' + flag2
elif flag1.startswith('-'):
yield flag1
def _is_flag_blacklisted(self, flag):
for item in COMBINE_TESTS_FLAGS_BLACKLIST:
if isinstance(item, basestring):
if item == flag:
return True
elif item.match(flag):
return True
return False
def _get_combined_flags(self, flags_gen):
"""Combines all flags - dedupes, keeps order and filters some flags.
Args:
flags_gen: Generator for flag lists.
Returns: A list of flags.
"""
merged_flags = self._merge_flags(list(itertools.chain(*flags_gen)))
unique_flags = OrderedDict((flag, True) for flag in merged_flags).keys()
return [
flag for flag in unique_flags
if not self._is_flag_blacklisted(flag)
]
def _get_source_flags(self):
# Combine flags from all source files.
return self._get_combined_flags(
test._get_source_flags() for test in self._tests)
def _get_statusfile_flags(self):
# Combine flags from all status file entries.
return self._get_combined_flags(
test._get_statusfile_flags() for test in self._tests)
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
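# Hedged illustration (not part of the original suite): how CombinedTest's
# private flag helpers behave, assuming their current semantics and called
# on a CombinedTest instance:
#
#   list(self._merge_flags(['--foo', '1', '--bar', '--foo', '1']))
#       -> ['--foo=1', '--bar', '--foo=1']   # value flags fused to --flag=value
#   self._get_combined_flags([['--foo', '1'], ['--bar', '--foo', '1']])
#       -> ['--foo=1', '--bar']              # deduped and blacklist-filtered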
|
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import os
import json
from paddle.distributed.auto_parallel.cluster import Cluster
from paddle.distributed.auto_parallel.cluster import DeviceType
from paddle.distributed.auto_parallel.cluster import LinkType
cluster_json = """
{
"machines": [
{
"hostname": "machine0",
"addr": "0.0.0.1",
"port": "768",
"devices": [
{
"global_id": 0,
"local_id": 0,
"type": "GPU",
"model": "A100-SXM4-40GB",
"sp_gflops": 19500,
"dp_gflops": 9700,
"memory": 40
},
{
"global_id": 1,
"local_id": 1,
"type": "GPU",
"model": "A100-SXM4-40GB",
"sp_gflops": 19500,
"dp_gflops": 9700,
"memory": 40
},
{
"global_id": 2,
"local_id": 0,
"type": "CPU",
"model": "Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GH",
"arch": "x86_64",
"vendor": "GenuineIntel",
"sp_gflops": 150,
"dp_gflops": 75,
"memory": 1510
},
{
"global_id": 3,
"local_id": 0,
"type": "NIC"
}
],
"links": [
{
"source_global_id": 0,
"target_global_id": 1,
"type": "NVL",
"bandwidth": 252
},
{
"source_global_id": 0,
"target_global_id": 2,
"type": "PHB",
"bandwidth": 12
},
{
"source_global_id": 1,
"target_global_id": 2,
"type": "PHB",
"bandwidth": 12
},
{
"source_global_id": 0,
"target_global_id": 3,
"type": "NET",
"bandwidth": 1
},
{
"source_global_id": 1,
"target_global_id": 3,
"type": "NET",
"bandwidth": 1
},
{
"source_global_id": 2,
"target_global_id": 3,
"type": "NET",
"bandwidth": 1
},
{
"source_global_id": 3,
"target_global_id": 7,
"type": "NET",
"bandwidth": 1
}
]
},
{
"hostname": "machine1",
"addr": "0.0.0.2",
"port": "768",
"devices": [
{
"global_id": 4,
"local_id": 0,
"type": "GPU",
"model": "Tesla V100-SXM2-32GB",
"sp_gflops": 15700,
"dp_gflops": 7800,
"memory": 32
},
{
"global_id": 5,
"local_id": 1,
"type": "GPU",
"model": "Tesla V100-SXM2-32GB",
"sp_gflops": 15700,
"dp_gflops": 7800,
"memory": 32
},
{
"global_id": 6,
"local_id": 0,
"type": "CPU",
"model": "Intel(R) Xeon(R) Gold 6271C CPU @ 2.60G",
"arch": "x86_64",
"vendor": "GenuineIntel",
"sp_gflops": 150,
"dp_gflops": 75,
"memory": "503"
},
{
"global_id": 7,
"local_id": 0,
"type": "NIC"
}
],
"links": [
{
"source_global_id": 4,
"target_global_id": 5,
"type": "NVL",
"bandwidth": 42
},
{
"source_global_id": 4,
"target_global_id": 6,
"type": "PHB",
"bandwidth": 12
},
{
"source_global_id": 5,
"target_global_id": 6,
"type": "PHB",
"bandwidth": 12
},
{
"source_global_id": 4,
"target_global_id": 7,
"type": "NET",
"bandwidth": 1
},
{
"source_global_id": 5,
"target_global_id": 7,
"type": "NET",
"bandwidth": 1
},
{
"source_global_id": 6,
"target_global_id": 7,
"type": "NET",
"bandwidth": 1
},
{
"source_global_id": 7,
"target_global_id": 3,
"type": "NET",
"bandwidth": 1
}
]
}
]
}
"""
class TestAutoParallelCluster(unittest.TestCase):
def test_cluster(self):
cluster_json_file = ""
cluster_json_object = json.loads(cluster_json)
with open("./auto_parallel_cluster.json", "w") as cluster_json_file:
json.dump(cluster_json_object, cluster_json_file)
cluster = Cluster()
cluster.build_from_file("./auto_parallel_cluster.json")
os.remove("./auto_parallel_cluster.json")
self.assertEqual(len(cluster.get_all_devices("GPU")), 4)
self.assertEqual(len(cluster.get_all_devices("CPU")), 2)
self.assertEqual(len(cluster.get_all_devices("NIC")), 2)
self.assertEqual(len(cluster.machines), 2)
# machine0
machine0 = cluster.machines[0]
self.assertEqual(machine0.id, 0)
self.assertEqual(machine0.hostname, "machine0")
self.assertEqual(machine0.addr, "0.0.0.1")
self.assertEqual(machine0.port, "768")
self.assertEqual(len(machine0.devices), 4)
self.assertEqual(len(machine0.links), 7)
# device0
device0_machine0 = machine0.devices[0]
self.assertEqual(device0_machine0.global_id, 0)
self.assertEqual(device0_machine0.local_id, 0)
self.assertEqual(device0_machine0.type, DeviceType.GPU)
self.assertEqual(device0_machine0.model, "A100-SXM4-40GB")
self.assertAlmostEqual(device0_machine0.sp_gflops, 19500)
self.assertAlmostEqual(device0_machine0.dp_gflops, 9700)
self.assertAlmostEqual(device0_machine0.memory, 40)
# device0, link0
link0_machine0 = machine0.links[(0, 1)]
self.assertEqual(link0_machine0.source.global_id, 0)
self.assertEqual(link0_machine0.target.global_id, 1)
self.assertEqual(link0_machine0.type, LinkType.NVL)
self.assertAlmostEqual(link0_machine0.bandwidth, 252)
self.assertAlmostEqual(link0_machine0.latency, 0)
# device 0, link 1
link1_machine0 = machine0.links[(0, 2)]
self.assertEqual(link1_machine0.source.global_id, 0)
self.assertEqual(link1_machine0.target.global_id, 2)
self.assertEqual(link1_machine0.type, LinkType.PHB)
self.assertAlmostEqual(link1_machine0.bandwidth, 12)
self.assertAlmostEqual(link1_machine0.latency, 0)
# device0, link2
link2_machine0 = machine0.links[(0, 3)]
self.assertEqual(link2_machine0.source.global_id, 0)
self.assertEqual(link2_machine0.target.global_id, 3)
self.assertEqual(link2_machine0.type, LinkType.NET)
self.assertAlmostEqual(link2_machine0.bandwidth, 1)
self.assertAlmostEqual(link2_machine0.latency, 0)
# device1
device1_machine0 = machine0.devices[1]
self.assertEqual(device1_machine0.global_id, 1)
self.assertEqual(device1_machine0.local_id, 1)
self.assertEqual(device1_machine0.type, DeviceType.GPU)
self.assertEqual(device1_machine0.model, "A100-SXM4-40GB")
self.assertAlmostEqual(device1_machine0.sp_gflops, 19500)
self.assertAlmostEqual(device1_machine0.dp_gflops, 9700)
self.assertAlmostEqual(device1_machine0.memory, 40)
# device1, link0
link0_machine0 = machine0.links[(1, 2)]
self.assertEqual(link0_machine0.source.global_id, 1)
self.assertEqual(link0_machine0.target.global_id, 2)
self.assertEqual(link0_machine0.type, LinkType.PHB)
self.assertAlmostEqual(link0_machine0.bandwidth, 12)
self.assertAlmostEqual(link0_machine0.latency, 0)
# device1, link1
link1_machine0 = machine0.links[(1, 3)]
self.assertEqual(link1_machine0.source.global_id, 1)
self.assertEqual(link1_machine0.target.global_id, 3)
self.assertEqual(link1_machine0.type, LinkType.NET)
self.assertAlmostEqual(link1_machine0.bandwidth, 1)
self.assertAlmostEqual(link1_machine0.latency, 0)
# device2
device2_machine0 = machine0.devices[2]
self.assertEqual(device2_machine0.global_id, 2)
self.assertEqual(device2_machine0.local_id, 0)
self.assertEqual(device2_machine0.type, DeviceType.CPU)
self.assertEqual(device2_machine0.model,
"Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GH")
self.assertAlmostEqual(device2_machine0.sp_gflops, 150)
self.assertAlmostEqual(device2_machine0.dp_gflops, 75)
self.assertAlmostEqual(device2_machine0.memory, 1510)
# device2, link0
link0_machine0 = machine0.links[(2, 3)]
self.assertEqual(link0_machine0.source.global_id, 2)
self.assertEqual(link0_machine0.target.global_id, 3)
self.assertEqual(link0_machine0.type, LinkType.NET)
self.assertAlmostEqual(link0_machine0.bandwidth, 1)
self.assertAlmostEqual(link0_machine0.latency, 0)
# device3
device3_machine0 = machine0.devices[3]
self.assertEqual(device3_machine0.global_id, 3)
self.assertEqual(device3_machine0.local_id, 0)
self.assertEqual(device3_machine0.type, DeviceType.NIC)
self.assertAlmostEqual(device3_machine0.model, None)
self.assertAlmostEqual(device3_machine0.sp_gflops, 0)
self.assertAlmostEqual(device3_machine0.dp_gflops, 0)
self.assertAlmostEqual(device3_machine0.memory, 0)
        # device3, link0
        link0_machine0 = machine0.links[(3, 7)]
self.assertEqual(link0_machine0.source.global_id, 3)
self.assertEqual(link0_machine0.target.global_id, 7)
self.assertEqual(link0_machine0.type, LinkType.NET)
self.assertAlmostEqual(link0_machine0.bandwidth, 1)
self.assertAlmostEqual(link0_machine0.latency, 0)
# machine1
machine1 = cluster.machines[1]
self.assertEqual(machine1.id, 1)
self.assertEqual(machine1.hostname, "machine1")
self.assertEqual(machine1.addr, "0.0.0.2")
self.assertEqual(machine1.port, "768")
self.assertEqual(len(machine1.devices), 4)
self.assertEqual(len(machine1.links), 7)
# device4
device4_machine1 = machine1.devices[4]
self.assertEqual(device4_machine1.global_id, 4)
self.assertEqual(device4_machine1.local_id, 0)
self.assertEqual(device4_machine1.type, DeviceType.GPU)
self.assertEqual(device4_machine1.model, "Tesla V100-SXM2-32GB")
self.assertAlmostEqual(device4_machine1.sp_gflops, 15700)
self.assertAlmostEqual(device4_machine1.dp_gflops, 7800)
self.assertAlmostEqual(device4_machine1.memory, 32)
# device4, link0
link0_machine1 = machine1.links[(4, 5)]
self.assertEqual(link0_machine1.source.global_id, 4)
self.assertEqual(link0_machine1.target.global_id, 5)
self.assertEqual(link0_machine1.type, LinkType.NVL)
self.assertAlmostEqual(link0_machine1.bandwidth, 42)
self.assertAlmostEqual(link0_machine1.latency, 0)
# device 4, link 1
link1_machine1 = machine1.links[(4, 6)]
self.assertEqual(link1_machine1.source.global_id, 4)
self.assertEqual(link1_machine1.target.global_id, 6)
self.assertEqual(link1_machine1.type, LinkType.PHB)
self.assertAlmostEqual(link1_machine1.bandwidth, 12)
self.assertAlmostEqual(link1_machine1.latency, 0)
# device4, link2
link2_machine1 = machine1.links[(4, 7)]
self.assertEqual(link2_machine1.source.global_id, 4)
self.assertEqual(link2_machine1.target.global_id, 7)
self.assertEqual(link2_machine1.type, LinkType.NET)
self.assertAlmostEqual(link2_machine1.bandwidth, 1)
self.assertAlmostEqual(link2_machine1.latency, 0)
# device5
device5_machine1 = machine1.devices[5]
self.assertEqual(device5_machine1.global_id, 5)
self.assertEqual(device5_machine1.local_id, 1)
self.assertEqual(device5_machine1.type, DeviceType.GPU)
        self.assertEqual(device5_machine1.model, "Tesla V100-SXM2-32GB")
        self.assertAlmostEqual(device5_machine1.sp_gflops, 15700)
        self.assertAlmostEqual(device5_machine1.dp_gflops, 7800)
        self.assertAlmostEqual(device5_machine1.memory, 32)
# device5, link0
link0_machine1 = machine1.links[(5, 6)]
self.assertEqual(link0_machine1.source.global_id, 5)
self.assertEqual(link0_machine1.target.global_id, 6)
self.assertEqual(link0_machine1.type, LinkType.PHB)
self.assertAlmostEqual(link0_machine1.bandwidth, 12)
self.assertAlmostEqual(link0_machine1.latency, 0)
# device5, link1
link1_machine1 = machine1.links[(5, 7)]
self.assertEqual(link1_machine1.source.global_id, 5)
self.assertEqual(link1_machine1.target.global_id, 7)
self.assertEqual(link1_machine1.type, LinkType.NET)
self.assertAlmostEqual(link1_machine1.bandwidth, 1)
self.assertAlmostEqual(link1_machine1.latency, 0)
# device6
device6_machine1 = machine1.devices[6]
self.assertEqual(device6_machine1.global_id, 6)
self.assertEqual(device6_machine1.local_id, 0)
self.assertEqual(device6_machine1.type, DeviceType.CPU)
self.assertEqual(device6_machine1.model,
"Intel(R) Xeon(R) Gold 6271C CPU @ 2.60G")
self.assertAlmostEqual(device6_machine1.sp_gflops, 150)
self.assertAlmostEqual(device6_machine1.dp_gflops, 75)
self.assertAlmostEqual(device6_machine1.memory, 503)
# device6, link0
link0_machine1 = machine1.links[(6, 7)]
self.assertEqual(link0_machine1.source.global_id, 6)
self.assertEqual(link0_machine1.target.global_id, 7)
self.assertEqual(link0_machine1.type, LinkType.NET)
self.assertAlmostEqual(link0_machine1.bandwidth, 1)
self.assertAlmostEqual(link0_machine1.latency, 0)
# device7
device7_machine1 = machine1.devices[7]
self.assertEqual(device7_machine1.global_id, 7)
self.assertEqual(device7_machine1.local_id, 0)
self.assertEqual(device7_machine1.type, DeviceType.NIC)
self.assertAlmostEqual(device7_machine1.model, None)
self.assertAlmostEqual(device7_machine1.sp_gflops, 0)
self.assertAlmostEqual(device7_machine1.dp_gflops, 0)
self.assertAlmostEqual(device7_machine1.memory, 0)
        # device7, link0
link0_machine1 = machine1.links[(7, 3)]
self.assertEqual(link0_machine1.source.global_id, 7)
self.assertEqual(link0_machine1.target.global_id, 3)
self.assertEqual(link0_machine1.type, LinkType.NET)
self.assertAlmostEqual(link0_machine1.bandwidth, 1)
self.assertAlmostEqual(link0_machine1.latency, 0)
str = "cluster: {}".format(cluster)
if __name__ == '__main__':
unittest.main()
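# Hedged note (not part of the original test): the JSON above doubles as a
# minimal reference for the cluster description consumed by
# Cluster.build_from_file -- each machine lists "devices" (GPU/CPU/NIC with
# global_id/local_id) and "links" (source/target global ids, type, bandwidth):
#
#   cluster = Cluster()
#   cluster.build_from_file("/path/to/cluster.json")  # hypothetical path
#   cluster.get_all_devices("GPU")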
|
|
"""
Defines the GridMapper class, which maps from a 2-D region in data space
into a structured (gridded) 1-D output space.
"""
# Python standard library imports
from contextlib import contextmanager
# Major library imports
from numpy import column_stack, transpose
# Enthought library imports
from traits.api import Bool, DelegatesTo, Instance, Float, Property
# Local relative imports
from abstract_mapper import AbstractMapper
from base_1d_mapper import Base1DMapper
from data_range_2d import DataRange2D
from linear_mapper import LinearMapper
from log_mapper import LogMapper
class GridMapper(AbstractMapper):
"""
Maps a 2-D data space to and from screen space by specifying a 2-tuple in
data space or by specifying a pair of screen coordinates.
The mapper concerns itself only with metric and not with orientation. So,
to "flip" a screen space orientation, swap the appropriate screen space
values for **x_low_pos**, **x_high_pos**, **y_low_pos**, and
**y_high_pos**.
"""
# The data-space bounds of the mapper.
range = Instance(DataRange2D)
# The screen space position of the lower bound of the horizontal axis.
x_low_pos = Float(0.0)
# The screen space position of the upper bound of the horizontal axis.
x_high_pos = Float(1.0)
# The screen space position of the lower bound of the vertical axis.
y_low_pos = Float(0.0)
# The screen space position of the upper bound of the vertical axis.
y_high_pos = Float(1.0)
# Convenience property for low and high positions in one structure.
# Must be a tuple (x_low_pos, x_high_pos, y_low_pos, y_high_pos).
screen_bounds = Property
# Should the mapper stretch the dataspace when its screen space bounds are
# modified (default), or should it preserve the screen-to-data ratio and
# resize the data bounds? If the latter, it will only try to preserve
# the ratio if both screen and data space extents are non-zero.
stretch_data_x = DelegatesTo("_xmapper", prefix="stretch_data")
stretch_data_y = DelegatesTo("_ymapper", prefix="stretch_data")
# Should the mapper try to maintain a fixed aspect ratio between x and y
maintain_aspect_ratio = Bool
# The aspect ratio that we wish to maintain
aspect_ratio = Float(1.0)
#------------------------------------------------------------------------
# Private Traits
#------------------------------------------------------------------------
_updating_submappers = Bool(False)
_updating_aspect = Bool(False)
_xmapper = Instance(Base1DMapper)
_ymapper = Instance(Base1DMapper)
#------------------------------------------------------------------------
# Public methods
#------------------------------------------------------------------------
def __init__(self, x_type="linear", y_type="linear", range=None, **kwargs):
# TODO: This is currently an implicit assumption, i.e. that the range
# will be passed in to the constructor. It would be impossible to
# create the xmapper and ymapper otherwise. However, this should be
# changed so that the mappers get created or modified in response to
# the .range attribute changing, instead of requiring the range to
# be passed in at construction time.
self.range = range
if "_xmapper" not in kwargs:
if x_type == "linear":
self._xmapper = LinearMapper(range=self.range.x_range)
elif x_type == "log":
self._xmapper = LogMapper(range=self.range.x_range)
else:
raise ValueError("Invalid x axis type: %s" % x_type)
else:
self._xmapper = kwargs.pop("_xmapper")
if "_ymapper" not in kwargs:
if y_type == "linear":
self._ymapper = LinearMapper(range=self.range.y_range)
elif y_type == "log":
self._ymapper = LogMapper(range=self.range.y_range)
else:
raise ValueError("Invalid y axis type: %s" % y_type)
else:
self._ymapper = kwargs.pop("_ymapper")
# Now that the mappers are created, we can go to the normal HasTraits
# constructor, which might set values that depend on us having a valid
# range and mappers.
super(GridMapper, self).__init__(**kwargs)
def map_screen(self, data_pts):
""" map_screen(data_pts) -> screen_array
Maps values from data space into screen space.
"""
xs, ys = transpose(data_pts)
screen_xs = self._xmapper.map_screen(xs)
screen_ys = self._ymapper.map_screen(ys)
screen_pts = column_stack([screen_xs, screen_ys])
return screen_pts
def map_data(self, screen_pts):
""" map_data(screen_pts) -> data_vals
Maps values from screen space into data space.
"""
screen_xs, screen_ys = transpose(screen_pts)
xs = self._xmapper.map_data(screen_xs)
ys = self._ymapper.map_data(screen_ys)
data_pts = column_stack([xs, ys])
return data_pts
def map_data_array(self, screen_pts):
return self.map_data(screen_pts)
#------------------------------------------------------------------------
# Private Methods
#------------------------------------------------------------------------
def _update_bounds(self):
with self._update_submappers():
self._xmapper.screen_bounds = (self.x_low_pos, self.x_high_pos)
self._ymapper.screen_bounds = (self.y_low_pos, self.y_high_pos)
self.updated = True
def _update_range(self):
self.updated = True
def _update_aspect_x(self):
y_width = self._ymapper.high_pos - self._ymapper.low_pos
if y_width == 0:
return
y_scale = (self._ymapper.range.high - self._ymapper.range.low)/y_width
x_range_low = self._xmapper.range.low
x_width = self._xmapper.high_pos - self._xmapper.low_pos
sign = self._xmapper.sign * self._ymapper.sign
if x_width == 0 or sign == 0:
return
x_scale = sign*y_scale/self.aspect_ratio
with self._update_aspect():
self._xmapper.range.set_bounds(x_range_low, x_range_low +
x_scale*x_width)
def _update_aspect_y(self):
x_width = self._xmapper.high_pos - self._xmapper.low_pos
if x_width == 0:
return
x_scale = (self._xmapper.range.high - self._xmapper.range.low)/x_width
y_range_low = self._ymapper.range.low
y_width = self._ymapper.high_pos-self._ymapper.low_pos
sign = self._xmapper.sign * self._ymapper.sign
if y_width == 0 or sign == 0:
return
y_scale = sign*x_scale*self.aspect_ratio
with self._update_aspect():
self._ymapper.range.set_bounds(y_range_low, y_range_low +
y_scale*y_width)
#------------------------------------------------------------------------
# Property handlers
#------------------------------------------------------------------------
def _range_changed(self, old, new):
if old is not None:
old.on_trait_change(self._update_range, "updated", remove=True)
if new is not None:
new.on_trait_change(self._update_range, "updated")
if self._xmapper is not None:
self._xmapper.range = new.x_range
if self._ymapper is not None:
self._ymapper.range = new.y_range
self._update_range()
def _x_low_pos_changed(self):
self._xmapper.low_pos = self.x_low_pos
def _x_high_pos_changed(self):
self._xmapper.high_pos = self.x_high_pos
def _y_low_pos_changed(self):
self._ymapper.low_pos = self.y_low_pos
def _y_high_pos_changed(self):
self._ymapper.high_pos = self.y_high_pos
def _set_screen_bounds(self, new_bounds):
# TODO: figure out a way to not need to do this check:
if self.screen_bounds == new_bounds:
return
self.set(x_low_pos=new_bounds[0], trait_change_notify=False)
self.set(x_high_pos=new_bounds[1], trait_change_notify=False)
self.set(y_low_pos=new_bounds[2], trait_change_notify=False)
self.set(y_high_pos=new_bounds[3], trait_change_notify=False)
self._update_bounds()
def _get_screen_bounds(self):
return (self.x_low_pos, self.x_high_pos,
self.y_low_pos, self.y_high_pos)
def _updated_fired_for__xmapper(self):
if not self._updating_aspect:
if self.maintain_aspect_ratio and self.stretch_data_x:
self._update_aspect_y()
if not self._updating_submappers:
self.updated = True
def _updated_fired_for__ymapper(self):
if not self._updating_aspect:
if self.maintain_aspect_ratio and self.stretch_data_y:
self._update_aspect_x()
if not self._updating_submappers:
self.updated = True
@contextmanager
def _update_submappers(self):
self._updating_submappers = True
try:
yield
finally:
self._updating_submappers = False
@contextmanager
def _update_aspect(self):
self._updating_aspect = True
try:
yield
finally:
self._updating_aspect = False
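# Hedged usage sketch (not part of the original module), assuming numpy is
# available and that DataRange2D.set_bounds accepts (x, y) tuples:
#
#   from numpy import array
#   r = DataRange2D()
#   r.set_bounds((0.0, 0.0), (10.0, 10.0))
#   mapper = GridMapper(range=r)
#   mapper.screen_bounds = (0.0, 100.0, 0.0, 50.0)
#   mapper.map_screen(array([[0.0, 0.0], [10.0, 10.0]]))
#   # -> approximately [[0., 0.], [100., 50.]] for the bounds above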
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2018, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Molcas Output Parser
#####################
Multiple frames are not currently supported
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import six
import pandas as pd
from pandas.io.parsers import ParserError
import numpy as np
from six import StringIO
from exa import TypedMeta
import exatomic
from .editor import Editor
from exatomic import Atom
from exatomic.algorithms.numerical import _flat_square_to_triangle, _square_indices
from exatomic.algorithms.basis import lmap, spher_lml_count
from exatomic.core.basis import (Overlap, BasisSet, BasisSetOrder,
deduplicate_basis_sets)
from exatomic.core.orbital import DensityMatrix, MOMatrix, Orbital
from exatomic.base import sym2z, z2sym
class OrbMeta(TypedMeta):
momatrix = MOMatrix
orbital = Orbital
class Orb(six.with_metaclass(OrbMeta, Editor)):
"""
Parser for molcas coefficient matrix dumps (e.g. RasOrb).
Note:
This parser assumes the file contains data from a single
calculation (i.e. a single frame).
"""
def to_universe(self):
raise NotImplementedError("This editor has no parse_atom method.")
def _read_one(self, found, keys, start, stop, osh, old, ret, key):
if not old:
# In order for this not to break if someone decides
# to change the print width format, compute it here
ln = self[start]
width = ln[3:].index(' ') + 3
keys['widths'] = [width] * len(ln.split())
keys['names'] = range(len(keys['widths']))
df = pd.read_fwf(StringIO('\n'.join(self[start:stop])), **keys)
if not osh:
ret.update({key: df.stack().dropna().values})
else: # Clean for open shell calcs
rm = found[1] - found[0] - 1
df.drop([rm - 1, rm], inplace=True)
df[0] = df[0].astype(np.float64)
df = df.stack().dropna().astype(np.float64).values
ret.update({key: df[:len(df)//2],
key+'1': df[len(df)//2:]})
def parse_momatrix(self):
_re_orb = 'ORBITAL'
_re_occ = 'OCCUPATION NUMBERS'
_re_ens = 'ONE ELECTRON ENERGIES'
_re_osh = '#UORB'
_re_idx = '#INDEX'
_re_hmn = 'human'
found = self.find(_re_orb, _re_occ, _re_ens,
_re_osh, _re_idx, keys_only=True)
found[_re_hmn] = [i for i in found[_re_occ]
if _re_hmn in self[i].lower()]
# Dimensions per irrep
dims = list(map(int, self[5].split()))
ndim = sum(dims)
osh = len(found[_re_osh]) > 0
start = found[_re_orb][0] + 1
stops = found[_re_occ] + found[_re_ens]
stop = min(stops) - 1
# Old file format
old = len(self[start].split()) == 1
# MOMatrix table
widths = [18] * 4 if old else [22] * 5
kws = {'widths': widths, 'names': range(len(widths))}
df = pd.read_fwf(StringIO('\n'.join(self[start:stop])),
widths=widths, names=range(len(widths)))
df.drop(df[df[0].str.contains(_re_orb)].index, inplace=True)
# Orbital occupations
mo, orb = {}, {}
start = found[_re_occ][0] + 1
stops = found[_re_hmn] + found[_re_ens] + found[_re_idx] + [len(self) + 1]
stop = min(stops) - 1
self._read_one(found[_re_occ], kws, start, stop,
osh, old, orb, 'occupation')
# Orbital energies
if found[_re_ens]:
start = found[_re_ens][0] + 1
stop = found[_re_idx][0]
self._read_one(found[_re_ens], kws, start, stop,
osh, old, orb, 'energy')
else:
orb.update({'energy': 0})
# Get all the groupby indices
if len(dims) > 1: # Symmetrized calc.
mo.update({
'irrep': [i for i, d in enumerate(dims) for _ in range(d * d)],
'orbital': [i for d in dims for i in range(d) for _ in range(d)],
'chi': [i for d in dims for _ in range(d) for i in range(d)]})
orb.update({'vector': [j for d in dims for j in range(d)],
'irrep': [i for i, d in enumerate(dims) for _ in range(d)]})
else:
ordx, chidx = _square_indices(ndim)
mo.update({'orbital': ordx, 'chi': chidx})
orb.update({'vector': range(ndim)})
# Unused groupby indices
orb.update({'group': 0, 'spin': 0, 'frame': 0})
mo.update({'frame': 0})
if not osh:
df[0] = df[0].astype(np.float64)
mo.update({'coef': df.stack().dropna().values})
else: # Open shell calc.
off = found[_re_orb][0] + 1
df.drop(found[_re_osh][0] - off, inplace=True)
df[0] = df[0].astype(np.float64)
coef = df.stack().dropna().values
mo.update({'coef': coef[:len(coef)//2],
'coef1': coef[len(coef)//2:]})
self.momatrix = pd.DataFrame.from_dict(mo)
self.orbital = pd.DataFrame.from_dict(orb)
def __init__(self, *args, **kwargs):
super(Orb, self).__init__(*args, **kwargs)
class OutMeta(TypedMeta):
atom = Atom
basis_set = BasisSet
basis_set_order = BasisSetOrder
class Output(six.with_metaclass(OutMeta, Editor)):
def add_orb(self, path, mocoefs='coef', orbocc='occupation'):
"""
Add a MOMatrix and Orbital table to a molcas.Output. If path is
an Editor containing momatrix and orbital tables then adds them
directly, otherwise assumes it is a molcas.Orb file.
Args:
path (str, :class:`exatomic.core.editor.Editor`): path to file or Editor object
mocoefs (str): rename coefficients
orbocc (str): rename occupations
"""
if isinstance(path, exatomic.Editor): orb = path
else: orb = Orb(path)
if mocoefs != 'coef' and orbocc == 'occupation':
orbocc = mocoefs
# MOMatrix
curmo = getattr(self, 'momatrix', None)
if curmo is None:
self.momatrix = orb.momatrix
if mocoefs != 'coef':
self.momatrix.rename(columns={'coef': mocoefs}, inplace=True)
else:
if mocoefs in self.momatrix.columns:
raise ValueError('This action would overwrite '
'coefficients. Specify mocoefs parameter.')
for i, default in enumerate(['coef', 'coef1']):
final = mocoefs + '1' if i else mocoefs
if default in orb.momatrix:
self.momatrix[final] = orb.momatrix[default]
# Orbital
curorb = getattr(self, 'orbital', None)
if curorb is None:
self.orbital = orb.orbital
if orbocc != 'occupation':
self.orbital.rename(columns={'occupation': orbocc}, inplace=True)
else:
if orbocc in self.orbital.columns:
raise ValueError('This action would overwrite '
'occupations. Specify orbocc parameter.')
for i, default in enumerate(['occupation', 'occupation1']):
final = orbocc + '1' if i else orbocc
if default in orb.orbital.columns:
self.orbital[final] = orb.orbital[default]
def add_overlap(self, path):
try: # If it's an ASCII text file
self.overlap = Overlap.from_column(path)
except ParserError: # If it's an HDF5 file
hdf = HDF(path)
if 'DESYM_CENTER_CHARGES' not in hdf._hdf:
self.overlap = hdf.overlap
return
if 'irrep' not in self.momatrix:
raise Exception("Trying to set symmetrized overlap with "
"desymmetrized MOMatrix data.")
ovl = pd.DataFrame(np.array(hdf._hdf['AO_OVERLAP_MATRIX']),
columns=('coef',))
ovl['irrep'] = self.momatrix['irrep']
ovl['chi0'] = self.momatrix['chi']
ovl['chi1'] = self.momatrix['orbital']
ovl['frame'] = 0
self.overlap = ovl
def _check_atom_sym(self):
"""Parses a less accurate atom list to check for symmetry."""
_re_sym = 'Cartesian Coordinates / Bohr, Angstrom'
start = self.find(_re_sym, keys_only=True)[0] + 4
cols = ['center', 'tag', 'x', 'y', 'z', 'd1', 'd2', 'd3']
stop = start
while len(self[stop].split()) == len(cols): stop += 1
atom = self.pandas_dataframe(start, stop, cols)
atom.drop(['d1', 'd2', 'd3'], axis=1, inplace=True)
atom['symbol'] = atom['tag'].str.extract(
            '([A-Za-z]{1,})([0-9]*)', expand=False)[0].str.lower().str.title()
atom['frame'] = 0
atom['center'] -= 1
return atom
def parse_atom(self):
"""Parses the atom list generated in SEWARD."""
_re_atom0 = 'Label Cartesian Coordinates'
_re_atom1 = 'Center Label'
found = self.find(_re_atom0, _re_atom1, keys_only=True)
if found[_re_atom0]:
accurate = True
starts = [i + 2 for i in found[_re_atom0]]
else:
accurate = False
starts = [i + 1 for i in found[_re_atom1]]
stops = starts[:] # Copy the list
for i in range(len(stops)):
while len(self[stops[i]].strip().split()) > 3:
stops[i] += 1
if not self[stops[i]].strip(): break
stops[i] -= 1
if accurate:
            lns = StringIO('\n'.join(self._lines[i]
                                     for start, stop in zip(starts, stops)
                                     for i in range(start, stop + 1)))
cols = ['tag', 'x', 'y', 'z']
else:
lns = StringIO('\n'.join(self[starts[0]:stops[0] + 1]))
cols = ['center', 'tag', 'x', 'y', 'z', 'xa', 'ya', 'za']
atom = pd.read_csv(lns, delim_whitespace=True,
names=cols)
if len(cols) == 8:
atom.drop(['xa', 'ya', 'za'], axis=1, inplace=True)
atom['symbol'] = atom['tag'].str.extract(
            '([A-Za-z]{1,})([0-9]*)', expand=False)[0].str.lower().str.title()
atom['Z'] = atom['symbol'].map(sym2z).astype(np.int64)
atom['center'] = range(atom.shape[0])
atom['frame'] = 0
self.atom = atom
# Work-around for symmetrized calcs?
allatom = self._check_atom_sym()
self.meta['symmetrized'] = False
if allatom.shape[0] > self.atom.shape[0]:
self.atom = allatom
self.meta['symmetrized'] = True
try:
self.atom['utag'] = _add_unique_tags(self.atom)
except ValueError:
pass
def parse_basis_set_order(self):
"""
Parses the shell ordering scheme if BSSHOW specified in SEWARD.
"""
_re_bas_order = 'Basis Label Type Center'
starts = [i + 1 for i in self.find(_re_bas_order, keys_only=True)]
lines, irreps, vecs, vec, nsym = [], [], [], 0, 0
for i, start in enumerate(starts):
stop = start
while self[stop].strip():
lines.append(stop)
irreps.append(i)
vecs.append(vec)
nsym = max(nsym, len(self[stop].split()))
stop += 1
vec += 1
lines.append(stop)
vec = 0
symcols = [('ocen{}'.format(i), 'sign{}'.format(i))
for i in range((nsym - 5) // 2)]
if symcols: self.meta['symmetrized'] = True
cols = ['idx', 'tag', 'type', 'center', 'phase'] + \
[c for t in symcols for c in t]
df = pd.read_csv(StringIO('\n'.join(self[i] for i in lines)),
delim_whitespace=True, names=cols)
# Symmetrized basis functions
for col in df.columns:
if col.startswith('ocen'):
df[col] = df[col].fillna(0.).astype(np.int64) - 1
# Extra info for symmetrized calcs
df['irrep'] = irreps
df['vector'] = vecs
df.drop(['idx'], inplace=True, axis=1)
df['frame'] = 0
# 0-based indexing
df['center'] -= 1
# Clean up (L, ml) values to ints
df['L'] = df['type'].str[1].map(lmap)
df['ml'] = df['type'].str[2:]
df['ml'].update(df['ml'].map({'': 0, 'x': 1, 'y': -1, 'z': 0}))
df['ml'].update(df['ml'].str[::-1])
df['ml'] = df['ml'].astype(np.int64)
# Seward and gateway may each print basis info.
# See if this happened and if so, keep only the first half.
try: # Apparently BSSHOW doesn't always print the basis set.
if 'set' not in self.atom.columns:
self.parse_basis_set()
except ValueError:
self.basis_set_order = df
return
bs = self.basis_set
sp = self.meta['spherical']
nbf = self.atom.set.map(bs.functions(sp).groupby('set').sum()).sum()
if df.shape[0] > nbf:
df = df.loc[:nbf - 1]
irreps = irreps[:nbf]
vecs = vecs[:nbf]
self.basis_set_order = df
shls = []
grps = df.groupby(['irrep', 'center', 'L', 'ml'])
for (_, cen, L, ml), grp in grps:
shl = 0
for _ in grp.index:
shls.append(shl)
shl += 1
self.basis_set_order['shell'] = shls
def parse_basis_set(self):
"""Parses the primitive exponents, coefficients and
shell if BSSHOW specified in SEWARD."""
_re_bas_0 = 'Shell nPrim nBasis Cartesian Spherical Contaminant'
_re_bas_1 = 'Label Cartesian Coordinates / Bohr'
_re_bas_2 = 'No. Exponent Contraction Coefficients'
found = self.find(_re_bas_0, _re_bas_1, _re_bas_2, keys_only=True)
bmaps = [i + 1 for i in found[_re_bas_0]]
atoms = [i + 2 for i in found[_re_bas_1]]
alphs = [i + 1 for i in found[_re_bas_2]]
widths = [11, 7, 8, 11, 10, 12]
names = _re_bas_0.split()
setmap, basmap = {}, []
for seht, (start, atst) in enumerate(zip(bmaps, atoms)):
stop = start
while self[stop].strip(): stop += 1
while self[atst].strip():
setmap[self[atst].split()[0]] = seht
atst += 1
basmap.append(pd.read_fwf(StringIO('\n'.join(self[start:stop])),
widths=widths, header=None, names=names))
basmap[-1]['set'] = seht
self.atom['set'] = self.atom['tag'].map(setmap)
basmap = pd.concat(basmap).reset_index(drop=True)
basmap['Shell'] = basmap['Shell'].map(lmap)
prims, pset, shell = [], 0, 0
for start, seht, L, nprim, nbas in zip(alphs, basmap['set'], basmap['Shell'],
basmap['nPrim'], basmap['nBasis']):
if pset != seht: shell = 0
# In case contraction coefficients overflow to next line
nmatch = len(self[start].split())
neat = nmatch == len(self[start + 1].split())
if neat:
block = self.pandas_dataframe(start, start + nprim, nbas + 2)
else:
# Extra obfuscation to handle exotic cases
ext = 1
while nmatch != len(self[start + ext].split()):
ext += 1
stop = start + ext * nprim
collated = [''.join(self[start + i * ext : start + i * ext + ext])
for i in range(nprim)]
ncols = len(collated[0].split())
block = pd.read_csv(StringIO('\n'.join(collated)),
delim_whitespace=True, names=range(ncols))
alps = (pd.concat([block[1]] * nbas).reset_index(drop=True)
.str.replace('D', 'E').astype(np.float64))
ds = block[list(range(2, nbas + 2))].unstack().reset_index(drop=True)
pdf = pd.concat([alps, ds], axis=1)
pdf.columns = ['alpha', 'd']
pdf['L'] = L
pdf['shell'] = np.repeat(range(shell, shell + nbas), nprim)
pdf['set'] = seht
prims.append(pdf)
shell += nbas
pset = seht
prims = pd.concat(prims).reset_index(drop=True)
prims['frame'] = 0
self.basis_set = prims
self.meta['spherical'] = True
if self.basis_set.lmax < 2:
self.meta['spherical'] = False
def __init__(self, *args, **kwargs):
super(Output, self).__init__(*args, **kwargs)
class HDFMeta(TypedMeta):
atom = Atom
overlap = Overlap
orbital = Orbital
momatrix = MOMatrix
basis_set = BasisSet
basis_set_order = BasisSetOrder
class HDF(six.with_metaclass(HDFMeta, object)):
_getter_prefix = 'parse'
_to_universe = Editor._to_universe
def to_universe(self):
return self._to_universe()
def parse_atom(self):
ztag = 'CENTER_CHARGES'
xtag = 'CENTER_COORDINATES'
ltag = 'CENTER_LABELS'
self.meta['symmetrized'] = False
if 'DESYM_CENTER_CHARGES' in self._hdf:
self.meta['symmetrized'] = True
ztag = 'DESYM_' + ztag
xtag = 'DESYM_' + xtag
ltag = 'DESYM_' + ltag
Z = pd.Series(self._hdf[ztag]).astype(np.int64)
xyzs = np.array(self._hdf[xtag])
labs = pd.Series(self._hdf[ltag]).apply(
lambda s: s.decode('utf-8').strip())
self.atom = pd.DataFrame.from_dict({'Z': Z,
'x': xyzs[:, 0],
'y': xyzs[:, 1],
'z': xyzs[:, 2],
'center': range(len(Z)),
'symbol': Z.map(z2sym),
'label': labs,
'frame': 0})
if self.meta['symmetrized']:
symops = {'E': np.array([ 1., 1., 1.]),
'x': np.array([-1., 0., 0.]),
'y': np.array([ 0., -1., 0.]),
'z': np.array([ 0., 0., -1.]),
'xy': np.array([-1., -1., 0.]),
'xz': np.array([-1., 0., -1.]),
'yz': np.array([ 0., -1., -1.]),
'xyz': np.array([-1., -1., -1.])}
self.meta['symops'] = symops
self.atom[['tag', 'symop']] = self.atom['label'].str.extract('(.*):(.*)',
expand=True)
self.atom['tag'] = self.atom['tag'].str.strip()
self.atom['symop'] = self.atom['symop'].str.strip()
try:
self.atom['utag'] = _add_unique_tags(self.atom)
except ValueError:
pass
def parse_basis_set_order(self):
bso = np.array(self._hdf['BASIS_FUNCTION_IDS'])
df = {'center': bso[:, 0] - 1,
'shell': bso[:, 1] - 1,
'L': bso[:, 2],
'frame': 0}
if bso.shape[1] == 4:
df['ml'] = bso[:, 3]
else:
df['l'] = bso[:, 3]
df['m'] = bso[:, 4]
df['n'] = bso[:, 5]
self.basis_set_order = pd.DataFrame.from_dict(df)
def parse_orbital(self):
ens = np.array(self._hdf['MO_ENERGIES'])
self.orbital = pd.DataFrame.from_dict({
'energy': ens, 'vector': range(len(ens)),
'occupation': np.array(self._hdf['MO_OCCUPATIONS']),
'label': pd.Series(self._hdf['MO_TYPEINDICES']).apply(
lambda s: s.decode('utf-8')),
'frame': 0, 'group': 0, 'spin': 0})
def parse_overlap(self):
if 'symmetrized' not in self.meta: self.parse_atom()
key = 'AO_OVERLAP_MATRIX'
if not self.meta['symmetrized']:
self.overlap = Overlap.from_column(
_flat_square_to_triangle(np.array(self._hdf[key])))
else:
print('Symmetrized overlap indices not set correctly.')
self.overlap = Overlap.from_dict({
'coef': np.array(self._hdf[key]),
'chi0': 0, 'chi1': 0, 'frame': 0
})
def parse_basis_set(self):
if 'symmetrized' not in self.meta: self.parse_atom()
bset = np.array(self._hdf['PRIMITIVES'])
idxs = np.array(self._hdf['PRIMITIVE_IDS'])
bset = pd.DataFrame.from_dict({
'alpha': bset[:, 0], 'd': bset[:, 1],
'center': idxs[:, 0] - 1, 'L': idxs[:, 1],
'shell': idxs[:, 2] - 1
})
self.no_dup = bset
if self.meta['symmetrized']:
self.basis_set, atommap = deduplicate_basis_sets(bset)
self.atom['set'] = self.atom['center'].map(atommap)
else:
self.basis_set = bset.rename(columns={'center': 'set'})
def parse_momatrix(self):
if 'MO_VECTORS' not in self._hdf: return
coefs = np.array(self._hdf['MO_VECTORS'])
try:
symm = np.array(self._hdf['DESYM_MATRIX'])
print('Symmetry not supported on HDF yet.')
return
except KeyError:
dim = np.int64(np.sqrt(coefs.shape[0]))
self.momatrix = pd.DataFrame.from_dict({
'orbital': np.repeat(range(dim), dim), 'frame': 0,
'chi': np.tile(range(dim), dim), 'coef': coefs})
def __getitem__(self, key):
if isinstance(key, str):
return getattr(self, key)
raise KeyError()
def __init__(self, *args, **kwargs):
try:
import h5py
except ImportError:
print("You must install h5py for access to HDF5 utilities.")
return
if not os.path.isfile(args[0]):
print('Argument is likely incorrect file path.')
self._hdf = h5py.File(args[0], 'r')
self.meta = {'gaussian': True, 'program': 'molcas'}
def _add_unique_tags(atom):
"""De-duplicates atom identifier in symmetrized calcs."""
utags = []
for tag in atom['tag']:
utag = tag
while utag in utags:
utag = ''.join(filter(str.isalpha, utag)) + \
str(int(''.join(filter(str.isdigit, utag))) + 1)
utags.append(utag)
return utags
def parse_molcas(fp, momatrix=None, overlap=None, occvec=None, **kwargs):
"""
    Parses a Molcas output file. Optionally attempts to parse additional
    information from files in the same directory: a Molcas Orb file
    (MO coefficient matrix) and/or the AO overlap matrix. If an Orb file is
    given, a density matrix is also constructed from its MO coefficients
    and occupation vector.
Args:
fp (str): Path to output file
momatrix (str): file name of the C matrix of interest
overlap (str): file name of the overlap matrix
occvec (str): an occupation vector
Returns:
parsed (Editor): contains many attributes similar to the
exatomic universe
"""
uni = Output(fp, **kwargs)
adir = os.sep.join(fp.split(os.sep)[:-1])
if momatrix is not None:
fp = os.sep.join([adir, momatrix])
if os.path.isfile(fp):
orb = Orb(fp)
uni.momatrix = orb.momatrix
uni.occupation_vector = orb.occupation_vector
occvec = occvec if occvec is not None else orb.occupation_vector
d = DensityMatrix.from_momatrix(orb.momatrix, occvec)
uni.density = d
else:
print('Is {} in the same directory as {}?'.format(momatrix, fp))
if overlap is not None:
fp = os.sep.join([adir, overlap])
if os.path.isfile(fp): uni.overlap = Overlap.from_file(fp)
else: print('Is {} in the same directory as {}?'.format(overlap, fp))
return uni
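# Hedged usage sketch (file names below are hypothetical):
#
#   uni = parse_molcas('calc/molcas.out', momatrix='molcas.RasOrb')
#   uni.atom        # geometry parsed from the SEWARD section
#   uni.momatrix    # MO coefficients read from the Orb file, if found
#   uni.overlap     # populated when the overlap keyword points at a file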
|
|
from collections import defaultdict
import threading
import traceback
import redis
import grpc
import ray
from ray import ray_constants
from ray import cloudpickle as pickle
import ray._private.profiling as profiling
import logging
logger = logging.getLogger(__name__)
class ImportThread:
"""A thread used to import exports from the driver or other workers.
Attributes:
worker: the worker object in this process.
mode: worker mode
redis_client: the redis client used to query exports.
threads_stopped (threading.Event): A threading event used to signal to
the thread that it should exit.
imported_collision_identifiers: This is a dictionary mapping collision
identifiers for the exported remote functions and actor classes to
the number of times that collision identifier has appeared. This is
used to provide good error messages when the same function or class
is exported many times.
"""
def __init__(self, worker, mode, threads_stopped):
self.worker = worker
self.mode = mode
self.gcs_client = worker.gcs_client
if worker.gcs_pubsub_enabled:
self.subscriber = worker.gcs_function_key_subscriber
self.subscriber.subscribe()
else:
self.subscriber = worker.redis_client.pubsub()
self.subscriber.subscribe(
b"__keyspace@0__:"
+ ray._private.function_manager.make_exports_prefix(
self.worker.current_job_id
)
)
self.threads_stopped = threads_stopped
self.imported_collision_identifiers = defaultdict(int)
# Keep track of the number of imports that we've imported.
self.num_imported = 0
def start(self):
"""Start the import thread."""
self.t = threading.Thread(target=self._run, name="ray_import_thread")
# Making the thread a daemon causes it to exit
# when the main thread exits.
self.t.daemon = True
self.t.start()
def join_import_thread(self):
"""Wait for the thread to exit."""
self.t.join()
def _run(self):
try:
self._do_importing()
while True:
# Exit if we received a signal that we should stop.
if self.threads_stopped.is_set():
return
if self.worker.gcs_pubsub_enabled:
key = self.subscriber.poll()
if key is None:
# subscriber has closed.
break
else:
msg = self.subscriber.get_message()
if msg is None:
self.threads_stopped.wait(timeout=0.01)
continue
if msg["type"] == "subscribe":
continue
self._do_importing()
except (OSError, redis.exceptions.ConnectionError, grpc.RpcError) as e:
logger.error(f"ImportThread: {e}")
finally:
# Close the Redis / GCS subscriber to avoid leaking file
# descriptors.
self.subscriber.close()
def _do_importing(self):
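        # Export keys are written with sequentially increasing indices, so keep
        # fetching key number (num_imported + 1) until one is missing, which
        # means we have caught up with everything exported so far.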
while True:
export_key = ray._private.function_manager.make_export_key(
self.num_imported + 1, self.worker.current_job_id
)
key = self.gcs_client.internal_kv_get(
export_key, ray_constants.KV_NAMESPACE_FUNCTION_TABLE
)
if key is not None:
self._process_key(key)
self.num_imported += 1
else:
break
def _get_import_info_for_collision_detection(self, key):
"""Retrieve the collision identifier, type, and name of the import."""
if key.startswith(b"RemoteFunction"):
collision_identifier, function_name = self._internal_kv_multiget(
key, ["collision_identifier", "function_name"]
)
return (
collision_identifier,
ray._private.utils.decode(function_name.encode()),
"remote function",
)
elif key.startswith(b"ActorClass"):
collision_identifier, class_name = self._internal_kv_multiget(
key, ["collision_identifier", "class_name"]
)
return (
collision_identifier,
ray._private.utils.decode(class_name.encode()),
"actor",
)
def _process_key(self, key):
"""Process the given export key from redis."""
if self.mode != ray.WORKER_MODE:
# If the same remote function or actor definition appears to be
# exported many times, then print a warning. We only issue this
# warning from the driver so that it is only triggered once instead
# of many times. TODO(rkn): We may want to push this to the driver
# through Redis so that it can be displayed in the dashboard more
# easily.
if key.startswith(b"RemoteFunction") or key.startswith(b"ActorClass"):
(
collision_identifier,
name,
import_type,
) = self._get_import_info_for_collision_detection(key)
self.imported_collision_identifiers[collision_identifier] += 1
if (
self.imported_collision_identifiers[collision_identifier]
== ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD
):
logger.warning(
"The %s '%s' has been exported %s times. It's "
"possible that this warning is accidental, but this "
"may indicate that the same remote function is being "
"defined repeatedly from within many tasks and "
"exported to all of the workers. This can be a "
"performance issue and can be resolved by defining "
"the remote function on the driver instead. See "
"https://github.com/ray-project/ray/issues/6240 for "
"more discussion.",
import_type,
name,
ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD,
)
if key.startswith(b"RemoteFunction:"):
# TODO (Alex): There's a race condition here if the worker is
# shutdown before the function finished registering (because core
# worker's global worker is unset before shutdown and is needed
# for profiling).
# with profiling.profile("register_remote_function"):
(self.worker.function_actor_manager.fetch_and_register_remote_function(key))
elif key.startswith(b"FunctionsToRun:"):
with profiling.profile("fetch_and_run_function"):
self.fetch_and_execute_function_to_run(key)
elif key.startswith(b"ActorClass:"):
# Keep track of the fact that this actor class has been
# exported so that we know it is safe to turn this worker
# into an actor of that class.
self.worker.function_actor_manager.imported_actor_classes.add(key)
with self.worker.function_actor_manager.cv:
# Function manager may be waiting on actor class to be
# loaded for deserialization, notify it to wake up and
# check if the actor class it was looking for is loaded
self.worker.function_actor_manager.cv.notify_all()
# TODO(rkn): We may need to bring back the case of
# fetching actor classes here.
else:
assert False, "This code should be unreachable."
def fetch_and_execute_function_to_run(self, key):
"""Run on arbitrary function on the worker."""
(job_id, serialized_function) = self._internal_kv_multiget(
key, ["job_id", "function"]
)
if self.worker.mode == ray.SCRIPT_MODE:
return
try:
# FunctionActorManager may call pickle.loads at the same time.
# Importing the same module in different threads causes deadlock.
with self.worker.function_actor_manager.lock:
# Deserialize the function.
function = pickle.loads(serialized_function)
# Run the function.
function({"worker": self.worker})
except Exception:
# If an exception was thrown when the function was run, we record
# the traceback and notify the scheduler of the failure.
traceback_str = traceback.format_exc()
# Log the error message.
ray._private.utils.push_error_to_driver(
self.worker,
ray_constants.FUNCTION_TO_RUN_PUSH_ERROR,
traceback_str,
job_id=ray.JobID(job_id),
)
def _internal_kv_multiget(self, key, fields):
vals = self.gcs_client.internal_kv_get(
key, ray_constants.KV_NAMESPACE_FUNCTION_TABLE
)
if vals is None:
vals = {}
else:
vals = pickle.loads(vals)
return (vals.get(field) for field in fields)
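# Sketch of how this class is wired up inside the Ray worker (internal API;
# the `worker` object and `threads_stopped` event come from ray's worker
# bootstrap code, so the names below are illustrative only):
#
#     import_thread = ImportThread(worker, worker.mode, threads_stopped)
#     import_thread.start()                 # daemon thread polling for exports
#     ...
#     threads_stopped.set()                 # ask the thread to exit
#     import_thread.join_import_thread()    # wait for it to finish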
|
|
# -*- coding: utf-8 -*-
# :coding=utf-8:
# base settings - imported by other settings files, then overridden
import os.path
import posixpath
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
def env_or_default(NAME, default):
return os.environ.get(NAME, default)
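# Illustrative: exporting DB_NAME=sotmjp_dev before running manage.py makes
# DATABASES["default"]["NAME"] below resolve to "sotmjp_dev" instead of the
# "sotmjp2015" default.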
# Top level of our source / repository
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir))
DEBUG = False
TEMPLATE_DEBUG = DEBUG
# tells Pinax to serve media through the staticfiles app.
SERVE_MEDIA = DEBUG
# django-compressor is turned off by default due to deployment overhead for
# most users. See <URL> for more information
COMPRESS = False
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": env_or_default("DB_NAME", "sotmjp2015"),
"USER": env_or_default("DB_USER", ""),
"PASSWORD": env_or_default("DB_PASSWORD", ""),
"HOST": env_or_default("DB_HOST", ""),
"PORT": env_or_default("DB_PORT", ""),
}
}
INTERNAL_IPS = [
"127.0.0.1",
]
ADMINS = [
# ("Your Name", "your_email@domain.com"),
]
MANAGERS = ADMINS
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Asia/Tokyo'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "ja-jp"
SITE_ID = 1
# Conference ID and any URL prefixes
CONFERENCE_ID = 2
CONFERENCE_URL_PREFIXES = {
1: "2014",
2: "2015",
}
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
gettext = lambda s: s
LANGUAGES = (
('en', gettext('English')),
('fr', gettext('French')),
('ja', gettext('Japanese')),
)
LOCALE_PATHS = [os.path.join(PROJECT_ROOT, "locale")]
# Absolute path to the directory that holds media - this is files uploaded
# by users, such as attachments.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = env_or_default("MEDIA_ROOT",
os.path.join(PROJECT_ROOT, "site_media", "media"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = "/%s/site_media/media/" % CONFERENCE_URL_PREFIXES[CONFERENCE_ID]
# Absolute path to the directory where static files will be gathered
# at deploy time and served from in production. Should NOT be kept
# in version control, and should be empty before deploying.
STATIC_ROOT = os.path.join(PROJECT_ROOT, "site_media", "static")
# URL that handles the static files like app media.
# Example: "http://media.lawrence.com"
STATIC_URL = "/%s/site_media/static/" % CONFERENCE_URL_PREFIXES[CONFERENCE_ID]
# Additional directories which hold static files
STATICFILES_DIRS = []
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
"compressor.finders.CompressorFinder",
]
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = posixpath.join(STATIC_URL, "admin/")
# Subdirectory of COMPRESS_ROOT to store the cached media files in
COMPRESS_OUTPUT_DIR = ""
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
]
# XXX WARN:
# LocaleMiddleware must follow session middleware, auth middleware,
# and cache middleware, and precede CommonMiddleware
#
# XXX: you can add debug_toolbar
# "debug_toolbar.middleware.DebugToolbarMiddleware",
MIDDLEWARE_CLASSES = [
"djangosecure.middleware.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.middleware.locale.LocaleMiddleware",
"account.middleware.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"reversion.middleware.RevisionMiddleware",
]
ROOT_URLCONF = 'sotmjp.urls'
TEMPLATE_DIRS = [
os.path.join(PROJECT_ROOT, "sotmjp/templates"),
]
TEMPLATE_CONTEXT_PROCESSORS = [
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
"pinax_theme_bootstrap.context_processors.theme",
"account.context_processors.account",
"constance.context_processors.config",
]
INSTALLED_APPS = [
# Django
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.humanize",
# theme
"pinax_theme_bootstrap",
"bootstrapform",
# external
"account",
"compressor",
"timezones",
"metron",
"easy_thumbnails",
"sitetree",
"taggit",
"reversion",
"pinax.blog",
"djangosecure",
"raven.contrib.django",
"constance",
"constance.backends.database",
"redis_cache",
"uni_form",
"gunicorn",
"selectable",
# symposion
"symposion",
"symposion.conference",
"symposion.boxes",
"symposion.speakers",
"symposion.proposals",
"symposion.teams",
"symposion.schedule",
"symposion.sponsorship",
# custom
"markedit",
"sotmjp",
"sotmjp.proposals",
"restcms",
"leaflet",
"osm_field",
]
FIXTURE_DIRS = [
os.path.join(PROJECT_ROOT, "fixtures"),
]
MESSAGE_STORAGE = "django.contrib.messages.storage.session.SessionStorage"
# for debug
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# for production
# EMAIL_BACKEND = "mailer.backend.DbBackend"
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_PASSWORD = 'tester@@abcd'
EMAIL_HOST_USER = 'tester.abcd@gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
ACCOUNT_OPEN_SIGNUP = True
ACCOUNT_USE_OPENID = False
ACCOUNT_REQUIRED_EMAIL = False
ACCOUNT_EMAIL_VERIFICATION = False
ACCOUNT_EMAIL_AUTHENTICATION = False
ACCOUNT_UNIQUE_EMAIL = EMAIL_CONFIRMATION_UNIQUE_EMAIL = False
ACCOUNT_CREATE_ON_SAVE = True
ACCOUNT_EMAIL_CONFIRMATION_EMAIL = False
ACCOUNT_DELETION_EXPUNGE_CALLBACK = 'sotmjp.account.callbacks.account_delete_expunge' # NOQA
AUTHENTICATION_BACKENDS = [
"symposion.teams.backends.TeamPermissionsBackend",
"account.auth_backends.EmailAuthenticationBackend",
'django.contrib.auth.backends.ModelBackend',
]
LOGIN_URL = reverse_lazy("account_login")
ACCOUNT_SIGNUP_REDIRECT_URL = "dashboard"
ACCOUNT_LOGIN_REDIRECT_URL = "dashboard"
ACCOUNT_LOGOUT_REDIRECT_URL = "home"
ACCOUNT_USER_DISPLAY = lambda user: user.get_full_name()
LOGIN_ERROR_URL = reverse_lazy("account_login")
EMAIL_CONFIRMATION_DAYS = 2
EMAIL_DEBUG = DEBUG
DEFAULT_FROM_EMAIL = "SotM JP committee <no-reply@stateofthemap.jp>"
DEBUG_TOOLBAR_CONFIG = {
"INTERCEPT_REDIRECTS": False,
}
CONSTANCE_BACKEND = "constance.backends.database.DatabaseBackend"
CONSTANCE_CONFIG = {
# "SETTING_NAME": (default_value, "help text")
"REGISTRATION_URL": ("", _("URL for registration")),
"SPONSOR_FROM_EMAIL": ("", _("From address for emails to sponsors")),
"REGISTRATION_STATUS": ("", _("Used in the home page template."
" Valid values are 'soon',"
" 'open', 'closed' and 'over'")),
"CONFERENCE_NAME": ("State of the Map Japan",
_("Conference name (long)")),
"CONFERENCE_YEAR": ("2015", _("Conference year (4-digit)")),
"CONFERENCE_YEAR_SHORT": ("15", _("Conference year (2-digit)")),
"CONFERENCE_NAME_SHORT": ("SotM JP 15", _("Conference name (short)")),
"CONFERENCE_LOCALITY": ("Hamamatsu", _("Conference locality place")),
"CONFERENCE_COUNTRY": ("Japan", _("Conference locality country")),
"CONFERENCE_START_DATE": ("2015-10-31", _("Conference start date")),
"CONFERENCE_END_DATE": ("2015-10-31", _("Conference end date")),
"CONFERENCE_DURATION": ("2015-10-31 ~ 31", _("Conference duration")),
"CONFERENCE_DAYS": (1, _("How much days for conference(day number)?")),
"HAVE_TUTORIAL": ("No", _("Conference has tutorial program?(Yes/No)")),
"TUTORIAL_NAME": ("", _("Tutorial program name")),
"TUTORIAL_START_DATE": ("", _("Tutorial start date")),
"TUTORIAL_END_DATE": ("", _("Tutorial end date")),
"TUTORIAL_DURATION": ("", _("Tutorail duration")),
"HAVE_HACKATHON": ("No", _("Conference has hackathon program?(Yes/No)")),
"HACKATHON_NAME": ("", _("Hackathon program name")),
"HACKATHON_START_DATE": ("", _("Hackathon start date")),
"HACKATHON_END_DATE": ("", _("Hackathon end date")),
"HACKATHON_DURATION": ("", _("Hackathon duration")),
"GOOGLE_SITE_VERIFICATION_CODE": ("", _("Google site verification code")),
}
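# Values registered above are read at runtime through django-constance, e.g.:
#
#     from constance import config
#     config.CONFERENCE_NAME      # "State of the Map Japan" unless overridden
#     config.REGISTRATION_STATUS  # one of 'soon', 'open', 'closed', 'over'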
SYMPOSION_PAGE_REGEX = r"(([\w-]{1,})(/[\w-]{1,})*)/$"
PROPOSAL_FORMS = {
"talk": "sotmjp.proposals.forms.TalkProposalForm",
"poster": "sotmjp.proposals.forms.PosterProposalForm",
"lightning-talk": "sotmjp.proposals.forms.LightningTalkProposalForm",
"open-space": "sotmjp.proposals.forms.OpenSpaceProposalForm",
}
USE_X_ACCEL_REDIRECT = False
MARKEDIT_DEFAULT_SETTINGS = {
'preview': 'below',
'toolbar': {
'backgroundMode': 'dark',
}
}
COMPRESS_PRECOMPILERS = (
('text/less', 'lessc {infile} {outfile}'),
)
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
# Is somebody clobbering this? We shouldn't have to set it ourselves,
# but if we don't, gunicorn's django_wsgi blows up trying to configure
# logging with an empty dictionary.
from django.utils.log import DEFAULT_LOGGING
LOGGING = DEFAULT_LOGGING
# Add config for Google Analytics
CONSTANCE_CONFIG["GOOGLE_ANALYTICS_TRACKING_ID"] = (
"", "The site's Google Analytics Tracking ID.")
# Add config for leaflet
LEAFLET_CONFIG = {
'TILES': 'http://tile.openstreetmap.jp/{z}/{x}/{y}.png',
'SPATIAL_EXTENT': (137.73265600204468, 34.701594850527215, 137.74016618728638, 34.7095329004514),
'DEFAULT_CENTER': (34.701594850527215, 137.73265600204468),
'DEFAULT_ZOOM': 17,
'MIN_ZOOM': 15,
'MAX_ZOOM': 19,
}
# Reorder admin screen
ADMIN_REORDER = (
("blog", ("posts", "images")),
("auth", ("groups", "users")),
("symposion_speakers", ("speaker")),
("teams", ("team", "membership")),
("proposals", ("talkproposal", "lightningtalkproposal")),
("symposion_schedule", ("schedule", "day", "room", "slotkind", "slotroom", "slot", "presentation")),
("symposion_sponsorship", ("sponsor", "sponsorlevel", "sponsorbenefit", "benefit")),
("account", ("accounts", "email")),
("constance", ("config")),
("sitetree", ("tree")),
("sites", ("sites")),
("boxes", ("boxes")),
("restcms", ("pages", "files")),
("symposion_proposals", ("proposalkind", "proposalsection")),
("symposion_conference", ("conference", "section")),
)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Statement AST Node in TVM.
Users do not need to deal with AST nodes directly,
but they can be helpful for developers doing quick prototyping.
While not displayed in the documentation and python files,
each statement node has subfields that can be visited from the python side.
.. code-block:: python
x = tvm.var("n")
a = tvm.var("array", tvm.handle)
st = tvm.make.Store(a, x + 1, 1)
assert isinstance(st, tvm.stmt.Store)
assert(st.buffer_var == a)
"""
from __future__ import absolute_import as _abs
from ._ffi.node import NodeBase, register_node
from . import make as _make
class Stmt(NodeBase):
pass
@register_node
class LetStmt(Stmt):
"""LetStmt node.
Parameters
----------
var : Var
The variable in the binding.
value : Expr
        The value to be bound.
body : Stmt
The body statement.
"""
def __init__(self, var, value, body):
self.__init_handle_by_constructor__(
_make.LetStmt, var, value, body)
@register_node
class AssertStmt(Stmt):
"""AssertStmt node.
Parameters
----------
condition : Expr
The assert condition.
message : Expr
The error message.
body : Stmt
The body statement.
"""
def __init__(self, condition, message, body):
self.__init_handle_by_constructor__(
_make.AssertStmt, condition, message, body)
@register_node
class ProducerConsumer(Stmt):
"""ProducerConsumer node.
Parameters
----------
func : Operation
The Operation.
is_producer : bool
        Whether the node is a producer.
body : Stmt
The body statement.
"""
def __init__(self, func, is_producer, body):
self.__init_handle_by_constructor__(
_make.ProducerConsumer, func, is_producer, body)
@register_node
class For(Stmt):
"""For node.
Parameters
----------
loop_var : Var
The loop variable.
min_val : Expr
        The beginning value.
extent : Expr
The length of the loop.
for_type : int
The for type.
device_api : int
The device api type.
body : Stmt
The body statement.
"""
Serial = 0
Parallel = 1
Vectorized = 2
Unrolled = 3
def __init__(self,
loop_var,
min_val,
extent,
for_type,
device_api,
body):
self.__init_handle_by_constructor__(
_make.For, loop_var, min_val, extent,
for_type, device_api, body)
@register_node
class Store(Stmt):
"""Store node.
Parameters
----------
buffer_var : Var
The buffer Variable.
value : Expr
The value we want to store.
index : Expr
The index in the store expression.
predicate : Expr
The store predicate.
"""
def __init__(self, buffer_var, value, index, predicate):
self.__init_handle_by_constructor__(
_make.Store, buffer_var, value, index, predicate)
@register_node
class Provide(Stmt):
"""Provide node.
Parameters
----------
func : Operation
The operation to create the function.
value_index : int
The output value index
value : Expr
The value to be stored.
args : list of Expr
The index arguments of the Provide.
"""
def __init__(self, func, value_index, value, args):
self.__init_handle_by_constructor__(
_make.Provide, func, value_index, value, args)
@register_node
class Allocate(Stmt):
"""Allocate node.
Parameters
----------
buffer_var : Var
The buffer variable.
dtype : str
The data type of the buffer.
extents : list of Expr
        The extents of the allocation.
condition : Expr
The condition.
body : Stmt
The body statement.
"""
def __init__(self,
buffer_var,
dtype,
extents,
condition,
body):
self.__init_handle_by_constructor__(
_make.Allocate, buffer_var, dtype,
extents, condition, body)
@register_node
class AttrStmt(Stmt):
"""AttrStmt node.
Parameters
----------
node : Node
The node to annotate the attribute
attr_key : str
Attribute type key.
value : Expr
The value of the attribute
body : Stmt
The body statement.
"""
def __init__(self, node, attr_key, value, body):
self.__init_handle_by_constructor__(
_make.AttrStmt, node, attr_key, value, body)
@register_node
class Free(Stmt):
"""Free node.
Parameters
----------
buffer_var : Var
The buffer variable.
"""
def __init__(self, buffer_var):
self.__init_handle_by_constructor__(
_make.Free, buffer_var)
@register_node
class Realize(Stmt):
"""Realize node.
Parameters
----------
func : Operation
The operation to create the function.
value_index : int
The output value index
dtype : str
The data type of the operation.
bounds : list of range
        The bounds of the realize.
condition : Expr
The realize condition.
body : Stmt
The realize body
"""
def __init__(self,
func,
value_index,
dtype,
bounds,
condition,
body):
self.__init_handle_by_constructor__(
_make.Realize, func, value_index, dtype,
bounds, condition, body)
@register_node
class Block(Stmt):
"""Block node.
Parameters
----------
first : Stmt
The first statement.
rest : Stmt
The following statement.
"""
def __init__(self, first, rest):
self.__init_handle_by_constructor__(
_make.Block, first, rest)
@register_node
class IfThenElse(Stmt):
"""IfThenElse node.
Parameters
----------
condition : Expr
The expression
then_case : Stmt
The statement to execute if condition is true.
else_case : Stmt
The statement to execute if condition is false.
"""
def __init__(self, condition, then_case, else_case):
self.__init_handle_by_constructor__(
_make.IfThenElse, condition, then_case, else_case)
@register_node
class Evaluate(Stmt):
"""Evaluate node.
Parameters
----------
value : Expr
        The expression to be evaluated.
"""
def __init__(self, value):
self.__init_handle_by_constructor__(
_make.Evaluate, value)
@register_node
class Prefetch(Stmt):
"""Prefetch node.
Parameters
----------
func : Operation
The operation to create the function.
value_index : int
The output value index
dtype : str
The data type to be prefetched.
bounds : list of Range
The bounds to be prefetched.
"""
def __init__(self, func, value_index, dtype, bounds):
self.__init_handle_by_constructor__(
_make.Prefetch, func, value_index, dtype, bounds)
def stmt_seq(*args):
"""Make sequence of statements
Parameters
----------
    args : list of Stmt or Expr
        List of statements or expressions to be combined as a sequence.
Returns
-------
stmt : Stmt
The combined statement.
"""
ret = None
for value in args:
if not isinstance(value, Stmt):
value = Evaluate(value)
ret = value if ret is None else Block(ret, value)
return ret if ret else Evaluate(0)
def stmt_list(stmt):
"""Make list of stmt from blocks.
Parameters
----------
stmt : A block statement
Returns
-------
stmt_list : list of Stmt
The unpacked list of statements
"""
if isinstance(stmt, Block):
return stmt_list(stmt.first) + stmt_list(stmt.rest)
if isinstance(stmt, ProducerConsumer):
return stmt_list(stmt.body)
return [stmt]
_make.stmt_list = stmt_list
_make.stmt_seq = stmt_seq
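# A small sketch of the two helpers above, reusing only the API shown in the
# module docstring (assumes a legacy TVM build exposing tvm.make):
#
#     import tvm
#     x = tvm.var("n")
#     a = tvm.var("array", tvm.handle)
#     st = tvm.make.Store(a, x + 1, 1)
#     seq = tvm.stmt.stmt_seq(st, st)            # two Stmts folded into a Block
#     assert isinstance(seq, tvm.stmt.Block)
#     assert len(tvm.stmt.stmt_list(seq)) == 2   # flattened back into a list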
|
|
import imghdr
import json
import logging
from django.conf import settings
from django.db.models import Q
from django.http import (HttpResponse, HttpResponseRedirect,
HttpResponseBadRequest, Http404)
from django.shortcuts import get_object_or_404, render
from django.views.decorators.clickjacking import xframe_options_sameorigin
from django.views.decorators.http import require_POST
from tower import ugettext as _
from kitsune.access.decorators import login_required
from kitsune.gallery import ITEMS_PER_PAGE
from kitsune.gallery.forms import ImageForm
from kitsune.gallery.models import Image, Video
from kitsune.gallery.utils import upload_image, check_media_permissions
from kitsune.sumo.urlresolvers import reverse
from kitsune.sumo.utils import paginate
from kitsune.upload.tasks import compress_image, generate_thumbnail
from kitsune.upload.utils import FileTooLargeError
from kitsune.wiki.tasks import schedule_rebuild_kb
log = logging.getLogger('k.gallery')
def gallery(request, media_type='image'):
"""The media gallery.
    Filter can be set to 'image' or 'video'.
"""
if media_type == 'image':
media_qs = Image.objects.filter(locale=request.LANGUAGE_CODE)
elif media_type == 'video':
media_qs = Video.objects.filter(locale=request.LANGUAGE_CODE)
else:
raise Http404
media = paginate(request, media_qs, per_page=ITEMS_PER_PAGE)
drafts = _get_drafts(request.user)
image = drafts['image'][0] if drafts['image'] else None
image_form = _init_media_form(ImageForm, request, image)
if request.method == 'POST':
image_form.is_valid()
return render(request, 'gallery/gallery.html', {
'media': media,
'media_type': media_type,
'image_form': image_form,
'submitted': request.method == 'POST'})
@login_required
@require_POST
def upload(request, media_type='image'):
"""Finalizes an uploaded draft."""
drafts = _get_drafts(request.user)
if media_type == 'image' and drafts['image']:
# We're publishing an image draft!
image_form = _init_media_form(ImageForm, request, drafts['image'][0])
if image_form.is_valid():
img = image_form.save(is_draft=None)
generate_thumbnail.delay(img, 'file', 'thumbnail')
compress_image.delay(img, 'file')
# Rebuild KB
schedule_rebuild_kb()
return HttpResponseRedirect(img.get_absolute_url())
else:
return gallery(request, media_type='image')
return HttpResponseBadRequest(u'Unrecognized POST request.')
@login_required
@require_POST
def cancel_draft(request, media_type='image'):
"""Delete an existing draft for the user."""
drafts = _get_drafts(request.user)
if media_type == 'image' and drafts['image']:
drafts['image'].delete()
drafts['image'] = None
else:
msg = _(u'Unrecognized request or nothing to cancel.')
content_type = None
if request.is_ajax():
msg = json.dumps({'status': 'error', 'message': msg})
content_type = 'application/json'
return HttpResponseBadRequest(msg, content_type=content_type)
if request.is_ajax():
return HttpResponse(json.dumps({'status': 'success'}),
content_type='application/json')
return HttpResponseRedirect(reverse('gallery.gallery', args=[media_type]))
def gallery_async(request):
"""AJAX endpoint to media gallery.
Returns an HTML list representation of the media.
"""
# Maybe refactor this into existing views and check request.is_ajax?
media_type = request.GET.get('type', 'image')
term = request.GET.get('q')
media_locale = request.GET.get('locale', settings.WIKI_DEFAULT_LANGUAGE)
if media_type == 'image':
media_qs = Image.objects
elif media_type == 'video':
media_qs = Video.objects
else:
raise Http404
media_qs = media_qs.filter(locale=media_locale)
if term:
media_qs = media_qs.filter(Q(title__icontains=term) |
Q(description__icontains=term))
media = paginate(request, media_qs, per_page=ITEMS_PER_PAGE)
return render(request, 'gallery/includes/media_list.html', {
'media_list': media})
def search(request, media_type):
"""Search the media gallery."""
term = request.GET.get('q')
if not term:
url = reverse('gallery.gallery', args=[media_type])
return HttpResponseRedirect(url)
filter = Q(title__icontains=term) | Q(description__icontains=term)
if media_type == 'image':
media_qs = Image.objects.filter(filter, locale=request.LANGUAGE_CODE)
elif media_type == 'video':
media_qs = Video.objects.filter(filter, locale=request.LANGUAGE_CODE)
else:
raise Http404
media = paginate(request, media_qs, per_page=ITEMS_PER_PAGE)
return render(request, 'gallery/search.html', {
'media': media,
'media_type': media_type,
'q': term})
@login_required
def delete_media(request, media_id, media_type='image'):
"""Delete media and redirect to gallery view."""
media, media_format = _get_media_info(media_id, media_type)
check_media_permissions(media, request.user, 'delete')
if request.method == 'GET':
# Render the confirmation page
return render(request, 'gallery/confirm_media_delete.html', {
'media': media,
'media_type': media_type,
'media_format': media_format})
# Handle confirm delete form POST
log.warning('User %s is deleting %s with id=%s' %
(request.user, media_type, media.id))
media.delete()
# Rebuild KB
schedule_rebuild_kb()
return HttpResponseRedirect(reverse('gallery.gallery', args=[media_type]))
@login_required
def edit_media(request, media_id, media_type='image'):
"""Edit media means only changing the description, for now."""
media, media_format = _get_media_info(media_id, media_type)
check_media_permissions(media, request.user, 'change')
if media_type == 'image':
media_form = _init_media_form(ImageForm, request, media,
('locale', 'title'))
else:
raise Http404
if request.method == 'POST' and media_form.is_valid():
media = media_form.save(update_user=request.user)
return HttpResponseRedirect(
reverse('gallery.media', args=[media_type, media_id]))
return render(request, 'gallery/edit_media.html', {
'media': media,
'media_format': media_format,
'form': media_form,
'media_type': media_type})
def media(request, media_id, media_type='image'):
"""The media page."""
media, media_format = _get_media_info(media_id, media_type)
return render(request, 'gallery/media.html', {
'media': media,
'media_format': media_format,
'media_type': media_type})
@login_required
@require_POST
@xframe_options_sameorigin
def upload_async(request, media_type='image'):
"""Upload images or videos from request.FILES."""
# TODO(paul): validate the Submit File on upload modal async
# even better, use JS validation for title length.
try:
if media_type == 'image':
file_info = upload_image(request)
else:
msg = _(u'Unrecognized media type.')
return HttpResponseBadRequest(
json.dumps({'status': 'error', 'message': msg}))
except FileTooLargeError as e:
return HttpResponseBadRequest(
json.dumps({'status': 'error', 'message': e.args[0]}))
if isinstance(file_info, dict) and 'thumbnail_url' in file_info:
schedule_rebuild_kb()
return HttpResponse(
json.dumps({'status': 'success', 'file': file_info}))
message = _(u'Could not upload your image.')
return HttpResponseBadRequest(
json.dumps({'status': 'error',
'message': unicode(message),
'errors': file_info}))
def _get_media_info(media_id, media_type):
"""Returns an image or video along with media format for the image."""
media_format = None
if media_type == 'image':
media = get_object_or_404(Image, pk=media_id)
try:
media_format = imghdr.what(media.file.path)
except UnicodeEncodeError:
pass
elif media_type == 'video':
media = get_object_or_404(Video, pk=media_id)
else:
raise Http404
return (media, media_format)
def _get_drafts(user):
"""Get video and image drafts for a given user."""
drafts = {'image': None, 'video': None}
if user.is_authenticated():
drafts['image'] = Image.objects.filter(creator=user, is_draft=True)
drafts['video'] = Video.objects.filter(creator=user, is_draft=True)
return drafts
def _init_media_form(form_cls, request=None, obj=None,
ignore_fields=()):
"""Initializes the media form with an Image/Video instance and POSTed data.
form_cls is a django ModelForm
Request method must be POST for POST data to be bound.
exclude_fields contains the list of fields to default to their current
value from the Image/Video object.
"""
post_data = None
initial = None
if request:
initial = {'locale': request.LANGUAGE_CODE}
file_data = None
if request.method == 'POST':
file_data = request.FILES
post_data = request.POST.copy()
if obj and ignore_fields:
for f in ignore_fields:
post_data[f] = getattr(obj, f)
return form_cls(post_data, file_data, instance=obj, initial=initial,
is_ajax=False)
|
|
#!/usr/bin/env python
# OpenVirteX control script
# Heavily based on FlowVisor's fvctl
#import local thrift files
import TenantServer
from ttypes import *
#import Thrift packages to connect to server
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
#import python utilities to parse arguments
import sys
from optparse import OptionParser
import urllib2
VERSION = '0.1'
def pa_none(args, cmd):
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=USAGE.format(cmd), description=ldesc)
(options, args) = parser.parse_args(args)
return (options, args)
def pa_createNetwork(args, cmd):
usage = "%s <mac_address> <primary_controller> <ip_range>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_createNetwork(gopts, opts, args):
if len(args) != 5:
print "createNetwork : Must specify protocol, controllerIP, controllerPort, networkIP, mask"
sys.exit()
client = create_client(gopts.host, int(gopts.port))
network_id = client.createVirtualNetwork(args[0], args[1], int(args[2]), args[3], int(args[4]))
client._iprot.trans.close()
if network_id:
print "Network has been created (network_id %s)." % str(network_id)
def pa_vlink(args, cmd):
usage = "%s <network_id> <dpid> <ports>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_createVLink(gopts, opts, args):
if len(args) != 2:
print "createVLink : Must specify a (network_id, and a path string of all the physicalLinks that create a virtualLink)"
sys.exit()
client = create_client(gopts.host, int(gopts.port))
linkId = client.createVirtualLink(int(args[0]), args[1])
client._iprot.trans.close()
if linkId:
print "Virtual link has been created"
def pa_vswitch(args, cmd):
usage = "%s <network_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_createVSwitch(gopts, opts, args):
if len(args) != 2:
print "createVSwitch : Must specify (network_id and dpid,dpid,... - list of physical dpids which are associated with this dpid)"
sys.exit()
client = create_client(gopts.host, int(gopts.port))
dpids = [str(dpid) for dpid in args[1].split(',')]
dpid = client.createVirtualSwitch(int(args[0]), dpids)
client._iprot.trans.close()
if dpid:
print "Virtual switch has been created (dpid %s)" % dpid
def pa_connectHost(args, cmd):
usage = "%s <mac> <dpid> <port>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_connectHost(gopts, opts, args):
if len(args) != 4:
print "connectHost : Must specify a tenantId, dpid, port and MAC address"
sys.exit()
client = create_client(gopts.host, int(gopts.port))
# takes the tenantid, dpid, port, host mac address
port = client.createHost(int(args[0]), args[1], int(args[2]), args[3])
client._iprot.trans.close()
if port:
print "Host has been connected to edge"
def pa_bootNetwork(args, cmd):
usage = "%s <network_id>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_bootNetwork(gopts, opts, args):
if len(args) != 1:
print "bootNetwork : Must specify a network/tenant ID"
sys.exit()
client = create_client(gopts.host, int(gopts.port))
result = client.startNetwork(int(args[0]))
client._iprot.trans.close()
if result:
print "Network has been booted"
def pa_createVSwitchRoute(args, cmd):
usage = "%s <network_id> <dpid> <inPort> <outPort> <route>" % USAGE.format(cmd)
(sdesc, ldesc) = DESCS[cmd]
parser = OptionParser(usage=usage, description=ldesc)
return parser.parse_args(args)
def do_createVSwitchRoute(gopts, opts, args):
if len(args) != 5:
print "createVSwitchRoute : Must specify a tenantID, dpid, ingress virt port, egress virt port, and a string of links"
sys.exit()
client = create_client(gopts.host, int(gopts.port))
route_id = client.createSwitchRoute(int(args[0]), args[1], args[2], args[3], args[4])
client._iprot.trans.close()
if route_id > 0:
print "Switch route has been created (route_id %s)." % str(route_id)
def pa_help(args, cmd):
usage = "%s <cmd>" % USAGE.format(cmd)
parser = OptionParser(usage=usage)
return parser.parse_args(args)
def do_help(gopts, opts, args):
if len(args) != 1:
raise IndexError
try:
(pa, func) = CMDS[args[0]]
pa(['--help'], args[0])
except KeyError, e:
print "Invalid command : %s is an unknown command." % args[0]
sys.exit()
def printVersion (option, opt, value, parser):
"""Print nvctl version and exit"""
print "nvctl-%s" % VERSION
sys.exit()
def printHelp (option, opt, value, parser):
"""Print nvctl help and exit"""
cmds = [x for x in CMDS.iterkeys()]
cmds.remove('help')
cmds.sort()
print(parser.format_help().strip())
print "\n Available commands are: "
for x in cmds:
(sdesc, ldesc) = DESCS[x]
print " {0:25} {1:10}".format(x, sdesc)
print "\n See '%s help <command>' for more info on a specific command." % sys.argv[0]
sys.exit()
CMDS = {
'createNetwork': (pa_createNetwork, do_createNetwork),
'createVSwitch': (pa_vswitch, do_createVSwitch),
'createVLink': (pa_vlink, do_createVLink),
'connectHost': (pa_connectHost, do_connectHost),
'bootNetwork': (pa_bootNetwork, do_bootNetwork),
'createVSwitchRoute':(pa_createVSwitchRoute, do_createVSwitchRoute),
'help' : (pa_help, do_help)
}
DESCS = {
'createNetwork' : ("Creates a virtual network",
("Creates a virtual network. Input: protocol, controllerIP, controller port, ip address, mask ")),
'createVLink' : ("Create virtual link",
("Create virtual link. Must specify a network_id and hops in the physical plane. srcDPID/port-dstDPID/port,srcDPID/port-dstDPID/port")),
'createVSwitch' : ("Create virtual switch",
("Create a virtual switch. Must specify a network_id, and a list of the physicalDPIDs that this contains")),
'connectHost' : ("Connect host to edge switch",
("Connect host to edge switch. Must specify a network_id, mac, dpid and port.")),
'bootNetwork' : ("Boot virtual network",
("Boot virtual network. Must specify a network_id.")),
'createVSwitchRoute' : ("Create a route through a virtual switch",
("Create a route through a virtual switch. Must specify a network_id, dpid, in_port, out_port, and physical link path between the two ports. DPID, and ingress/egress ports are those of the virtual switch. Format of port: DPID/port. Format of path: srcDPID/port-dstDPID/port,srcDPID/port-dstDPID/port")),
}
USAGE="%prog {}"
URL = "http://%s:%s"
def addCommonOpts (parser):
parser.add_option("-h", "--hostname", dest="host", default="localhost",
help="Specify the OpenVirteX host; default='localhost'")
parser.add_option("-p", "--port", dest="port", default="8080",
help="Specify the OpenVirteX web port; default=8080")
parser.add_option("-v", "--version", action="callback", callback=printVersion)
parser.add_option("--help", action="callback", callback=printHelp)
def parse_global_args (arglist):
usage = "%s [options] command [command_args]" % sys.argv[0]
args = []
while (len(arglist) != 0 and arglist[0] not in CMDS):
args.append(arglist[0])
arglist.pop(0)
parser = OptionParser(add_help_option=False, usage=usage)
addCommonOpts(parser)
(opts, pargs) = parser.parse_args(args)
return (opts, arglist, parser)
def create_client(host, port):
#Make socket
transport = TSocket.TSocket(host, port)
# Buffering is critical. Raw sockets are very slow
transport = TTransport.TFramedTransport(transport)
# Wrap in a protocol
protocol = TBinaryProtocol.TBinaryProtocol(transport)
# Create a client to use the protocol encoder
client = TenantServer.Client(protocol)
# Connect!
transport.open()
return client
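# Example wiring (hypothetical host, port and argument values), mirroring what
# the do_* helpers above do:
#
#     client = create_client('localhost', 8080)
#     net_id = client.createVirtualNetwork('tcp', '10.0.0.1', 6633,
#                                          '192.168.0.0', 16)
#     client._iprot.trans.close()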
if __name__ == '__main__':
try:
(gopts, rargs, parser) = parse_global_args(sys.argv[1:])
if len(rargs) < 1:
printHelp(None, None, None, parser)
(parse_args, do_func) = CMDS[rargs[0]]
(opts, args) = parse_args(rargs[1:], rargs[0])
do_func(gopts, opts, args)
except ValueError, e:
print "the argument types being sent to the function %s are incorrect. Please double check them." % sys.argv[1]
function = sys.argv[1]
if function=='createNetwork':
print "createNetwork: string, string, short, string, short"
elif function=='createVSwitch':
print "createVSwitch: int, list<string>"
elif function=='connectHost':
print "connectHost: int, string, short, string"
elif function=='createVLink':
print "createVLink: int, string"
elif function=='bootNetwork':
print "bootNetwork: int"
elif function=='createSwitchRoute':
print "createVSwitchRoute: int, string, string, string, string"
except IndexError, e:
print "%s is an unknown command" % sys.argv[-1]
printHelp(None, None, None, parser)
|
|
# -*- coding: utf-8 -*-
"""
Spotify Playmaker
:summary: A basic webapp that creates a Spotify playlist using a phrase.
:note: Initially based on selected code from the Flaskr demo app.
:author: Jonathan Sedar
:contact: jon.sedar\@gmail.com
:copyright: 2012 by Jon Sedar and Flaskr is (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
## Libraries ==================================================================
import os
from flask import Flask, request, session, g, redirect, url_for, abort \
,render_template, flash, make_response
from flask.ext.sqlalchemy import SQLAlchemy
from app_utilities import pygeoip, UserInput, TracklistCreator, QueryBuilder\
,SpotifyCaller, CustomError
from sqlalchemy import desc, func
from collections import namedtuple
# Deployment-invariant settings
API_URL = 'http://ws.spotify.com/search/1/track'
GEO_DB = 'geo_data/GeoIP.dat'
# Settings from config
SECRET_KEY = str(os.environ.get('SECRET_KEY'))
SQLALCHEMY_DATABASE_URI = str(os.environ.get('DATABASE_URL'))
MAX_TRACKS_PER_DOC = int(os.environ.get('MAX_TRACKS_PER_DOC')) #3
SHOW_N_MOST_RECENT_POEMS = int(os.environ.get('SHOW_N_MOST_RECENT_POEMS')) #10
# TODO: implement a users table and OAuth
DEFAULT_USERNAME = str(os.environ.get('DEFAULT_USERNAME'))
DEFAULT_PASSWORD = str(os.environ.get('DEFAULT_PASSWORD'))
## Setup ======================================================================
# Create and configure the app
app = Flask(__name__)
app.config.from_object(__name__)
hostIPaddress = '0.0.0.0'
gi = pygeoip.GeoIP(GEO_DB, pygeoip.MEMORY_CACHE)
spotifyCaller = SpotifyCaller(API_URL)
# Local-specific changes
if str(os.environ.get('PROD')) == 'False':
app.config['SQLALCHEMY_ECHO'] = True
app.debug = True
hostIPaddress = '127.0.0.1'
# Create you a database and models
db = SQLAlchemy(app)
class Poem(db.Model):
id = db.Column(db.Integer, primary_key=True,autoincrement=True)
poem = db.Column(db.String(400),nullable=False)
user_loc = db.Column(db.String(2),nullable=True)
queries = db.relationship('Query', backref='poem',lazy='dynamic')
def __init__(self,poem,userLoc):
self.poem = poem
self.user_loc = userLoc
class Query(db.Model):
"""
    Important note: json docs are stored as BLOBs after being encoded from
    unicode to bytes, because storing unicode directly in the binary column
    raises a TypeError. So when retrieving json docs, be sure to
    .decode('utf-8').
"""
id = db.Column(db.Integer, primary_key=True,autoincrement=True)
poem_id = db.Column(db.Integer,db.ForeignKey("poem.id"),nullable=False)
query_string = db.Column(db.String(200),nullable=False)
json_doc = db.Column(db.LargeBinary(),nullable=False)
tracks = db.relationship('Track', backref='query',lazy='dynamic')
def __init__(self,poemID,tup):
self.poem_id = poemID
self.query_string = tup.query_string
self.json_doc = tup.doc.encode('utf-8')
class Track(db.Model):
id = db.Column(db.Integer, primary_key=True,autoincrement=True)
query_id = db.Column(db.Integer,db.ForeignKey("query.id"),nullable=False)
track = db.Column(db.String(200),nullable=False)
track_href = db.Column(db.String(50),nullable=False)
artist = db.Column(db.String(200),nullable=False)
artist_href = db.Column(db.String(50),nullable=False)
album = db.Column(db.String(200),nullable=False)
album_href = db.Column(db.String(50),nullable=False)
duration = db.Column(db.String(20),nullable=False)
def __init__(self,queryID,tup):
self.query_id = queryID
self.track = tup.track
self.track_href = tup.track_href
self.artist = tup.artist
self.artist_href = tup.artist_href
self.album = tup.album
self.album_href = tup.album_href
self.duration = tup.duration
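# One-off setup sketch: with DATABASE_URL pointing at a reachable database,
# the tables for the models above can be created with Flask-SQLAlchemy's
# create_all(). Defined here only as an illustration; it is never called.
def _create_tables():
    """Create the poem/query/track tables defined above (sketch)."""
    db.create_all()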
## Runtime logic ==============================================================
@app.route('/')
def show_entries():
"""
Display the poems and their tracklists on the main page
"""
TrackTup = namedtuple('TrackTup',('i','track','track_href'\
,'artist','artist_href'\
,'album','album_href','duration'))
entries = []
chosenPoemIDs = db.session.query(Poem).order_by(desc(Poem.id)).limit(10).all()
for p in chosenPoemIDs: # top ten recent poems?
tracklist = []
tracks = db.session.query(Track).join(Query).filter_by(poem_id=p.id).all()
for t,i in zip(tracks,range(len(tracks))):
tup = TrackTup(i+1,t.track,t.track_href,t.artist,t.artist_href\
,t.album,t.album_href,t.duration)
tracklist.append(tup)
entries.append(dict(poem=p.poem,tracklist=tracklist))
return render_template('show_entries.html', entries=entries)
@app.route('/add', methods=['POST'])
def add_entry():
"""
Allow logged-in users to post a poem and generate a tracklist
"""
if not session.get('logged_in'):
abort(401)
try:
userInput = UserInput(request,gi,app.logger)
except CustomError as e:
app.logger.error('{0}'.format(e.errorMsg))
flash('{0}'.format(e.userMsg),'error')
return redirect(url_for('show_entries'))
# Build query from poem
queryGroup = QueryBuilder(userInput.poem,app.logger)
# Create tracklist
creator = TracklistCreator(spotifyCaller,MAX_TRACKS_PER_DOC)
tracklist = creator.createTracklist(queryGroup,userInput,app.logger)
#app.logger.debug('Tracklist begins with: {0}'.format(tracklist.tracklist[0][:]))
if tracklist.full == False:
flash(u"Oh no! We couldn't generate a full tracklist based on "\
"your poem. Please try writing something else or ask for "\
"fewer tracks",'error')
else:
#app.logger.debug(userInput.poem)
poem = Poem(userInput.poem,userInput.userLoc)
db.session.add(poem)
db.session.commit()
for queryTup in tracklist.queriesDocsUsed:
query = Query(poem.id,queryTup)
db.session.add(query)
db.session.commit()
for trackTup in tracklist.tracklist:
if trackTup.query_string == queryTup.query_string:
track = Track(query.id, trackTup)
db.session.add(track)
db.session.commit()
flash(u"Thanks for your poem, hope you enjoy the playlist!")
return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET', 'POST'])
def login():
# TODO consider providing Twitter OAuth to login
# http://terse-words.blogspot.ie/2012/07/using-twitter-as-your-flask-login.html
error = None
if request.method == 'POST':
if request.form['username'] != app.config['DEFAULT_USERNAME']:
error = u'Invalid username'
elif request.form['password'] != app.config['DEFAULT_PASSWORD']:
error = u'Invalid password'
else:
session['logged_in'] = True
flash(u"Hi {0}, you're now logged in".format(request.form['username'].decode('utf-8')))
return redirect(url_for('show_entries'))
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
session.pop('logged_in', None)
flash(u"Bye! You're now logged out")
return redirect(url_for('show_entries'))
@app.route("/favicon.ico")
def favicon():
return app.send_static_file("music.ico")
if __name__ == "__main__":
port = int(os.environ.get("PORT", 5000))
#print 'type PROD is: {0}'.format(type(os.environ.get('PROD')))
app.run(host=hostIPaddress, port=port)
|
|
import argparse
import sys
import warnings
from gettext import gettext
from typing import Any
from typing import Callable
from typing import cast
from typing import Dict
from typing import List
from typing import Mapping
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Union
import py
import _pytest._io
from _pytest.compat import TYPE_CHECKING
from _pytest.config.exceptions import UsageError
if TYPE_CHECKING:
from typing import NoReturn
from typing_extensions import Literal
FILE_OR_DIR = "file_or_dir"
class Parser:
"""Parser for command line arguments and ini-file values.
:ivar extra_info: Dict of generic param -> value to display in case
there's an error processing the command line arguments.
"""
prog = None # type: Optional[str]
def __init__(
self,
usage: Optional[str] = None,
processopt: Optional[Callable[["Argument"], None]] = None,
) -> None:
self._anonymous = OptionGroup("custom options", parser=self)
self._groups = [] # type: List[OptionGroup]
self._processopt = processopt
self._usage = usage
self._inidict = {} # type: Dict[str, Tuple[str, Optional[str], Any]]
self._ininames = [] # type: List[str]
self.extra_info = {} # type: Dict[str, Any]
def processoption(self, option: "Argument") -> None:
if self._processopt:
if option.dest:
self._processopt(option)
def getgroup(
self, name: str, description: str = "", after: Optional[str] = None
) -> "OptionGroup":
"""Get (or create) a named option Group.
:name: Name of the option group.
:description: Long description for --help output.
:after: Name of another group, used for ordering --help output.
The returned group object has an ``addoption`` method with the same
signature as :py:func:`parser.addoption
<_pytest.config.argparsing.Parser.addoption>` but will be shown in the
        respective group in the output of ``pytest --help``.
"""
for group in self._groups:
if group.name == name:
return group
group = OptionGroup(name, description, parser=self)
i = 0
for i, grp in enumerate(self._groups):
if grp.name == after:
break
self._groups.insert(i + 1, group)
return group
def addoption(self, *opts: str, **attrs: Any) -> None:
"""Register a command line option.
:opts: Option names, can be short or long options.
:attrs: Same attributes which the ``add_argument()`` function of the
`argparse library <https://docs.python.org/library/argparse.html>`_
accepts.
After command line parsing, options are available on the pytest config
object via ``config.option.NAME`` where ``NAME`` is usually set
by passing a ``dest`` attribute, for example
``addoption("--long", dest="NAME", ...)``.
"""
self._anonymous.addoption(*opts, **attrs)
def parse(
self,
args: Sequence[Union[str, py.path.local]],
namespace: Optional[argparse.Namespace] = None,
) -> argparse.Namespace:
from _pytest._argcomplete import try_argcomplete
self.optparser = self._getparser()
try_argcomplete(self.optparser)
strargs = [str(x) if isinstance(x, py.path.local) else x for x in args]
return self.optparser.parse_args(strargs, namespace=namespace)
def _getparser(self) -> "MyOptionParser":
from _pytest._argcomplete import filescompleter
optparser = MyOptionParser(self, self.extra_info, prog=self.prog)
groups = self._groups + [self._anonymous]
for group in groups:
if group.options:
desc = group.description or group.name
arggroup = optparser.add_argument_group(desc)
for option in group.options:
n = option.names()
a = option.attrs()
arggroup.add_argument(*n, **a)
file_or_dir_arg = optparser.add_argument(FILE_OR_DIR, nargs="*")
# bash like autocompletion for dirs (appending '/')
# Type ignored because typeshed doesn't know about argcomplete.
file_or_dir_arg.completer = filescompleter # type: ignore
return optparser
def parse_setoption(
self,
args: Sequence[Union[str, py.path.local]],
option: argparse.Namespace,
namespace: Optional[argparse.Namespace] = None,
) -> List[str]:
parsedoption = self.parse(args, namespace=namespace)
for name, value in parsedoption.__dict__.items():
setattr(option, name, value)
return cast(List[str], getattr(parsedoption, FILE_OR_DIR))
def parse_known_args(
self,
args: Sequence[Union[str, py.path.local]],
namespace: Optional[argparse.Namespace] = None,
) -> argparse.Namespace:
"""Parse and return a namespace object with known arguments at this point."""
return self.parse_known_and_unknown_args(args, namespace=namespace)[0]
def parse_known_and_unknown_args(
self,
args: Sequence[Union[str, py.path.local]],
namespace: Optional[argparse.Namespace] = None,
) -> Tuple[argparse.Namespace, List[str]]:
"""Parse and return a namespace object with known arguments, and
the remaining arguments unknown at this point."""
optparser = self._getparser()
strargs = [str(x) if isinstance(x, py.path.local) else x for x in args]
return optparser.parse_known_args(strargs, namespace=namespace)
def addini(
self,
name: str,
help: str,
type: Optional["Literal['pathlist', 'args', 'linelist', 'bool']"] = None,
default=None,
) -> None:
"""Register an ini-file option.
:name: Name of the ini-variable.
:type: Type of the variable, can be ``pathlist``, ``args``, ``linelist``
or ``bool``.
:default: Default value if no ini-file option exists but is queried.
The value of ini-variables can be retrieved via a call to
:py:func:`config.getini(name) <_pytest.config.Config.getini>`.
"""
assert type in (None, "pathlist", "args", "linelist", "bool")
self._inidict[name] = (help, type, default)
self._ininames.append(name)
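# Sketch of how a plugin's conftest.py typically drives the Parser API above
# (the option and ini names here are illustrative, not part of pytest itself):
#
#     def pytest_addoption(parser):
#         group = parser.getgroup("slow tests", "control slow test execution")
#         group.addoption("--runslow", action="store_true", default=False,
#                         help="also run tests marked as slow")
#         parser.addini("slow_timeout", help="timeout for slow tests",
#                       default="60")
#
#     # later: config.getoption("runslow") and config.getini("slow_timeout")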
class ArgumentError(Exception):
"""Raised if an Argument instance is created with invalid or
inconsistent arguments."""
def __init__(self, msg: str, option: Union["Argument", str]) -> None:
self.msg = msg
self.option_id = str(option)
def __str__(self) -> str:
if self.option_id:
return "option {}: {}".format(self.option_id, self.msg)
else:
return self.msg
class Argument:
"""Class that mimics the necessary behaviour of optparse.Option.
    It's currently a least-effort implementation, ignoring choices
    and integer prefixes.
https://docs.python.org/3/library/optparse.html#optparse-standard-option-types
"""
_typ_map = {"int": int, "string": str, "float": float, "complex": complex}
def __init__(self, *names: str, **attrs: Any) -> None:
"""Store parms in private vars for use in add_argument."""
self._attrs = attrs
self._short_opts = [] # type: List[str]
self._long_opts = [] # type: List[str]
if "%default" in (attrs.get("help") or ""):
warnings.warn(
'pytest now uses argparse. "%default" should be'
' changed to "%(default)s" ',
DeprecationWarning,
stacklevel=3,
)
try:
typ = attrs["type"]
except KeyError:
pass
else:
# This might raise a keyerror as well, don't want to catch that.
if isinstance(typ, str):
if typ == "choice":
warnings.warn(
"`type` argument to addoption() is the string %r."
" For choices this is optional and can be omitted, "
" but when supplied should be a type (for example `str` or `int`)."
" (options: %s)" % (typ, names),
DeprecationWarning,
stacklevel=4,
)
# argparse expects a type here take it from
# the type of the first element
attrs["type"] = type(attrs["choices"][0])
else:
warnings.warn(
"`type` argument to addoption() is the string %r, "
" but when supplied should be a type (for example `str` or `int`)."
" (options: %s)" % (typ, names),
DeprecationWarning,
stacklevel=4,
)
attrs["type"] = Argument._typ_map[typ]
# Used in test_parseopt -> test_parse_defaultgetter.
self.type = attrs["type"]
else:
self.type = typ
try:
# Attribute existence is tested in Config._processopt.
self.default = attrs["default"]
except KeyError:
pass
self._set_opt_strings(names)
dest = attrs.get("dest") # type: Optional[str]
if dest:
self.dest = dest
elif self._long_opts:
self.dest = self._long_opts[0][2:].replace("-", "_")
else:
try:
self.dest = self._short_opts[0][1:]
except IndexError as e:
self.dest = "???" # Needed for the error repr.
raise ArgumentError("need a long or short option", self) from e
def names(self) -> List[str]:
return self._short_opts + self._long_opts
def attrs(self) -> Mapping[str, Any]:
# Update any attributes set by processopt.
attrs = "default dest help".split()
attrs.append(self.dest)
for attr in attrs:
try:
self._attrs[attr] = getattr(self, attr)
except AttributeError:
pass
if self._attrs.get("help"):
a = self._attrs["help"]
a = a.replace("%default", "%(default)s")
# a = a.replace('%prog', '%(prog)s')
self._attrs["help"] = a
return self._attrs
def _set_opt_strings(self, opts: Sequence[str]) -> None:
"""Directly from optparse.
Might not be necessary as this is passed to argparse later on.
"""
for opt in opts:
if len(opt) < 2:
raise ArgumentError(
"invalid option string %r: "
"must be at least two characters long" % opt,
self,
)
elif len(opt) == 2:
if not (opt[0] == "-" and opt[1] != "-"):
raise ArgumentError(
"invalid short option string %r: "
"must be of the form -x, (x any non-dash char)" % opt,
self,
)
self._short_opts.append(opt)
else:
if not (opt[0:2] == "--" and opt[2] != "-"):
raise ArgumentError(
"invalid long option string %r: "
"must start with --, followed by non-dash" % opt,
self,
)
self._long_opts.append(opt)
def __repr__(self) -> str:
args = [] # type: List[str]
if self._short_opts:
args += ["_short_opts: " + repr(self._short_opts)]
if self._long_opts:
args += ["_long_opts: " + repr(self._long_opts)]
args += ["dest: " + repr(self.dest)]
if hasattr(self, "type"):
args += ["type: " + repr(self.type)]
if hasattr(self, "default"):
args += ["default: " + repr(self.default)]
return "Argument({})".format(", ".join(args))
class OptionGroup:
def __init__(
self, name: str, description: str = "", parser: Optional[Parser] = None
) -> None:
self.name = name
self.description = description
self.options = [] # type: List[Argument]
self.parser = parser
def addoption(self, *optnames: str, **attrs: Any) -> None:
"""Add an option to this group.
If a shortened version of a long option is specified, it will
be suppressed in the help. addoption('--twowords', '--two-words')
results in help showing '--two-words' only, but --twowords gets
accepted **and** the automatic destination is in args.twowords.
"""
conflict = set(optnames).intersection(
name for opt in self.options for name in opt.names()
)
if conflict:
raise ValueError("option names %s already added" % conflict)
option = Argument(*optnames, **attrs)
self._addoption_instance(option, shortupper=False)
def _addoption(self, *optnames: str, **attrs: Any) -> None:
option = Argument(*optnames, **attrs)
self._addoption_instance(option, shortupper=True)
def _addoption_instance(self, option: "Argument", shortupper: bool = False) -> None:
if not shortupper:
for opt in option._short_opts:
if opt[0] == "-" and opt[1].islower():
raise ValueError("lowercase shortoptions reserved")
if self.parser:
self.parser.processoption(option)
self.options.append(option)
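
# Illustrative sketch (not part of the original module): demonstrates the
# behaviour documented in OptionGroup.addoption() above. The group name
# "example" and the helper name `_example_optiongroup_addoption` are arbitrary.
def _example_optiongroup_addoption() -> None:
    group = OptionGroup("example")
    group.addoption("--twowords", "--two-words", action="store_true", default=False)
    # The automatic destination comes from the first long option.
    assert group.options[0].dest == "twowords"
    try:
        group.addoption("--two-words", action="store_true")
    except ValueError:
        pass  # "--two-words" is already registered, so re-adding it is rejected.
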
class MyOptionParser(argparse.ArgumentParser):
def __init__(
self,
parser: Parser,
extra_info: Optional[Dict[str, Any]] = None,
prog: Optional[str] = None,
) -> None:
self._parser = parser
argparse.ArgumentParser.__init__(
self,
prog=prog,
usage=parser._usage,
add_help=False,
formatter_class=DropShorterLongHelpFormatter,
allow_abbrev=False,
)
        # extra_info is a dict of (param -> value) to display if there's
        # a usage error, to provide more contextual information to the user.
self.extra_info = extra_info if extra_info else {}
def error(self, message: str) -> "NoReturn":
"""Transform argparse error message into UsageError."""
msg = "{}: error: {}".format(self.prog, message)
if hasattr(self._parser, "_config_source_hint"):
# Type ignored because the attribute is set dynamically.
msg = "{} ({})".format(msg, self._parser._config_source_hint) # type: ignore
raise UsageError(self.format_usage() + msg)
# Type ignored because typeshed has a very complex type in the superclass.
def parse_args( # type: ignore
self,
args: Optional[Sequence[str]] = None,
namespace: Optional[argparse.Namespace] = None,
) -> argparse.Namespace:
"""Allow splitting of positional arguments."""
parsed, unrecognized = self.parse_known_args(args, namespace)
if unrecognized:
for arg in unrecognized:
if arg and arg[0] == "-":
lines = ["unrecognized arguments: %s" % (" ".join(unrecognized))]
for k, v in sorted(self.extra_info.items()):
lines.append(" {}: {}".format(k, v))
self.error("\n".join(lines))
getattr(parsed, FILE_OR_DIR).extend(unrecognized)
return parsed
if sys.version_info[:2] < (3, 9): # pragma: no cover
# Backport of https://github.com/python/cpython/pull/14316 so we can
# disable long --argument abbreviations without breaking short flags.
def _parse_optional(
self, arg_string: str
) -> Optional[Tuple[Optional[argparse.Action], str, Optional[str]]]:
if not arg_string:
return None
if not arg_string[0] in self.prefix_chars:
return None
if arg_string in self._option_string_actions:
action = self._option_string_actions[arg_string]
return action, arg_string, None
if len(arg_string) == 1:
return None
if "=" in arg_string:
option_string, explicit_arg = arg_string.split("=", 1)
if option_string in self._option_string_actions:
action = self._option_string_actions[option_string]
return action, option_string, explicit_arg
if self.allow_abbrev or not arg_string.startswith("--"):
option_tuples = self._get_option_tuples(arg_string)
if len(option_tuples) > 1:
msg = gettext(
"ambiguous option: %(option)s could match %(matches)s"
)
options = ", ".join(option for _, option, _ in option_tuples)
self.error(msg % {"option": arg_string, "matches": options})
elif len(option_tuples) == 1:
(option_tuple,) = option_tuples
return option_tuple
if self._negative_number_matcher.match(arg_string):
if not self._has_negative_number_optionals:
return None
if " " in arg_string:
return None
return None, arg_string, None
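
# Illustrative sketch (not part of the original module): MyOptionParser only
# reads `_usage` (and, when present, `_config_source_hint`) from the Parser it
# wraps, so a SimpleNamespace stands in for it here. FILE_OR_DIR is the
# module-level constant naming the positional bucket; everything else in this
# helper (`_example_parse_args_split`, the option names) is hypothetical.
def _example_parse_args_split() -> None:
    import types

    fake_parser = types.SimpleNamespace(_usage="usage: example [options] [file_or_dir] ...")
    argparser = MyOptionParser(fake_parser, extra_info={"inifile": "<none>"})
    argparser.add_argument(FILE_OR_DIR, nargs="*")
    argparser.add_argument("--flag", action="store_true")
    # Positional arguments may be interleaved with options; the trailing path
    # comes back from argparse as "unrecognized" and is folded into FILE_OR_DIR.
    ns = argparser.parse_args(["path_a", "--flag", "path_b"])
    assert ns.flag is True
    assert getattr(ns, FILE_OR_DIR) == ["path_a", "path_b"]
    # With allow_abbrev=False an abbreviated long option is not expanded and
    # therefore triggers a UsageError instead of silently matching --flag.
    try:
        argparser.parse_args(["--fl"])
    except UsageError:
        pass
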
class DropShorterLongHelpFormatter(argparse.HelpFormatter):
"""Shorten help for long options that differ only in extra hyphens.
- Collapse **long** options that are the same except for extra hyphens.
- Shortcut if there are only two options and one of them is a short one.
- Cache result on the action object as this is called at least 2 times.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
# Use more accurate terminal width.
if "width" not in kwargs:
kwargs["width"] = _pytest._io.get_terminal_width()
super().__init__(*args, **kwargs)
def _format_action_invocation(self, action: argparse.Action) -> str:
orgstr = argparse.HelpFormatter._format_action_invocation(self, action)
if orgstr and orgstr[0] != "-": # only optional arguments
return orgstr
res = getattr(
action, "_formatted_action_invocation", None
) # type: Optional[str]
if res:
return res
options = orgstr.split(", ")
if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2):
# a shortcut for '-h, --help' or '--abc', '-a'
action._formatted_action_invocation = orgstr # type: ignore
return orgstr
return_list = []
short_long = {} # type: Dict[str, str]
for option in options:
if len(option) == 2 or option[2] == " ":
continue
if not option.startswith("--"):
raise ArgumentError(
'long optional argument without "--": [%s]' % (option), option
)
xxoption = option[2:]
shortened = xxoption.replace("-", "")
if shortened not in short_long or len(short_long[shortened]) < len(
xxoption
):
short_long[shortened] = xxoption
        # short_long now maps each dash-stripped spelling to its longest
        # dashed variant, **and** the loop below keeps the option ordering
        # from add_argument().
for option in options:
if len(option) == 2 or option[2] == " ":
return_list.append(option)
if option[2:] == short_long.get(option.replace("-", "")):
return_list.append(option.replace(" ", "=", 1))
formatted_action_invocation = ", ".join(return_list)
action._formatted_action_invocation = formatted_action_invocation # type: ignore
return formatted_action_invocation
    def _split_lines(self, text: str, width: int) -> List[str]:
        """Wrap lines after splitting on original newlines.

        This allows having explicit line breaks in the help text.
        """
import textwrap
lines = []
for line in text.splitlines():
lines.extend(textwrap.wrap(line.strip(), width))
return lines
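
# Illustrative sketch (not part of the original module): exercises the help
# formatter above. Only the dashed long spelling survives in the help output,
# and explicit newlines in the help string are preserved by _split_lines().
# The helper name `_example_help_collapsing` and the option names are hypothetical.
def _example_help_collapsing() -> None:
    import types

    fake_parser = types.SimpleNamespace(_usage="usage: example [options]")
    argparser = MyOptionParser(fake_parser)
    argparser.add_argument(
        "--two-words", "--twowords", action="store_true",
        help="first line\nsecond line",
    )
    helptext = argparser.format_help()
    # The dash-less duplicate spelling is dropped from the help listing ...
    assert "--two-words" in helptext
    assert "--twowords" not in helptext
    # ... and the explicit newline in the help string yields two help lines.
    assert "first line" in helptext and "second line" in helptext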