| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable) |
|---|---|---|---|---|
| danielecook/gist-alfred | refs/heads/master | github/GitTreeElement.py | 5 |
# -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import github.GithubObject
class GitTreeElement(github.GithubObject.NonCompletableGithubObject):
"""
This class represents GitTreeElements
"""
def __repr__(self):
return self.get__repr__({"sha": self._sha.value, "path": self._path.value})
@property
def mode(self):
"""
:type: string
"""
return self._mode.value
@property
def path(self):
"""
:type: string
"""
return self._path.value
@property
def sha(self):
"""
:type: string
"""
return self._sha.value
@property
def size(self):
"""
:type: integer
"""
return self._size.value
@property
def type(self):
"""
:type: string
"""
return self._type.value
@property
def url(self):
"""
:type: string
"""
return self._url.value
def _initAttributes(self):
self._mode = github.GithubObject.NotSet
self._path = github.GithubObject.NotSet
self._sha = github.GithubObject.NotSet
self._size = github.GithubObject.NotSet
self._type = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "mode" in attributes: # pragma no branch
self._mode = self._makeStringAttribute(attributes["mode"])
if "path" in attributes: # pragma no branch
self._path = self._makeStringAttribute(attributes["path"])
if "sha" in attributes: # pragma no branch
self._sha = self._makeStringAttribute(attributes["sha"])
if "size" in attributes: # pragma no branch
self._size = self._makeIntAttribute(attributes["size"])
if "type" in attributes: # pragma no branch
self._type = self._makeStringAttribute(attributes["type"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
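# Hedged usage sketch (assumes PyGithub's public API, which is defined
# outside this file):
#
#   from github import Github
#
#   g = Github("<token>")
#   tree = g.get_repo("PyGithub/PyGithub").get_git_tree("master", recursive=True)
#   for element in tree.tree:  # each entry is a GitTreeElement
#       print(element.path, element.mode, element.type, element.sha)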
| Therp/odoo | refs/heads/8.0 | addons/portal_stock/__openerp__.py | 437 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Portal Stock',
'version': '0.1',
'complexity': 'easy',
'description': """
This module adds access rules to your portal if stock and portal are installed.
==========================================================================================
""",
'author': 'OpenERP SA',
'depends': ['sale_stock','portal'],
'data': [
'security/portal_security.xml',
'security/ir.model.access.csv',
],
'installable': True,
'auto_install': True,
'category': 'Hidden',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| cberry777/dd-agent | refs/heads/master | tests/checks/mock/test_riakcs.py | 5 |
# stdlib
from socket import error
import unittest
# 3p
from mock import Mock
# project
from checks import AgentCheck
from tests.checks.common import AgentCheckTest, Fixtures, load_check
class RiakCSTest(AgentCheckTest):
CHECK_NAME = "riakcs"
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.config = {"instances": [{
"access_id":"foo",
"access_secret": "bar"}]}
self.check = load_check(self.CHECK_NAME, self.config, {})
self.check._connect = Mock(return_value=(None, None, ["aggregation_key:localhost:8080"], []))
self.check._get_stats = Mock(return_value=self.check.load_json(
Fixtures.read_file('riakcs_in.json')))
def test_parser(self):
input_json = Fixtures.read_file('riakcs_in.json')
output_python = Fixtures.read_file('riakcs_out.python')
        self.assertEqual(self.check.load_json(input_json), eval(output_python))
def test_metrics(self):
self.run_check(self.config)
expected = eval(Fixtures.read_file('riakcs_metrics.python'))
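        # each expected entry is (name, timestamp, value, attributes);
        # m[1], the timestamp, is not asserted here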
for m in expected:
self.assertMetric(m[0], m[2], m[3].get('tags', []))
def test_service_checks(self):
self.check = load_check(self.CHECK_NAME, self.config, {})
self.assertRaises(error, lambda: self.run_check(self.config))
self.assertEqual(len(self.service_checks), 1, self.service_checks)
self.assertServiceCheck(self.check.SERVICE_CHECK_NAME,
status=AgentCheck.CRITICAL,
tags=['aggregation_key:localhost:8080'])
class Riak21CSTest(AgentCheckTest):
CHECK_NAME = "riakcs"
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.config = {
"instances": [{
"access_id":"foo",
"access_secret": "bar",
"metrics": [
"request_pool_overflow",
"request_pool_size",
"request_pool_workers",
],
}],
}
self.check = load_check(self.CHECK_NAME, self.config, {})
self.check._connect = Mock(return_value=(
None,
None,
["aggregation_key:localhost:8080"],
self.config["instances"][0]["metrics"],
))
self.check._get_stats = Mock(return_value=self.check.load_json(
Fixtures.read_file('riakcs21_in.json')))
def test_21_parser(self):
input_json = Fixtures.read_file('riakcs21_in.json')
output_python = Fixtures.read_file('riakcs21_out.python')
        self.assertEqual(self.check.load_json(input_json), eval(output_python))
def test_21_metrics(self):
self.run_check(self.config)
expected = eval(Fixtures.read_file('riakcs21_metrics.python'))
for m in expected:
self.assertMetric(m[0], m[2], m[3].get('tags', []),
metric_type=m[3]["type"])
# verify non-default (and not in config) metric is not sent
with self.assertRaises(AssertionError):
self.assertMetric("riakcs.bucket_policy_get_in_one")
| jordiclariana/ansible | refs/heads/devel | lib/ansible/utils/encrypt.py | 49 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import stat
import tempfile
import multiprocessing
import time
import warnings
PASSLIB_AVAILABLE = False
try:
import passlib.hash
PASSLIB_AVAILABLE = True
except ImportError:
pass
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
KEYCZAR_AVAILABLE = False
try:
try:
# some versions of pycrypto may not have this?
from Crypto.pct_warnings import PowmInsecureWarning
except ImportError:
PowmInsecureWarning = RuntimeWarning
with warnings.catch_warnings(record=True) as warning_handler:
warnings.simplefilter("error", PowmInsecureWarning)
try:
import keyczar.errors as key_errors
from keyczar.keys import AesKey
except PowmInsecureWarning:
display.system_warning(
"The version of gmp you have installed has a known issue regarding " + \
"timing vulnerabilities when used with pycrypto. " + \
"If possible, you should update it (i.e. yum update gmp)."
)
warnings.resetwarnings()
warnings.simplefilter("ignore")
import keyczar.errors as key_errors
from keyczar.keys import AesKey
    KEYCZAR_AVAILABLE = True
except ImportError:
pass
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_text, to_bytes
__all__ = ['do_encrypt']
_LOCK = multiprocessing.Lock()
def do_encrypt(result, encrypt, salt_size=None, salt=None):
if PASSLIB_AVAILABLE:
try:
crypt = getattr(passlib.hash, encrypt)
        except AttributeError:
raise AnsibleError("passlib does not support '%s' algorithm" % encrypt)
if salt_size:
result = crypt.encrypt(result, salt_size=salt_size)
elif salt:
if crypt._salt_is_bytes:
salt = to_bytes(salt, encoding='ascii', errors='strict')
else:
salt = to_text(salt, encoding='ascii', errors='strict')
result = crypt.encrypt(result, salt=salt)
else:
result = crypt.encrypt(result)
else:
raise AnsibleError("passlib must be installed to encrypt vars_prompt values")
    # Hashes from passlib.hash should be ASCII strings of hex digits, so this
    # conversion should not traceback. If a hash is not representable as
    # ASCII we want the traceback, so that such algorithms can be blacklisted,
    # because a mangled hash would impact calling code.
return to_text(result, errors='strict')
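# Hedged usage sketch for do_encrypt (requires passlib; 'sha512_crypt' is one
# of the algorithm names passlib exposes):
#
#   hashed = do_encrypt('s3cret', 'sha512_crypt')
#   # -> a '$6$...' crypt string suitable for a user password field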
def key_for_hostname(hostname):
# fireball mode is an implementation of ansible firing up zeromq via SSH
# to use no persistent daemons or key management
if not KEYCZAR_AVAILABLE:
raise AnsibleError("python-keyczar must be installed on the control machine to use accelerated modes")
key_path = os.path.expanduser(C.ACCELERATE_KEYS_DIR)
if not os.path.exists(key_path):
# avoid race with multiple forks trying to create paths on host
# but limit when locking is needed to creation only
with(_LOCK):
if not os.path.exists(key_path):
# use a temp directory and rename to ensure the directory
# searched for only appears after permissions applied.
tmp_dir = tempfile.mkdtemp(dir=os.path.dirname(key_path))
os.chmod(tmp_dir, int(C.ACCELERATE_KEYS_DIR_PERMS, 8))
os.rename(tmp_dir, key_path)
elif not os.path.isdir(key_path):
raise AnsibleError('ACCELERATE_KEYS_DIR is not a directory.')
if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_DIR_PERMS, 8):
raise AnsibleError('Incorrect permissions on the private key directory. Use `chmod 0%o %s` to correct this issue, and make sure any of the keys files contained within that directory are set to 0%o' % (int(C.ACCELERATE_KEYS_DIR_PERMS, 8), C.ACCELERATE_KEYS_DIR, int(C.ACCELERATE_KEYS_FILE_PERMS, 8)))
key_path = os.path.join(key_path, hostname)
# use new AES keys every 2 hours, which means fireball must not allow running for longer either
if not os.path.exists(key_path) or (time.time() - os.path.getmtime(key_path) > 60*60*2):
# avoid race with multiple forks trying to create key
# but limit when locking is needed to creation only
with(_LOCK):
if not os.path.exists(key_path) or (time.time() - os.path.getmtime(key_path) > 60*60*2):
key = AesKey.Generate()
# use temp file to ensure file only appears once it has
# desired contents and permissions
with tempfile.NamedTemporaryFile(mode='w', dir=os.path.dirname(key_path), delete=False) as fh:
tmp_key_path = fh.name
fh.write(str(key))
os.chmod(tmp_key_path, int(C.ACCELERATE_KEYS_FILE_PERMS, 8))
os.rename(tmp_key_path, key_path)
return key
if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_FILE_PERMS, 8):
raise AnsibleError('Incorrect permissions on the key file for this host. Use `chmod 0%o %s` to correct this issue.' % (int(C.ACCELERATE_KEYS_FILE_PERMS, 8), key_path))
    with open(key_path) as fh:
        key = AesKey.Read(fh.read())
return key
def keyczar_encrypt(key, msg):
return key.Encrypt(msg.encode('utf-8'))
def keyczar_decrypt(key, msg):
try:
return key.Decrypt(msg)
except key_errors.InvalidSignatureError:
raise AnsibleError("decryption failed")
| aetilley/scikit-learn | refs/heads/master | doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py | 254 |
"""Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer;
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
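# A minimal sketch of one possible solution to the four TASKs above (hedged:
# not necessarily the tutorial's canonical answer; the names clf and
# y_predicted are required by the evaluation code below):
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
                             use_idf=False)
clf = Pipeline([
    ('vec', vectorizer),
    ('clf', Perceptron()),
])
clf.fit(docs_train, y_train)
y_predicted = clf.predict(docs_test)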
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| marcoantoniooliveira/labweb | refs/heads/master | oscar/lib/python2.7/site-packages/whoosh/filedb/compound.py | 87 |
# Copyright 2011 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
import errno
import os
import sys
from threading import Lock
from shutil import copyfileobj
try:
import mmap
except ImportError:
mmap = None
from whoosh.compat import BytesIO, memoryview_
from whoosh.filedb.structfile import BufferFile, StructFile
from whoosh.filedb.filestore import FileStorage, StorageError
from whoosh.system import emptybytes
from whoosh.util import random_name
class CompoundStorage(FileStorage):
readonly = True
def __init__(self, dbfile, use_mmap=True, basepos=0):
self._file = dbfile
self.is_closed = False
# Seek to the end to get total file size (to check if mmap is OK)
dbfile.seek(0, os.SEEK_END)
filesize = self._file.tell()
dbfile.seek(basepos)
self._diroffset = self._file.read_long()
self._dirlength = self._file.read_int()
self._file.seek(self._diroffset)
self._dir = self._file.read_pickle()
self._options = self._file.read_pickle()
self._locks = {}
self._source = None
use_mmap = (
use_mmap
and hasattr(self._file, "fileno") # check file is a real file
and filesize < sys.maxsize # check fit on 32-bit Python
)
if mmap and use_mmap:
# Try to open the entire segment as a memory-mapped object
try:
fileno = self._file.fileno()
self._source = mmap.mmap(fileno, 0, access=mmap.ACCESS_READ)
except (mmap.error, OSError):
e = sys.exc_info()[1]
# If we got an error because there wasn't enough memory to
# open the map, ignore it and fall through, we'll just use the
# (slower) "sub-file" implementation
if e.errno == errno.ENOMEM:
pass
else:
raise
else:
# If that worked, we can close the file handle we were given
self._file.close()
self._file = None
def __repr__(self):
return "<%s (%s)>" % (self.__class__.__name__, self._name)
def close(self):
if self.is_closed:
raise Exception("Already closed")
self.is_closed = True
if self._source:
try:
self._source.close()
except BufferError:
del self._source
if self._file:
self._file.close()
def range(self, name):
try:
fileinfo = self._dir[name]
except KeyError:
raise NameError("Unknown file %r" % (name,))
return fileinfo["offset"], fileinfo["length"]
def open_file(self, name, *args, **kwargs):
if self.is_closed:
raise StorageError("Storage was closed")
offset, length = self.range(name)
if self._source:
# Create a memoryview/buffer from the mmap
buf = memoryview_(self._source, offset, length)
f = BufferFile(buf, name=name)
elif hasattr(self._file, "subset"):
f = self._file.subset(offset, length, name=name)
else:
f = StructFile(SubFile(self._file, offset, length), name=name)
return f
def list(self):
return list(self._dir.keys())
def file_exists(self, name):
return name in self._dir
def file_length(self, name):
info = self._dir[name]
return info["length"]
def file_modified(self, name):
info = self._dir[name]
return info["modified"]
def lock(self, name):
if name not in self._locks:
self._locks[name] = Lock()
return self._locks[name]
@staticmethod
def assemble(dbfile, store, names, **options):
assert names, names
directory = {}
basepos = dbfile.tell()
dbfile.write_long(0) # Directory position
dbfile.write_int(0) # Directory length
# Copy the files into the compound file
for name in names:
if name.endswith(".toc") or name.endswith(".seg"):
raise Exception(name)
for name in names:
offset = dbfile.tell()
length = store.file_length(name)
modified = store.file_modified(name)
directory[name] = {"offset": offset, "length": length,
"modified": modified}
f = store.open_file(name)
copyfileobj(f, dbfile)
f.close()
CompoundStorage.write_dir(dbfile, basepos, directory, options)
@staticmethod
def write_dir(dbfile, basepos, directory, options=None):
options = options or {}
dirpos = dbfile.tell() # Remember the start of the directory
dbfile.write_pickle(directory) # Write the directory
dbfile.write_pickle(options)
endpos = dbfile.tell() # Remember the end of the directory
dbfile.flush()
dbfile.seek(basepos) # Seek back to the start
dbfile.write_long(dirpos) # Directory position
dbfile.write_int(endpos - dirpos) # Directory length
dbfile.close()
class SubFile(object):
def __init__(self, parentfile, offset, length, name=None):
self._file = parentfile
self._offset = offset
self._length = length
self._end = offset + length
self._pos = 0
self.name = name
self.closed = False
def close(self):
self.closed = True
def subset(self, position, length, name=None):
start = self._offset + position
end = start + length
name = name or self.name
        assert self._offset <= start <= self._end
        assert self._offset <= end <= self._end
return SubFile(self._file, self._offset + position, length, name=name)
def read(self, size=None):
if size is None:
size = self._length - self._pos
else:
size = min(size, self._length - self._pos)
if size < 0:
size = 0
if size > 0:
self._file.seek(self._offset + self._pos)
self._pos += size
return self._file.read(size)
else:
return emptybytes
def readline(self):
maxsize = self._length - self._pos
self._file.seek(self._offset + self._pos)
data = self._file.readline()
if len(data) > maxsize:
data = data[:maxsize]
self._pos += len(data)
return data
def seek(self, where, whence=0):
if whence == 0: # Absolute
pos = where
elif whence == 1: # Relative
pos = self._pos + where
elif whence == 2: # From end
pos = self._length - where
else:
raise ValueError
self._pos = pos
def tell(self):
return self._pos
class CompoundWriter(object):
def __init__(self, tempstorage, buffersize=32 * 1024):
assert isinstance(buffersize, int)
self._tempstorage = tempstorage
self._tempname = "%s.ctmp" % random_name()
self._temp = tempstorage.create_file(self._tempname, mode="w+b")
self._buffersize = buffersize
self._streams = {}
def create_file(self, name):
ss = self.SubStream(self._temp, self._buffersize)
self._streams[name] = ss
return StructFile(ss)
def _readback(self):
temp = self._temp
for name, substream in self._streams.items():
substream.close()
def gen():
for f, offset, length in substream.blocks:
if f is None:
f = temp
f.seek(offset)
yield f.read(length)
yield (name, gen)
temp.close()
self._tempstorage.delete_file(self._tempname)
def save_as_compound(self, dbfile):
basepos = dbfile.tell()
dbfile.write_long(0) # Directory offset
dbfile.write_int(0) # Directory length
directory = {}
for name, blocks in self._readback():
filestart = dbfile.tell()
for block in blocks():
dbfile.write(block)
directory[name] = {"offset": filestart,
"length": dbfile.tell() - filestart}
CompoundStorage.write_dir(dbfile, basepos, directory)
def save_as_files(self, storage, name_fn):
for name, blocks in self._readback():
f = storage.create_file(name_fn(name))
for block in blocks():
f.write(block)
f.close()
class SubStream(object):
def __init__(self, dbfile, buffersize):
self._dbfile = dbfile
self._buffersize = buffersize
self._buffer = BytesIO()
self.blocks = []
def tell(self):
return sum(b[2] for b in self.blocks) + self._buffer.tell()
def write(self, inbytes):
bio = self._buffer
buflen = bio.tell()
length = buflen + len(inbytes)
if length >= self._buffersize:
offset = self._dbfile.tell()
self._dbfile.write(bio.getvalue()[:buflen])
self._dbfile.write(inbytes)
self.blocks.append((None, offset, length))
self._buffer.seek(0)
else:
bio.write(inbytes)
def close(self):
bio = self._buffer
length = bio.tell()
if length:
self.blocks.append((bio, 0, length))
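# Hedged usage sketch (assumes whoosh's FileStorage API from
# whoosh.filedb.filestore; the file names here are hypothetical):
#
#   st = FileStorage("indexdir")
#   out = st.create_file("segment.cmp")
#   CompoundStorage.assemble(out, st, ["terms.bin", "postings.bin"])
#   cst = CompoundStorage(st.open_file("segment.cmp"))
#   print(cst.list())  # e.g. ["terms.bin", "postings.bin"]
#   f = cst.open_file("terms.bin")  # a read-only view into the compound file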
| jetskijoe/headphones | refs/heads/master | lib/yaml/representer.py | 360 |
__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
'RepresenterError']
from error import *
from nodes import *
import datetime
import sys, copy_reg, types
class RepresenterError(YAMLError):
pass
class BaseRepresenter(object):
yaml_representers = {}
yaml_multi_representers = {}
def __init__(self, default_style=None, default_flow_style=None):
self.default_style = default_style
self.default_flow_style = default_flow_style
self.represented_objects = {}
self.object_keeper = []
self.alias_key = None
def represent(self, data):
node = self.represent_data(data)
self.serialize(node)
self.represented_objects = {}
self.object_keeper = []
self.alias_key = None
def get_classobj_bases(self, cls):
bases = [cls]
for base in cls.__bases__:
bases.extend(self.get_classobj_bases(base))
return bases
def represent_data(self, data):
if self.ignore_aliases(data):
self.alias_key = None
else:
self.alias_key = id(data)
if self.alias_key is not None:
if self.alias_key in self.represented_objects:
node = self.represented_objects[self.alias_key]
#if node is None:
# raise RepresenterError("recursive objects are not allowed: %r" % data)
return node
#self.represented_objects[alias_key] = None
self.object_keeper.append(data)
data_types = type(data).__mro__
if type(data) is types.InstanceType:
data_types = self.get_classobj_bases(data.__class__)+list(data_types)
if data_types[0] in self.yaml_representers:
node = self.yaml_representers[data_types[0]](self, data)
else:
for data_type in data_types:
if data_type in self.yaml_multi_representers:
node = self.yaml_multi_representers[data_type](self, data)
break
else:
if None in self.yaml_multi_representers:
node = self.yaml_multi_representers[None](self, data)
elif None in self.yaml_representers:
node = self.yaml_representers[None](self, data)
else:
node = ScalarNode(None, unicode(data))
#if alias_key is not None:
# self.represented_objects[alias_key] = node
return node
def add_representer(cls, data_type, representer):
if not 'yaml_representers' in cls.__dict__:
cls.yaml_representers = cls.yaml_representers.copy()
cls.yaml_representers[data_type] = representer
add_representer = classmethod(add_representer)
def add_multi_representer(cls, data_type, representer):
if not 'yaml_multi_representers' in cls.__dict__:
cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
cls.yaml_multi_representers[data_type] = representer
add_multi_representer = classmethod(add_multi_representer)
def represent_scalar(self, tag, value, style=None):
if style is None:
style = self.default_style
node = ScalarNode(tag, value, style=style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
return node
def represent_sequence(self, tag, sequence, flow_style=None):
value = []
node = SequenceNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
for item in sequence:
node_item = self.represent_data(item)
if not (isinstance(node_item, ScalarNode) and not node_item.style):
best_style = False
value.append(node_item)
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
def represent_mapping(self, tag, mapping, flow_style=None):
value = []
node = MappingNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
if hasattr(mapping, 'items'):
mapping = mapping.items()
mapping.sort()
for item_key, item_value in mapping:
node_key = self.represent_data(item_key)
node_value = self.represent_data(item_value)
if not (isinstance(node_key, ScalarNode) and not node_key.style):
best_style = False
if not (isinstance(node_value, ScalarNode) and not node_value.style):
best_style = False
value.append((node_key, node_value))
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
def ignore_aliases(self, data):
return False
class SafeRepresenter(BaseRepresenter):
def ignore_aliases(self, data):
if data in [None, ()]:
return True
if isinstance(data, (str, unicode, bool, int, float)):
return True
def represent_none(self, data):
return self.represent_scalar(u'tag:yaml.org,2002:null',
u'null')
def represent_str(self, data):
tag = None
style = None
try:
data = unicode(data, 'ascii')
tag = u'tag:yaml.org,2002:str'
except UnicodeDecodeError:
try:
data = unicode(data, 'utf-8')
tag = u'tag:yaml.org,2002:str'
except UnicodeDecodeError:
data = data.encode('base64')
tag = u'tag:yaml.org,2002:binary'
style = '|'
return self.represent_scalar(tag, data, style=style)
def represent_unicode(self, data):
return self.represent_scalar(u'tag:yaml.org,2002:str', data)
def represent_bool(self, data):
if data:
value = u'true'
else:
value = u'false'
return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
def represent_int(self, data):
return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
def represent_long(self, data):
return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
inf_value = 1e300
while repr(inf_value) != repr(inf_value*inf_value):
inf_value *= inf_value
def represent_float(self, data):
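        # "data != data" is true only for NaN; the second clause catches
        # platforms where NaN compares equal to every float.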
if data != data or (data == 0.0 and data == 1.0):
value = u'.nan'
elif data == self.inf_value:
value = u'.inf'
elif data == -self.inf_value:
value = u'-.inf'
else:
value = unicode(repr(data)).lower()
# Note that in some cases `repr(data)` represents a float number
# without the decimal parts. For instance:
# >>> repr(1e17)
# '1e17'
# Unfortunately, this is not a valid float representation according
# to the definition of the `!!float` tag. We fix this by adding
# '.0' before the 'e' symbol.
if u'.' not in value and u'e' in value:
value = value.replace(u'e', u'.0e', 1)
return self.represent_scalar(u'tag:yaml.org,2002:float', value)
def represent_list(self, data):
#pairs = (len(data) > 0 and isinstance(data, list))
#if pairs:
# for item in data:
# if not isinstance(item, tuple) or len(item) != 2:
# pairs = False
# break
#if not pairs:
return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
#value = []
#for item_key, item_value in data:
# value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
# [(item_key, item_value)]))
#return SequenceNode(u'tag:yaml.org,2002:pairs', value)
def represent_dict(self, data):
return self.represent_mapping(u'tag:yaml.org,2002:map', data)
def represent_set(self, data):
value = {}
for key in data:
value[key] = None
return self.represent_mapping(u'tag:yaml.org,2002:set', value)
def represent_date(self, data):
value = unicode(data.isoformat())
return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
def represent_datetime(self, data):
value = unicode(data.isoformat(' '))
return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
def represent_yaml_object(self, tag, data, cls, flow_style=None):
if hasattr(data, '__getstate__'):
state = data.__getstate__()
else:
state = data.__dict__.copy()
return self.represent_mapping(tag, state, flow_style=flow_style)
def represent_undefined(self, data):
raise RepresenterError("cannot represent an object: %s" % data)
SafeRepresenter.add_representer(type(None),
SafeRepresenter.represent_none)
SafeRepresenter.add_representer(str,
SafeRepresenter.represent_str)
SafeRepresenter.add_representer(unicode,
SafeRepresenter.represent_unicode)
SafeRepresenter.add_representer(bool,
SafeRepresenter.represent_bool)
SafeRepresenter.add_representer(int,
SafeRepresenter.represent_int)
SafeRepresenter.add_representer(long,
SafeRepresenter.represent_long)
SafeRepresenter.add_representer(float,
SafeRepresenter.represent_float)
SafeRepresenter.add_representer(list,
SafeRepresenter.represent_list)
SafeRepresenter.add_representer(tuple,
SafeRepresenter.represent_list)
SafeRepresenter.add_representer(dict,
SafeRepresenter.represent_dict)
SafeRepresenter.add_representer(set,
SafeRepresenter.represent_set)
SafeRepresenter.add_representer(datetime.date,
SafeRepresenter.represent_date)
SafeRepresenter.add_representer(datetime.datetime,
SafeRepresenter.represent_datetime)
SafeRepresenter.add_representer(None,
SafeRepresenter.represent_undefined)
class Representer(SafeRepresenter):
def represent_str(self, data):
tag = None
style = None
try:
data = unicode(data, 'ascii')
tag = u'tag:yaml.org,2002:str'
except UnicodeDecodeError:
try:
data = unicode(data, 'utf-8')
tag = u'tag:yaml.org,2002:python/str'
except UnicodeDecodeError:
data = data.encode('base64')
tag = u'tag:yaml.org,2002:binary'
style = '|'
return self.represent_scalar(tag, data, style=style)
def represent_unicode(self, data):
tag = None
try:
data.encode('ascii')
tag = u'tag:yaml.org,2002:python/unicode'
except UnicodeEncodeError:
tag = u'tag:yaml.org,2002:str'
return self.represent_scalar(tag, data)
def represent_long(self, data):
tag = u'tag:yaml.org,2002:int'
if int(data) is not data:
tag = u'tag:yaml.org,2002:python/long'
return self.represent_scalar(tag, unicode(data))
def represent_complex(self, data):
if data.imag == 0.0:
data = u'%r' % data.real
elif data.real == 0.0:
data = u'%rj' % data.imag
elif data.imag > 0:
data = u'%r+%rj' % (data.real, data.imag)
else:
data = u'%r%rj' % (data.real, data.imag)
return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
def represent_tuple(self, data):
return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
def represent_name(self, data):
name = u'%s.%s' % (data.__module__, data.__name__)
return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
def represent_module(self, data):
return self.represent_scalar(
u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
def represent_instance(self, data):
# For instances of classic classes, we use __getinitargs__ and
# __getstate__ to serialize the data.
# If data.__getinitargs__ exists, the object must be reconstructed by
# calling cls(**args), where args is a tuple returned by
# __getinitargs__. Otherwise, the cls.__init__ method should never be
# called and the class instance is created by instantiating a trivial
# class and assigning to the instance's __class__ variable.
# If data.__getstate__ exists, it returns the state of the object.
# Otherwise, the state of the object is data.__dict__.
# We produce either a !!python/object or !!python/object/new node.
# If data.__getinitargs__ does not exist and state is a dictionary, we
# produce a !!python/object node . Otherwise we produce a
# !!python/object/new node.
cls = data.__class__
class_name = u'%s.%s' % (cls.__module__, cls.__name__)
args = None
state = None
if hasattr(data, '__getinitargs__'):
args = list(data.__getinitargs__())
if hasattr(data, '__getstate__'):
state = data.__getstate__()
else:
state = data.__dict__
if args is None and isinstance(state, dict):
return self.represent_mapping(
u'tag:yaml.org,2002:python/object:'+class_name, state)
if isinstance(state, dict) and not state:
return self.represent_sequence(
u'tag:yaml.org,2002:python/object/new:'+class_name, args)
value = {}
if args:
value['args'] = args
value['state'] = state
return self.represent_mapping(
u'tag:yaml.org,2002:python/object/new:'+class_name, value)
def represent_object(self, data):
# We use __reduce__ API to save the data. data.__reduce__ returns
# a tuple of length 2-5:
# (function, args, state, listitems, dictitems)
# For reconstructing, we calls function(*args), then set its state,
# listitems, and dictitems if they are not None.
# A special case is when function.__name__ == '__newobj__'. In this
# case we create the object with args[0].__new__(*args).
# Another special case is when __reduce__ returns a string - we don't
# support it.
# We produce a !!python/object, !!python/object/new or
# !!python/object/apply node.
cls = type(data)
if cls in copy_reg.dispatch_table:
reduce = copy_reg.dispatch_table[cls](data)
elif hasattr(data, '__reduce_ex__'):
reduce = data.__reduce_ex__(2)
elif hasattr(data, '__reduce__'):
reduce = data.__reduce__()
else:
raise RepresenterError("cannot represent object: %r" % data)
reduce = (list(reduce)+[None]*5)[:5]
function, args, state, listitems, dictitems = reduce
args = list(args)
if state is None:
state = {}
if listitems is not None:
listitems = list(listitems)
if dictitems is not None:
dictitems = dict(dictitems)
if function.__name__ == '__newobj__':
function = args[0]
args = args[1:]
tag = u'tag:yaml.org,2002:python/object/new:'
newobj = True
else:
tag = u'tag:yaml.org,2002:python/object/apply:'
newobj = False
function_name = u'%s.%s' % (function.__module__, function.__name__)
if not args and not listitems and not dictitems \
and isinstance(state, dict) and newobj:
return self.represent_mapping(
u'tag:yaml.org,2002:python/object:'+function_name, state)
if not listitems and not dictitems \
and isinstance(state, dict) and not state:
return self.represent_sequence(tag+function_name, args)
value = {}
if args:
value['args'] = args
if state or not isinstance(state, dict):
value['state'] = state
if listitems:
value['listitems'] = listitems
if dictitems:
value['dictitems'] = dictitems
return self.represent_mapping(tag+function_name, value)
Representer.add_representer(str,
Representer.represent_str)
Representer.add_representer(unicode,
Representer.represent_unicode)
Representer.add_representer(long,
Representer.represent_long)
Representer.add_representer(complex,
Representer.represent_complex)
Representer.add_representer(tuple,
Representer.represent_tuple)
Representer.add_representer(type,
Representer.represent_name)
Representer.add_representer(types.ClassType,
Representer.represent_name)
Representer.add_representer(types.FunctionType,
Representer.represent_name)
Representer.add_representer(types.BuiltinFunctionType,
Representer.represent_name)
Representer.add_representer(types.ModuleType,
Representer.represent_module)
Representer.add_multi_representer(types.InstanceType,
Representer.represent_instance)
Representer.add_multi_representer(object,
Representer.represent_object)
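# Hedged usage sketch (assumes PyYAML's top-level yaml.add_representer and
# yaml.dump, defined outside this module; the Point class is hypothetical):
#
#   import yaml
#
#   class Point(object):
#       def __init__(self, x, y):
#           self.x, self.y = x, y
#
#   def point_representer(dumper, point):
#       return dumper.represent_mapping(u'!point',
#                                       {'x': point.x, 'y': point.y})
#
#   yaml.add_representer(Point, point_representer)
#   print yaml.dump(Point(1, 2))  # Python 2 print, matching this module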
| vbannai/neutron | refs/heads/master | neutron/tests/unit/openvswitch/test_ovs_tunnel.py | 2 |
# Copyright 2012 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import contextlib
import mock
from oslo.config import cfg
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib
from neutron.openstack.common import log
from neutron.plugins.common import constants as p_const
from neutron.plugins.openvswitch.agent import ovs_neutron_agent
from neutron.plugins.openvswitch.common import constants
from neutron.tests import base
# Useful global dummy variables.
NET_UUID = '3faeebfe-5d37-11e1-a64b-000c29d5f0a7'
LS_ID = 42
LV_ID = 42
LV_IDS = [42, 43]
VIF_ID = '404deaec-5d37-11e1-a64b-000c29d5f0a8'
VIF_MAC = '3c:09:24:1e:78:23'
OFPORT_NUM = 1
VIF_PORT = ovs_lib.VifPort('port', OFPORT_NUM,
VIF_ID, VIF_MAC, 'switch')
VIF_PORTS = {VIF_ID: VIF_PORT}
LVM = ovs_neutron_agent.LocalVLANMapping(LV_ID, 'gre', None, LS_ID, VIF_PORTS)
LVM_FLAT = ovs_neutron_agent.LocalVLANMapping(
LV_ID, 'flat', 'net1', LS_ID, VIF_PORTS)
LVM_VLAN = ovs_neutron_agent.LocalVLANMapping(
LV_ID, 'vlan', 'net1', LS_ID, VIF_PORTS)
TUN_OFPORTS = {p_const.TYPE_GRE: {'ip1': '11', 'ip2': '12'}}
BCAST_MAC = "01:00:00:00:00:00/01:00:00:00:00:00"
UCAST_MAC = "00:00:00:00:00:00/01:00:00:00:00:00"
class DummyPort:
def __init__(self, interface_id):
self.interface_id = interface_id
class DummyVlanBinding:
def __init__(self, network_id, vlan_id):
self.network_id = network_id
self.vlan_id = vlan_id
class TunnelTest(base.BaseTestCase):
def setUp(self):
super(TunnelTest, self).setUp()
cfg.CONF.set_default('firewall_driver',
'neutron.agent.firewall.NoopFirewallDriver',
group='SECURITYGROUP')
cfg.CONF.set_override('rpc_backend',
'neutron.openstack.common.rpc.impl_fake')
cfg.CONF.set_override('report_interval', 0, 'AGENT')
check_arp_responder_str = ('neutron.plugins.openvswitch.agent.'
'ovs_neutron_agent.OVSNeutronAgent.'
'_check_arp_responder_support')
self.mock_check_arp_resp = mock.patch(check_arp_responder_str).start()
self.mock_check_arp_resp.return_value = True
self.INT_BRIDGE = 'integration_bridge'
self.TUN_BRIDGE = 'tunnel_bridge'
self.MAP_TUN_BRIDGE = 'tun_br_map'
self.NET_MAPPING = {'net1': self.MAP_TUN_BRIDGE}
self.INT_OFPORT = 11111
self.TUN_OFPORT = 22222
self.MAP_TUN_OFPORT = 33333
self.VETH_MTU = None
self.inta = mock.Mock()
self.intb = mock.Mock()
self.ovs_bridges = {self.INT_BRIDGE: mock.Mock(),
self.TUN_BRIDGE: mock.Mock(),
self.MAP_TUN_BRIDGE: mock.Mock(),
}
self.mock_bridge = mock.patch.object(ovs_lib, 'OVSBridge').start()
self.mock_bridge.side_effect = (lambda br_name, root_helper:
self.ovs_bridges[br_name])
self.mock_bridge_expected = [
mock.call(self.INT_BRIDGE, 'sudo'),
mock.call(self.MAP_TUN_BRIDGE, 'sudo'),
mock.call(self.TUN_BRIDGE, 'sudo'),
]
self.mock_int_bridge = self.ovs_bridges[self.INT_BRIDGE]
self.mock_int_bridge.get_local_port_mac.return_value = '000000000001'
self.mock_int_bridge_expected = [
mock.call.set_secure_mode(),
mock.call.get_local_port_mac(),
mock.call.delete_port('patch-tun'),
mock.call.remove_all_flows(),
mock.call.add_flow(priority=1, actions='normal'),
mock.call.add_flow(priority=0, table=constants.CANARY_TABLE,
actions='drop')
]
self.mock_map_tun_bridge = self.ovs_bridges[self.MAP_TUN_BRIDGE]
self.mock_map_tun_bridge.br_name = self.MAP_TUN_BRIDGE
self.mock_map_tun_bridge.add_port.return_value = None
self.mock_map_tun_bridge_expected = [
mock.call.remove_all_flows(),
mock.call.add_flow(priority=1, actions='normal'),
mock.call.delete_port('phy-%s' % self.MAP_TUN_BRIDGE),
mock.call.add_port(self.intb),
]
self.mock_int_bridge.add_port.return_value = None
self.mock_int_bridge_expected += [
mock.call.delete_port('int-%s' % self.MAP_TUN_BRIDGE),
mock.call.add_port(self.inta)
]
self.inta_expected = [mock.call.link.set_up()]
self.intb_expected = [mock.call.link.set_up()]
self.mock_int_bridge_expected += [
mock.call.add_flow(priority=2, in_port=None, actions='drop')
]
self.mock_map_tun_bridge_expected += [
mock.call.add_flow(priority=2, in_port=None, actions='drop')
]
self.mock_tun_bridge = self.ovs_bridges[self.TUN_BRIDGE]
self.mock_tun_bridge_expected = [
mock.call.reset_bridge(),
mock.call.add_patch_port('patch-int', 'patch-tun'),
]
self.mock_int_bridge_expected += [
mock.call.add_patch_port('patch-tun', 'patch-int')
]
self.mock_int_bridge.add_patch_port.return_value = self.TUN_OFPORT
self.mock_tun_bridge.add_patch_port.return_value = self.INT_OFPORT
self.mock_tun_bridge_expected += [
mock.call.remove_all_flows(),
mock.call.add_flow(priority=1,
in_port=self.INT_OFPORT,
actions="resubmit(,%s)" %
constants.PATCH_LV_TO_TUN),
mock.call.add_flow(priority=0, actions="drop"),
mock.call.add_flow(priority=0, table=constants.PATCH_LV_TO_TUN,
dl_dst=UCAST_MAC,
actions="resubmit(,%s)" %
constants.UCAST_TO_TUN),
mock.call.add_flow(priority=0, table=constants.PATCH_LV_TO_TUN,
dl_dst=BCAST_MAC,
actions="resubmit(,%s)" %
constants.FLOOD_TO_TUN),
]
for tunnel_type in constants.TUNNEL_NETWORK_TYPES:
self.mock_tun_bridge_expected.append(
mock.call.add_flow(
table=constants.TUN_TABLE[tunnel_type],
priority=0,
actions="drop"))
learned_flow = ("table=%s,"
"priority=1,"
"hard_timeout=300,"
"NXM_OF_VLAN_TCI[0..11],"
"NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],"
"load:0->NXM_OF_VLAN_TCI[],"
"load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[],"
"output:NXM_OF_IN_PORT[]" %
constants.UCAST_TO_TUN)
self.mock_tun_bridge_expected += [
mock.call.add_flow(table=constants.LEARN_FROM_TUN,
priority=1,
actions="learn(%s),output:%s" %
(learned_flow, self.INT_OFPORT)),
mock.call.add_flow(table=constants.UCAST_TO_TUN,
priority=0,
actions="resubmit(,%s)" %
constants.FLOOD_TO_TUN),
mock.call.add_flow(table=constants.FLOOD_TO_TUN,
priority=0,
actions="drop")
]
self.device_exists = mock.patch.object(ip_lib, 'device_exists').start()
self.device_exists.return_value = True
self.device_exists_expected = [
mock.call(self.MAP_TUN_BRIDGE, 'sudo'),
mock.call('int-%s' % self.MAP_TUN_BRIDGE, 'sudo'),
]
self.ipdevice = mock.patch.object(ip_lib, 'IPDevice').start()
self.ipdevice_expected = [
mock.call('int-%s' % self.MAP_TUN_BRIDGE, 'sudo'),
mock.call().link.delete()
]
self.ipwrapper = mock.patch.object(ip_lib, 'IPWrapper').start()
add_veth = self.ipwrapper.return_value.add_veth
add_veth.return_value = [self.inta, self.intb]
self.ipwrapper_expected = [
mock.call('sudo'),
mock.call().add_veth('int-%s' % self.MAP_TUN_BRIDGE,
'phy-%s' % self.MAP_TUN_BRIDGE)
]
self.get_bridges = mock.patch.object(ovs_lib, 'get_bridges').start()
self.get_bridges.return_value = [self.INT_BRIDGE,
self.TUN_BRIDGE,
self.MAP_TUN_BRIDGE]
self.get_bridges_expected = [
mock.call('sudo')
]
self.execute = mock.patch('neutron.agent.linux.utils.execute').start()
self.execute_expected = [mock.call(['/sbin/udevadm', 'settle',
'--timeout=10'])]
def _verify_mock_call(self, mock_obj, expected):
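        # assert that every expected call happened, and that no extra,
        # unexpected calls were recorded (call counts must match exactly)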
mock_obj.assert_has_calls(expected)
self.assertEqual(len(mock_obj.mock_calls), len(expected))
def _verify_mock_calls(self):
self._verify_mock_call(self.mock_bridge, self.mock_bridge_expected)
self._verify_mock_call(self.mock_int_bridge,
self.mock_int_bridge_expected)
self._verify_mock_call(self.mock_map_tun_bridge,
self.mock_map_tun_bridge_expected)
self._verify_mock_call(self.mock_tun_bridge,
self.mock_tun_bridge_expected)
self._verify_mock_call(self.device_exists, self.device_exists_expected)
self._verify_mock_call(self.ipdevice, self.ipdevice_expected)
self._verify_mock_call(self.ipwrapper, self.ipwrapper_expected)
self._verify_mock_call(self.get_bridges, self.get_bridges_expected)
self._verify_mock_call(self.inta, self.inta_expected)
self._verify_mock_call(self.intb, self.intb_expected)
self._verify_mock_call(self.execute, self.execute_expected)
def test_construct(self):
ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
self._verify_mock_calls()
    # TODO(ethuleau): Initially, the local ARP responder is dependent on the
    # ML2 l2 population mechanism driver.
    # The next two tests use the l2_pop flag to test the ARP responder.
def test_construct_with_arp_responder(self):
ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU, l2_population=True,
arp_responder=True)
self.mock_tun_bridge_expected.insert(
5, mock.call.add_flow(table=constants.PATCH_LV_TO_TUN,
priority=1,
proto="arp",
dl_dst="ff:ff:ff:ff:ff:ff",
actions="resubmit(,%s)" %
constants.ARP_RESPONDER)
)
self.mock_tun_bridge_expected.insert(
12, mock.call.add_flow(table=constants.ARP_RESPONDER,
priority=0,
actions="resubmit(,%s)" %
constants.FLOOD_TO_TUN)
)
self._verify_mock_calls()
def test_construct_without_arp_responder(self):
ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU, l2_population=False,
arp_responder=True)
self._verify_mock_calls()
def test_construct_vxlan(self):
ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1',
self.NET_MAPPING,
'sudo', 2, ['vxlan'],
self.VETH_MTU)
self._verify_mock_calls()
def test_provision_local_vlan(self):
ofports = ','.join(TUN_OFPORTS[p_const.TYPE_GRE].values())
self.mock_tun_bridge_expected += [
mock.call.mod_flow(table=constants.FLOOD_TO_TUN,
dl_vlan=LV_ID,
actions="strip_vlan,"
"set_tunnel:%s,output:%s" %
(LS_ID, ofports)),
mock.call.add_flow(table=constants.TUN_TABLE['gre'],
priority=1,
tun_id=LS_ID,
actions="mod_vlan_vid:%s,resubmit(,%s)" %
(LV_ID, constants.LEARN_FROM_TUN)),
]
a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
a.available_local_vlans = set([LV_ID])
a.tun_br_ofports = TUN_OFPORTS
a.provision_local_vlan(NET_UUID, p_const.TYPE_GRE, None, LS_ID)
self._verify_mock_calls()
def test_provision_local_vlan_flat(self):
action_string = 'strip_vlan,normal'
self.mock_map_tun_bridge_expected.append(
mock.call.add_flow(priority=4, in_port=self.MAP_TUN_OFPORT,
dl_vlan=LV_ID, actions=action_string))
action_string = 'mod_vlan_vid:%s,normal' % LV_ID
self.mock_int_bridge_expected.append(
mock.call.add_flow(priority=3, in_port=self.INT_OFPORT,
dl_vlan=65535, actions=action_string))
a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
a.available_local_vlans = set([LV_ID])
a.phys_brs['net1'] = self.mock_map_tun_bridge
a.phys_ofports['net1'] = self.MAP_TUN_OFPORT
a.int_ofports['net1'] = self.INT_OFPORT
a.provision_local_vlan(NET_UUID, p_const.TYPE_FLAT, 'net1', LS_ID)
self._verify_mock_calls()
def test_provision_local_vlan_flat_fail(self):
a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
a.provision_local_vlan(NET_UUID, p_const.TYPE_FLAT, 'net2', LS_ID)
self._verify_mock_calls()
def test_provision_local_vlan_vlan(self):
action_string = 'mod_vlan_vid:%s,normal' % LS_ID
self.mock_map_tun_bridge_expected.append(
mock.call.add_flow(priority=4, in_port=self.MAP_TUN_OFPORT,
dl_vlan=LV_ID, actions=action_string))
action_string = 'mod_vlan_vid:%s,normal' % LS_ID
self.mock_int_bridge_expected.append(
mock.call.add_flow(priority=3, in_port=self.INT_OFPORT,
dl_vlan=LV_ID, actions=action_string))
a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
a.available_local_vlans = set([LV_ID])
a.phys_brs['net1'] = self.mock_map_tun_bridge
a.phys_ofports['net1'] = self.MAP_TUN_OFPORT
a.int_ofports['net1'] = self.INT_OFPORT
a.provision_local_vlan(NET_UUID, p_const.TYPE_VLAN, 'net1', LS_ID)
self._verify_mock_calls()
def test_provision_local_vlan_vlan_fail(self):
a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
a.provision_local_vlan(NET_UUID, p_const.TYPE_VLAN, 'net2', LS_ID)
self._verify_mock_calls()
def test_reclaim_local_vlan(self):
self.mock_tun_bridge_expected += [
mock.call.delete_flows(
table=constants.TUN_TABLE['gre'], tun_id=LS_ID),
mock.call.delete_flows(dl_vlan=LVM.vlan)
]
a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
a.available_local_vlans = set()
a.local_vlan_map[NET_UUID] = LVM
a.reclaim_local_vlan(NET_UUID)
self.assertIn(LVM.vlan, a.available_local_vlans)
self._verify_mock_calls()
def test_reclaim_local_vlan_flat(self):
self.mock_map_tun_bridge_expected.append(
mock.call.delete_flows(
in_port=self.MAP_TUN_OFPORT, dl_vlan=LVM_FLAT.vlan))
self.mock_int_bridge_expected.append(
mock.call.delete_flows(
dl_vlan=65535, in_port=self.INT_OFPORT))
a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
a.phys_brs['net1'] = self.mock_map_tun_bridge
a.phys_ofports['net1'] = self.MAP_TUN_OFPORT
a.int_ofports['net1'] = self.INT_OFPORT
a.available_local_vlans = set()
a.local_vlan_map[NET_UUID] = LVM_FLAT
a.reclaim_local_vlan(NET_UUID)
self.assertIn(LVM_FLAT.vlan, a.available_local_vlans)
self._verify_mock_calls()
def test_reclaim_local_vlan_vlan(self):
self.mock_map_tun_bridge_expected.append(
mock.call.delete_flows(
in_port=self.MAP_TUN_OFPORT, dl_vlan=LVM_VLAN.vlan))
self.mock_int_bridge_expected.append(
mock.call.delete_flows(
dl_vlan=LV_ID, in_port=self.INT_OFPORT))
a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
a.phys_brs['net1'] = self.mock_map_tun_bridge
a.phys_ofports['net1'] = self.MAP_TUN_OFPORT
a.int_ofports['net1'] = self.INT_OFPORT
a.available_local_vlans = set()
a.local_vlan_map[NET_UUID] = LVM_VLAN
a.reclaim_local_vlan(NET_UUID)
self.assertIn(LVM_VLAN.vlan, a.available_local_vlans)
self._verify_mock_calls()
def test_port_bound(self):
self.mock_int_bridge_expected += [
mock.call.db_get_val('Port', VIF_PORT.port_name, 'tag'),
mock.call.set_db_attribute('Port', VIF_PORT.port_name,
'tag', str(LVM.vlan)),
mock.call.delete_flows(in_port=VIF_PORT.ofport)
]
a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
a.local_vlan_map[NET_UUID] = LVM
a.port_bound(VIF_PORT, NET_UUID, 'gre', None, LS_ID, False)
self._verify_mock_calls()
def test_port_unbound(self):
with mock.patch.object(ovs_neutron_agent.OVSNeutronAgent,
'reclaim_local_vlan') as reclaim_local_vlan:
a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
a.local_vlan_map[NET_UUID] = LVM
a.port_unbound(VIF_ID, NET_UUID)
reclaim_local_vlan.assert_called_once_with(NET_UUID)
self._verify_mock_calls()
def test_port_dead(self):
self.mock_int_bridge_expected += [
mock.call.db_get_val('Port', VIF_PORT.port_name, 'tag'),
mock.call.set_db_attribute(
'Port', VIF_PORT.port_name,
'tag', ovs_neutron_agent.DEAD_VLAN_TAG),
mock.call.add_flow(priority=2, in_port=VIF_PORT.ofport,
actions='drop')
]
a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
a.available_local_vlans = set([LV_ID])
a.local_vlan_map[NET_UUID] = LVM
a.port_dead(VIF_PORT)
self._verify_mock_calls()
def test_tunnel_update(self):
tunnel_port = '9999'
self.mock_tun_bridge.add_tunnel_port.return_value = tunnel_port
self.mock_tun_bridge_expected += [
mock.call.add_tunnel_port('gre-1', '10.0.10.1', '10.0.0.1',
'gre', 4789, True),
mock.call.add_flow(priority=1, in_port=tunnel_port,
actions='resubmit(,2)')
]
a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
a.tunnel_update(
mock.sentinel.ctx, tunnel_id='1', tunnel_ip='10.0.10.1',
tunnel_type=p_const.TYPE_GRE)
self._verify_mock_calls()
def test_tunnel_update_self(self):
a = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1', self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
a.tunnel_update(
mock.sentinel.ctx, tunnel_id='1', tunnel_ip='10.0.0.1')
self._verify_mock_calls()
def test_daemon_loop(self):
reply2 = {'current': set(['tap0']),
'added': set(['tap2']),
'removed': set([])}
reply3 = {'current': set(['tap2']),
'added': set([]),
'removed': set(['tap0'])}
self.mock_int_bridge_expected += [
mock.call.dump_flows_for_table(constants.CANARY_TABLE),
mock.call.dump_flows_for_table(constants.CANARY_TABLE)
]
with contextlib.nested(
mock.patch.object(log.ContextAdapter, 'exception'),
mock.patch.object(ovs_neutron_agent.OVSNeutronAgent,
'scan_ports'),
mock.patch.object(ovs_neutron_agent.OVSNeutronAgent,
'process_network_ports')
) as (log_exception, scan_ports, process_network_ports):
log_exception.side_effect = Exception(
'Fake exception to get out of the loop')
scan_ports.side_effect = [reply2, reply3]
process_network_ports.side_effect = [
False, Exception('Fake exception to get out of the loop')]
q_agent = ovs_neutron_agent.OVSNeutronAgent(self.INT_BRIDGE,
self.TUN_BRIDGE,
'10.0.0.1',
self.NET_MAPPING,
'sudo', 2, ['gre'],
self.VETH_MTU)
            # Hack to test the loop: start the method and expect it to raise
            # after the 2nd iteration. If something goes wrong, the
            # assert_has_calls below will catch it.
try:
q_agent.daemon_loop()
except Exception:
pass
# FIXME(salv-orlando): There should not be assertions on log messages
log_exception.assert_called_once_with(
"Error while processing VIF ports")
scan_ports.assert_has_calls([
mock.call(set(), set()),
mock.call(set(['tap0']), set())
])
process_network_ports.assert_has_calls([
mock.call({'current': set(['tap0']),
'removed': set([]),
'added': set(['tap2'])}, False),
mock.call({'current': set(['tap2']),
'removed': set(['tap0']),
'added': set([])}, False)
])
self._verify_mock_calls()
class TunnelTestWithMTU(TunnelTest):
def setUp(self):
super(TunnelTestWithMTU, self).setUp()
self.VETH_MTU = 1500
self.inta_expected.append(mock.call.link.set_mtu(self.VETH_MTU))
self.intb_expected.append(mock.call.link.set_mtu(self.VETH_MTU))
| plotly/plotly.py | refs/heads/master | packages/python/plotly/plotly/validators/icicle/marker/colorbar/__init__.py | 36 |
import sys
if sys.version_info < (3, 7):
from ._ypad import YpadValidator
from ._yanchor import YanchorValidator
from ._y import YValidator
from ._xpad import XpadValidator
from ._xanchor import XanchorValidator
from ._x import XValidator
from ._title import TitleValidator
from ._tickwidth import TickwidthValidator
from ._tickvalssrc import TickvalssrcValidator
from ._tickvals import TickvalsValidator
from ._ticktextsrc import TicktextsrcValidator
from ._ticktext import TicktextValidator
from ._ticksuffix import TicksuffixValidator
from ._ticks import TicksValidator
from ._tickprefix import TickprefixValidator
from ._tickmode import TickmodeValidator
from ._ticklen import TicklenValidator
from ._ticklabelposition import TicklabelpositionValidator
from ._ticklabeloverflow import TicklabeloverflowValidator
from ._tickformatstopdefaults import TickformatstopdefaultsValidator
from ._tickformatstops import TickformatstopsValidator
from ._tickformat import TickformatValidator
from ._tickfont import TickfontValidator
from ._tickcolor import TickcolorValidator
from ._tickangle import TickangleValidator
from ._tick0 import Tick0Validator
from ._thicknessmode import ThicknessmodeValidator
from ._thickness import ThicknessValidator
from ._showticksuffix import ShowticksuffixValidator
from ._showtickprefix import ShowtickprefixValidator
from ._showticklabels import ShowticklabelsValidator
from ._showexponent import ShowexponentValidator
from ._separatethousands import SeparatethousandsValidator
from ._outlinewidth import OutlinewidthValidator
from ._outlinecolor import OutlinecolorValidator
from ._nticks import NticksValidator
from ._minexponent import MinexponentValidator
from ._lenmode import LenmodeValidator
from ._len import LenValidator
from ._exponentformat import ExponentformatValidator
from ._dtick import DtickValidator
from ._borderwidth import BorderwidthValidator
from ._bordercolor import BordercolorValidator
from ._bgcolor import BgcolorValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._ypad.YpadValidator",
"._yanchor.YanchorValidator",
"._y.YValidator",
"._xpad.XpadValidator",
"._xanchor.XanchorValidator",
"._x.XValidator",
"._title.TitleValidator",
"._tickwidth.TickwidthValidator",
"._tickvalssrc.TickvalssrcValidator",
"._tickvals.TickvalsValidator",
"._ticktextsrc.TicktextsrcValidator",
"._ticktext.TicktextValidator",
"._ticksuffix.TicksuffixValidator",
"._ticks.TicksValidator",
"._tickprefix.TickprefixValidator",
"._tickmode.TickmodeValidator",
"._ticklen.TicklenValidator",
"._ticklabelposition.TicklabelpositionValidator",
"._ticklabeloverflow.TicklabeloverflowValidator",
"._tickformatstopdefaults.TickformatstopdefaultsValidator",
"._tickformatstops.TickformatstopsValidator",
"._tickformat.TickformatValidator",
"._tickfont.TickfontValidator",
"._tickcolor.TickcolorValidator",
"._tickangle.TickangleValidator",
"._tick0.Tick0Validator",
"._thicknessmode.ThicknessmodeValidator",
"._thickness.ThicknessValidator",
"._showticksuffix.ShowticksuffixValidator",
"._showtickprefix.ShowtickprefixValidator",
"._showticklabels.ShowticklabelsValidator",
"._showexponent.ShowexponentValidator",
"._separatethousands.SeparatethousandsValidator",
"._outlinewidth.OutlinewidthValidator",
"._outlinecolor.OutlinecolorValidator",
"._nticks.NticksValidator",
"._minexponent.MinexponentValidator",
"._lenmode.LenmodeValidator",
"._len.LenValidator",
"._exponentformat.ExponentformatValidator",
"._dtick.DtickValidator",
"._borderwidth.BorderwidthValidator",
"._bordercolor.BordercolorValidator",
"._bgcolor.BgcolorValidator",
],
)
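# Note: on Python >= 3.7 relative_import builds module-level __getattr__ and
# __dir__ hooks (PEP 562), so each validator listed above is imported lazily on
# first attribute access instead of eagerly at package import time.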
|
diogommartins/ryu
|
refs/heads/master
|
ryu/tests/unit/packet/test_cfm.py
|
23
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import logging
import inspect
import six
import struct
from nose.tools import eq_
from ryu.lib import addrconv
from ryu.lib.packet import cfm
LOG = logging.getLogger(__name__)
class Test_cfm(unittest.TestCase):
def setUp(self):
self.message = cfm.cc_message()
self.ins = cfm.cfm(self.message)
data = bytearray()
prev = None
self.buf = self.ins.serialize(data, prev)
def setUp_cc_message(self):
self.cc_message_md_lv = 1
self.cc_message_version = 1
self.cc_message_rdi = 1
self.cc_message_interval = 1
self.cc_message_seq_num = 123
self.cc_message_mep_id = 4
self.cc_message_md_name_format = 4
self.cc_message_md_name_length = 0
self.cc_message_md_name = b"hoge"
self.cc_message_short_ma_name_format = 2
self.cc_message_short_ma_name_length = 0
self.cc_message_short_ma_name = b"pakeratta"
self.cc_message_md_name_txfcf = 11
self.cc_message_md_name_rxfcb = 22
self.cc_message_md_name_txfcb = 33
self.cc_message_tlvs = [
cfm.sender_id_tlv(),
cfm.port_status_tlv(),
cfm.data_tlv(),
cfm.interface_status_tlv(),
cfm.reply_ingress_tlv(),
cfm.reply_egress_tlv(),
cfm.ltm_egress_identifier_tlv(),
cfm.ltr_egress_identifier_tlv(),
cfm.organization_specific_tlv(),
]
self.message = cfm.cc_message(
self.cc_message_md_lv,
self.cc_message_version,
self.cc_message_rdi,
self.cc_message_interval,
self.cc_message_seq_num,
self.cc_message_mep_id,
self.cc_message_md_name_format,
self.cc_message_md_name_length,
self.cc_message_md_name,
self.cc_message_short_ma_name_format,
self.cc_message_short_ma_name_length,
self.cc_message_short_ma_name,
self.cc_message_tlvs
)
self.ins = cfm.cfm(self.message)
data = bytearray()
prev = None
self.buf = self.ins.serialize(data, prev)
def setUp_loopback_message(self):
self.loopback_message_md_lv = 1
self.loopback_message_version = 1
self.loopback_message_transaction_id = 12345
self.loopback_message_tlvs = [
cfm.sender_id_tlv(),
cfm.port_status_tlv(),
cfm.data_tlv(),
cfm.interface_status_tlv(),
cfm.reply_ingress_tlv(),
cfm.reply_egress_tlv(),
cfm.ltm_egress_identifier_tlv(),
cfm.ltr_egress_identifier_tlv(),
cfm.organization_specific_tlv(),
]
self.message = cfm.loopback_message(
self.loopback_message_md_lv,
self.loopback_message_version,
self.loopback_message_transaction_id,
self.loopback_message_tlvs)
self.ins = cfm.cfm(self.message)
data = bytearray()
prev = None
self.buf = self.ins.serialize(data, prev)
def setUp_loopback_reply(self):
self.loopback_reply_md_lv = 1
self.loopback_reply_version = 1
self.loopback_reply_transaction_id = 12345
self.loopback_reply_tlvs = [
cfm.sender_id_tlv(),
cfm.port_status_tlv(),
cfm.data_tlv(),
cfm.interface_status_tlv(),
cfm.reply_ingress_tlv(),
cfm.reply_egress_tlv(),
cfm.ltm_egress_identifier_tlv(),
cfm.ltr_egress_identifier_tlv(),
cfm.organization_specific_tlv(),
]
self.message = cfm.loopback_reply(
self.loopback_reply_md_lv,
self.loopback_reply_version,
self.loopback_reply_transaction_id,
self.loopback_reply_tlvs)
self.ins = cfm.cfm(self.message)
data = bytearray()
prev = None
self.buf = self.ins.serialize(data, prev)
def setUp_link_trace_message(self):
self.link_trace_message_md_lv = 1
self.link_trace_message_version = 1
self.link_trace_message_use_fdb_only = 1
self.link_trace_message_transaction_id = 12345
self.link_trace_message_ttl = 123
self.link_trace_message_ltm_orig_addr = '11:22:33:44:55:66'
self.link_trace_message_ltm_targ_addr = '77:88:99:aa:cc:dd'
self.link_trace_message_tlvs = [
cfm.sender_id_tlv(),
cfm.port_status_tlv(),
cfm.data_tlv(),
cfm.interface_status_tlv(),
cfm.reply_ingress_tlv(),
cfm.reply_egress_tlv(),
cfm.ltm_egress_identifier_tlv(),
cfm.ltr_egress_identifier_tlv(),
cfm.organization_specific_tlv(),
]
self.message = cfm.link_trace_message(
self.link_trace_message_md_lv,
self.link_trace_message_version,
self.link_trace_message_use_fdb_only,
self.link_trace_message_transaction_id,
self.link_trace_message_ttl,
self.link_trace_message_ltm_orig_addr,
self.link_trace_message_ltm_targ_addr,
self.link_trace_message_tlvs)
self.ins = cfm.cfm(self.message)
data = bytearray()
prev = None
self.buf = self.ins.serialize(data, prev)
def setUp_link_trace_reply(self):
self.link_trace_reply_md_lv = 1
self.link_trace_reply_version = 1
self.link_trace_reply_use_fdb_only = 1
self.link_trace_reply_fwd_yes = 0
self.link_trace_reply_terminal_mep = 1
self.link_trace_reply_transaction_id = 5432
self.link_trace_reply_ttl = 123
self.link_trace_reply_relay_action = 3
self.link_trace_reply_tlvs = [
cfm.sender_id_tlv(),
cfm.port_status_tlv(),
cfm.data_tlv(),
cfm.interface_status_tlv(),
cfm.reply_ingress_tlv(),
cfm.reply_egress_tlv(),
cfm.ltm_egress_identifier_tlv(),
cfm.ltr_egress_identifier_tlv(),
cfm.organization_specific_tlv(),
]
self.message = cfm.link_trace_reply(
self.link_trace_reply_md_lv,
self.link_trace_reply_version,
self.link_trace_reply_use_fdb_only,
self.link_trace_reply_fwd_yes,
self.link_trace_reply_terminal_mep,
self.link_trace_reply_transaction_id,
self.link_trace_reply_ttl,
self.link_trace_reply_relay_action,
self.link_trace_reply_tlvs)
self.ins = cfm.cfm(self.message)
data = bytearray()
prev = None
self.buf = self.ins.serialize(data, prev)
def tearDown(self):
pass
def test_init(self):
eq_(str(self.message), str(self.ins.op))
def test_init_cc_message(self):
self.setUp_cc_message()
self.test_init()
def test_init_loopback_message(self):
self.setUp_loopback_message()
self.test_init()
def test_init_loopback_reply(self):
self.setUp_loopback_reply()
self.test_init()
def test_init_link_trace_message(self):
self.setUp_link_trace_message()
self.test_init()
def test_init_link_trace_reply(self):
self.setUp_link_trace_reply()
self.test_init()
def test_parser(self):
_res = self.ins.parser(six.binary_type(self.buf))
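        # ryu parsers may return either a bare instance or an
        # (instance, next_protocol_cls, rest) tuple; unwrap the instance.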
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(str(self.message), str(res.op))
def test_parser_with_cc_message(self):
self.setUp_cc_message()
self.test_parser()
def test_parser_with_loopback_message(self):
self.setUp_loopback_message()
self.test_parser()
def test_parser_with_loopback_reply(self):
self.setUp_loopback_reply()
self.test_parser()
def test_parser_with_link_trace_message(self):
self.setUp_link_trace_message()
self.test_parser()
def test_parser_with_link_trace_reply(self):
self.setUp_link_trace_reply()
self.test_parser()
def test_serialize(self):
pass
def test_serialize_with_cc_message(self):
self.setUp_cc_message()
self.test_serialize()
data = bytearray()
prev = None
buf = self.ins.serialize(data, prev)
cc_message = cfm.cc_message.parser(six.binary_type(buf))
eq_(repr(self.message), repr(cc_message))
def test_serialize_with_loopback_message(self):
self.setUp_loopback_message()
self.test_serialize()
data = bytearray()
prev = None
buf = self.ins.serialize(data, prev)
loopback_message = cfm.loopback_message.parser(six.binary_type(buf))
eq_(repr(self.message), repr(loopback_message))
def test_serialize_with_loopback_reply(self):
self.setUp_loopback_reply()
self.test_serialize()
data = bytearray()
prev = None
buf = self.ins.serialize(data, prev)
loopback_reply = cfm.loopback_reply.parser(six.binary_type(buf))
eq_(repr(self.message), repr(loopback_reply))
def test_serialize_with_link_trace_message(self):
self.setUp_link_trace_message()
self.test_serialize()
data = bytearray()
prev = None
buf = self.ins.serialize(data, prev)
link_trace_message = cfm.link_trace_message.parser(six.binary_type(buf))
eq_(repr(self.message), repr(link_trace_message))
def test_serialize_with_link_trace_reply(self):
self.setUp_link_trace_reply()
self.test_serialize()
data = bytearray()
prev = None
buf = self.ins.serialize(data, prev)
link_trace_reply = cfm.link_trace_reply.parser(six.binary_type(buf))
eq_(repr(self.message), repr(link_trace_reply))
def test_to_string(self):
cfm_values = {'op': self.message}
_cfm_str = ','.join(['%s=%s' % (k, cfm_values[k])
for k, v in inspect.getmembers(self.ins)
if k in cfm_values])
cfm_str = '%s(%s)' % (cfm.cfm.__name__, _cfm_str)
eq_(str(self.ins), cfm_str)
eq_(repr(self.ins), cfm_str)
def test_to_string_cc_message(self):
self.setUp_cc_message()
self.test_to_string()
def test_to_string_loopback_message(self):
self.setUp_loopback_message()
self.test_to_string()
def test_to_string_loopback_reply(self):
self.setUp_loopback_reply()
self.test_to_string()
def test_to_string_link_trace_message(self):
self.setUp_link_trace_message()
self.test_to_string()
def test_to_string_link_trace_reply(self):
self.setUp_link_trace_reply()
self.test_to_string()
def test_len(self):
pass
def test_len_cc_message(self):
self.setUp_cc_message()
eq_(len(self.ins), 0 + len(self.message))
def test_len_loopback_message(self):
self.setUp_loopback_message()
eq_(len(self.ins), 0 + len(self.message))
def test_len_loopback_reply(self):
self.setUp_loopback_reply()
eq_(len(self.ins), 0 + len(self.message))
def test_len_link_trace_message(self):
self.setUp_link_trace_message()
eq_(len(self.ins), 0 + len(self.message))
def test_len_link_trace_reply(self):
self.setUp_link_trace_reply()
eq_(len(self.ins), 0 + len(self.message))
def test_default_args(self):
pass
def test_json(self):
jsondict = self.ins.to_jsondict()
ins = cfm.cfm.from_jsondict(jsondict['cfm'])
eq_(str(self.ins), str(ins))
def test_json_with_cc_message(self):
self.setUp_cc_message()
self.test_json()
def test_json_with_loopback_message(self):
self.setUp_loopback_message()
self.test_json()
def test_json_with_loopback_reply(self):
self.setUp_loopback_reply()
self.test_json()
def test_json_with_link_trace_message(self):
self.setUp_link_trace_message()
self.test_json()
def test_json_with_link_trace_reply(self):
self.setUp_link_trace_reply()
self.test_json()
class Test_cc_message(unittest.TestCase):
def setUp(self):
self.md_lv = 1
self.version = 1
self.opcode = cfm.CFM_CC_MESSAGE
self.rdi = 1
self.interval = 5
self.first_tlv_offset = cfm.cc_message._TLV_OFFSET
self.seq_num = 2
self.mep_id = 2
self.md_name_format = cfm.cc_message._MD_FMT_CHARACTER_STRING
self.md_name_length = 3
self.md_name = b"foo"
self.short_ma_name_format = 2
self.short_ma_name_length = 8
self.short_ma_name = b"hogehoge"
self.tlvs = [
]
self.end_tlv = 0
self.ins = cfm.cc_message(
self.md_lv,
self.version,
self.rdi,
self.interval,
self.seq_num,
self.mep_id,
self.md_name_format,
self.md_name_length,
self.md_name,
self.short_ma_name_format,
self.short_ma_name_length,
self.short_ma_name,
self.tlvs
)
self.form = '!4BIH2B3s2B8s33x12x4xB'
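        # '!4BIH2B3s2B8s33x12x4xB': 4B common header (md_lv<<5|version,
        # opcode, rdi<<7|interval, first_tlv_offset), I = seq_num, H = mep_id,
        # 2B/3s = MD name format, length and value, 2B/8s = short MA name
        # format, length and value, 49 pad octets (33x12x4x) filling the
        # 70-octet first-TLV offset, and a trailing B for the end TLV.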
self.buf = struct.pack(
self.form,
(self.md_lv << 5) | self.version,
self.opcode,
(self.rdi << 7) | self.interval,
self.first_tlv_offset,
self.seq_num,
self.mep_id,
self.md_name_format,
self.md_name_length,
self.md_name,
self.short_ma_name_format,
self.short_ma_name_length,
self.short_ma_name,
self.end_tlv
)
def tearDown(self):
pass
def test_init(self):
eq_(self.md_lv, self.ins.md_lv)
eq_(self.version, self.ins.version)
eq_(self.rdi, self.ins.rdi)
eq_(self.interval, self.ins.interval)
eq_(self.seq_num, self.ins.seq_num)
eq_(self.mep_id, self.ins.mep_id)
eq_(self.md_name_format, self.ins.md_name_format)
eq_(self.md_name_length, self.ins.md_name_length)
eq_(self.md_name, self.ins.md_name)
eq_(self.short_ma_name_format, self.ins.short_ma_name_format)
eq_(self.short_ma_name_length, self.ins.short_ma_name_length)
eq_(self.short_ma_name, self.ins.short_ma_name)
eq_(self.tlvs, self.ins.tlvs)
def test_parser(self):
_res = cfm.cc_message.parser(self.buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(self.md_lv, res.md_lv)
eq_(self.version, res.version)
eq_(self.rdi, res.rdi)
eq_(self.interval, res.interval)
eq_(self.seq_num, res.seq_num)
eq_(self.mep_id, res.mep_id)
eq_(self.md_name_format, res.md_name_format)
eq_(self.md_name_length, res.md_name_length)
eq_(self.md_name, res.md_name)
eq_(self.short_ma_name_format, res.short_ma_name_format)
eq_(self.short_ma_name_length, res.short_ma_name_length)
eq_(self.short_ma_name, res.short_ma_name)
eq_(self.tlvs, res.tlvs)
def test_parser_with_no_maintenance_domain_name_present(self):
form = '!4BIH3B8s37x12x4xB'
buf = struct.pack(
form,
(self.md_lv << 5) | self.version,
self.opcode,
(self.rdi << 7) | self.interval,
self.first_tlv_offset,
self.seq_num,
self.mep_id,
cfm.cc_message._MD_FMT_NO_MD_NAME_PRESENT,
self.short_ma_name_format,
self.short_ma_name_length,
self.short_ma_name,
self.end_tlv
)
_res = cfm.cc_message.parser(buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(self.md_lv, res.md_lv)
eq_(self.version, res.version)
eq_(self.rdi, res.rdi)
eq_(self.interval, res.interval)
eq_(self.seq_num, res.seq_num)
eq_(self.mep_id, res.mep_id)
eq_(cfm.cc_message._MD_FMT_NO_MD_NAME_PRESENT, res.md_name_format)
eq_(self.short_ma_name_format, res.short_ma_name_format)
eq_(self.short_ma_name_length, res.short_ma_name_length)
eq_(self.short_ma_name, res.short_ma_name)
eq_(self.tlvs, res.tlvs)
def test_serialize(self):
buf = self.ins.serialize()
res = struct.unpack_from(self.form, six.binary_type(buf))
eq_(self.md_lv, res[0] >> 5)
eq_(self.version, res[0] & 0x1f)
eq_(self.opcode, res[1])
eq_(self.rdi, res[2] >> 7)
eq_(self.interval, res[2] & 0x07)
eq_(self.first_tlv_offset, res[3])
eq_(self.seq_num, res[4])
eq_(self.mep_id, res[5])
eq_(self.md_name_format, res[6])
eq_(self.md_name_length, res[7])
eq_(self.md_name, res[8])
eq_(self.short_ma_name_format, res[9])
eq_(self.short_ma_name_length, res[10])
eq_(self.short_ma_name, res[11])
eq_(self.end_tlv, res[12])
def test_serialize_with_md_name_length_zero(self):
ins = cfm.cc_message(
self.md_lv,
self.version,
self.rdi,
self.interval,
self.seq_num,
self.mep_id,
self.md_name_format,
0,
self.md_name,
self.short_ma_name_format,
0,
self.short_ma_name,
self.tlvs
)
buf = ins.serialize()
res = struct.unpack_from(self.form, six.binary_type(buf))
eq_(self.md_lv, res[0] >> 5)
eq_(self.version, res[0] & 0x1f)
eq_(self.opcode, res[1])
eq_(self.rdi, res[2] >> 7)
eq_(self.interval, res[2] & 0x07)
eq_(self.first_tlv_offset, res[3])
eq_(self.seq_num, res[4])
eq_(self.mep_id, res[5])
eq_(self.md_name_format, res[6])
eq_(self.md_name_length, res[7])
eq_(self.md_name, res[8])
eq_(self.short_ma_name_format, res[9])
eq_(self.short_ma_name_length, res[10])
eq_(self.short_ma_name, res[11])
eq_(self.end_tlv, res[12])
def test_serialize_with_no_maintenance_domain_name_present(self):
form = '!4BIH3B8s37x12x4xB'
ins = cfm.cc_message(
self.md_lv,
self.version,
self.rdi,
self.interval,
self.seq_num,
self.mep_id,
cfm.cc_message._MD_FMT_NO_MD_NAME_PRESENT,
0,
self.md_name,
self.short_ma_name_format,
0,
self.short_ma_name,
self.tlvs
)
buf = ins.serialize()
res = struct.unpack_from(form, six.binary_type(buf))
eq_(self.md_lv, res[0] >> 5)
eq_(self.version, res[0] & 0x1f)
eq_(self.opcode, res[1])
eq_(self.rdi, res[2] >> 7)
eq_(self.interval, res[2] & 0x07)
eq_(self.first_tlv_offset, res[3])
eq_(self.seq_num, res[4])
eq_(self.mep_id, res[5])
eq_(cfm.cc_message._MD_FMT_NO_MD_NAME_PRESENT, res[6])
eq_(self.short_ma_name_format, res[7])
eq_(self.short_ma_name_length, res[8])
eq_(self.short_ma_name, res[9])
eq_(self.end_tlv, res[10])
def test_len(self):
        # 75 octets (when no TLVs are present)
eq_(75, len(self.ins))
def test_default_args(self):
ins = cfm.cc_message()
buf = ins.serialize()
res = struct.unpack_from(cfm.cc_message._PACK_STR, six.binary_type(buf))
eq_(res[0] >> 5, 0)
eq_(res[0] & 0x1f, 0)
eq_(res[1], 1)
eq_(res[2] >> 7, 0)
eq_(res[2] & 0x07, 4)
eq_(res[3], 70)
eq_(res[4], 0)
eq_(res[5], 1)
eq_(res[6], 4)
class Test_loopback_message(unittest.TestCase):
def setUp(self):
self.md_lv = 1
self.version = 1
self.opcode = cfm.CFM_LOOPBACK_MESSAGE
self.flags = 0
self.first_tlv_offset = cfm.loopback_message._TLV_OFFSET
self.transaction_id = 12345
self.tlvs = [
]
self.end_tlv = 0
self.ins = cfm.loopback_message(
self.md_lv,
self.version,
self.transaction_id,
self.tlvs,
)
self.form = '!4BIB'
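        # '!4BIB': 4B common header (md_lv<<5|version, opcode, flags,
        # first_tlv_offset), I = transaction_id, B = end TLV -- 9 octets.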
self.buf = struct.pack(
self.form,
(self.md_lv << 5) | self.version,
self.opcode,
self.flags,
self.first_tlv_offset,
self.transaction_id,
self.end_tlv
)
def tearDown(self):
pass
def test_init(self):
eq_(self.md_lv, self.ins.md_lv)
eq_(self.version, self.ins.version)
eq_(self.transaction_id, self.ins.transaction_id)
eq_(self.tlvs, self.ins.tlvs)
def test_parser(self):
_res = cfm.loopback_message.parser(self.buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(self.md_lv, res.md_lv)
eq_(self.version, res.version)
eq_(self.transaction_id, res.transaction_id)
eq_(self.tlvs, res.tlvs)
def test_serialize(self):
buf = self.ins.serialize()
res = struct.unpack_from(self.form, six.binary_type(buf))
eq_(self.md_lv, res[0] >> 5)
eq_(self.version, res[0] & 0x1f)
eq_(self.opcode, res[1])
eq_(self.flags, res[2])
eq_(self.first_tlv_offset, res[3])
eq_(self.transaction_id, res[4])
eq_(self.end_tlv, res[5])
def test_len(self):
        # 9 octets (when no TLVs are present)
eq_(9, len(self.ins))
def test_default_args(self):
ins = cfm.loopback_message()
buf = ins.serialize()
res = struct.unpack_from(cfm.loopback_message._PACK_STR,
six.binary_type(buf))
eq_(res[0] >> 5, 0)
eq_(res[0] & 0x1f, 0)
eq_(res[1], 3)
eq_(res[2], 0)
eq_(res[3], 4)
eq_(res[4], 0)
class Test_loopback_reply(unittest.TestCase):
def setUp(self):
self.md_lv = 1
self.version = 1
self.opcode = cfm.CFM_LOOPBACK_REPLY
self.flags = 0
self.first_tlv_offset = cfm.loopback_reply._TLV_OFFSET
self.transaction_id = 12345
self.tlvs = [
]
self.end_tlv = 0
self.ins = cfm.loopback_reply(
self.md_lv,
self.version,
self.transaction_id,
self.tlvs,
)
self.form = '!4BIB'
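        # Wire layout is identical to loopback_message ('!4BIB'); only the
        # opcode (CFM_LOOPBACK_REPLY) differs.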
self.buf = struct.pack(
self.form,
(self.md_lv << 5) | self.version,
self.opcode,
self.flags,
self.first_tlv_offset,
self.transaction_id,
self.end_tlv
)
def tearDown(self):
pass
def test_init(self):
eq_(self.md_lv, self.ins.md_lv)
eq_(self.version, self.ins.version)
eq_(self.transaction_id, self.ins.transaction_id)
eq_(self.tlvs, self.ins.tlvs)
def test_parser(self):
_res = cfm.loopback_reply.parser(self.buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(self.md_lv, res.md_lv)
eq_(self.version, res.version)
eq_(self.transaction_id, res.transaction_id)
eq_(self.tlvs, res.tlvs)
def test_serialize(self):
buf = self.ins.serialize()
res = struct.unpack_from(self.form, six.binary_type(buf))
eq_(self.md_lv, res[0] >> 5)
eq_(self.version, res[0] & 0x1f)
eq_(self.opcode, res[1])
eq_(self.flags, res[2])
eq_(self.first_tlv_offset, res[3])
eq_(self.transaction_id, res[4])
eq_(self.end_tlv, res[5])
def test_len(self):
        # 9 octets (when no TLVs are present)
eq_(9, len(self.ins))
def test_default_args(self):
ins = cfm.loopback_reply()
buf = ins.serialize()
res = struct.unpack_from(cfm.loopback_reply._PACK_STR, six.binary_type(buf))
eq_(res[0] >> 5, 0)
eq_(res[0] & 0x1f, 0)
eq_(res[1], 2)
eq_(res[2], 0)
eq_(res[3], 4)
eq_(res[4], 0)
class Test_link_trace_message(unittest.TestCase):
def setUp(self):
self.md_lv = 1
self.version = 1
self.opcode = cfm.CFM_LINK_TRACE_MESSAGE
self.use_fdb_only = 1
self.first_tlv_offset = cfm.link_trace_message._TLV_OFFSET
self.transaction_id = 12345
self.ttl = 55
self.ltm_orig_addr = "00:11:22:44:55:66"
self.ltm_targ_addr = "ab:cd:ef:23:12:65"
self.tlvs = [
]
self.end_tlv = 0
self.ins = cfm.link_trace_message(
self.md_lv,
self.version,
self.use_fdb_only,
self.transaction_id,
self.ttl,
self.ltm_orig_addr,
self.ltm_targ_addr,
self.tlvs
)
self.form = '!4BIB6s6sB'
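        # '!4BIB6s6sB': 4B common header (the flags byte carries use_fdb_only
        # in its top bit), I = transaction_id, B = ttl, 6s/6s = origin and
        # target MAC addresses, B = end TLV -- 22 octets.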
self.buf = struct.pack(
self.form,
(self.md_lv << 5) | self.version,
self.opcode,
self.use_fdb_only << 7,
self.first_tlv_offset,
self.transaction_id,
self.ttl,
addrconv.mac.text_to_bin(self.ltm_orig_addr),
addrconv.mac.text_to_bin(self.ltm_targ_addr),
self.end_tlv
)
def tearDown(self):
pass
def test_init(self):
eq_(self.md_lv, self.ins.md_lv)
eq_(self.version, self.ins.version)
eq_(self.use_fdb_only, self.ins.use_fdb_only)
eq_(self.transaction_id, self.ins.transaction_id)
eq_(self.ttl, self.ins.ttl)
eq_(self.ltm_orig_addr, self.ins.ltm_orig_addr)
eq_(self.ltm_targ_addr, self.ins.ltm_targ_addr)
eq_(self.tlvs, self.ins.tlvs)
def test_parser(self):
_res = cfm.link_trace_message.parser(self.buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(self.md_lv, res.md_lv)
eq_(self.version, res.version)
eq_(self.use_fdb_only, res.use_fdb_only)
eq_(self.transaction_id, res.transaction_id)
eq_(self.ttl, res.ttl)
eq_(self.ltm_orig_addr, res.ltm_orig_addr)
eq_(self.ltm_targ_addr, res.ltm_targ_addr)
eq_(self.tlvs, res.tlvs)
def test_serialize(self):
buf = self.ins.serialize()
res = struct.unpack_from(self.form, six.binary_type(buf))
eq_(self.md_lv, res[0] >> 5)
eq_(self.version, res[0] & 0x1f)
eq_(self.opcode, res[1])
eq_(self.use_fdb_only, res[2] >> 7)
eq_(self.first_tlv_offset, res[3])
eq_(self.transaction_id, res[4])
eq_(self.ttl, res[5])
eq_(addrconv.mac.text_to_bin(self.ltm_orig_addr), res[6])
eq_(addrconv.mac.text_to_bin(self.ltm_targ_addr), res[7])
eq_(self.end_tlv, res[8])
def test_len(self):
        # 22 octets (when no TLVs are present)
eq_(22, len(self.ins))
def test_default_args(self):
ins = cfm.link_trace_message()
buf = ins.serialize()
res = struct.unpack_from(cfm.link_trace_message._PACK_STR, six.binary_type(buf))
eq_(res[0] >> 5, 0)
eq_(res[0] & 0x1f, 0)
eq_(res[1], 5)
eq_(res[2] >> 7, 1)
eq_(res[3], 17)
eq_(res[4], 0)
eq_(res[5], 64)
eq_(res[6], addrconv.mac.text_to_bin('00:00:00:00:00:00'))
eq_(res[7], addrconv.mac.text_to_bin('00:00:00:00:00:00'))
class Test_link_trace_reply(unittest.TestCase):
def setUp(self):
self.md_lv = 1
self.version = 1
self.opcode = cfm.CFM_LINK_TRACE_REPLY
self.use_fdb_only = 1
self.fwd_yes = 0
self.terminal_mep = 1
self.first_tlv_offset = cfm.link_trace_reply._TLV_OFFSET
self.transaction_id = 12345
self.ttl = 55
self.relay_action = 2
self.ltm_orig_addr = "00:11:22:aa:bb:cc"
self.ltm_targ_addr = "53:45:24:64:ac:ff"
self.tlvs = [
]
self.end_tlv = 0
self.ins = cfm.link_trace_reply(
self.md_lv,
self.version,
self.use_fdb_only,
self.fwd_yes,
self.terminal_mep,
self.transaction_id,
self.ttl,
self.relay_action,
self.tlvs,
)
self.form = '!4BIBBB'
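        # '!4BIBBB': 4B common header (the flags byte packs use_fdb_only,
        # fwd_yes and terminal_mep into its top three bits), I =
        # transaction_id, then ttl, relay_action and end TLV bytes -- 11
        # octets.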
self.buf = struct.pack(
self.form,
(self.md_lv << 5) | self.version,
self.opcode,
(self.use_fdb_only << 7) | (self.fwd_yes << 6) |
(self.terminal_mep << 5),
self.first_tlv_offset,
self.transaction_id,
self.ttl,
self.relay_action,
self.end_tlv
)
def tearDown(self):
pass
def test_init(self):
eq_(self.md_lv, self.ins.md_lv)
eq_(self.version, self.ins.version)
eq_(self.use_fdb_only, self.ins.use_fdb_only)
eq_(self.fwd_yes, self.ins.fwd_yes)
eq_(self.terminal_mep, self.ins.terminal_mep)
eq_(self.transaction_id, self.ins.transaction_id)
eq_(self.ttl, self.ins.ttl)
eq_(self.relay_action, self.ins.relay_action)
eq_(self.tlvs, self.ins.tlvs)
def test_parser(self):
_res = cfm.link_trace_reply.parser(self.buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(self.md_lv, res.md_lv)
eq_(self.version, res.version)
        eq_(self.use_fdb_only, res.use_fdb_only)
        eq_(self.fwd_yes, res.fwd_yes)
        eq_(self.terminal_mep, res.terminal_mep)
eq_(self.transaction_id, res.transaction_id)
eq_(self.ttl, res.ttl)
eq_(self.relay_action, res.relay_action)
eq_(self.tlvs, res.tlvs)
def test_serialize(self):
buf = self.ins.serialize()
res = struct.unpack_from(self.form, six.binary_type(buf))
eq_(self.md_lv, res[0] >> 5)
eq_(self.version, res[0] & 0x1f)
eq_(self.opcode, res[1])
eq_(self.use_fdb_only, res[2] >> 7 & 0x01)
eq_(self.fwd_yes, res[2] >> 6 & 0x01)
eq_(self.terminal_mep, res[2] >> 5 & 0x01)
eq_(self.first_tlv_offset, res[3])
eq_(self.transaction_id, res[4])
eq_(self.ttl, res[5])
eq_(self.relay_action, res[6])
eq_(self.end_tlv, res[7])
def test_len(self):
        # 11 octets (when no TLVs are present)
eq_(11, len(self.ins))
def test_default_args(self):
ins = cfm.link_trace_reply()
buf = ins.serialize()
res = struct.unpack_from(cfm.link_trace_reply._PACK_STR, six.binary_type(buf))
eq_(res[0] >> 5, 0)
eq_(res[0] & 0x1f, 0)
eq_(res[1], 4)
eq_(res[2] >> 7, 1)
eq_(res[2] >> 6 & 0x01, 0)
eq_(res[2] >> 5 & 0x01, 1)
eq_(res[3], 6)
eq_(res[4], 0)
eq_(res[5], 64)
eq_(res[6], 1)
class Test_sender_id_tlv(unittest.TestCase):
def setUp(self):
self._type = cfm.CFM_SENDER_ID_TLV
self.length = 10
self.chassis_id_length = 1
self.chassis_id_subtype = 3
self.chassis_id = b"\x0a"
self.ma_domain_length = 2
self.ma_domain = b"\x04\x05"
self.ma_length = 3
self.ma = b"\x01\x02\x03"
self.ins = cfm.sender_id_tlv(
self.length,
self.chassis_id_length,
self.chassis_id_subtype,
self.chassis_id,
self.ma_domain_length,
self.ma_domain,
self.ma_length,
self.ma,
)
self.form = '!BHBB1sB2sB3s'
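        # TLV layout: B = type, H = length, then chassis ID (length, subtype,
        # value), MA domain (length, value) and MA (length, value); the value
        # fields sum to the 10-octet length above.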
self.buf = struct.pack(
self.form,
self._type,
self.length,
self.chassis_id_length,
self.chassis_id_subtype,
self.chassis_id,
self.ma_domain_length,
self.ma_domain,
self.ma_length,
self.ma
)
def tearDown(self):
pass
def test_init(self):
eq_(self.length, self.ins.length)
eq_(self.chassis_id_length, self.ins.chassis_id_length)
eq_(self.chassis_id_subtype, self.ins.chassis_id_subtype)
eq_(self.chassis_id, self.ins.chassis_id)
eq_(self.ma_domain_length, self.ins.ma_domain_length)
eq_(self.ma_domain, self.ins.ma_domain)
eq_(self.ma_length, self.ins.ma_length)
eq_(self.ma, self.ins.ma)
def test_parser(self):
_res = cfm.sender_id_tlv.parser(self.buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(self.length, res.length)
eq_(self.chassis_id_length, res.chassis_id_length)
eq_(self.chassis_id_subtype, res.chassis_id_subtype)
eq_(self.chassis_id, res.chassis_id)
eq_(self.ma_domain_length, res.ma_domain_length)
eq_(self.ma_domain, res.ma_domain)
eq_(self.ma_length, res.ma_length)
eq_(self.ma, res.ma)
def test_serialize(self):
buf = self.ins.serialize()
res = struct.unpack_from(self.form, six.binary_type(buf))
eq_(self._type, res[0])
eq_(self.length, res[1])
eq_(self.chassis_id_length, res[2])
eq_(self.chassis_id_subtype, res[3])
eq_(self.chassis_id, res[4])
eq_(self.ma_domain_length, res[5])
eq_(self.ma_domain, res[6])
eq_(self.ma_length, res[7])
eq_(self.ma, res[8])
def test_serialize_semi_normal_ptn1(self):
ins = cfm.sender_id_tlv(
chassis_id_subtype=self.chassis_id_subtype,
chassis_id=self.chassis_id,
ma_domain=self.ma_domain,
)
buf = ins.serialize()
form = '!BHBB1sB2sB'
res = struct.unpack_from(form, six.binary_type(buf))
eq_(self._type, res[0])
eq_(7, res[1])
eq_(self.chassis_id_length, res[2])
eq_(self.chassis_id_subtype, res[3])
eq_(self.chassis_id, res[4])
eq_(self.ma_domain_length, res[5])
eq_(self.ma_domain, res[6])
eq_(0, res[7])
def test_serialize_semi_normal_ptn2(self):
ins = cfm.sender_id_tlv(
ma_domain=self.ma_domain,
ma=self.ma,
)
buf = ins.serialize()
form = '!BHBB2sB3s'
res = struct.unpack_from(form, six.binary_type(buf))
eq_(self._type, res[0])
eq_(8, res[1])
eq_(0, res[2])
eq_(self.ma_domain_length, res[3])
eq_(self.ma_domain, res[4])
eq_(self.ma_length, res[5])
eq_(self.ma, res[6])
def test_serialize_semi_normal_ptn3(self):
ins = cfm.sender_id_tlv(
chassis_id_subtype=self.chassis_id_subtype,
chassis_id=self.chassis_id,
)
buf = ins.serialize()
form = '!BHBB1sB'
res = struct.unpack_from(form, six.binary_type(buf))
eq_(self._type, res[0])
eq_(4, res[1])
eq_(self.chassis_id_length, res[2])
eq_(self.chassis_id_subtype, res[3])
eq_(self.chassis_id, res[4])
eq_(0, res[5])
def test_serialize_semi_normal_ptn4(self):
ins = cfm.sender_id_tlv(
ma_domain=self.ma_domain,
)
buf = ins.serialize()
form = '!BHBB2sB'
res = struct.unpack_from(form, six.binary_type(buf))
eq_(self._type, res[0])
eq_(5, res[1])
eq_(0, res[2])
eq_(self.ma_domain_length, res[3])
eq_(self.ma_domain, res[4])
eq_(0, res[5])
def test_serialize_with_length_zero(self):
ins = cfm.sender_id_tlv(
0,
0,
self.chassis_id_subtype,
self.chassis_id,
0,
self.ma_domain,
0,
self.ma,
)
buf = ins.serialize()
res = struct.unpack_from(self.form, six.binary_type(buf))
eq_(self._type, res[0])
eq_(self.length, res[1])
eq_(self.chassis_id_length, res[2])
eq_(self.chassis_id_subtype, res[3])
eq_(self.chassis_id, res[4])
eq_(self.ma_domain_length, res[5])
eq_(self.ma_domain, res[6])
eq_(self.ma_length, res[7])
eq_(self.ma, res[8])
def test_len(self):
# tlv_length = type_len + length_len + value_len
eq_(1 + 2 + 10, len(self.ins))
def test_default_args(self):
ins = cfm.sender_id_tlv()
buf = ins.serialize()
res = struct.unpack_from(cfm.sender_id_tlv._PACK_STR, six.binary_type(buf))
eq_(res[0], cfm.CFM_SENDER_ID_TLV)
eq_(res[1], 1)
eq_(res[2], 0)
class Test_port_status_tlv(unittest.TestCase):
def setUp(self):
self._type = cfm.CFM_PORT_STATUS_TLV
self.length = 1
self.port_status = 1
self.ins = cfm.port_status_tlv(
self.length,
self.port_status
)
self.form = '!BHB'
self.buf = struct.pack(
self.form,
self._type,
self.length,
self.port_status
)
def tearDown(self):
pass
def test_init(self):
eq_(self.length, self.ins.length)
eq_(self.port_status, self.ins.port_status)
def test_parser(self):
_res = cfm.port_status_tlv.parser(self.buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(self.length, res.length)
eq_(self.port_status, res.port_status)
def test_serialize(self):
buf = self.ins.serialize()
res = struct.unpack_from(self.form, six.binary_type(buf))
eq_(self._type, res[0])
eq_(self.length, res[1])
eq_(self.port_status, res[2])
def test_len(self):
# tlv_length = type_len + length_len + value_len
eq_(1 + 2 + 1, len(self.ins))
def test_default_args(self):
ins = cfm.port_status_tlv()
buf = ins.serialize()
res = struct.unpack_from(cfm.port_status_tlv._PACK_STR, six.binary_type(buf))
eq_(res[0], cfm.CFM_PORT_STATUS_TLV)
eq_(res[1], 1)
eq_(res[2], 2)
class Test_data_tlv(unittest.TestCase):
def setUp(self):
self._type = cfm.CFM_DATA_TLV
self.length = 3
self.data_value = b"\x01\x02\x03"
self.ins = cfm.data_tlv(
self.length,
self.data_value
)
self.form = '!BH3s'
self.buf = struct.pack(
self.form,
self._type,
self.length,
self.data_value,
)
def tearDown(self):
pass
def test_init(self):
eq_(self.length, self.ins.length)
eq_(self.data_value, self.ins.data_value)
def test_parser(self):
_res = cfm.data_tlv.parser(self.buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(self.length, res.length)
eq_(self.data_value, res.data_value)
def test_serialize(self):
buf = self.ins.serialize()
res = struct.unpack_from(self.form, six.binary_type(buf))
eq_(self._type, res[0])
eq_(self.length, res[1])
eq_(self.data_value, res[2])
def test_serialize_with_length_zero(self):
ins = cfm.data_tlv(
0,
self.data_value
)
buf = ins.serialize()
res = struct.unpack_from(self.form, six.binary_type(buf))
eq_(self._type, res[0])
eq_(self.length, res[1])
eq_(self.data_value, res[2])
def test_len(self):
# tlv_length = type_len + length_len + value_len
eq_(1 + 2 + 3, len(self.ins))
def test_default_args(self):
ins = cfm.data_tlv()
buf = ins.serialize()
res = struct.unpack_from(cfm.data_tlv._PACK_STR, six.binary_type(buf))
eq_(res[0], cfm.CFM_DATA_TLV)
eq_(res[1], 0)
class Test_interface_status_tlv(unittest.TestCase):
def setUp(self):
self._type = cfm.CFM_INTERFACE_STATUS_TLV
self.length = 1
self.interface_status = 4
self.ins = cfm.interface_status_tlv(
self.length,
self.interface_status
)
self.form = '!BHB'
self.buf = struct.pack(
self.form,
self._type,
self.length,
self.interface_status
)
def tearDown(self):
pass
def test_init(self):
eq_(self.length, self.ins.length)
eq_(self.interface_status, self.ins.interface_status)
def test_parser(self):
_res = cfm.interface_status_tlv.parser(self.buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(self.length, res.length)
eq_(self.interface_status, res.interface_status)
def test_serialize(self):
buf = self.ins.serialize()
res = struct.unpack_from(self.form, six.binary_type(buf))
eq_(self._type, res[0])
eq_(self.length, res[1])
eq_(self.interface_status, res[2])
def test_len(self):
# tlv_length = type_len + length_len + value_len
eq_(1 + 2 + 1, len(self.ins))
def test_default_args(self):
ins = cfm.interface_status_tlv()
buf = ins.serialize()
res = struct.unpack_from(cfm.interface_status_tlv._PACK_STR, six.binary_type(buf))
eq_(res[0], cfm.CFM_INTERFACE_STATUS_TLV)
eq_(res[1], 1)
eq_(res[2], 1)
class Test_ltm_egress_identifier_tlv(unittest.TestCase):
def setUp(self):
self._type = cfm.CFM_LTM_EGRESS_IDENTIFIER_TLV
self.length = 8
self.egress_id_ui = 7
self.egress_id_mac = "11:22:33:44:55:66"
self.ins = cfm.ltm_egress_identifier_tlv(
self.length,
self.egress_id_ui,
self.egress_id_mac
)
self.form = '!BHH6s'
self.buf = struct.pack(
self.form,
self._type,
self.length,
self.egress_id_ui,
addrconv.mac.text_to_bin(self.egress_id_mac)
)
def tearDown(self):
pass
def test_init(self):
eq_(self.length, self.ins.length)
eq_(self.egress_id_ui, self.ins.egress_id_ui)
eq_(self.egress_id_mac, self.ins.egress_id_mac)
def test_parser(self):
_res = cfm.ltm_egress_identifier_tlv.parser(self.buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(self.length, res.length)
eq_(self.egress_id_ui, res.egress_id_ui)
eq_(self.egress_id_mac, res.egress_id_mac)
def test_serialize(self):
buf = self.ins.serialize()
res = struct.unpack_from(self.form, six.binary_type(buf))
eq_(self._type, res[0])
eq_(self.length, res[1])
eq_(self.egress_id_ui, res[2])
eq_(addrconv.mac.text_to_bin(self.egress_id_mac), res[3])
def test_serialize_with_length_zero(self):
ins = cfm.ltm_egress_identifier_tlv(
0,
self.egress_id_ui,
self.egress_id_mac
)
buf = ins.serialize()
res = struct.unpack_from(self.form, six.binary_type(buf))
eq_(self._type, res[0])
eq_(self.length, res[1])
eq_(self.egress_id_ui, res[2])
eq_(addrconv.mac.text_to_bin(self.egress_id_mac), res[3])
def test_len(self):
# tlv_length = type_len + length_len + value_len
eq_(1 + 2 + 8, len(self.ins))
def test_default_args(self):
ins = cfm.ltm_egress_identifier_tlv()
buf = ins.serialize()
res = struct.unpack_from(
cfm.ltm_egress_identifier_tlv._PACK_STR, six.binary_type(buf))
eq_(res[0], cfm.CFM_LTM_EGRESS_IDENTIFIER_TLV)
eq_(res[1], 8)
eq_(res[2], 0)
eq_(res[3], addrconv.mac.text_to_bin('00:00:00:00:00:00'))
class Test_ltr_egress_identifier_tlv(unittest.TestCase):
def setUp(self):
self._type = cfm.CFM_LTR_EGRESS_IDENTIFIER_TLV
self.length = 16
self.last_egress_id_ui = 7
self.last_egress_id_mac = "11:22:33:44:55:66"
self.next_egress_id_ui = 5
self.next_egress_id_mac = "33:11:33:aa:bb:cc"
self.ins = cfm.ltr_egress_identifier_tlv(self.length,
self.last_egress_id_ui,
self.last_egress_id_mac,
self.next_egress_id_ui,
self.next_egress_id_mac
)
self.form = '!BHH6sH6s'
self.buf = struct.pack(
self.form,
self._type,
self.length,
self.last_egress_id_ui,
addrconv.mac.text_to_bin(self.last_egress_id_mac),
self.next_egress_id_ui,
addrconv.mac.text_to_bin(self.next_egress_id_mac))
def tearDown(self):
pass
def test_init(self):
eq_(self.length, self.ins.length)
eq_(self.last_egress_id_ui, self.ins.last_egress_id_ui)
eq_(self.last_egress_id_mac, self.ins.last_egress_id_mac)
eq_(self.next_egress_id_ui, self.ins.next_egress_id_ui)
eq_(self.next_egress_id_mac, self.ins.next_egress_id_mac)
def test_parser(self):
_res = cfm.ltr_egress_identifier_tlv.parser(self.buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(self.length, res.length)
eq_(self.last_egress_id_ui, res.last_egress_id_ui)
eq_(self.last_egress_id_mac, res.last_egress_id_mac)
eq_(self.next_egress_id_ui, res.next_egress_id_ui)
eq_(self.next_egress_id_mac, res.next_egress_id_mac)
def test_serialize(self):
buf = self.ins.serialize()
res = struct.unpack_from(self.form, six.binary_type(buf))
eq_(self._type, res[0])
eq_(self.length, res[1])
eq_(self.last_egress_id_ui, res[2])
eq_(addrconv.mac.text_to_bin(self.last_egress_id_mac), res[3])
eq_(self.next_egress_id_ui, res[4])
eq_(addrconv.mac.text_to_bin(self.next_egress_id_mac), res[5])
def test_serialize_with_length_zero(self):
ins = cfm.ltr_egress_identifier_tlv(0,
self.last_egress_id_ui,
self.last_egress_id_mac,
self.next_egress_id_ui,
self.next_egress_id_mac
)
buf = ins.serialize()
res = struct.unpack_from(self.form, six.binary_type(buf))
eq_(self._type, res[0])
eq_(self.length, res[1])
eq_(self.last_egress_id_ui, res[2])
eq_(addrconv.mac.text_to_bin(self.last_egress_id_mac), res[3])
eq_(self.next_egress_id_ui, res[4])
eq_(addrconv.mac.text_to_bin(self.next_egress_id_mac), res[5])
def test_len(self):
# tlv_length = type_len + length_len + value_len
eq_(1 + 2 + 16, len(self.ins))
def test_default_args(self):
ins = cfm.ltr_egress_identifier_tlv()
buf = ins.serialize()
res = struct.unpack_from(cfm.ltr_egress_identifier_tlv._PACK_STR,
six.binary_type(buf))
eq_(res[0], cfm.CFM_LTR_EGRESS_IDENTIFIER_TLV)
eq_(res[1], 16)
eq_(res[2], 0)
eq_(res[3], addrconv.mac.text_to_bin('00:00:00:00:00:00'))
eq_(res[4], 0)
eq_(res[5], addrconv.mac.text_to_bin('00:00:00:00:00:00'))
class Test_organization_specific_tlv(unittest.TestCase):
def setUp(self):
self._type = cfm.CFM_ORGANIZATION_SPECIFIC_TLV
self.length = 10
self.oui = b"\xff\x12\x34"
self.subtype = 3
self.value = b"\x01\x02\x0f\x0e\x0d\x0c"
self.ins = cfm.organization_specific_tlv(self.length,
self.oui,
self.subtype,
self.value
)
self.form = '!BH3sB6s'
self.buf = struct.pack(self.form,
self._type,
self.length,
self.oui,
self.subtype,
self.value
)
def tearDown(self):
pass
def test_init(self):
eq_(self.length, self.ins.length)
eq_(self.oui, self.ins.oui)
eq_(self.subtype, self.ins.subtype)
eq_(self.value, self.ins.value)
def test_parser(self):
_res = cfm.organization_specific_tlv.parser(self.buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(self.length, res.length)
eq_(self.oui, res.oui)
eq_(self.subtype, res.subtype)
eq_(self.value, res.value)
def test_serialize(self):
buf = self.ins.serialize()
res = struct.unpack_from(self.form, six.binary_type(buf))
eq_(self._type, res[0])
eq_(self.length, res[1])
eq_(self.oui, res[2])
eq_(self.subtype, res[3])
eq_(self.value, res[4])
def test_serialize_with_zero(self):
ins = cfm.organization_specific_tlv(0,
self.oui,
self.subtype,
self.value
)
buf = ins.serialize()
res = struct.unpack_from(self.form, six.binary_type(buf))
eq_(self._type, res[0])
eq_(self.length, res[1])
eq_(self.oui, res[2])
eq_(self.subtype, res[3])
eq_(self.value, res[4])
def test_len(self):
# tlv_length = type_len + length_len + value_len
eq_(1 + 2 + 10, len(self.ins))
def test_default_args(self):
ins = cfm.organization_specific_tlv()
buf = ins.serialize()
res = struct.unpack_from(cfm.organization_specific_tlv._PACK_STR,
six.binary_type(buf))
eq_(res[0], cfm.CFM_ORGANIZATION_SPECIFIC_TLV)
eq_(res[1], 4)
eq_(res[2], b"\x00\x00\x00")
eq_(res[3], 0)
class Test_reply_ingress_tlv(unittest.TestCase):
def setUp(self):
self._type = cfm.CFM_REPLY_INGRESS_TLV
self.length = 12
self.action = 2
self.mac_address = 'aa:bb:cc:56:34:12'
self.port_id_length = 3
self.port_id_subtype = 2
self.port_id = b"\x01\x04\x09"
self.ins = cfm.reply_ingress_tlv(self.length, self.action,
self.mac_address,
self.port_id_length,
self.port_id_subtype,
self.port_id
)
self.form = '!BHB6sBB3s'
self.buf = struct.pack(self.form,
self._type,
self.length,
self.action,
addrconv.mac.text_to_bin(self.mac_address),
self.port_id_length,
self.port_id_subtype,
self.port_id
)
def tearDown(self):
pass
def test_init(self):
eq_(self.length, self.ins.length)
eq_(self.action, self.ins.action)
eq_(self.mac_address, self.ins.mac_address)
eq_(self.port_id_length, self.ins.port_id_length)
eq_(self.port_id_subtype, self.ins.port_id_subtype)
eq_(self.port_id, self.ins.port_id)
def test_parser(self):
_res = cfm.reply_ingress_tlv.parser(self.buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(self.length, res.length)
eq_(self.action, res.action)
eq_(self.mac_address, res.mac_address)
eq_(self.port_id_length, res.port_id_length)
eq_(self.port_id_subtype, res.port_id_subtype)
eq_(self.port_id, res.port_id)
def test_serialize(self):
buf = self.ins.serialize()
res = struct.unpack_from(self.form, six.binary_type(buf))
eq_(self._type, res[0])
eq_(self.length, res[1])
eq_(self.action, res[2])
eq_(addrconv.mac.text_to_bin(self.mac_address), res[3])
eq_(self.port_id_length, res[4])
eq_(self.port_id_subtype, res[5])
eq_(self.port_id, res[6])
def test_serialize_with_zero(self):
ins = cfm.reply_ingress_tlv(0,
self.action,
self.mac_address,
0,
self.port_id_subtype,
self.port_id
)
buf = ins.serialize()
res = struct.unpack_from(self.form, six.binary_type(buf))
eq_(self._type, res[0])
eq_(self.length, res[1])
eq_(self.action, res[2])
eq_(addrconv.mac.text_to_bin(self.mac_address), res[3])
eq_(self.port_id_length, res[4])
eq_(self.port_id_subtype, res[5])
eq_(self.port_id, res[6])
def test_len(self):
# tlv_length = type_len + length_len + value_len
eq_(1 + 2 + 12, len(self.ins))
def test_default_args(self):
ins = cfm.reply_ingress_tlv()
buf = ins.serialize()
res = struct.unpack_from(cfm.reply_ingress_tlv._PACK_STR, six.binary_type(buf))
eq_(res[0], cfm.CFM_REPLY_INGRESS_TLV)
eq_(res[1], 7)
eq_(res[2], 1)
eq_(res[3], addrconv.mac.text_to_bin('00:00:00:00:00:00'))
class Test_reply_egress_tlv(unittest.TestCase):
def setUp(self):
self._type = cfm.CFM_REPLY_EGRESS_TLV
self.length = 12
self.action = 2
self.mac_address = 'aa:bb:cc:56:34:12'
self.port_id_length = 3
self.port_id_subtype = 2
self.port_id = b"\x01\x04\x09"
self.ins = cfm.reply_egress_tlv(self.length,
self.action,
self.mac_address,
self.port_id_length,
self.port_id_subtype,
self.port_id
)
self.form = '!BHB6sBB3s'
self.buf = struct.pack(self.form,
self._type,
self.length,
self.action,
addrconv.mac.text_to_bin(self.mac_address),
self.port_id_length,
self.port_id_subtype,
self.port_id
)
def tearDown(self):
pass
def test_init(self):
eq_(self.length, self.ins.length)
eq_(self.action, self.ins.action)
eq_(self.mac_address, self.ins.mac_address)
eq_(self.port_id_length, self.ins.port_id_length)
eq_(self.port_id_subtype, self.ins.port_id_subtype)
eq_(self.port_id, self.ins.port_id)
def test_parser(self):
        _res = cfm.reply_egress_tlv.parser(self.buf)
if type(_res) is tuple:
res = _res[0]
else:
res = _res
eq_(self.length, res.length)
eq_(self.action, res.action)
eq_(self.mac_address, res.mac_address)
eq_(self.port_id_length, res.port_id_length)
eq_(self.port_id_subtype, res.port_id_subtype)
eq_(self.port_id, res.port_id)
def test_serialize(self):
buf = self.ins.serialize()
res = struct.unpack_from(self.form, six.binary_type(buf))
eq_(self._type, res[0])
eq_(self.length, res[1])
eq_(self.action, res[2])
eq_(addrconv.mac.text_to_bin(self.mac_address), res[3])
eq_(self.port_id_length, res[4])
eq_(self.port_id_subtype, res[5])
eq_(self.port_id, res[6])
def test_serialize_with_zero(self):
ins = cfm.reply_egress_tlv(0,
self.action,
self.mac_address,
0,
self.port_id_subtype,
self.port_id
)
buf = ins.serialize()
res = struct.unpack_from(self.form, six.binary_type(buf))
eq_(self._type, res[0])
eq_(self.length, res[1])
eq_(self.action, res[2])
eq_(addrconv.mac.text_to_bin(self.mac_address), res[3])
eq_(self.port_id_length, res[4])
eq_(self.port_id_subtype, res[5])
eq_(self.port_id, res[6])
def test_len(self):
# tlv_length = type_len + length_len + value_len
eq_(1 + 2 + 12, len(self.ins))
def test_default_args(self):
ins = cfm.reply_egress_tlv()
buf = ins.serialize()
res = struct.unpack_from(cfm.reply_egress_tlv._PACK_STR,
six.binary_type(buf))
eq_(res[0], cfm.CFM_REPLY_EGRESS_TLV)
eq_(res[1], 7)
eq_(res[2], 1)
eq_(res[3], addrconv.mac.text_to_bin('00:00:00:00:00:00'))
|
ampyche/ampyche
|
refs/heads/master
|
amp-cgi-bin/getters/getplaylistsongsforpopup.py
|
1
|
#!/usr/bin/python3
import cgi
import json
import sqlite3 as lite
class GetPlaylistSongsForPopup:
def __init__(self):
self.pll_db_loc = "/usr/share/ampyche/db/ampychePlaylist.db"
def _make_a_list(self, genlist):
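        # Flatten the sqlite3 fetchall() row tuples (e.g. ('song',)) into a
        # sorted list of plain strings.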
agen = []
for gen in genlist:
gen = ' '.join(gen)
agen.append(gen)
agen.sort()
return agen
def _format_input(self, a_list):
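        # Build a string of HTML radio <input>/<label> pairs with ids
        # radio1..radioN, one per entry (not used by the CGI entry point at
        # the bottom of this script).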
count = 0
meeco = ""
for a in a_list:
count += 1
strcount = str(count)
b = "radio" + strcount
c = "<input type='radio' id='%s'" %b
d = " name='%s'" %b
e = " value='off'><label for='%s'" %b
f = ">%s</label>" %a
g = c + d + e + f
meeco = meeco + g
return meeco
def _get_songs_for_playlist_popup(self, plname):
con = lite.connect(self.pll_db_loc)
cur = con.cursor()
cur.execute("SELECT song FROM playlistsongs WHERE playlistname=?", [plname])
plsongs = cur.fetchall()
con.close()
plpsongs = self._make_a_list(plsongs)
return plpsongs
pl = cgi.FieldStorage()
pllname = pl.getvalue("playlistname")
taz = GetPlaylistSongsForPopup()
pluto = taz._get_songs_for_playlist_popup(pllname)
print("Content-Type: application/json\n\n")
print(json.dumps(pluto, sort_keys=True, indent=4))
|
chenbaihu/grpc
|
refs/heads/master
|
src/python/src/grpc/framework/face/testing/coverage.py
|
41
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Governs coverage for the tests of the Face layer of RPC Framework."""
import abc
# These classes are only valid when inherited by unittest.TestCases.
# pylint: disable=invalid-name
class BlockingCoverage(object):
"""Specification of test coverage for blocking behaviors."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def testSuccessfulUnaryRequestUnaryResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testSuccessfulUnaryRequestStreamResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testSuccessfulStreamRequestUnaryResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testSuccessfulStreamRequestStreamResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testSequentialInvocations(self):
raise NotImplementedError()
@abc.abstractmethod
def testExpiredUnaryRequestUnaryResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testExpiredUnaryRequestStreamResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testExpiredStreamRequestUnaryResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testExpiredStreamRequestStreamResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testFailedUnaryRequestUnaryResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testFailedUnaryRequestStreamResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testFailedStreamRequestUnaryResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testFailedStreamRequestStreamResponse(self):
raise NotImplementedError()
class FullCoverage(BlockingCoverage):
"""Specification of test coverage for non-blocking behaviors."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def testParallelInvocations(self):
raise NotImplementedError()
@abc.abstractmethod
def testWaitingForSomeButNotAllParallelInvocations(self):
raise NotImplementedError()
@abc.abstractmethod
def testCancelledUnaryRequestUnaryResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testCancelledUnaryRequestStreamResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testCancelledStreamRequestUnaryResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testCancelledStreamRequestStreamResponse(self):
raise NotImplementedError()
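# A hypothetical usage sketch (not part of this module): a concrete test case
# mixes a coverage class into unittest.TestCase, and abc then refuses to
# instantiate it until every abstract test method is implemented:
#
#     class ExampleFaceTest(unittest.TestCase, BlockingCoverage):
#         def testSuccessfulUnaryRequestUnaryResponse(self):
#             ...  # invoke a unary-unary RPC and assert on its result
#         # ... and so on for each abstract method above.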
|
vrv/tensorflow
|
refs/heads/master
|
tensorflow/tools/test/system_info.py
|
170
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for getting system information during TensorFlow tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import app
from tensorflow.tools.test import system_info_lib
def main(unused_args):
config = system_info_lib.gather_machine_configuration()
print(config)
if __name__ == "__main__":
app.run()
|
adaptivelogic/django-cms
|
refs/heads/refactor-viewperms
|
cms/plugins/flash/migrations/0004_table_rename.py
|
33
|
from south.db import db
from django.db import models
from cms.plugins.flash.models import *
class Migration:
def forwards(self, orm):
"Write your forwards migration here"
def backwards(self, orm):
"Write your backwards migration here"
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'flash.flash': {
'Meta': {'object_name': 'Flash', 'db_table': "'cmsplugin_flash'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'height': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
'width': ('django.db.models.fields.CharField', [], {'max_length': '6'})
}
}
complete_apps = ['flash']
|
mdworks2016/work_development
|
refs/heads/master
|
Python/20_Third_Certification/venv/lib/python3.7/site-packages/pip/_internal/utils/virtualenv.py
|
3
|
from __future__ import absolute_import
import logging
import os
import re
import site
import sys
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import List, Optional
logger = logging.getLogger(__name__)
_INCLUDE_SYSTEM_SITE_PACKAGES_REGEX = re.compile(
r"include-system-site-packages\s*=\s*(?P<value>true|false)"
)
def _running_under_venv():
# type: () -> bool
"""Checks if sys.base_prefix and sys.prefix match.
This handles PEP 405 compliant virtual environments.
"""
return sys.prefix != getattr(sys, "base_prefix", sys.prefix)
def _running_under_regular_virtualenv():
# type: () -> bool
"""Checks if sys.real_prefix is set.
This handles virtual environments created with pypa's virtualenv.
"""
# pypa/virtualenv case
return hasattr(sys, 'real_prefix')
def running_under_virtualenv():
# type: () -> bool
"""Return True if we're running inside a virtualenv, False otherwise.
"""
return _running_under_venv() or _running_under_regular_virtualenv()
def _get_pyvenv_cfg_lines():
# type: () -> Optional[List[str]]
"""Reads {sys.prefix}/pyvenv.cfg and returns its contents as list of lines
Returns None, if it could not read/access the file.
"""
pyvenv_cfg_file = os.path.join(sys.prefix, 'pyvenv.cfg')
try:
with open(pyvenv_cfg_file) as f:
return f.read().splitlines() # avoids trailing newlines
except IOError:
return None
def _no_global_under_venv():
# type: () -> bool
"""Check `{sys.prefix}/pyvenv.cfg` for system site-packages inclusion
PEP 405 specifies that when system site-packages are not supposed to be
visible from a virtual environment, `pyvenv.cfg` must contain the following
line:
include-system-site-packages = false
Additionally, log a warning if accessing the file fails.
"""
cfg_lines = _get_pyvenv_cfg_lines()
if cfg_lines is None:
# We're not in a "sane" venv, so assume there is no system
# site-packages access (since that's PEP 405's default state).
logger.warning(
"Could not access 'pyvenv.cfg' despite a virtual environment "
"being active. Assuming global site-packages is not accessible "
"in this environment."
)
return True
for line in cfg_lines:
match = _INCLUDE_SYSTEM_SITE_PACKAGES_REGEX.match(line)
if match is not None and match.group('value') == 'false':
return True
return False
def _no_global_under_regular_virtualenv():
# type: () -> bool
"""Check if "no-global-site-packages.txt" exists beside site.py
This mirrors logic in pypa/virtualenv for determining whether system
site-packages are visible in the virtual environment.
"""
site_mod_dir = os.path.dirname(os.path.abspath(site.__file__))
no_global_site_packages_file = os.path.join(
site_mod_dir, 'no-global-site-packages.txt',
)
return os.path.exists(no_global_site_packages_file)
def virtualenv_no_global():
# type: () -> bool
"""Returns a boolean, whether running in venv with no system site-packages.
"""
# PEP 405 compliance needs to be checked first since virtualenv >=20 would
# return True for both checks, but is only able to use the PEP 405 config.
if _running_under_venv():
return _no_global_under_venv()
if _running_under_regular_virtualenv():
return _no_global_under_regular_virtualenv()
return False
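# Illustrative usage sketch (not part of the original module): callers inside
# pip would typically combine the public helpers above along these lines.
#
#   if running_under_virtualenv() and virtualenv_no_global():
#       # Global site-packages are not visible from this environment.
#       ...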
|
balthamos/plover
|
refs/heads/master
|
plover/gui/lookup.py
|
7
|
# Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
import wx
from wx.lib.utils import AdjustRectToScreen
import sys
from plover.steno import normalize_steno
import plover.gui.util as util
TITLE = 'Plover: Lookup'
class LookupDialog(wx.Dialog):
BORDER = 3
TRANSLATION_TEXT = 'Text:'
other_instances = []
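    # Class-level registry of open lookup dialogs; __init__ closes any
    # previous instances so that only one lookup window exists at a time.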
def __init__(self, parent, engine, config):
pos = (config.get_lookup_frame_x(),
config.get_lookup_frame_y())
wx.Dialog.__init__(self, parent, wx.ID_ANY, TITLE,
pos, wx.DefaultSize,
wx.DEFAULT_DIALOG_STYLE, wx.DialogNameStr)
self.config = config
# components
self.translation_text = wx.TextCtrl(self, style=wx.TE_PROCESS_ENTER)
cancel = wx.Button(self, id=wx.ID_CANCEL)
self.listbox = wx.ListBox(self, size=wx.Size(210, 200))
# layout
global_sizer = wx.BoxSizer(wx.VERTICAL)
sizer = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, label=self.TRANSLATION_TEXT)
sizer.Add(label,
flag=wx.TOP | wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL,
border=self.BORDER)
sizer.Add(self.translation_text,
flag=wx.TOP | wx.RIGHT | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL,
border=self.BORDER)
sizer.Add(cancel,
flag=wx.TOP | wx.RIGHT | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL,
border=self.BORDER)
global_sizer.Add(sizer)
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(self.listbox,
flag=wx.ALL | wx.FIXED_MINSIZE,
border=self.BORDER)
global_sizer.Add(sizer)
self.SetAutoLayout(True)
self.SetSizer(global_sizer)
global_sizer.Fit(self)
global_sizer.SetSizeHints(self)
self.Layout()
self.SetRect(AdjustRectToScreen(self.GetRect()))
# events
# The reason for the focus event here is to skip focus on tab traversal
# of the buttons. But it seems that on windows this prevents the button
# from being pressed. Leave this commented out until that problem is
# resolved.
#button.Bind(wx.EVT_SET_FOCUS, self.on_button_gained_focus)
cancel.Bind(wx.EVT_BUTTON, self.on_close)
#cancel.Bind(wx.EVT_SET_FOCUS, self.on_button_gained_focus)
self.translation_text.Bind(wx.EVT_TEXT, self.on_translation_change)
self.translation_text.Bind(wx.EVT_SET_FOCUS, self.on_translation_gained_focus)
self.translation_text.Bind(wx.EVT_KILL_FOCUS, self.on_translation_lost_focus)
self.translation_text.Bind(wx.EVT_TEXT_ENTER, self.on_close)
self.Bind(wx.EVT_CLOSE, self.on_close)
self.Bind(wx.EVT_MOVE, self.on_move)
self.engine = engine
# TODO: add functions on engine for state
self.previous_state = self.engine.translator.get_state()
# TODO: use state constructor?
self.engine.translator.clear_state()
self.translation_state = self.engine.translator.get_state()
self.engine.translator.set_state(self.previous_state)
self.last_window = util.GetForegroundWindow()
# Now that we saved the last window we'll close other instances. This
# may restore their original window but we've already saved ours so it's
# fine.
for instance in self.other_instances:
instance.Close()
del self.other_instances[:]
self.other_instances.append(self)
def on_close(self, event=None):
self.engine.translator.set_state(self.previous_state)
        try:
            util.SetForegroundWindow(self.last_window)
        except Exception:
            # The previous foreground window may already be gone; ignore it.
            pass
self.other_instances.remove(self)
self.Destroy()
def on_translation_change(self, event):
# TODO: normalize dict entries to make reverse lookup more reliable with
# whitespace.
translation = event.GetString().strip()
self.listbox.Clear()
if translation:
d = self.engine.get_dictionary()
strokes_list = d.reverse_lookup(translation)
if strokes_list:
entries = ('/'.join(x) for x in strokes_list)
                for entry in entries:
                    self.listbox.Append(entry)
else:
self.listbox.Append('No entries')
self.GetSizer().Layout()
def on_translation_gained_focus(self, event):
self.engine.translator.set_state(self.translation_state)
def on_translation_lost_focus(self, event):
self.engine.translator.set_state(self.previous_state)
def on_button_gained_focus(self, event):
        self.translation_text.SetFocus()
def on_move(self, event):
pos = self.GetScreenPositionTuple()
self.config.set_lookup_frame_x(pos[0])
self.config.set_lookup_frame_y(pos[1])
event.Skip()
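    # Note: the helper below reads a `strokes_text` control that this dialog
    # never creates; it appears to have been carried over from the
    # add-translation dialog and is unreachable from this dialog's bindings.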
def _normalized_strokes(self):
strokes = self.strokes_text.GetValue().upper().replace('/', ' ').split()
strokes = normalize_steno('/'.join(strokes))
return strokes
def Show(parent, engine, config):
dialog_instance = LookupDialog(parent, engine, config)
dialog_instance.Show()
dialog_instance.Raise()
dialog_instance.translation_text.SetFocus()
util.SetTopApp()
|
mesheven/pyOCD
|
refs/heads/master
|
pyocd/flash/__init__.py
|
3
|
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
|
ashhher3/cvxpy
|
refs/heads/master
|
cvxpy/atoms/elementwise/log1p.py
|
11
|
"""
Copyright 2013 Steven Diamond, Eric Chu
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
import cvxpy.utilities as u
import cvxpy.lin_ops.lin_utils as lu
from cvxpy.atoms.elementwise.log import log
import scipy.special
import numpy as np
class log1p(log):
"""Elementwise :math:`\log (1 + x)`.
"""
def __init__(self, x):
super(log1p, self).__init__(x)
@log.numpy_numeric
def numeric(self, values):
"""Returns the elementwise natural log of x+1.
"""
return scipy.special.log1p(values[0])
def sign_from_args(self):
"""The same sign as the argument.
"""
return self.args[0]._dcp_attr.sign
@staticmethod
def graph_implementation(arg_objs, size, data=None):
"""Reduces the atom to an affine expression and list of constraints.
Parameters
----------
arg_objs : list
LinExpr for each argument.
size : tuple
The size of the resulting expression.
data :
Additional data required by the atom.
Returns
-------
tuple
(LinOp for objective, list of constraints)
"""
x = arg_objs[0]
ones = lu.create_const(np.mat(np.ones(x.size)), x.size)
xp1 = lu.sum_expr([x, ones])
return log.graph_implementation([xp1], size, data)
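# Illustrative usage sketch (hypothetical variable names; not part of the
# original file): the atom composes like any other cvxpy elementwise atom.
#
#   from cvxpy import Maximize, Problem, Variable
#   x = Variable()
#   Problem(Maximize(log1p(x)), [x <= 1]).solve()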
|
MechanisM/musicdb
|
refs/heads/master
|
contrib/south/tests/circular_a/migrations/0001_first.py
|
174
|
from south.db import db
from django.db import models
class Migration:
depends_on = [('circular_b', '0001_first')]
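    # depends_on makes South apply circular_b's 0001_first before this
    # migration; circular_b presumably declares the mirror dependency, which
    # is what makes this fixture circular.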
def forwards(self):
pass
def backwards(self):
pass
|
simonpatrick/bite-project
|
refs/heads/master
|
server/crawlers/issuetracker_crawler_test.py
|
17
|
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests testing.chronos.bite.server.crawlers.issuetracker_crawler
TODO(alexto): Add more unit tests to exercise issuetracker_crawler handler.
"""
__author__ = 'alexto@google.com (Alexis O. Torres)'
from .pyglib import app
from .testing.pybase import googletest
from crawlers import issuetracker_crawler
class IssueTrackerCrawlerTest(googletest.TestCase):
"""Tests the IssueTrackerCrawler."""
def testImports(self):
# This is just a blank test that only ensures that all the imports are
# working correctly. If we get to this point, then the test
# passes.
pass
def main(unused_):
googletest.main()
if __name__ == '__main__':
app.run()
|
overtherain/scriptfile
|
refs/heads/master
|
software/googleAppEngine/lib/django_1_3/django/contrib/gis/gdal/tests/test_geom.py
|
154
|
from django.contrib.gis.gdal import OGRGeometry, OGRGeomType, \
OGRException, OGRIndexError, SpatialReference, CoordTransform, \
GDAL_VERSION
from django.utils import unittest
from django.contrib.gis.geometry.test_data import TestDataMixin
class OGRGeomTest(unittest.TestCase, TestDataMixin):
"This tests the OGR Geometry."
def test00a_geomtype(self):
"Testing OGRGeomType object."
# OGRGeomType should initialize on all these inputs.
try:
g = OGRGeomType(1)
g = OGRGeomType(7)
g = OGRGeomType('point')
g = OGRGeomType('GeometrycollectioN')
g = OGRGeomType('LINearrING')
g = OGRGeomType('Unknown')
        except Exception:
self.fail('Could not create an OGRGeomType object!')
        # Should raise OGRException on these invalid inputs.
self.assertRaises(OGRException, OGRGeomType, 23)
self.assertRaises(OGRException, OGRGeomType, 'fooD')
self.assertRaises(OGRException, OGRGeomType, 9)
# Equivalence can take strings, ints, and other OGRGeomTypes
self.assertEqual(True, OGRGeomType(1) == OGRGeomType(1))
self.assertEqual(True, OGRGeomType(7) == 'GeometryCollection')
self.assertEqual(True, OGRGeomType('point') == 'POINT')
self.assertEqual(False, OGRGeomType('point') == 2)
self.assertEqual(True, OGRGeomType('unknown') == 0)
self.assertEqual(True, OGRGeomType(6) == 'MULtiPolyGON')
self.assertEqual(False, OGRGeomType(1) != OGRGeomType('point'))
self.assertEqual(True, OGRGeomType('POINT') != OGRGeomType(6))
# Testing the Django field name equivalent property.
self.assertEqual('PointField', OGRGeomType('Point').django)
self.assertEqual('GeometryField', OGRGeomType('Unknown').django)
self.assertEqual(None, OGRGeomType('none').django)
# 'Geometry' initialization implies an unknown geometry type.
gt = OGRGeomType('Geometry')
self.assertEqual(0, gt.num)
self.assertEqual('Unknown', gt.name)
def test00b_geomtype_25d(self):
"Testing OGRGeomType object with 25D types."
wkb25bit = OGRGeomType.wkb25bit
self.assertTrue(OGRGeomType(wkb25bit + 1) == 'Point25D')
self.assertTrue(OGRGeomType('MultiLineString25D') == (5 + wkb25bit))
self.assertEqual('GeometryCollectionField', OGRGeomType('GeometryCollection25D').django)
def test01a_wkt(self):
"Testing WKT output."
for g in self.geometries.wkt_out:
geom = OGRGeometry(g.wkt)
self.assertEqual(g.wkt, geom.wkt)
def test01a_ewkt(self):
"Testing EWKT input/output."
for ewkt_val in ('POINT (1 2 3)', 'LINEARRING (0 0,1 1,2 1,0 0)'):
# First with ewkt output when no SRID in EWKT
self.assertEqual(ewkt_val, OGRGeometry(ewkt_val).ewkt)
            # Now test consumption with an SRID specified.
ewkt_val = 'SRID=4326;%s' % ewkt_val
geom = OGRGeometry(ewkt_val)
self.assertEqual(ewkt_val, geom.ewkt)
self.assertEqual(4326, geom.srs.srid)
def test01b_gml(self):
"Testing GML output."
for g in self.geometries.wkt_out:
geom = OGRGeometry(g.wkt)
exp_gml = g.gml
if GDAL_VERSION >= (1, 8):
# In GDAL 1.8, the non-conformant GML tag <gml:GeometryCollection> was
# replaced with <gml:MultiGeometry>.
exp_gml = exp_gml.replace('GeometryCollection', 'MultiGeometry')
self.assertEqual(exp_gml, geom.gml)
def test01c_hex(self):
"Testing HEX input/output."
for g in self.geometries.hex_wkt:
geom1 = OGRGeometry(g.wkt)
self.assertEqual(g.hex, geom1.hex)
# Constructing w/HEX
geom2 = OGRGeometry(g.hex)
self.assertEqual(geom1, geom2)
def test01d_wkb(self):
"Testing WKB input/output."
from binascii import b2a_hex
for g in self.geometries.hex_wkt:
geom1 = OGRGeometry(g.wkt)
wkb = geom1.wkb
self.assertEqual(b2a_hex(wkb).upper(), g.hex)
# Constructing w/WKB.
geom2 = OGRGeometry(wkb)
self.assertEqual(geom1, geom2)
def test01e_json(self):
"Testing GeoJSON input/output."
from django.contrib.gis.gdal.prototypes.geom import GEOJSON
if not GEOJSON: return
for g in self.geometries.json_geoms:
geom = OGRGeometry(g.wkt)
if not hasattr(g, 'not_equal'):
self.assertEqual(g.json, geom.json)
self.assertEqual(g.json, geom.geojson)
self.assertEqual(OGRGeometry(g.wkt), OGRGeometry(geom.json))
def test02_points(self):
"Testing Point objects."
prev = OGRGeometry('POINT(0 0)')
for p in self.geometries.points:
if not hasattr(p, 'z'): # No 3D
pnt = OGRGeometry(p.wkt)
self.assertEqual(1, pnt.geom_type)
self.assertEqual('POINT', pnt.geom_name)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual((p.x, p.y), pnt.tuple)
def test03_multipoints(self):
"Testing MultiPoint objects."
for mp in self.geometries.multipoints:
mgeom1 = OGRGeometry(mp.wkt) # First one from WKT
self.assertEqual(4, mgeom1.geom_type)
self.assertEqual('MULTIPOINT', mgeom1.geom_name)
mgeom2 = OGRGeometry('MULTIPOINT') # Creating empty multipoint
mgeom3 = OGRGeometry('MULTIPOINT')
for g in mgeom1:
mgeom2.add(g) # adding each point from the multipoints
mgeom3.add(g.wkt) # should take WKT as well
self.assertEqual(mgeom1, mgeom2) # they should equal
self.assertEqual(mgeom1, mgeom3)
self.assertEqual(mp.coords, mgeom2.coords)
self.assertEqual(mp.n_p, mgeom2.point_count)
def test04_linestring(self):
"Testing LineString objects."
prev = OGRGeometry('POINT(0 0)')
for ls in self.geometries.linestrings:
linestr = OGRGeometry(ls.wkt)
self.assertEqual(2, linestr.geom_type)
self.assertEqual('LINESTRING', linestr.geom_name)
self.assertEqual(ls.n_p, linestr.point_count)
self.assertEqual(ls.coords, linestr.tuple)
self.assertEqual(True, linestr == OGRGeometry(ls.wkt))
self.assertEqual(True, linestr != prev)
self.assertRaises(OGRIndexError, linestr.__getitem__, len(linestr))
prev = linestr
# Testing the x, y properties.
x = [tmpx for tmpx, tmpy in ls.coords]
y = [tmpy for tmpx, tmpy in ls.coords]
self.assertEqual(x, linestr.x)
self.assertEqual(y, linestr.y)
def test05_multilinestring(self):
"Testing MultiLineString objects."
prev = OGRGeometry('POINT(0 0)')
for mls in self.geometries.multilinestrings:
mlinestr = OGRGeometry(mls.wkt)
self.assertEqual(5, mlinestr.geom_type)
self.assertEqual('MULTILINESTRING', mlinestr.geom_name)
self.assertEqual(mls.n_p, mlinestr.point_count)
self.assertEqual(mls.coords, mlinestr.tuple)
self.assertEqual(True, mlinestr == OGRGeometry(mls.wkt))
self.assertEqual(True, mlinestr != prev)
prev = mlinestr
for ls in mlinestr:
self.assertEqual(2, ls.geom_type)
self.assertEqual('LINESTRING', ls.geom_name)
self.assertRaises(OGRIndexError, mlinestr.__getitem__, len(mlinestr))
def test06_linearring(self):
"Testing LinearRing objects."
prev = OGRGeometry('POINT(0 0)')
for rr in self.geometries.linearrings:
lr = OGRGeometry(rr.wkt)
#self.assertEqual(101, lr.geom_type.num)
self.assertEqual('LINEARRING', lr.geom_name)
self.assertEqual(rr.n_p, len(lr))
self.assertEqual(True, lr == OGRGeometry(rr.wkt))
self.assertEqual(True, lr != prev)
prev = lr
def test07a_polygons(self):
"Testing Polygon objects."
# Testing `from_bbox` class method
bbox = (-180,-90,180,90)
p = OGRGeometry.from_bbox( bbox )
self.assertEqual(bbox, p.extent)
prev = OGRGeometry('POINT(0 0)')
for p in self.geometries.polygons:
poly = OGRGeometry(p.wkt)
self.assertEqual(3, poly.geom_type)
self.assertEqual('POLYGON', poly.geom_name)
self.assertEqual(p.n_p, poly.point_count)
self.assertEqual(p.n_i + 1, len(poly))
# Testing area & centroid.
self.assertAlmostEqual(p.area, poly.area, 9)
x, y = poly.centroid.tuple
self.assertAlmostEqual(p.centroid[0], x, 9)
self.assertAlmostEqual(p.centroid[1], y, 9)
# Testing equivalence
self.assertEqual(True, poly == OGRGeometry(p.wkt))
self.assertEqual(True, poly != prev)
if p.ext_ring_cs:
ring = poly[0]
self.assertEqual(p.ext_ring_cs, ring.tuple)
self.assertEqual(p.ext_ring_cs, poly[0].tuple)
self.assertEqual(len(p.ext_ring_cs), ring.point_count)
for r in poly:
self.assertEqual('LINEARRING', r.geom_name)
def test07b_closepolygons(self):
"Testing closing Polygon objects."
# Both rings in this geometry are not closed.
poly = OGRGeometry('POLYGON((0 0, 5 0, 5 5, 0 5), (1 1, 2 1, 2 2, 2 1))')
self.assertEqual(8, poly.point_count)
print "\nBEGIN - expecting IllegalArgumentException; safe to ignore.\n"
try:
c = poly.centroid
except OGRException:
# Should raise an OGR exception, rings are not closed
pass
else:
self.fail('Should have raised an OGRException!')
print "\nEND - expecting IllegalArgumentException; safe to ignore.\n"
# Closing the rings -- doesn't work on GDAL versions 1.4.1 and below:
# http://trac.osgeo.org/gdal/ticket/1673
if GDAL_VERSION <= (1, 4, 1): return
poly.close_rings()
self.assertEqual(10, poly.point_count) # Two closing points should've been added
self.assertEqual(OGRGeometry('POINT(2.5 2.5)'), poly.centroid)
def test08_multipolygons(self):
"Testing MultiPolygon objects."
prev = OGRGeometry('POINT(0 0)')
for mp in self.geometries.multipolygons:
mpoly = OGRGeometry(mp.wkt)
self.assertEqual(6, mpoly.geom_type)
self.assertEqual('MULTIPOLYGON', mpoly.geom_name)
if mp.valid:
self.assertEqual(mp.n_p, mpoly.point_count)
self.assertEqual(mp.num_geom, len(mpoly))
self.assertRaises(OGRIndexError, mpoly.__getitem__, len(mpoly))
for p in mpoly:
self.assertEqual('POLYGON', p.geom_name)
self.assertEqual(3, p.geom_type)
self.assertEqual(mpoly.wkt, OGRGeometry(mp.wkt).wkt)
def test09a_srs(self):
"Testing OGR Geometries with Spatial Reference objects."
for mp in self.geometries.multipolygons:
# Creating a geometry w/spatial reference
sr = SpatialReference('WGS84')
mpoly = OGRGeometry(mp.wkt, sr)
self.assertEqual(sr.wkt, mpoly.srs.wkt)
# Ensuring that SRS is propagated to clones.
klone = mpoly.clone()
self.assertEqual(sr.wkt, klone.srs.wkt)
# Ensuring all children geometries (polygons and their rings) all
# return the assigned spatial reference as well.
for poly in mpoly:
self.assertEqual(sr.wkt, poly.srs.wkt)
for ring in poly:
self.assertEqual(sr.wkt, ring.srs.wkt)
# Ensuring SRS propagate in topological ops.
a = OGRGeometry(self.geometries.topology_geoms[0].wkt_a, sr)
b = OGRGeometry(self.geometries.topology_geoms[0].wkt_b, sr)
diff = a.difference(b)
union = a.union(b)
self.assertEqual(sr.wkt, diff.srs.wkt)
self.assertEqual(sr.srid, union.srs.srid)
# Instantiating w/an integer SRID
mpoly = OGRGeometry(mp.wkt, 4326)
self.assertEqual(4326, mpoly.srid)
mpoly.srs = SpatialReference(4269)
self.assertEqual(4269, mpoly.srid)
self.assertEqual('NAD83', mpoly.srs.name)
            # Iterating through the multipolygon after the spatial reference
# has been re-assigned.
for poly in mpoly:
self.assertEqual(mpoly.srs.wkt, poly.srs.wkt)
poly.srs = 32140
for ring in poly:
# Changing each ring in the polygon
self.assertEqual(32140, ring.srs.srid)
self.assertEqual('NAD83 / Texas South Central', ring.srs.name)
ring.srs = str(SpatialReference(4326)) # back to WGS84
self.assertEqual(4326, ring.srs.srid)
# Using the `srid` property.
ring.srid = 4322
self.assertEqual('WGS 72', ring.srs.name)
self.assertEqual(4322, ring.srid)
def test09b_srs_transform(self):
"Testing transform()."
orig = OGRGeometry('POINT (-104.609 38.255)', 4326)
trans = OGRGeometry('POINT (992385.4472045 481455.4944650)', 2774)
# Using an srid, a SpatialReference object, and a CoordTransform object
# or transformations.
t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
t1.transform(trans.srid)
t2.transform(SpatialReference('EPSG:2774'))
ct = CoordTransform(SpatialReference('WGS84'), SpatialReference(2774))
t3.transform(ct)
# Testing use of the `clone` keyword.
k1 = orig.clone()
k2 = k1.transform(trans.srid, clone=True)
self.assertEqual(k1, orig)
self.assertNotEqual(k1, k2)
prec = 3
for p in (t1, t2, t3, k2):
self.assertAlmostEqual(trans.x, p.x, prec)
self.assertAlmostEqual(trans.y, p.y, prec)
def test09c_transform_dim(self):
"Testing coordinate dimension is the same on transformed geometries."
ls_orig = OGRGeometry('LINESTRING(-104.609 38.255)', 4326)
ls_trans = OGRGeometry('LINESTRING(992385.4472045 481455.4944650)', 2774)
prec = 3
ls_orig.transform(ls_trans.srs)
# Making sure the coordinate dimension is still 2D.
self.assertEqual(2, ls_orig.coord_dim)
self.assertAlmostEqual(ls_trans.x[0], ls_orig.x[0], prec)
self.assertAlmostEqual(ls_trans.y[0], ls_orig.y[0], prec)
def test10_difference(self):
"Testing difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
d1 = OGRGeometry(self.geometries.diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a - b) # __sub__ is difference operator
a -= b # testing __isub__
self.assertEqual(d1, a)
def test11_intersection(self):
"Testing intersects() and intersection()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
i1 = OGRGeometry(self.geometries.intersect_geoms[i].wkt)
self.assertEqual(True, a.intersects(b))
i2 = a.intersection(b)
self.assertEqual(i1, i2)
self.assertEqual(i1, a & b) # __and__ is intersection operator
a &= b # testing __iand__
self.assertEqual(i1, a)
def test12_symdifference(self):
"Testing sym_difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
d1 = OGRGeometry(self.geometries.sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertEqual(d1, a)
def test13_union(self):
"Testing union()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
u1 = OGRGeometry(self.geometries.union_geoms[i].wkt)
u2 = a.union(b)
self.assertEqual(u1, u2)
self.assertEqual(u1, a | b) # __or__ is union operator
a |= b # testing __ior__
self.assertEqual(u1, a)
def test14_add(self):
"Testing GeometryCollection.add()."
# Can't insert a Point into a MultiPolygon.
mp = OGRGeometry('MultiPolygon')
pnt = OGRGeometry('POINT(5 23)')
self.assertRaises(OGRException, mp.add, pnt)
        # GeometryCollection.add may take an OGRGeometry (if another collection
        # of the same type, all child geoms will be added individually) or WKT.
for mp in self.geometries.multipolygons:
mpoly = OGRGeometry(mp.wkt)
mp1 = OGRGeometry('MultiPolygon')
mp2 = OGRGeometry('MultiPolygon')
mp3 = OGRGeometry('MultiPolygon')
for poly in mpoly:
mp1.add(poly) # Adding a geometry at a time
mp2.add(poly.wkt) # Adding WKT
mp3.add(mpoly) # Adding a MultiPolygon's entire contents at once.
for tmp in (mp1, mp2, mp3): self.assertEqual(mpoly, tmp)
def test15_extent(self):
"Testing `extent` property."
# The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
mp = OGRGeometry('MULTIPOINT(5 23, 0 0, 10 50)')
self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
# Testing on the 'real world' Polygon.
poly = OGRGeometry(self.geometries.polygons[3].wkt)
ring = poly.shell
x, y = ring.x, ring.y
xmin, ymin = min(x), min(y)
xmax, ymax = max(x), max(y)
self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
def test16_25D(self):
"Testing 2.5D geometries."
pnt_25d = OGRGeometry('POINT(1 2 3)')
self.assertEqual('Point25D', pnt_25d.geom_type.name)
self.assertEqual(3.0, pnt_25d.z)
self.assertEqual(3, pnt_25d.coord_dim)
ls_25d = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)')
self.assertEqual('LineString25D', ls_25d.geom_type.name)
self.assertEqual([1.0, 2.0, 3.0], ls_25d.z)
self.assertEqual(3, ls_25d.coord_dim)
def test17_pickle(self):
"Testing pickle support."
import cPickle
g1 = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)', 'WGS84')
g2 = cPickle.loads(cPickle.dumps(g1))
self.assertEqual(g1, g2)
self.assertEqual(4326, g2.srs.srid)
self.assertEqual(g1.srs.wkt, g2.srs.wkt)
def test18_ogrgeometry_transform_workaround(self):
"Testing coordinate dimensions on geometries after transformation."
# A bug in GDAL versions prior to 1.7 changes the coordinate
# dimension of a geometry after it has been transformed.
# This test ensures that the bug workarounds employed within
# `OGRGeometry.transform` indeed work.
wkt_2d = "MULTILINESTRING ((0 0,1 1,2 2))"
wkt_3d = "MULTILINESTRING ((0 0 0,1 1 1,2 2 2))"
srid = 4326
# For both the 2D and 3D MultiLineString, ensure _both_ the dimension
# of the collection and the component LineString have the expected
# coordinate dimension after transform.
geom = OGRGeometry(wkt_2d, srid)
geom.transform(srid)
self.assertEqual(2, geom.coord_dim)
self.assertEqual(2, geom[0].coord_dim)
self.assertEqual(wkt_2d, geom.wkt)
geom = OGRGeometry(wkt_3d, srid)
geom.transform(srid)
self.assertEqual(3, geom.coord_dim)
self.assertEqual(3, geom[0].coord_dim)
self.assertEqual(wkt_3d, geom.wkt)
def test19_equivalence_regression(self):
"Testing equivalence methods with non-OGRGeometry instances."
self.assertNotEqual(None, OGRGeometry('POINT(0 0)'))
self.assertEqual(False, OGRGeometry('LINESTRING(0 0, 1 1)') == 3)
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(OGRGeomTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
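# Illustrative invocation sketch (not part of the original module):
#
#   from django.contrib.gis.gdal.tests import test_geom
#   test_geom.run(verbosity=2)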
|
leonardowolf/bookfree
|
refs/heads/master
|
flask/lib/python2.7/site-packages/wheel/test/test_install.py
|
455
|
# Test wheel.
# The file has the following contents:
# hello.pyd
# hello/hello.py
# hello/__init__.py
# test-1.0.data/data/hello.dat
# test-1.0.data/headers/hello.dat
# test-1.0.data/scripts/hello.sh
# test-1.0.dist-info/WHEEL
# test-1.0.dist-info/METADATA
# test-1.0.dist-info/RECORD
# The root is PLATLIB
# So, some in PLATLIB, and one in each of DATA, HEADERS and SCRIPTS.
import wheel.tool
import wheel.pep425tags
from wheel.install import WheelFile
from tempfile import mkdtemp
import shutil
import os
THISDIR = os.path.dirname(__file__)
TESTWHEEL = os.path.join(THISDIR, 'test-1.0-py2.py3-none-win32.whl')
def check(*path):
return os.path.exists(os.path.join(*path))
def test_install():
tempdir = mkdtemp()
def get_supported():
return list(wheel.pep425tags.get_supported()) + [('py3', 'none', 'win32')]
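    # The extra ('py3', 'none', 'win32') tag lets the win32 test wheel pass
    # the compatibility check even when this suite runs on another platform.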
whl = WheelFile(TESTWHEEL, context=get_supported)
assert whl.supports_current_python(get_supported)
try:
locs = {}
for key in ('purelib', 'platlib', 'scripts', 'headers', 'data'):
locs[key] = os.path.join(tempdir, key)
os.mkdir(locs[key])
whl.install(overrides=locs)
assert len(os.listdir(locs['purelib'])) == 0
assert check(locs['platlib'], 'hello.pyd')
assert check(locs['platlib'], 'hello', 'hello.py')
assert check(locs['platlib'], 'hello', '__init__.py')
assert check(locs['data'], 'hello.dat')
assert check(locs['headers'], 'hello.dat')
assert check(locs['scripts'], 'hello.sh')
assert check(locs['platlib'], 'test-1.0.dist-info', 'RECORD')
finally:
shutil.rmtree(tempdir)
def test_install_tool():
"""Slightly improve coverage of wheel.install"""
wheel.tool.install([TESTWHEEL], force=True, dry_run=True)
|
tjsavage/tmrwmedia
|
refs/heads/master
|
django/contrib/gis/admin/__init__.py
|
637
|
# Getting the normal admin routines, classes, and `site` instance.
from django.contrib.admin import autodiscover, site, AdminSite, ModelAdmin, StackedInline, TabularInline, HORIZONTAL, VERTICAL
# Geographic admin options classes and widgets.
from django.contrib.gis.admin.options import GeoModelAdmin
from django.contrib.gis.admin.widgets import OpenLayersWidget
try:
from django.contrib.gis.admin.options import OSMGeoAdmin
HAS_OSM = True
except ImportError:
HAS_OSM = False
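# Illustrative usage sketch (hypothetical `City` model; not part of this
# module):
#
#   from django.contrib.gis import admin
#   admin.site.register(City,
#                       admin.OSMGeoAdmin if admin.HAS_OSM
#                       else admin.GeoModelAdmin)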
|
1tush/reviewboard
|
refs/heads/master
|
reviewboard/webapi/tests/mixins_comment.py
|
1
|
from __future__ import unicode_literals
from reviewboard.webapi.tests.mixins import test_template
from reviewboard.webapi.tests.mixins_extra_data import (ExtraDataItemMixin,
ExtraDataListMixin)
class BaseCommentListMixin(object):
@test_template
def test_post_with_text_type_markdown(self):
"""Testing the POST <URL> API with text_type=markdown"""
self._test_post_with_text_type('markdown')
@test_template
def test_post_with_text_type_plain(self):
"""Testing the POST <URL> API with text_type=plain"""
self._test_post_with_text_type('plain')
def _test_post_with_text_type(self, text_type):
comment_text = '`This` is a **test**'
url, mimetype, data, objs = \
self.setup_basic_post_test(self.user, False, None, True)
data['text'] = comment_text
data['text_type'] = text_type
rsp = self.api_post(url, data, expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn(self.resource.item_result_key, rsp)
comment_rsp = rsp[self.resource.item_result_key]
self.assertEqual(comment_rsp['text'], comment_text)
self.assertEqual(comment_rsp['text_type'], text_type)
comment = self.resource.model.objects.get(pk=comment_rsp['id'])
self.compare_item(comment_rsp, comment)
class BaseCommentItemMixin(object):
def compare_item(self, item_rsp, comment):
self.assertEqual(item_rsp['id'], comment.pk)
self.assertEqual(item_rsp['text'], comment.text)
if comment.rich_text:
self.assertEqual(item_rsp['rich_text'], 'markdown')
else:
self.assertEqual(item_rsp['rich_text'], 'plain')
@test_template
def test_put_with_text_type_markdown_and_text(self):
"""Testing the PUT <URL> API
with text_type=markdown and text specified
"""
self._test_put_with_text_type_and_text('markdown')
@test_template
def test_put_with_text_type_plain_and_text(self):
"""Testing the PUT <URL> API with text_type=plain and text specified"""
self._test_put_with_text_type_and_text('plain')
@test_template
def test_put_with_text_type_markdown_and_not_text(self):
"""Testing the PUT <URL> API
with text_type=markdown and text not specified escapes text
"""
self._test_put_with_text_type_and_not_text(
'markdown',
'`Test` **diff** comment',
r'\`Test\` \*\*diff\*\* comment')
@test_template
def test_put_with_text_type_plain_and_not_text(self):
"""Testing the PUT <URL> API
with text_type=plain and text not specified
"""
self._test_put_with_text_type_and_not_text(
'plain',
r'\`Test\` \*\*diff\*\* comment',
'`Test` **diff** comment')
@test_template
def test_put_without_text_type_and_escaping_provided_fields(self):
"""Testing the PUT <URL> API
without changing text_type and with escaping provided fields
"""
url, mimetype, data, reply_comment, objs = \
self.setup_basic_put_test(self.user, False, None, True)
reply_comment.rich_text = True
reply_comment.save()
if 'text_type' in data:
del data['text_type']
data.update({
'text': '`This` is **text**',
})
rsp = self.api_put(url, data, expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
comment_rsp = rsp[self.resource.item_result_key]
self.assertEqual(comment_rsp['text_type'], 'markdown')
self.assertEqual(comment_rsp['text'], '\\`This\\` is \\*\\*text\\*\\*')
comment = self.resource.model.objects.get(pk=comment_rsp['id'])
self.compare_item(comment_rsp, comment)
def _test_put_with_text_type_and_text(self, text_type):
comment_text = '`Test` **diff** comment'
url, mimetype, data, reply_comment, objs = \
self.setup_basic_put_test(self.user, False, None, True)
data['text_type'] = text_type
data['text'] = comment_text
rsp = self.api_put(url, data, expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn(self.resource.item_result_key, rsp)
comment_rsp = rsp[self.resource.item_result_key]
self.assertEqual(comment_rsp['text'], comment_text)
self.assertEqual(comment_rsp['text_type'], text_type)
comment = self.resource.model.objects.get(pk=comment_rsp['id'])
self.compare_item(comment_rsp, comment)
def _test_put_with_text_type_and_not_text(self, text_type, text,
expected_text):
self.assertIn(text_type, ('markdown', 'plain'))
rich_text = (text_type == 'markdown')
url, mimetype, data, reply_comment, objs = \
self.setup_basic_put_test(self.user, False, None, True)
reply_comment.text = text
reply_comment.rich_text = not rich_text
reply_comment.save()
data['text_type'] = text_type
if 'text' in data:
del data['text']
rsp = self.api_put(url, data, expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn(self.resource.item_result_key, rsp)
comment_rsp = rsp[self.resource.item_result_key]
self.assertEqual(comment_rsp['text'], expected_text)
self.assertEqual(comment_rsp['text_type'], text_type)
comment = self.resource.model.objects.get(pk=comment_rsp['id'])
self.compare_item(comment_rsp, comment)
class CommentListMixin(ExtraDataListMixin, BaseCommentListMixin):
pass
class CommentItemMixin(ExtraDataItemMixin, BaseCommentItemMixin):
pass
class CommentReplyListMixin(BaseCommentListMixin):
pass
class CommentReplyItemMixin(BaseCommentItemMixin):
pass
|
IllusionRom-deprecated/android_platform_external_chromium_org_tools_gyp
|
refs/heads/master
|
test/subdirectory/gyptest-SYMROOT-default.py
|
399
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies building a target and a subsidiary dependent target from a
.gyp file in a subdirectory, without specifying an explicit output build
directory, and using the generated solution or project file at the top
of the tree as the entry point.
The configuration sets the Xcode SYMROOT variable and uses --depth=
to make Xcode behave like the other build tools--that is, put all
built targets in a single output build directory at the top of the tree.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('prog1.gyp', '-Dset_symroot=1', '--depth=.', chdir='src')
test.relocate('src', 'relocate/src')
# Suppress the test infrastructure's setting SYMROOT on the command line.
test.build('prog1.gyp', SYMROOT=None, chdir='relocate/src')
test.run_built_executable('prog1',
stdout="Hello from prog1.c\n",
chdir='relocate/src')
test.run_built_executable('prog2',
stdout="Hello from prog2.c\n",
chdir='relocate/src')
test.pass_test()
|
parente/clique
|
refs/heads/master
|
Mixin/Stability.py
|
1
|
'''
Defines a class that is notified whenever an event occurs in the process with
which it is associated.
@author: Peter Parente <parente@cs.unc.edu>
@copyright: Copyright (c) 2008 Peter Parente
@license: BSD License
All rights reserved. This program and the accompanying materials are made
available under the terms of The BSD License which accompanies this
distribution, and is available at
U{http://www.opensource.org/licenses/bsd-license.php}
'''
import Mixer, pyAA, System
from View.Task import Container
import win32com.client, weakref, pythoncom, time, ctypes
SPIN_DELAY = 0.01 #0.01
SAFE_WAIT = 0.1 #0.5
# create an instance of WSH Shell
ws_shell = win32com.client.Dispatch("WScript.Shell")
# get a reference to the user32 DLL
user32 = ctypes.windll.user32
class StabilityMixin(Mixer.ClassMixer):
'''
    Defines prefix and suffix methods that check in with a stability manager and
perturb it. Also defines an initialization method that establishes a reference
to the proper stability manager instance. This class is used to add stability
checks to pyAA.AccessibleObject.
@ivar stable: Stability manager for the process that created this object
@type stable: weakref.proxy for L{UIA.StabilityWatcher}
'''
def LeftClick(self):
'''
        Performs a real left click on this AccessibleObject. This method only
exists to support broken applications that do not respond properly to the
MSAA DoDefaultAction and Select methods and for some reason require a real
mouse click to activate.
'''
l = self.Location
# move to position
user32.SetCursorPos(l[0]+1, l[1]+l[3]-1)
        # left down (0x8000 = MOUSEEVENTF_ABSOLUTE, 0x02 = MOUSEEVENTF_LEFTDOWN)
        user32.mouse_event(0x8000|0x02, 0, 0, 0, None)
        # left up (0x04 = MOUSEEVENTF_LEFTUP)
        user32.mouse_event(0x8000|0x04, 0, 0, 0, None)
def SendKeys(self, keys):
'''
Inject a key press into the input queue of the focused window.
@param keys: Keys to press
@type keys: string
'''
ws_shell.SendKeys(keys)
def CheckStability(self):
'''
Checks the last time an event occurred in a process and compares it against
a threshold. Blocks until the threshold has been passed.
'''
stab = self.GetStabilityWatcher()
if stab is None: return
while 1:
# sleep and pump messages
System.Sleep(SPIN_DELAY)
# check if the interface is stable
if (time.time() - stab.LastTime) > SAFE_WAIT:
break
stab.LastStable = time.time()
def Disturb(self, name):
'''Disturbs the last event time in the stability manager.'''
stab = self.GetStabilityWatcher()
if stab is None: return
stab.Disturb()
def GetStabilityWatcher(self):
'''
Locates the stability manager for this process by getting a reference to the
    singleton Container.ProgramManager object and asking it for the program associated
with the process ID of this object. The program is then queried for its
stability watcher.
@todo: stability should be a weakref; strong until process problem fixed
'''
try:
return self.stable
except AttributeError:
pass
# get the stability watcher for this object
try:
# get the process ID
pid, tid = self.ProcessID
except pyAA.Error:
return None
pm = Container.ProgramManager()
self.stable = pm.GetStabilityWatcher(pid)
return self.stable
def CheckWrapper(self, name, prefix):
'''
Builds a method wrapper that checks with the stability manager before
executing the original method.
@param name: Original method name
@type name: string
@param prefix: Prefix used to rename the original method
@type prefix: string
@return: Method wrapping the original method
@rtype: callable
'''
def Prototype(self, *args, **kwargs):
self.CheckStability()
return getattr(self, prefix+name)(*args, **kwargs)
return Prototype
def DisturbWrapper(self, name, prefix):
'''
Builds a method wrapper that disturbs the stability manager after executing
the original method.
@param name: Original method name
@type name: string
@param prefix: Prefix used to rename the original method
@type prefix: string
@return: Method wrapping the original method
@rtype: callable
'''
def Prototype(self, *args, **kwargs):
r = getattr(self, prefix+name)(*args, **kwargs)
self.Disturb(name)
return r
return Prototype
def InitializeWrapper(self, name, prefix):
'''
    Builds a method wrapper that establishes a reference to the stability
manager after executing the class constructor.
@param name: Original method name
@type name: string
@param prefix: Prefix used to rename the original method
@type prefix: string
@return: Method wrapping the original method
@rtype: callable
'''
def Prototype(self, *args, **kwargs):
getattr(self, prefix+name)(*args, **kwargs)
self.GetStabilityWatcher()
return Prototype
|
fast-project/fast-lib
|
refs/heads/master
|
vendor/mosquitto-1.4.12/test/broker/01-connect-invalid-id-0.py
|
10
|
#!/usr/bin/env python
# Test whether a CONNECT with a zero length client id results in the correct CONNACK packet.
import inspect, os, sys
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import mosq_test
rc = 1
keepalive = 10
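# MQTT v3.1 requires a client id of 1 to 23 characters, so the zero-length id
# below must be refused with CONNACK return code 2 ("identifier rejected").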
connect_packet = mosq_test.gen_connect("", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=2)
cmd = ['../../src/mosquitto', '-p', '1888']
broker = mosq_test.start_broker(filename=os.path.basename(__file__), cmd=cmd)
try:
sock = mosq_test.do_client_connect(connect_packet, connack_packet)
sock.close()
rc = 0
finally:
broker.terminate()
broker.wait()
if rc:
(stdo, stde) = broker.communicate()
print(stde)
exit(rc)
|
jrowan/zulip
|
refs/heads/master
|
zerver/webhooks/ifttt/tests.py
|
43
|
# -*- coding: utf-8 -*-
from zerver.lib.test_classes import WebhookTestCase
class IFTTTHookTests(WebhookTestCase):
STREAM_NAME = 'ifttt'
URL_TEMPLATE = "/api/v1/external/ifttt?stream={stream}&api_key={api_key}"
FIXTURE_DIR_NAME = 'ifttt'
def test_ifttt_when_subject_and_body_are_correct(self):
# type: () -> None
expected_subject = u"Email sent from email@email.com"
expected_message = u"Email subject: Subject"
self.send_and_test_stream_message('correct_subject_and_body', expected_subject, expected_message)
|
javierTerry/odoo
|
refs/heads/8.0
|
openerp/addons/test_new_api/__openerp__.py
|
204
|
# -*- coding: utf-8 -*-
{
'name': 'Test New API',
'version': '1.0',
'category': 'Tests',
'description': """A module to test the new API.""",
'author': 'OpenERP SA',
'maintainer': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': ['base'],
'installable': True,
'auto_install': False,
'data': [
'ir.model.access.csv',
'views.xml',
'demo_data.xml',
],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Kalyzee/edx-platform
|
refs/heads/master
|
lms/djangoapps/course_wiki/plugins/markdownedx/__init__.py
|
275
|
# Make sure wiki_plugin.py gets run.
from course_wiki.plugins.markdownedx.wiki_plugin import ExtendMarkdownPlugin
|
iansf/pstar
|
refs/heads/master
|
pstar/pstar.py
|
1
|
# -*- coding: utf-8 -*-
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""`pstar` class implementations.
Import like this:
```python
from pstar import *
```
or like this:
```python
from pstar import defaultpdict, frozenpset, pdict, plist, pset, ptuple, pstar
```
"""
import collections
from collections import defaultdict
import inspect
from multiprocessing.dummy import Pool
import operator
import sys
import types
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
try:
import numpy as np
except ImportError:
np = None
try:
import pandas as pd
except ImportError:
pd = None
from qj import qj
# pylint: disable=line-too-long,invalid-name,g-explicit-length-test,broad-except,g-long-lambda
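# Py2/Py3-compatible metaclass application (the same trick as
# six.with_metaclass): a temporary metaclass creates a throwaway class whose
# construction immediately re-invokes the real metaclass with the real bases.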
def _compatible_metaclass(meta, *bases):
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('_temporary_class', None, {})
class _SyntaxSugar(type):
def __getitem__(cls, key):
return cls(key)
def __add__(cls, other):
return _SyntaxSugar.__mul__(cls, other, depth=1)
def __radd__(cls, other):
return _SyntaxSugar.__rmul__(cls, other, depth=1)
def __sub__(cls, other):
return other - pstar + cls
def __rsub__(cls, other):
return _SyntaxSugar.__rtruediv__(cls, other, depth=1)
def __mul__(cls, other, depth=-1):
keys = plist(pstar.cls_map().keys())
cls_map = (
keys.zip(keys).uproot().pdict() # Map all classes to themselves
).update({cls.__mro__[1]: cls}) # Map this pstar class's superclass to this pstar class
assert len(keys) == len(cls_map) # We better not have dropped any classes
return pstar(other, cls_map, depth)
def __rmul__(cls, other, depth=-1):
return _SyntaxSugar.__mul__(cls, other, depth)
def __truediv__(cls, other):
return other / pstar * cls
def __rtruediv__(cls, other, depth=-1):
keys = plist(pstar.cls_map().keys())
cls_map = (
keys.zip(keys).uproot().pdict() # Map all classes to themselves
).update({cls: cls.__mro__[1]}) # Map this pstar class to its superclass
assert len(keys) == len(cls_map) # We better not have dropped any classes
return pstar(other, cls_map, depth)
if sys.version_info[0] < 3:
__div__, __rdiv__ = __truediv__, __rtruediv__
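# Illustrative sketch of the conversion operators defined above (hypothetical
# values; the pdict/plist docstrings carry the tested examples):
#
#   pd = pdict * {'foo': 1}   # dict -> pdict
#   d = pd / pdict            # pdict -> dict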
KeyValue = collections.namedtuple('KeyValue', 'key value')
################################################################################
################################################################################
################################################################################
# pdict class
################################################################################
################################################################################
################################################################################
class pdict(_compatible_metaclass(_SyntaxSugar, dict)):
"""`dict` subclass where everything is automatically a property.
Examples:
Use with dot notation or subscript notation:
```python
pd = pdict()
pd.foo = 1
assert (pd['foo'] == pd.foo == 1)
```
`list` subscripts also work and return a `plist` of the corresponding keys:
```python
pd = pdict(foo=1, bar=2)
assert (pd[['foo', 'bar']].aslist() == [1, 2])
```
Setting with a `list` subscript also works, using a single element or a matching
`list` for the values:
```python
pd = pdict()
pd[['foo', 'bar']] = 1
assert (pd[['foo', 'bar']].aslist() == [1, 1])
pd[['foo', 'bar']] = [1, 2]
assert (pd[['foo', 'bar']].aslist() == [1, 2])
```
`update` returns `self`, rather than `None`, to support chaining:
```python
pd = pdict(foo=1, bar=2)
pd.update(bar=3).baz = 4
assert (pd.bar == 3)
assert ('baz' in pd.keys())
assert (pd.baz == 4)
```
**Conversion:**
You can convert from `pdict` to `dict` and back using arithmetic operations on
the `pdict` `class` itself, for convenience:
```python
d1 = {'foo': 1, 'bar': 2}
pd = pdict * d1
assert (type(d1) == dict)
assert (type(pd) == pdict)
assert (pd == d1)
d2 = pd / pdict
assert (type(d2) == dict)
assert (d2 == d1)
```
See `pstar.pstar` for more details on conversion.
"""
def __init__(self, *a, **kw):
"""Initialize `pdict`.
Examples:
```python
pd1 = pdict(foo=1, bar=2.0, baz='three')
pd2 = pdict({'foo': 1, 'bar': 2.0, 'baz': 'three'})
assert (pd1 == pd2)
```
Args:
*a: Positional arguments passed through to `dict()`.
**kw: Keyword arguments passed through to `dict()`.
Returns:
`None`. `pdict` is initialized.
"""
dict.__init__(self, *a, **kw)
self.__dict__ = self
def __getitem__(self, key):
"""Subscript operation. Keys can be any normal `dict` keys or `list`s of such keys.
Examples:
```python
pd = pdict(foo=1, bar=2.0, baz='three')
assert (pd['foo'] == pd.foo == 1)
assert (pd[['foo', 'bar', 'baz']].aslist() == [1, 2.0, 'three'])
```
When indexing with a `list`, the returned `plist` is rooted at a `plist` of
`KeyValue` `namedtuple`s, making it easy to recover the keys that gave the values, and
allows the `plist` to be turned back into a corresponding `pdict`:
```python
assert (pd[['foo', 'baz']].root().aslist() ==
[('foo', 1), ('baz', 'three')])
assert (pd[['foo', 'baz']].pdict() ==
dict(foo=1, baz='three'))
```
Args:
key: Any `hash`able object, or a `list` of `hash`able objects.
Returns:
Either the value held at `key`, or a `plist` of values held at each key in the `list`
of keys, when called with a `list` of keys.
"""
if isinstance(key, list):
return plist([self[k] for k in key], root=plist([KeyValue(k, self[k]) for k in key]))
else:
return dict.__getitem__(self, key)
def __setitem__(self, key, value):
"""Subscript assignment operation. Keys and values can be scalars or `list`s.
Examples:
`pdict` assignment works normally for any `hash`able `key`:
```python
pd = pdict()
pd['foo'] = 1
assert (pd.foo == pd['foo'] == 1)
```
`pdict` assignment can also work with a `list` of `hash`able `key`s:
```python
pd[['bar', 'baz']] = plist[2.0, 'three']
assert (pd.bar == pd['bar'] == 2.0)
assert (pd.baz == pd['baz'] == 'three')
```
Args:
key: Any `hash`able object, or a `list` of `hash`able objects.
value: Any value, or a `plist` of values that matches the shape of `key`, if it
is a `list`.
Returns:
`self`, to allow chaining with direct calls to `pdict.__setitem__`.
"""
if isinstance(key, list):
value = _ensure_len(len(key), value)
for k, v in zip(key, value):
dict.__setitem__(self, k, v)
else:
dict.__setitem__(self, key, value)
return self
def __str__(self):
"""Readable string representation of `self`.
Examples:
```python
pd = pdict(foo=1, bar=2.0, baz='three')
assert (str(pd) ==
"{'bar': 2.0, 'baz': 'three', 'foo': 1}")
```
Returns:
If the keys in `self` are sortable, returns a string with key/value pairs
sorted by key. Otherwise, returns a normal `dict.__str__`
representation.
"""
try:
delim = ', ' if len(self) < 8 else ',\n '
s = delim.join('%s: %s' % (repr(k), repr(self[k])) for k in self.peys())
return '{' + s + '}'
except Exception:
return dict.__repr__(self)
__repr__ = __str__
def update(self, *a, **kw):
"""Update `self`. **Returns `self` to allow chaining.**
Examples:
```python
pd = pdict()
assert (pd.update(foo=1, bar=2.0).foo == 1)
assert (pd.bar == 2.0)
assert (pd.update({'baz': 'three'}).baz == 'three')
```
Args:
*a: Positional args passed to `dict.update`.
      **kw: Keyword args passed to `dict.update`.
Returns:
`self` to allow chaining.
"""
dict.update(self, *a, **kw)
return self
def copy(self):
"""Copy `self` to new `defaultpdict`. Performs a shallow copy.
Examples:
```python
pd1 = pdict(foo=1, bar=2.0, baz='three')
pd2 = pd1.copy()
assert (pd2 == pd1)
assert (pd2 is not pd1)
```
Returns:
A `pdict` that is a shallow copy of `self`.
"""
return pdict(dict.copy(self))
def peys(self):
"""Get `self.keys()` as a sorted `plist`.
In the common case of a `pdict` with sortable keys, it is often convenient
to rely on the sort-order of the keys for a variety of operations that would
otherwise require explicit looping.
Examples:
```python
pd = pdict(foo=1, bar=2.0, baz='three')
assert (pd.peys().aslist() == ['bar', 'baz', 'foo'])
pd_str = pdict()
pd_str[pd.peys()] = pd.palues().pstr() # Converts the values to strings.
assert (pd_str ==
dict(foo='1', bar='2.0', baz='three'))
```
Returns:
`plist` of keys in sorted order.
"""
return plist(sorted(self.keys()))
def palues(self):
"""Equivalent to `self.values()`, but returns a `plist` with values sorted as in `self.peys()`.
Examples:
```python
pd = pdict(foo=1, bar=2.0, baz='three')
assert (pd.palues().aslist() ==
[2.0, 'three', 1])
```
The `plist` returned is rooted at a corresponding `plist` of `KeyValue` `namedtuple`s,
allowing easy recovery of an equivalent `pdict`, possibly after modifications to the
values:
```python
pd_str = (pd.palues().pstr() + ' foo').pdict()
assert (pd_str ==
dict(foo='1 foo', bar='2.0 foo', baz='three foo'))
```
Returns:
`plist` of values from `self`, in the same order given by `self.peys()`.
The `root()` of the `plist` is `KeyValue` `namedtuple`s from `self`.
"""
return self[self.peys()]
def pitems(self):
"""Equivalent to `self.items()`, but returns a `plist` with items sorted as in `self.peys()`.
Examples:
```python
pd = pdict(foo=1, bar=2.0, baz='three')
assert (pd.pitems().aslist() ==
[('bar', 2.0), ('baz', 'three'), ('foo', 1)])
assert (pd.pitems().key.aslist() ==
pd.peys().aslist())
assert (pd.pitems().value.aslist() ==
pd.palues().aslist())
```
In the example above, note that the items are `KeyValue` `namedtuple`s,
so the first element can be accessed with `.key` and the second with `.value`.
Returns:
`plist` of items from `self`, in the same order given by `self.peys()`.
"""
return self.palues().root()
def qj(self, *a, **kw):
"""Call the `qj` logging function with `self` as the value to be logged. All other arguments are passed through to `qj`.
`qj` is a debug logging function. Calling `pdict.qj()` is often the fastest way
to begin debugging an issue.
See [qj](https://github.com/iansf/qj) for detailed information on using `qj`.
Examples:
```python
pd = pdict(foo=1, bar=2.0, baz='three')
pd.qj('pd').update(baz=3).qj('pd now')
assert (pd.baz == 3)
# Logs:
# qj: <calling_module> calling_function: pd <2910>: {'bar': 2.0, 'baz': 'three', 'foo': 1}
# qj: <calling_module> calling_function: pd now <2910>: {'bar': 2.0, 'baz': 3, 'foo': 1}
```
Returns:
`self`, as processed by the arguments supplied to `qj`.
"""
depth = kw.pop('_depth', 0) + 2
return qj(self, _depth=depth, *a, **kw)
def rekey(self, map_or_fn=None, inplace=False, **kw):
"""Change the keys of `self` or a copy while keeping the same values.
Convenience method for renaming keys in a `pdict`. Passing a `dict` mapping
old keys to new keys allows easy selective renaming, as any key not in the
`dict` will be unchanged. Passing a `callable` requires you to return a unique
value for every key in `self`.
Examples:
```python
pd = pdict(foo=1, bar=2.0, baz='three')
assert (pd.rekey(foo='floo') ==
dict(floo=1, bar=2.0, baz='three'))
assert (pd.foo == 1) # pd is unmodified by default.
pd.rekey(dict(bar='car'), True)
assert ('bar' not in pd)
assert (pd.car == 2.0)
pd.rekey(lambda k: 'far' if k == 'car' else k, True)
assert ('car' not in pd)
assert (pd.far == 2.0)
```
Args:
map_or_fn: `dict` mapping current keys to new keys, or `callable` taking a single
argument (the key) and returning a new key, or `None`, in which case
`**kw` should map keys to new keys.
inplace: Boolean (default: `False`). If `True`, updates the keys of `self`. If
`False`, returns a new `pdict`.
**kw: Additional keys to rekey. Convenience for existing keys that are valid
identifiers.
Returns:
`self` if `inplace` evaluates to `True`, otherwise a new `pdict`. The keys will
be changed, but the values will remain the same.
Raises:
ValueError: If `map_or_fn` isn't a `dict` or a `callable` or `None`.
ValueError: If `map_or_fn` fails to generate a unique key for every key in `self`.
"""
if not inplace:
return self.copy().rekey(map_or_fn, inplace=True, **kw)
if map_or_fn is None:
map_or_fn = kw
if isinstance(map_or_fn, dict):
if map_or_fn is not kw:
map_or_fn.update(kw)
func = lambda k: map_or_fn.get(k, k)
else:
func = map_or_fn
if not callable(func):
raise ValueError('`map_or_fn` must be a dict, a callable, or None. Received %s: %s'
% (str(type(map_or_fn)), str(map_or_fn)))
keys = self.peys()
new_keys = keys.apply(func).puniq()
if len(keys) != len(new_keys):
raise ValueError('rekey map must return the same number of unique keys as the original pdict. '
'Only found %d of %d expected keys.' % (len(new_keys), len(keys)))
vals = self.palues().uproot()
self.clear()
self[new_keys] = vals
return self
################################################################################
################################################################################
################################################################################
# defaultpdict class
################################################################################
################################################################################
################################################################################
class defaultpdict(_compatible_metaclass(_SyntaxSugar, defaultdict)):
"""`defaultdict` subclass where everything is automatically a property.
Examples:
Use with dot notation or subscript notation:
```python
pd = defaultpdict()
pd.foo = 1
assert (pd['foo'] == pd.foo == 1)
```
Set the desired default constructor as normal to avoid having to construct
individual values:
```python
pd = defaultpdict(int)
assert (pd.foo == 0)
```
`list` subscripts also work and return a `plist` of the corresponding keys:
```python
pd = defaultpdict(foo=1, bar=2)
assert (pd[['foo', 'bar']].aslist() == [1, 2])
```
Setting with a `list` subscript also works, using a single element or a matching
`list` for the values:
```python
pd = defaultpdict()
pd[['foo', 'bar']] = 1
assert (pd[['foo', 'bar']].aslist() == [1, 1])
pd[['foo', 'bar']] = [1, 2]
assert (pd[['foo', 'bar']].aslist() == [1, 2])
```
`update` returns `self`, rather than `None`, to support chaining:
```python
pd = defaultpdict(foo=1, bar=2)
pd.update(bar=3).baz = 4
assert (pd.bar == 3)
assert ('baz' in pd.keys())
```
Nested `defaultpdict`s make nice lightweight objects:
```python
pd = defaultpdict(lambda: defaultpdict(list))
pd.foo = 1
pd.stats.bar.append(2)
assert (pd['foo'] == 1)
assert (pd.stats.bar == [2])
```
**Conversion:**
You can convert from `defaultpdict` to `defaultdict` and back using arithmetic operations on
the `defaultpdict` `class` itself, for convenience:
```python
d1 = defaultdict(int, {'foo': 1, 'bar': 2})
pd = defaultpdict * d1
assert (type(d1) == defaultdict)
assert (type(pd) == defaultpdict)
assert (pd == d1)
d2 = pd / defaultpdict
assert (type(d2) == defaultdict)
assert (d2 == d1)
```
See `pstar.pstar` for more details on conversion.
"""
def __init__(self, *a, **kw):
"""Initialize `defaultpdict`.
Examples:
```python
pd = defaultpdict(int)
assert (pd.foo == 0)
pd.bar += 10
assert (pd.bar == 10)
pd = defaultpdict(lambda: defaultpdict(list))
pd.foo.bar = 20
assert (pd == dict(foo=dict(bar=20)))
pd.stats.bar.append(2)
assert (pd.stats.bar == [2])
```
Args:
*a: Positional arguments passed through to `defaultdict()`.
**kw: Keyword arguments passed through to `defaultdict()`.
Returns:
`None`. `defaultpdict` is initialized.
"""
defaultdict.__init__(self, *a, **kw)
def __getattr__(self, name):
"""Override `getattr`. If `name` starts with '_', attempts to find that attribute on `self`. Otherwise, looks for a field of that name in `self`.
Examples:
```python
pd = defaultpdict(int).update(foo=1, bar=2.0, baz='three')
assert (pd.foo == 1)
assert (pd.__module__.startswith('pstar'))
```
Args:
name: A field name or property name on `self`.
Returns:
Value at `self.<name>` or `self[name]`.
"""
if name.startswith('_'):
return defaultdict.__getattribute__(self, name)
return self[name]
def __setattr__(self, name, value):
"""Attribute assignment operation. Forwards to subscript assignment.
Permits `pdict`-style field assignment.
Examples:
```python
pd = defaultpdict(int).update(foo=1, bar=2.0, baz='three')
pd.floo = 4.0
assert (pd.floo == pd['floo'] == 4.0)
```
Args:
name: Any `hash`able value or list of `hash`able values, as in `__setitem__`,
but generally just a valid identifier string provided by the compiler.
value: Any value, or `plist` of values of the same length as the corresponding list in
`name`.
Returns:
`self` to allow chaining through direct calls to `defaultpdict.__setattr__(...)`.
"""
self[name] = value
return self
def __getitem__(self, key):
"""Subscript operation. Keys can be any normal `dict` keys or `list`s of such keys.
Examples:
```python
pd = defaultpdict(int).update(foo=1, bar=2.0, baz='three')
assert (pd['foo'] == pd.foo == 1)
assert (pd[['foo', 'bar', 'baz']].aslist() == [1, 2.0, 'three'])
```
When indexing with a `list`, the returned `plist` is rooted at a `plist` of
`KeyValue` `namedtuple`s, making it easy to recover the keys that gave the values, and
allowing the `plist` to be turned back into a corresponding `pdict`:
```python
assert (pd[['foo', 'baz']].root().aslist() ==
[('foo', 1), ('baz', 'three')])
assert (pd[['foo', 'baz']].pdict() ==
dict(foo=1, baz='three'))
```
Args:
key: Any `hash`able object, or a `list` of `hash`able objects.
Returns:
Either the value held at `key`, or a `plist` of values held at each key in the `list`
of keys, when called with a `list` of keys.
"""
if isinstance(key, list):
return plist([self[k] for k in key], root=plist([KeyValue(k, self[k]) for k in key]))
else:
return defaultdict.__getitem__(self, key)
def __setitem__(self, key, value):
"""Subscript assignment operation. Keys and values can be scalars or `list`s.
Examples:
`defaultpdict` assignment works normally for any `hash`able `key`:
```python
pd = defaultpdict(int)
pd['foo'] = 1
assert (pd.foo == pd['foo'] == 1)
```
`defaultpdict` assignment can also work with a `list` of `hash`able `key`s:
```python
pd[['bar', 'baz']] = plist[2.0, 'three']
assert (pd.bar == pd['bar'] == 2.0)
assert (pd.baz == pd['baz'] == 'three')
```
Args:
key: Any `hash`able object, or a `list` of `hash`able objects.
value: Any value, or a `plist` of values that matches the shape of `key`, if it
is a `list`.
Returns:
`self`, to allow chaining with direct calls to `defaultpdict.__setitem__(...)`.
"""
if isinstance(key, list):
value = _ensure_len(len(key), value)
for k, v in zip(key, value):
defaultdict.__setitem__(self, k, v)
else:
defaultdict.__setitem__(self, key, value)
return self
def __str__(self):
"""Readable string representation of `self`.
Examples:
```python
pd = defaultpdict(int).update(foo=1, bar=2.0, baz='three')
assert (str(pd) ==
"{'bar': 2.0, 'baz': 'three', 'foo': 1}")
```
Returns:
If the keys in `self` are sortable, returns a string with key/value pairs
sorted by key. Otherwise, returns a normal `defaultdict.__str__`
representation.
"""
try:
delim = ', ' if len(self) < 8 else ',\n '
s = delim.join('%s: %s' % (repr(k), repr(self[k])) for k in self.peys())
return '{' + s + '}'
except Exception:
return defaultdict.__repr__(self)
__repr__ = __str__
def update(self, *a, **kw):
"""Update `self`. **Returns `self` to allow chaining.**
Examples:
```python
pd = defaultpdict(int)
assert (pd.update(foo=1, bar=2.0).foo == 1)
assert (pd.bar == 2.0)
assert (pd.update({'baz': 'three'}).baz == 'three')
```
Args:
*a: Positional args passed to `defaultdict.update`.
**kw: Keyword args passed to `defaultdict.update`.
Returns:
`self` to allow chaining.
"""
defaultdict.update(self, *a, **kw)
return self
def copy(self):
"""Copy `self` to new `defaultpdict`. Performs a shallow copy.
Examples:
```python
pd1 = defaultpdict(int).update(foo=1, bar=2.0, baz='three')
pd2 = pd1.copy()
assert (pd2 == pd1)
assert (pd2 is not pd1)
```
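The copy is shallow, so mutable values are shared between the two `defaultpdict`s
(a quick sketch of that behavior):
```python
pd1 = defaultpdict(list)
pd1.foo.append(1)
pd2 = pd1.copy()
pd2.foo.append(2)
assert (pd1.foo == pd2.foo == [1, 2])  # The nested list is shared, not copied.
```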
Returns:
A `defaultpdict` that is a shallow copy of `self`.
"""
return defaultdict.copy(self)
def peys(self):
"""Get `self.keys()` as a sorted `plist`.
In the common case of a `defaultpdict` with sortable keys, it is often convenient
to rely on the sort-order of the keys for a variety of operations that would
otherwise require explicit looping.
Examples:
```python
pd = defaultpdict(int).update(foo=1, bar=2.0, baz='three')
assert (pd.peys().aslist() == ['bar', 'baz', 'foo'])
pd_str = pdict()
pd_str[pd.peys()] = pd.palues().pstr() # Converts the values to strings.
assert (pd_str ==
dict(foo='1', bar='2.0', baz='three'))
```
Returns:
`plist` of keys in sorted order.
"""
return plist(sorted(self.keys()))
def palues(self):
"""Equivalent to `self.values()`, but returns a `plist` with values sorted as in `self.peys()`.
Examples:
```python
pd = defaultpdict(int).update(foo=1, bar=2.0, baz='three')
assert (pd.palues().aslist() ==
[2.0, 'three', 1])
```
The `plist` returned is rooted at a corresponding `plist` of `KeyValue` `namedtuple`s,
allowing easy recovery of an equivalent `pdict`, possibly after modifications to the
values:
```python
pd_str = (pd.palues().pstr() + ' foo').pdict()
assert (pd_str ==
dict(foo='1 foo', bar='2.0 foo', baz='three foo'))
```
Returns:
`plist` of values from `self`, in the same order given by `self.peys()`.
The `root()` of the `plist` is `KeyValue` `namedtuple`s from `self`.
"""
return self[self.peys()]
def pitems(self):
"""Equivalent to `self.items()`, but returns a `plist` with items sorted as in `self.peys()`.
Examples:
```python
pd = defaultpdict(int).update(foo=1, bar=2.0, baz='three')
assert (pd.pitems().aslist() ==
[('bar', 2.0), ('baz', 'three'), ('foo', 1)])
assert (pd.pitems().key.aslist() ==
pd.peys().aslist())
assert (pd.pitems().value.aslist() ==
pd.palues().aslist())
```
In the example above, note that the items are `KeyValue` `namedtuple`s,
so the first element can be accessed with `.key` and the second with `.value`.
Returns:
`plist` of items from `self`, in the same order given by `self.peys()`.
"""
return self.palues().root()
def qj(self, *a, **kw):
"""Call the `qj` logging function with `self` as the value to be logged. All other arguments are passed through to `qj`.
`qj` is a debug logging function. Calling `defaultpdict.qj()` is often the fastest way
to begin debugging an issue.
See [qj](https://github.com/iansf/qj) for detailed information on using `qj`.
Examples:
```python
pd = defaultpdict(int).update(foo=1, bar=2.0, baz='three')
pd.qj('pd').update(baz=3).qj('pd now')
assert (pd.baz == 3)
# Logs:
# qj: <calling_module> calling_function: pd <2910>: {'bar': 2.0, 'baz': 'three', 'foo': 1}
# qj: <calling_module> calling_function: pd now <2910>: {'bar': 2.0, 'baz': 3, 'foo': 1}
```
Returns:
`self`, as processed by the arguments supplied to `qj`.
"""
depth = kw.pop('_depth', 0) + 2
return qj(self, _depth=depth, *a, **kw)
def rekey(self, map_or_fn=None, inplace=False, **kw):
"""Change the keys of `self` or a copy while keeping the same values.
Convenience method for renaming keys in a `defaultpdict`. Passing a `dict` mapping
old keys to new keys allows easy selective renaming, as any key not in the
`dict` will be unchanged. Passing a `callable` requires you to return a unique
value for every key in `self`.
Examples:
```python
pd = defaultpdict(int).update(foo=1, bar=2.0, baz='three')
assert (pd.rekey(foo='floo') ==
dict(floo=1, bar=2.0, baz='three'))
assert (pd.foo == 1) # pd is unmodified by default.
pd.rekey(dict(bar='car'), True)
assert ('bar' not in pd)
assert (pd.car == 2.0)
pd.rekey(lambda k: 'far' if k == 'car' else k, True)
assert ('car' not in pd)
assert (pd.far == 2.0)
```
Args:
map_or_fn: `dict` mapping current keys to new keys, or `callable` taking a single
argument (the key) and returning a new key, or `None`, in which case
`**kw` should map keys to new keys.
inplace: Boolean (default: `False`). If `True`, updates the keys of `self`. If
`False`, returns a new `defaultpdict`.
**kw: Additional keys to rekey. Convenience for existing keys that are valid
identifiers.
Returns:
`self` if `inplace` evaluates to `True`, otherwise a new `defaultpdict`. The keys will
be changed, but the values will remain the same.
Raises:
ValueError: If `map_or_fn` isn't a `dict` or a `callable` or `None`.
ValueError: If `map_or_fn` fails to generate a unique key for every key in `self`.
"""
if not inplace:
return self.copy().rekey(map_or_fn, inplace=True, **kw)
if map_or_fn is None:
map_or_fn = kw
if isinstance(map_or_fn, dict):
if map_or_fn is not kw:
map_or_fn.update(kw)
func = lambda k: map_or_fn.get(k, k)
else:
func = map_or_fn
if not callable(func):
raise ValueError('`map_or_fn` must be a dict, a callable, or None. Received %s: %s'
% (str(type(map_or_fn)), str(map_or_fn)))
keys = self.peys()
new_keys = keys.apply(func).puniq()
if len(keys) != len(new_keys):
raise ValueError('rekey map must return the same number of unique keys as the original defaultpdict. '
'Only found %d of %d expected keys.' % (len(new_keys), len(keys)))
vals = self.palues().uproot()
self.clear()
self[new_keys] = vals
return self
################################################################################
################################################################################
################################################################################
# frozenpset class
################################################################################
################################################################################
################################################################################
class frozenpset(_compatible_metaclass(_SyntaxSugar, frozenset)):
"""Placeholder `frozenset` subclass. Mostly unimplemented.
You can construct `frozenpset`s in the normal manners for `frozenset`s:
```python
ps = frozenpset([1, 2.0, 'three'])
ps = frozenpset({1, 2.0, 'three'})
```
`frozenpset` also supports a convenience constructor from a `list` literal:
```python
ps = frozenpset[1, 2.0, 'three']
```
**Conversion:**
You can convert from `frozenpset` to `frozenset` and back using arithmetic
operations on the `frozenpset` `class` itself, for convenience:
```python
s1 = frozenset([1, 2.0, 'three'])
ps = frozenpset * s1
assert (type(s1) == frozenset)
assert (type(ps) == frozenpset)
assert (ps == s1)
s2 = ps / frozenpset
assert (type(s2) == frozenset)
assert (s2 == s1)
```
See `pstar.pstar` for more details on conversion.
"""
def qj(self, *a, **kw):
"""Call the `qj` logging function with `self` as the value to be logged. All other arguments are passed through to `qj`.
`qj` is a debug logging function. Calling `frozenpset.qj()` is often the fastest way
to begin debugging an issue.
See [qj](https://github.com/iansf/qj) for detailed information on using `qj`.
Examples:
```python
ps = frozenpset([1, 2.0, 'three'])
ps.qj('ps')
# Logs:
# qj: <calling_module> calling_function: ps <2910>: frozenpset({1, 2.0, 'three'})
```
Returns:
`self`, as processed by the arguments supplied to `qj`.
"""
depth = kw.pop('_depth', 0) + 2
return qj(self, _depth=depth, *a, **kw)
################################################################################
################################################################################
################################################################################
# pset class
################################################################################
################################################################################
################################################################################
class pset(_compatible_metaclass(_SyntaxSugar, set)):
"""Placeholder `set` subclass. Mostly unimplemented.
You can construct `pset`s in the normal manners for `set`s:
```python
ps = pset([1, 2.0, 'three'])
ps = pset({1, 2.0, 'three'})
```
`pset` also supports a convenience constructor from a `list` literal:
```python
ps = pset[1, 2.0, 'three']
```
**Conversion:**
You can convert from `pset` to `set` and back using arithmetic
operations on the `pset` `class` itself, for convenience:
```python
s1 = set([1, 2.0, 'three'])
ps = pset * s1
assert (type(s1) == set)
assert (type(ps) == pset)
assert (ps == s1)
s2 = ps / pset
assert (type(s2) == set)
assert (s2 == s1)
```
See `pstar.pstar` for more details on conversion.
"""
def qj(self, *a, **kw):
"""Call the `qj` logging function with `self` as the value to be logged. All other arguments are passed through to `qj`.
`qj` is a debug logging function. Calling `pset.qj()` is often the fastest way
to begin debugging an issue.
See [qj](https://github.com/iansf/qj) for detailed information on using `qj`.
Examples:
```python
ps = pset([1, 2.0, 'three'])
ps.qj('ps')
# Logs:
# qj: <calling_module> calling_function: ps <2910>: pset({1, 2.0, 'three'})
```
Returns:
`self`, as processed by the arguments supplied to `qj`.
"""
depth = kw.pop('_depth', 0) + 2
return qj(self, _depth=depth, *a, **kw)
################################################################################
################################################################################
################################################################################
# ptuple class
################################################################################
################################################################################
################################################################################
class ptuple(_compatible_metaclass(_SyntaxSugar, tuple)):
"""Placeholder `tuple` subclass. Mostly unimplemented.
You can construct `ptuple`s in the normal manner for `tuple`s:
```python
pt = ptuple((1, 2.0, 'three'))
```
`ptuple` also supports a convenience constructor from a `list` literal:
```python
pt = ptuple[1, 2.0, 'three']
```
**Conversion:**
You can convert from `ptuple` to `tuple` and back using arithmetic
operations on the `ptuple` `class` itself, for convenience:
```python
t1 = tuple([1, 2.0, 'three'])
pt = ptuple * t1
assert (type(t1) == tuple)
assert (type(pt) == ptuple)
assert (pt == t1)
t2 = pt / ptuple
assert (type(t2) == tuple)
assert (t2 == t1)
```
See `pstar.pstar` for more details on conversion.
"""
def qj(self, *a, **kw):
"""Call the `qj` logging function with `self` as the value to be logged. All other arguments are passed through to `qj`.
`qj` is a debug logging function. Calling `ptuple.qj()` is often the fastest way
to begin debugging an issue.
See [qj](https://github.com/iansf/qj) for detailed information on using `qj`.
Examples:
```python
pt = ptuple([1, 2.0, 'three'])
pt.qj('pt')
# Logs:
# qj: <calling_module> calling_function: pt <2910>: (1, 2.0, 'three')
```
Returns:
`self`, as processed by the arguments supplied to `qj`.
"""
depth = kw.pop('_depth', 0) + 2
return qj(self, _depth=depth, *a, **kw)
################################################################################
################################################################################
################################################################################
# plist method builder functions.
################################################################################
################################################################################
################################################################################
def _build_comparator(op, merge_op, shortcut, return_root_if_empty_other):
"""Builds a plist comparator operation.
Args:
op: Comparison operation, such as operator.__eq__.
merge_op: Set-like operation for merging sets of intermediate results, such
as operator.__and__.
shortcut: Function to call to shortcut comparison if `self is other`.
return_root_if_empty_other: Boolean for how to handle `other` being an empty
list. If `True`, `self.__root__` is returned. If
`False`, an empty plist is returned.
Returns:
comparator: The comparison function.
"""
def comparator(self, other, return_inds=False):
"""`plist` comparison operator. **Comparisons filter plists.**
**IMPORTANT:** `plist` comparisons all filter the `plist` and return a new
`plist`, rather than a truth value.
`comparator` is not callable directly from `plist`. It implements the various
python comparison operations: `==`, `<`, `>`, etc. The comparison operators
can be called directly with their corresponding 'magic' functions,
`plist.__eq__`, `plist.__lt__`, `plist.__gt__`, etc., but are generally just
called implicitly.
Examples:
`plist` comparators can filter on leaf values:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 1, 'bar': 1},
{'foo': 2, 'bar': 0}])
zero_bars = foos.bar == 0
assert (zero_bars.aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 2, 'bar': 0}])
nonzero_bars = foos.bar != 0
assert (nonzero_bars.aslist() ==
[{'foo': 1, 'bar': 1}])
```
They can also filter on other plists so long as the structures are
compatible:
```python
assert ((foos == zero_bars).aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 2, 'bar': 0}])
assert ((foos.foo > foos.bar).aslist() ==
[{'foo': 2, 'bar': 0}])
```
The same is true when comparing against lists with compatible structure:
```python
assert ((foos.foo == [0, 1, 3]).aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 1, 'bar': 1}])
```
This all generalizes naturally to plists that have been grouped:
```python
by_bar_foo = foos.bar.groupby().foo.groupby()
assert (by_bar_foo.aslist() ==
[[[{'foo': 0, 'bar': 0}],
[{'foo': 2, 'bar': 0}]],
[[{'foo': 1, 'bar': 1}]]])
nonzero_by_bar_foo = by_bar_foo.bar > 0
assert (nonzero_by_bar_foo.aslist() ==
[[[],
[]],
[[{'bar': 1, 'foo': 1}]]])
zero_by_bar_foo = by_bar_foo.foo != nonzero_by_bar_foo.foo
assert (zero_by_bar_foo.aslist() ==
[[[{'foo': 0, 'bar': 0}],
[{'foo': 2, 'bar': 0}]],
[[]]])
assert ((by_bar_foo.foo == [[[0], [3]], [[1]]]).aslist() ==
[[[{'foo': 0, 'bar': 0}],
[]],
[[{'foo': 1, 'bar': 1}]]])
```
Lists with incompatible structure are compared to `self` one-at-a-time,
resulting in set-like filtering where the two sets are merged with an 'or':
```python
assert ((foos.foo == [0, 1, 3, 4]).aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 1, 'bar': 1}])
assert ((by_bar_foo.foo == [0, 1, 3, 4]).aslist() ==
[[[{'foo': 0, 'bar': 0}],
[]],
[[{'foo': 1, 'bar': 1}]]])
```
When comparing against an empty list, `==` always returns an empty list, but
all other comparisons return `self`:
```python
assert ((foos.foo == []).aslist() == [])
assert ((foos.foo < []).aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 1, 'bar': 1},
{'foo': 2, 'bar': 0}])
assert ((by_bar_foo == nonzero_by_bar_foo).aslist() ==
[[[],
[]],
[[{'foo': 1, 'bar': 1}]]])
assert ((by_bar_foo.foo > nonzero_by_bar_foo.foo).aslist() ==
[[[{'foo': 0, 'bar': 0}],
[{'foo': 2, 'bar': 0}]],
[[]]])
```
Note that `plist.nonempty` can be used to remove empty internal `plist`s
after filtering a grouped `plist`:
```python
assert ((by_bar_foo == nonzero_by_bar_foo).nonempty(-1).aslist() ==
[[[{'foo': 1, 'bar': 1}]]])
```
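`return_inds` recovers the indices of the matching elements instead of the
values. Because `plist` blocks attribute access to `__eq__` and the other magic
methods on its instances, the comparator has to be invoked through the class
(a minimal sketch):
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (plist.__eq__(foos.bar, 0, return_inds=True) ==
        [0, 2])
```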
Args:
other: Object to compare against.
return_inds: Optional bool. When `True`, causes the comparison to return
the plist indices of the matching items. When `False`
(the default), causes the comparison to return a plist of the
matching values.
Returns:
A new plist, filtered from `self` and `other` according to the operation
provided to `_build_comparator`, if `return_inds` is `False`. Otherwise,
returns the corresponding indices into self.
"""
if self is other:
return shortcut(self, return_inds)
inds = []
if isinstance(other, list):
if len(self) == len(other):
for i, (x, o) in enumerate(zip(self, other)):
if isinstance(x, plist):
child_inds = comparator(x, o, return_inds=True)
inds.append(child_inds)
elif op(x, o):
inds.append(i)
elif len(other) > 0:
inds = comparator(self, other[0], return_inds=True)
for o in other[1:]:
inds = _merge_indices(inds, comparator(self, o, return_inds=True), merge_op)
else:
# len(other) == 0
if return_inds:
inds = self.lfill(pepth=-1) if return_root_if_empty_other else []
else:
return self.__root__ if return_root_if_empty_other else plist()
else:
for i, x in enumerate(self):
if isinstance(x, plist):
child_inds = comparator(x, other, return_inds=True)
inds.append(child_inds)
elif op(x, other):
inds.append(i)
if return_inds:
return inds
return self.__root__[inds]
return comparator
def _build_logical_op(op):
"""Builds a `plist` logical operation.
Args:
op: Logical operation, such as operator.__and__.
Returns:
logical_op: The logical operation function.
"""
def logical_op(self, other):
"""`plist` logical operation. **Logical operations perform set operations on `plist`s.**
**IMPORTANT:** `plist` logical operations between two `plist`s perform `set` operations
on the two `plist`s. Logical operations between a `plist` and any other type attempts
to perform that operation on the values in the `plist` and `other` itself.
`logical_op` is not callable directly from `plist`. It implements the various
python logical operations: `&`, `|`, `^`, etc. The logical operators
can be called directly with their corresponding 'magic' functions,
`plist.__and__`, `plist.__or__`, `plist.__xor__`, etc., but are generally just
called implicitly.
Examples:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
(foos.bar == 0).baz = 3 + (foos.bar == 0).foo
(foos.bar == 1).baz = 6
assert (((foos.bar == 0) & (foos.baz == 3)).aslist() ==
[{'baz': 3, 'foo': 0, 'bar': 0}])
assert (((foos.bar == 0) | (foos.baz == 3)).aslist() ==
[{'bar': 0, 'baz': 3, 'foo': 0}, {'bar': 0, 'baz': 5, 'foo': 2}])
assert (((foos.bar == 0) ^ (foos.baz == 3)).aslist() ==
[{'bar': 0, 'baz': 5, 'foo': 2}])
by_bar = foos.bar.groupby()
assert (((by_bar.bar == 0) & (by_bar.bar == 1)).aslist() ==
[[], []])
assert (((by_bar.bar == 0) & (by_bar.bar <= 1)).aslist() ==
[[{'bar': 0, 'baz': 3, 'foo': 0}, {'bar': 0, 'baz': 5, 'foo': 2}], []])
assert (((by_bar.baz == 3) | (by_bar.baz == 6)).aslist() ==
[[{'bar': 0, 'baz': 3, 'foo': 0}], [{'bar': 1, 'baz': 6, 'foo': 1}]])
assert (((by_bar.baz == 6) | (by_bar.baz <= 4)).aslist() ==
[[{'bar': 0, 'baz': 3, 'foo': 0}], [{'bar': 1, 'baz': 6, 'foo': 1}]])
assert (((by_bar.baz == 3) ^ (by_bar.baz == 6)).aslist() ==
[[{'bar': 0, 'baz': 3, 'foo': 0}], [{'bar': 1, 'baz': 6, 'foo': 1}]])
assert (((by_bar.baz == 6) ^ (by_bar.bar <= 4)).aslist() ==
[[{'bar': 0, 'baz': 3, 'foo': 0}, {'bar': 0, 'baz': 5, 'foo': 2}], []])
```
Logical operations can be applied element-wise if `other` is not a `plist`:
```python
assert ((foos.baz & 1).aslist() ==
[1, 0, 1])
assert ((by_bar.baz | 1).aslist() ==
[[3, 5], [7]])
assert ((1 ^ by_bar.baz).aslist() ==
[[2, 4], [7]])
```
Args:
other: Object to perform the logical operation with.
Returns:
New `plist`, merging `self` and `other` according to the operation provided
to `_build_logical_op`.
"""
if isinstance(other, plist):
if len(self) == len(other):
try:
return plist([op(x, o) for x, o in zip(self, other)])
except Exception:
pass
self_flat = self.ungroup(-1)
other_flat = other.ungroup(-1)
ids = op(set([id(x) for x in self_flat]),
set([id(x) for x in other_flat]))
if op is operator.__and__ or op is operator.__iand__:
return plist([x for x in self_flat if id(x) in ids]) # Don't pass root -- we are uprooting
else:
return plist(
[ids.remove(id(x)) or x for x in self_flat if id(x) in ids] +
[ids.remove(id(x)) or x for x in other_flat if id(x) in ids]
) # Don't pass root -- we are uprooting
else:
return plist([op(x, other) for x in self], root=self.__root__)
return logical_op
def _build_binary_op(op):
"""Builds a plist binary operation.
Args:
op: Binary operation, such as operator.__add__.
Returns:
binary_op: The binary operation function.
"""
def binary_op(self, other):
"""`plist` binary operation; applied element-wise to `self`.
`binary_op` is not callable directly from `plist`. It implements the various
python binary operations: `+`, `-`, `*`, etc. The binary operators
can be called directly with their corresponding 'magic' functions,
`plist.__add__`, `plist.__sub__`, `plist.__mul__`, etc., but are generally just
called implicitly.
Examples:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
(foos.bar == 0).baz = 3 + (foos.bar == 0).foo
(foos.bar == 1).baz = 6
assert ((foos.foo + foos.baz).aslist() ==
[3, 7, 7])
assert ((2 * (foos.foo + 7)).aslist() ==
[14, 16, 18])
by_bar = foos.bar.groupby()
assert ((by_bar.foo + by_bar.baz).aslist() ==
[[3, 7], [7]])
assert ((2 * (by_bar.foo + 7)).aslist() ==
[[14, 18], [16]])
```
The only binary operation that doesn't work as expected is string interpolation:
`'foo: %d' % foos.foo`. This can't work as expected because python handles that
operation in a special manner. However, `+` works on `plist`s of strings, as
does `plist.apply('{}'.format)`:
```python
assert (('foo: ' + foos.foo.pstr() + ' bar: ' + foos.bar.pstr()).aslist() ==
['foo: 0 bar: 0', 'foo: 1 bar: 1', 'foo: 2 bar: 0'])
assert (foos.foo.apply('foo: {} bar: {}'.format, foos.bar).aslist() ==
['foo: 0 bar: 0', 'foo: 1 bar: 1', 'foo: 2 bar: 0'])
assert (('foo: ' + by_bar.foo.pstr() + ' bar: ' + by_bar.bar.pstr()).aslist() ==
[['foo: 0 bar: 0', 'foo: 2 bar: 0'], ['foo: 1 bar: 1']])
assert (by_bar.foo.apply('foo: {} bar: {}'.format, by_bar.bar).aslist() ==
['foo: [0, 2] bar: [0, 0]', 'foo: [1] bar: [1]'])
assert (by_bar.foo.apply_('foo: {} bar: {}'.format, by_bar.bar).aslist() ==
[['foo: 0 bar: 0', 'foo: 2 bar: 0'], ['foo: 1 bar: 1']])
```
Note the difference between the final two examples using `apply()` vs. `apply_()` on
grouped `plist`s.
Args:
other: Object to perform the binary operation with.
Returns:
A new plist, where each element of `self` had the operation passed to
`_build_binary_op` applied to it and `other`, or the corresponding element
of `other`, if the lengths of `self` and `other` match.
"""
if (other is pstar
or other is defaultpdict
or other is frozenpset
or other is pdict
or other is plist
or other is pset
or other is ptuple
):
if sys.version_info[0] < 3:
name = op.__name__.replace('__', '__r', 1)
else:
name = '__r%s__' % op.__name__
return getattr(other.__class__, name)(other, self)
if isinstance(other, plist):
if len(self) == len(other):
return plist([op(x, o) for x, o in zip(self, other)], root=self.__root__)
return plist([op(x, other) for x in self], root=self.__root__)
return binary_op
def _build_binary_rop(op):
"""Builds a plist binary operation where the plist is only the right side.
Args:
op: Left-side binary operation, such as operator.__add__.
Returns:
binary_rop: The corresponding right-side binary operation function.
"""
def binary_rop(self, other):
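    """Right-side `plist` binary operation; applies `op(other, x)` element-wise across `self`."""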
if (other is pstar
or other is defaultpdict
or other is frozenpset
or other is pdict
or other is plist
or other is pset
or other is ptuple
):
# The plist.__r<op>__ methods should never be hit during conversion for valid conversions.
raise NotImplementedError('Operation %s is not supported as a pstar conversion method.' % op.__name__)
return plist([op(other, x) for x in self], root=self.__root__)
return binary_rop
def _build_binary_ops(op, iop):
"""Builds all three variants of plist binary operation: op, rop, and iop.
Args:
op: Binary operation, such as operator.__add__.
iop: Binary assignment operation, such as operator.__iadd__.
Returns:
The plist binary operation and its right-side and assignment variants.
"""
return _build_binary_op(op), _build_binary_rop(op), _build_binary_op(iop)
def _build_unary_op(op):
"""Builds a plist unary operation.
Args:
op: Unary operation, such as operator.__neg__.
Returns:
unary_op: The unary operation function.
"""
def unary_op(self):
"""`plist` unary operation; applied element-wise to `self`.
`unary_op` is not callable directly from `plist`. It implements the various
python unary operations: `-`, `~`, `abs`, etc. The unary operators
can be called directly with their corresponding 'magic' functions,
`plist.__neg__`, `plist.__invert__`, `plist.__abs__`, etc., but are generally just
called implicitly.
Examples:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
(foos.bar == 0).baz = 3 + (foos.bar == 0).foo
(foos.bar == 1).baz = 6
assert ((-foos.foo).aslist() ==
[0, -1, -2])
assert ((~foos.foo).aslist() ==
[-1, -2, -3])
by_bar = foos.bar.groupby()
assert ((-by_bar.foo).aslist() ==
[[0, -2], [-1]])
assert ((~by_bar.foo).aslist() ==
[[-1, -3], [-2]])
```
Returns:
A new `plist`, where each element of `self` had the operation passed to
`_build_unary_op` applied to it.
"""
return plist([op(x) for x in self], root=self.__root__)
return unary_op
################################################################################
################################################################################
################################################################################
# plist helper functions and constants.
################################################################################
################################################################################
################################################################################
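# String types and the number of stack frames per plist attribute call differ between
# python 2 and 3; PLIST_CALL_ATTR_CALL_PEPTH_DELTA keeps qj log depths accurate across such calls.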
if sys.version_info[0] < 3:
STRING_TYPES = types.StringTypes
PLIST_CALL_ATTR_CALL_PEPTH_DELTA = 1
else:
STRING_TYPES = str
PLIST_CALL_ATTR_CALL_PEPTH_DELTA = 2
NONCALLABLE_ATTRS = ['__class__', '__dict__', '__doc__', '__module__']
def _call_attr(_pobj, _pname, _pattr, *_pargs, **_pkwargs):
"""Recursive function to call the desired attribute.
Args:
_pobj: Object that the attribute will be called on. May not be a plist
if `pepth != 0`.
_pname: Name of the attribute being called.
_pattr: Bound attribute found by a `__getattribute__` or `getattr` call.
*_pargs: Arguments passed directly to the attribute.
**_pkwargs: Keyword arguments passed directly to the attribute, except
`pepth`, `call_pepth`, and `psplit`, which are removed.
`pepth` tracks the desired depth in the plist of the
attribute. When `pepth == 0`, the attribute is called or
returned (for non-callable attributes).
`call_pepth` tracks the actual depth the call occurs at. It is
only passed on to a known list of plist methods that need it
in order to correctly handle stack frames between the original
caller and the final call.
`psplit` causes calling to happen in parallel, with the same
semantics as in `plist.apply`.
Returns:
Either the value of the attribute, if the attribute is a known
non-callable attribute, or the value of calling the attribute with the
provided arguments.
"""
pepth = _pkwargs.pop('pepth', 0)
call_pepth = _pkwargs.pop('call_pepth', 0)
psplit = _pkwargs.pop('psplit', 0)
if pepth != 0:
if not isinstance(_pobj, plist):
if _pname in NONCALLABLE_ATTRS:
return _pattr
return _pattr(*_pargs, **_pkwargs)
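    # Broadcast every argument to the length of _pobj so that parallel plist arguments align element-wise with its elements.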
pargs = [_ensure_len(len(_pobj), a, strict=True) for a in _pargs]
pkwargs = {
k: _ensure_len(len(_pobj), v, strict=True) for k, v in _pkwargs.items()
}
try:
attrs = [list.__getattribute__(x, _pname) if isinstance(x, list) else getattr(x, _pname) for x in _pobj]
if psplit > 0 and isinstance(_pobj, plist):
pool = _get_thread_pool(psplit, len(_pobj))
call_args = [pdict(x=x, i=i) for i, x in enumerate(_pobj)]
map_func = lambda ca: _call_attr(ca.x,
_pname, attrs[ca.i],
pepth=pepth - 1,
call_pepth=0, # It's not possible to get the proper stack frame when spinning off threads, so don't bother tracking it.
psplit=psplit,
*[a[ca.i] for a in pargs],
**{k: v[ca.i] for k, v in pkwargs.items()})
pl = plist(pool.map(map_func, call_args, chunksize=_get_thread_chunksize(psplit, len(_pobj))), root=_pobj.__root__)
pool.close()
return pl
return plist([_call_attr(x,
_pname,
attrs[i],
pepth=pepth - 1,
call_pepth=call_pepth + PLIST_CALL_ATTR_CALL_PEPTH_DELTA,
psplit=psplit,
*[a[i] for a in pargs],
**{k: v[i] for k, v in pkwargs.items()})
for i, x in enumerate(_pobj)],
root=_pobj.__root__)
except Exception as e:
if pepth > 0:
raise e
if isinstance(_pobj, plist) and _pname in ['qj', 'me']:
result = _pattr(call_pepth=call_pepth, *_pargs, **_pkwargs)
elif psplit > 0 and isinstance(_pobj, plist) and _pname == 'apply':
result = _pattr(psplit=psplit, *_pargs, **_pkwargs)
elif _pname == 'qj':
depth = _pkwargs.pop('_depth', 0) + call_pepth + PLIST_CALL_ATTR_CALL_PEPTH_DELTA + (sys.version_info[0] < 3)
result = _pattr(_depth=depth, *_pargs, **_pkwargs)
elif _pname in NONCALLABLE_ATTRS:
return _pattr
else:
result = _pattr(*_pargs, **_pkwargs)
if result is None and isinstance(_pobj, plist):
return _pobj
return result
def _ensure_len(length, x, strict=False):
"""Convert `x` to a `list` of length `length` if necessary and return it.
This function is the core of `plist` 'deepcasting', which is conceptually
similar to 'broadcasting' in `numpy` and `tensorflow`, but is intentionally much
more permissive. Deepcasting relies on the fact that most functions will
crash if they receive a `list` when they were expecting a scalar value. Allowing
the called function to crash, rather than crashing in `plist`, allows `plist` to
be optimistic, and avoids `plist` having to guess how a user-supplied function
is meant to be called.
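Examples:
A minimal sketch of the conversion rules (`_ensure_len` is private, so this is
illustrative only):
```python
assert (_ensure_len(3, 13) == [13, 13, 13])
assert (_ensure_len(3, [1, 2, 3]) == [1, 2, 3])  # Correct-length lists pass through.
assert (_ensure_len(2, 'ab') == ['ab', 'ab'])  # Strings are always treated as scalars.
assert (_ensure_len(2, (1, 2), strict=True) == [(1, 2), (1, 2)])  # strict only passes plists through.
```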
Args:
length: int.
x: object to convert.
strict: Boolean. If `True`, only `plist`s are returned without being wrapped.
`list`s and other iterables of the correct length are still returned
wrapped in a new `list` of the correct length. Defaults to `False`,
which means that `list`s and other iterables of the correct length
are returned unchanged.
Returns:
`x` unchanged, if `x` is a `plist` of length `length` (when `strict` is `True`), or
a non-string, non-tuple sequence with `len(x) == length` (when `strict` is `False`).
Otherwise, a new `list` with `length` copies of `x`.
"""
if ((strict
and isinstance(x, plist) and len(x) == length)
or (not strict
and not isinstance(x, type)
and not isinstance(x, STRING_TYPES)
and not isinstance(x, tuple)
and hasattr(x, '__len__')
and len(x) == length)):
return x
return [x for _ in range(length)]
def _merge_indices(left, right, op):
"""Merge index arrays using set operation `op`.
This is the core of the filtering that happens in the plist comparators.
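Examples:
A minimal sketch (private helper, shown for illustration):
```python
import operator
assert (_merge_indices([0, 2], [1, 2], operator.__and__) == [2])
assert (_merge_indices([0, 2], [1, 2], operator.__or__) == [0, 1, 2])
# Nested index lists from grouped plists are merged recursively:
assert (_merge_indices([[0], [1]], [[0], [2]], operator.__or__) == [[0], [1, 2]])
```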
Args:
left: List of integer indices.
right: List of integer indices.
op: Set operation to merge the two lists. E.g., operator.__and__.
Returns:
List containing merged indices.
"""
try:
left_empty_or_ints = len(left) == 0 or plist(left).all(isinstance, int)
right_empty_or_ints = len(right) == 0 or plist(right).all(isinstance, int)
if left_empty_or_ints and right_empty_or_ints:
sl = set(left)
sr = set(right)
return sorted(list(op(sl, sr)))
except Exception:
pass
try:
return [_merge_indices(left[i], right[i], op) for i in range(max(len(left), len(right)))]
except Exception:
pass
if isinstance(left, list) and isinstance(right, list):
return left.extend(right) or left
return [left, right]
def _successor(v):
"""Returns a successor/predecessor object starting at value v."""
s = pdict(v=v, p=lambda: s.update(v=s.v - 1).v, s=lambda: s.update(v=s.v + 1).v)
return s
MAX_THREADS = 25
def _get_thread_pool(psplit, obj_len):
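  """Returns a thread pool with `psplit` threads when `psplit > 1`, otherwise one thread per element, capped at `MAX_THREADS`."""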
return Pool(psplit if psplit > 1 else min(MAX_THREADS, obj_len))
def _get_thread_chunksize(psplit, obj_len):
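  """Returns the per-thread chunksize: `obj_len // psplit` (at least 1) when `psplit > 1`, otherwise 1."""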
return max(1, obj_len // psplit) if psplit > 1 else 1
################################################################################
################################################################################
################################################################################
# plist class
################################################################################
################################################################################
################################################################################
class plist(_compatible_metaclass(_SyntaxSugar, list)):
"""`list` subclass for powerful, concise data processing.
**Homogeneous access:**
`plist` is the natural extension of object-orientation to homogeneous lists of
arbitrary objects. With `plist`, you can treat a list of objects of the same
type as if they are a single object of that type, in many (but not all)
circumstances.
```python
pl = plist['abc', 'def', 'ghi']
assert ((pl + ' -> ' + pl.upper()).aslist() ==
['abc -> ABC', 'def -> DEF', 'ghi -> GHI'])
```
**Indexing:**
Indexing `plist`s is meant to be both powerful and natural, while accounting for
the fact that the elements of the `plist` may need to be indexed as well.
See `__getitem__`, `__setitem__`, and `__delitem__` for more details.
Indexing into the `plist` itself:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
# Basic scalar indexing:
assert (foos[0] ==
dict(foo=0, bar=0))
# plist slice indexing:
assert (foos[:2].aslist() ==
[dict(foo=0, bar=0), dict(foo=1, bar=1)])
# plist int list indexing:
assert (foos[[0, 2]].aslist() ==
[dict(foo=0, bar=0), dict(foo=2, bar=0)])
```
Indexing into the elements of the `plist`:
```python
# Basic scalar indexing:
assert (foos['foo'].aslist() ==
[0, 1, 2])
# tuple indexing
assert (foos[('foo', 'bar')].aslist() ==
[(0, 0), (1, 1), (2, 0)])
# list indexing
assert (foos[['foo', 'bar', 'bar']].aslist() ==
[0, 1, 0])
```
Indexing into the elements of the `plist` when the elements are indexed by
`int`s, `slice`s, or other means that conflict with `plist` indexing:
```python
pl = plist[[1, 2, 3], [4, 5, 6], [7, 8, 9]]
# Basic scalar indexing:
assert (pl._[0].aslist() ==
[1, 4, 7])
# slice indexing (note the use of the 3-argument version of slicing):
assert (pl._[:2:1].aslist() ==
[[1, 2], [4, 5], [7, 8]])
# list indexing:
pl = pl.np()
assert (pl._[[True, False, True]].apply(list).aslist() ==
[[1, 3], [4, 6], [7, 9]])
```
**`root` and `uproot`:**
`plist`s all have a root object. For newly created `plist`s, the root is `self`,
but as computations are performed on the `plist`, the root of the resulting
`plist`s almost always remains the original `plist`:
```python
pl = plist[1, 2, 3]
# plist operations don't modify the original (except where natural)!
assert ((pl + 5) is not pl)
assert ((pl + 5).root() is pl)
```
In some cases, you don't want to maintain the original root. To reset the root
to `self`, simply call `uproot`:
```python
pl2 = pl + 5
assert (pl2.root() is not pl2)
assert (pl2.uproot().root() is pl2)
assert (pl2.root() is pl2)
```
See `root` and `uproot` for more details.
**Filtering:**
`plist` overrides comparison operations to provide filtering. This is reasonable,
since an empty `plist` is a `False` value, just like an empty `list`, so a filter
that filters everything is equivalent to the comparison failing.
Filtering always returns the root of the `plist`, which allows you to filter a
`plist` on arbitrary values computed from the root, and then proceed with your
computation on the (filtered) original data.
See `comparator` and `filter` for more details.
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
# Filtering on a property:
zero_bars = foos.bar == 0
# The result is a plist of the original pdicts, correctly filtered:
assert (zero_bars.aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 2, 'bar': 0}])
# filter can take any function to filter by, but it defaults to bool():
nonzero_bars = foos.bar.filter()
assert (nonzero_bars.aslist() ==
[{'foo': 1, 'bar': 1}])
```
**Grouping and Sorting:**
Just as with filtering, you can group and sort a `plist` on any arbitrary
value computed from the `plist`.
This shows a basic grouping by a property of the data. Note that `groupby`
returns the root, just like filtering:
```python
foos = plist([pdict(foo=0, bar=1), pdict(foo=1, bar=0), pdict(foo=2, bar=1)])
# Note that the `bar == 1` group comes before the `bar == 0` group. The ordering
# is determined by the current order of the plist, not by the group keys.
assert (foos.bar.groupby().aslist() ==
[[{'bar': 1, 'foo': 0}, {'bar': 1, 'foo': 2}], [{'bar': 0, 'foo': 1}]])
# Note that foos is unchanged:
assert (foos.aslist() ==
[{'bar': 1, 'foo': 0}, {'bar': 0, 'foo': 1}, {'bar': 1, 'foo': 2}])
```
In contrast, sorting a `plist` modifies the order of both the current `plist` and
its root, but returns the current `plist` instead of the root:
```python
assert (foos.bar.sortby().aslist() ==
[0, 1, 1])
assert (foos.aslist() ==
[{'bar': 0, 'foo': 1}, {'bar': 1, 'foo': 0}, {'bar': 1, 'foo': 2}])
```
This distinction between the behaviors of `groupby` and `sortby` permits natural
chaining of the two when sorted groups are desired. It also ensures that
`plist`s computed from the same root will be ordered in the same way.
```python
foos = plist([pdict(foo=0, bar=1), pdict(foo=1, bar=0), pdict(foo=2, bar=1)])
assert (foos.bar.sortby().groupby().aslist() ==
[[{'bar': 0, 'foo': 1}], [{'bar': 1, 'foo': 0}, {'bar': 1, 'foo': 2}]])
```
See `groupby` and `sortby` for more details.
**Function Application and Multiple Arguments:**
The most prominent case where you can't treat a `plist` as a single object is
when you need to pass a single object to some function that isn't a property of
the elements of the `plist`. In this case, just use `apply`:
```python
pl = plist['abc', 'def', 'ghi']
assert (pl.apply('foo: {}'.format).aslist() ==
['foo: abc', 'foo: def', 'foo: ghi'])
```
Where `apply` shines (and all calls to `plist` element functions) is when dealing
with multi-argument functions. In this case, you will often find that you want to
call the function with parallel values from parallel `plist`s. That is easy and
natural to do, just like calling the function with corresponding non-`plist`
values:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
foos.baz = 'abc' * foos.foo
# Do a multi-argument string format with plist.apply:
assert (foos.foo.apply('foo: {} bar: {} baz: {baz}'.format, foos.bar, baz=foos.baz).aslist() ==
['foo: 0 bar: 0 baz: ', 'foo: 1 bar: 1 baz: abc', 'foo: 2 bar: 0 baz: abcabc'])
# Do the same string format directly using the plist as the format string:
assert (('foo: ' + foos.foo.pstr() + ' bar: {} baz: {baz}').format(foos.bar, baz=foos.baz).aslist() ==
['foo: 0 bar: 0 baz: ', 'foo: 1 bar: 1 baz: abc', 'foo: 2 bar: 0 baz: abcabc'])
```
See `__call__`, `apply`, and `reduce` for more details.
"""
__slots__ = ['__root__', '__pepth__']
def __init__(self, *args, **kwargs):
"""Constructs plist.
Examples:
```python
# Empty plists:
pl = plist()
pl = plist([])
# Convenience constructor for list literals:
pl = plist[1, 2, 3]
pl = plist[1,] # Note the trailing comma, which is required for 1-element lists.
# Initialization from other lists or plists:
pl = plist(['a', 'b', 'c'])
pl = plist(pl)
# Initialization from iterables:
pl = plist(range(5))
pl = plist([i for i in range(5)])
pl = plist((i for i in range(5)))
# Passing root (advanced usage -- not generally necessary):
pl = plist([1, 2, 3], root=plist(['a', 'b', 'c']))
```
Args:
*args: Passed directly to `list` constructor.
**kwargs: Keyword arguments passed directly to `list` constructor after
extracting `root` if present. `root` must be a `plist`, and
will be used as the root of `self`.
Returns:
`None`. `plist` is initialized.
"""
self.__pepth__ = 0
self.__root__ = kwargs.pop('root', self)
list.__init__(self, *args, **kwargs)
##############################################################################
##############################################################################
##############################################################################
# Private methods.
##############################################################################
##############################################################################
##############################################################################
def __getattribute__(self, name):
"""Returns a plist of the attribute for self, or for each element.
If `name` exists as an attribute of plist, that attribute is returned.
Otherwise, removes trailing underscores from `name` (apart from those
normally part of a `__*__` name), and uses the count of underscores to
indicate how deep into the plist the lookup for `name` should go. Attempts
to find the modified `name` on plist first, and then looks for `name` on
each element of self.
When attempting to find `name` on the elements of self, first it checks
if the elements all have `name` as an attribute. If so, it returns that
attribute (`[getattr(x, name) for x in self]`). Otherwise, it attempts to
return `name` as an index of each element (`[x[name] for x in self]`).
Examples:
A `plist` of `list`s has `append` methods at two levels -- the `plist`
and the contained `list`s. To call `list.append` on the contained `list`s,
add an '_' to the method name:
```python
pl = plist[[1, 2, 3], [4, 5, 6]]
pl.append([7, 8, 9])
assert (pl.aslist() ==
[[1, 2, 3], [4, 5, 6], [7, 8, 9]])
pl.append_(10)
assert (pl.aslist() ==
[[1, 2, 3, 10], [4, 5, 6, 10], [7, 8, 9, 10]])
```
Grouped `plist`s also have methods that you might want to call at different
depths. Adding an '_' for each layer of the `plist` you want to skip
allows you to control which depth the method is executed at:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
by_bar = foos.bar.groupby()
assert (by_bar.foo.apply(str).aslist() ==
['[0, 2]', '[1]'])
assert (by_bar.foo.apply_(str).aslist() ==
[['0', '2'], ['1']])
# (Note that it is better to use `plist.pstr` to get string representation of
# leaf elements:)
assert (by_bar.foo.pstr().aslist() ==
[['0', '2'], ['1']])
```
Args:
name: Name of the attribute.
Returns:
Bound `plist` attribute, or `plist` of bound attributes of the elements
of `self`.
Raises:
AttributeError: If `name` is a reserved member of the elements of `self`.
AttributeError: If `name` is not found on `self` or the elements of `self`.
"""
if name == '__root__' or name == '__pepth__':
return list.__getattribute__(self, name)
if not name.endswith('___') and name.startswith('__') and name.endswith('__'):
raise AttributeError('plist objects cannot call reserved members of their elements: \'%s\'' % name)
try:
return plist.__getattr__(self, name)
except AttributeError:
pass
if ((name.startswith('__') and name.endswith('___'))
or (not name.startswith('__') and name.endswith('_'))):
# Allows calling one level deeper by adding '_' to the end of a property name. This is recursive, so '__' on the end goes two levels deep, etc.
# Works for both regular properties (foos.bar_) and private properties (foos.__len___).
try:
starting_unders = 2 if name.startswith('__') else 0 # We don't care about single starting underscores for this count
ending_unders = 0
for i in range(len(name) - 1, 0, -1):
if name[i] == '_':
ending_unders += 1
else:
break
ending_unders -= starting_unders
return plist.__getattr__(self, name[:-ending_unders], _pepth=ending_unders)
except AttributeError:
pass
try:
if plist.all(self, hasattr, name):
return plist([getattr(x, name) for x in self], root=self.__root__)
return plist([x[name] for x in self], root=self.__root__)
except Exception as e:
raise AttributeError('plist children raised exceptions attempting to get attribute \'%s\' (%s)' % (name, str(e)))
##############################################################################
# __get*__
##############################################################################
def __getattr__(self, name, _pepth=0):
"""Recursively attempt to get the attribute `name`.
Handles getting attributes from `self`, rather than from elements of `self`,
which is handled in `__getattribute__`. The only exception is for
requests to method names that are present on both `plist` and its leaf
elements, for example if the leaves are all `list`s, and a sufficiently high
`_pepth` value, or `_pepth < 0`, in which case the final calls will be
executed on the leaf elements.
The attribute gets wrapped in a callable that handles any requested recursion,
as specified by having called `self._` immediately previously, or due to
trailing '_' in the name that were detected by `__getattribute__`.
Args:
name: Attribute name.
_pepth: plist depth at which the found attribute should be applied.
If _pepth < 0, the attribute is applied as deep as possible, which
may be on the deepest non-plist children. This permits calling,
for example, list methods on lists nested inside of plists.
If _pepth > 0, the attribute is applied after that many recursive
calls, and any exception generated is propagated back.
Returns:
Either the value of the attribute, for known non-callable attributes like
`__class__`, or a callable wrapping the final attributes.
"""
attr = list.__getattribute__(self, name)
pepth_local = list.__getattribute__(self, '__pepth__')
if pepth_local:
_pepth = pepth_local
self.__pepth__ = 0
if _pepth:
wrap = lambda *a, **k: _call_attr(self, name, attr, pepth=_pepth, *a, **k)
else:
wrap = lambda *a, **k: _call_attr(self, name, attr, *a, **k)
if name in NONCALLABLE_ATTRS or name in ['_', '__']:
return wrap()
return wrap
def __getitem__(self, key):
"""Returns a new `plist` using a variety of indexing styles.
Examples:
Indexing into the `plist` itself:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
# Basic scalar indexing:
assert (foos[0] ==
dict(foo=0, bar=0))
# plist slice indexing:
assert (foos[:2].aslist() ==
[dict(foo=0, bar=0), dict(foo=1, bar=1)])
# plist int list indexing:
assert (foos[[0, 2]].aslist() ==
[dict(foo=0, bar=0), dict(foo=2, bar=0)])
```
Indexing into the elements of the `plist`:
```python
# Basic scalar indexing:
assert (foos['foo'].aslist() ==
[0, 1, 2])
# tuple indexing
assert (foos[('foo', 'bar')].aslist() ==
[(0, 0), (1, 1), (2, 0)])
# list indexing
assert (foos[['foo', 'bar', 'bar']].aslist() ==
[0, 1, 0])
```
Indexing into the elements of the `plist` when the elements are indexed by
`int`s, `slice`s, or other means that conflict with `plist` indexing:
```python
pl = plist[[1, 2, 3], [4, 5, 6], [7, 8, 9]]
# Basic scalar indexing:
assert (pl._[0].aslist() ==
[1, 4, 7])
# slice indexing (note the use of the 3-argument version of slicing):
assert (pl._[:2:1].aslist() ==
[[1, 2], [4, 5], [7, 8]])
# list indexing:
pl = pl.np()
assert (pl._[[True, False, True]].apply(list).aslist() ==
[[1, 3], [4, 6], [7, 9]])
```
Args:
key: The key to index by.
`key` can be applied to `self` directly as:
A `list` of `int`s: Returns a `plist` using those `int`s as indices.
A `slice`: Returns a `plist` based on the `slice`.
An `int`: Returns the value at that index (may not be a `plist`).
`key` can be applied to elements of `self` individually:
A generic `list`:
Returns a `plist` using the elements of `key` in order on the
elements of `self`.
A `tuple` when the elements of `self` can be indexed by `tuple`:
Returns a `plist` applying that `tuple` to each element of `self`.
A `tuple`, otherwise:
Returns a `plist` where each element of the new `plist` is a `tuple`
of each value in the `key` `tuple` applied to each element of
`self`. E.g., `foo[('bar', 'baz')]` might return
`plist([(1, 2), (3, 4), ...])`.
Anything else:
Returns a `plist` of the `key` applied to each of its elements.
Returns:
A `plist` based on the order of attempting to apply `key` described above.
Raises:
TypeError: If `key` fails to be applied directly to `self` and fails to be
applied to its elements individually.
"""
if self.__pepth__ != 0:
return plist.__getattr__(self, '__getitem__')(key)
try:
if (isinstance(key, list)
and plist(key).all(isinstance, int)):
return plist([self[k] for k in key]) # Don't pass root -- we are uprooting
elif isinstance(key, slice):
if self is self.__root__:
return plist(list.__getitem__(self, key))
return plist(list.__getitem__(self, key), root=plist(list.__getitem__(self.__root__, key)))
else:
return list.__getitem__(self, key)
except TypeError as first_exception:
try:
if isinstance(key, list):
return plist([self[i][k] for i, k in enumerate(key)]) # Don't pass root -- we are uprooting
if isinstance(key, tuple):
try:
return plist([x[key] for x in self], root=self.__root__)
except Exception:
return plist([tuple(x[k] for k in key) for x in self], root=self.__root__)
return plist([x[key] for x in self], root=self.__root__)
except Exception as second_exception:
raise TypeError('Failed to apply index to self or elements.\nself exception: %s\nelements exception: %s' % (str(first_exception), str(second_exception)))
def __getslice__(self, i, j):
"""Delegates to `__getitem__` whenever possible. For compatibility with python 2.7.
Avoid using `__getslice__` whenever possible in python 2.7, as the bytecode compiler
assumes that the slice is for the given object on the stack, and modifies negative
indices relative to that object's length. In `plist`s and other dynamic apis like
`numpy`, that assumption can cause undetectable and unrecoverable errors.
To avoid the errors caused by this api in python 2.7, simply use three-argument
slices instead of two; e.g., `plist[::1]`.
Examples:
The following examples are safe uses of slicing with `plist`s:
```python
pl = plist['abc', 'def', 'ghi']
assert (pl[:2:1].aslist() ==
['abc', 'def'])
assert (pl._[:2:1].aslist() ==
['ab', 'de', 'gh'])
```
The following example will log a warning -- even though it appears to work, the
underlying bytecode is incorrect:
```python
assert (pl._[:2].aslist() ==
['ab', 'de', 'gh'])
# Logs:
# qj: <pstar> __getslice__: WARNING! <1711>: (multiline log follows)
# Slicing of inner plist elements with negative indices in python 2.7 does not work, and the error cannot be detected or corrected!
# Instead of slicing with one or two arguments: `plist._[-2:]`, use the three argument slice: `plist._[-2::1]`.
# This avoids the broken code path in the python compiler.
```
Args:
i, j: Beginning and ending indices of `slice`.
Returns:
`plist` slice of `self`.
"""
if self.__pepth__ != 0:
if '__warned__' not in plist.__getslice__.__dict__:
qj('Slicing of inner plist elements with negative indices in python 2.7 does not work, and the error cannot be detected or corrected!\n'
'Instead of slicing with one or two arguments: `plist._[-2:]`, use the three argument slice: `plist._[-2::1]`.\n'
'This avoids the broken code path in the python compiler.', 'WARNING!')
plist.__getslice__.__dict__['__warned__'] = True
return plist.__getattr__(self, '__getslice__')(i, j)
try:
if self is self.__root__:
return plist(list.__getslice__(self, i, j))
return plist(list.__getslice__(self, i, j), root=plist(list.__getslice__(self.__root__, i, j)))
except Exception:
return plist.__getitem__(self, slice(i, j))
##############################################################################
# __set*__
##############################################################################
def __setattr__(self, name, val):
"""Sets an attribute on a `plist` or its elements to `val`.
This delegates almost entirely to the elements of `self`, allowing natural
assignments of attributes.
Examples:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
# Assignment to an existing attribute:
foos.foo += 1
assert (foos.foo.aslist() ==
[1, 2, 3])
# Scalar assignment to a new attribute:
foos.baz = -1
assert (foos.baz.aslist() ==
[-1, -1, -1])
# plist assignment to an attribute:
foos.baz *= foos.foo + foos.bar
assert (foos.baz.aslist() ==
[-1, -3, -3])
```
All of the same things work naturally on a grouped `plist` as well:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
by_bar = foos.bar.groupby()
# Assignment to an existing attribute:
by_bar.foo += 1
assert (by_bar.foo.aslist() ==
[[1, 3], [2]])
# Scalar assignment to a new attribute:
by_bar.baz = -1
assert (by_bar.baz.aslist() ==
[[-1, -1], [-1]])
# plist assignment to an attribute:
by_bar.baz *= by_bar.foo + by_bar.bar
assert (by_bar.baz.aslist() ==
[[-1, -3], [-3]])
```
Args:
name: Name of the attribute to set.
val: Value to set the attribute to. If `val` is a `plist` and its length
matches `len(self)`, the elements of `val` are set on the elements of
`self`. Otherwise, the elements of `self` are all set to `val`.
Returns:
`self`, in order to allow chaining through `plist.__setattr__(name, val)`.
"""
if name == '__root__' or name == '__pepth__':
list.__setattr__(self, name, val)
elif self.__pepth__ != 0:
return plist.__getattr__(self, '__setattr__')(name, val)
else:
lval = _ensure_len(len(self), val)
for i, x in enumerate(self):
x.__setattr__(name, lval[i])
return self
def __setitem__(self, key, val):
"""Sets items of `self` using a variety of indexing styles.
Examples:
Indexing into the `plist` itself:
```python
# Basic scalar indexing:
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
foos[0] = 13
assert (foos.aslist() ==
[13, dict(foo=1, bar=1), dict(foo=2, bar=0)])
# plist slice indexing:
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
foos[:2] = plist[12, 13]
assert (foos.aslist() ==
[12, 13, dict(foo=2, bar=0)])
# plist int list indexing:
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
foos[[0, 2]] = plist[12, 13]
assert (foos.aslist() ==
[12, dict(foo=1, bar=1), 13])
```
Indexing into the elements of the `plist`:
```python
# Basic scalar indexing:
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
foos['foo'] = plist[4, 5, 6]
assert (foos.aslist() ==
[dict(foo=4, bar=0), dict(foo=5, bar=1), dict(foo=6, bar=0)])
# list indexing
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
foos[['foo', 'bar', 'bar']] = plist[4, 5, 6]
assert (foos.aslist() ==
[dict(foo=4, bar=0), dict(foo=1, bar=5), dict(foo=2, bar=6)])
```
Indexing into the elements of the `plist` when the elements are indexed by
`int`s, `slice`s, or other means that conflict with `plist` indexing:
```python
# Basic scalar indexing:
pl = plist[[1, 2, 3], [4, 5, 6], [7, 8, 9]]
pl._[0] = 13
assert (pl.aslist() ==
[[13, 2, 3], [13, 5, 6], [13, 8, 9]])
# slice indexing (note the use of the 3-argument version of slicing):
pl = plist[[1, 2, 3], [4, 5, 6], [7, 8, 9]]
pl._[:2:1] = pl._[1:3:1]
assert (pl.aslist() ==
[[2, 3, 3], [5, 6, 6], [8, 9, 9]])
# list indexing:
pl = plist[[1, 2, 3], [4, 5, 6], [7, 8, 9]].np()
pl._[[True, False, True]] = plist[[5, 6], [7, 8], [9, 0]]
assert (pl.apply(list).aslist() ==
[[5, 2, 6], [7, 5, 8], [9, 8, 0]])
```
Args:
key: The key to index by.
`key` can be applied to `self` directly as:
A `list` of `int`s: Sets items using those `int`s as indices.
A `slice`: Sets items based on the `slice`.
An `int`: Sets the item at that index.
`key` can be applied to elements of `self` individually:
A generic `list`:
Sets items on the elements of `self` using the elements of `key` in order.
A `tuple` when the elements of `self` can be indexed by `tuple`:
Sets the elements of `self` using that `tuple` to index into each
element.
A `tuple`, otherwise:
Sets the elements of `self` using each element of the `key`
`tuple` on each element. E.g., `foo[('bar', 'baz')] = 1`
will set the `bar` and `baz` keys of `foo` to `1`.
Anything else:
Sets `key` to `val` on each element of `self`.
val: Value to assign. If `val` is a `plist` and its length matches either
`len(self)` (in most cases described above for `key`) or `len(key)`,
each element of `val` is applied to each corresponding element of
`self` or `self[k]`.
Returns:
`self`, in order to allow chaining through `plist.__setitem__(key, val)`.
Raises:
TypeError: If `key` fails to be applied directly to `self` and fails to be
applied to its elements individually.
"""
if self.__pepth__ != 0:
return plist.__getattr__(self, '__setitem__')(key, val)
try:
if (isinstance(key, list)
and plist(key).all(isinstance, int)):
lval = _ensure_len(len(key), val)
for i, k in enumerate(key):
operator.__setitem__(self, k, lval[i])
elif isinstance(key, slice):
lval = val
if not isinstance(val, collections.Iterable):
slice_len = len([i for i in range(*key.indices(len(self)))])
lval = _ensure_len(slice_len, val)
list.__setitem__(self, key, lval)
else:
list.__setitem__(self, key, val)
except Exception as first_exception:
try:
if isinstance(key, list):
lval = _ensure_len(len(key), val)
for i, k in enumerate(key):
operator.__setitem__(self[i], k, lval[i])
elif isinstance(key, tuple):
lval = _ensure_len(len(self), val)
try:
for i, x in enumerate(self):
operator.__setitem__(x, key, lval[i])
except Exception:
for i, x in enumerate(self):
for j, k in enumerate(key):
operator.__setitem__(x, k, lval[i][j])
else:
lval = _ensure_len(len(self), val)
for i, x in enumerate(self):
operator.__setitem__(x, key, lval[i])
except Exception as second_exception:
raise TypeError('Failed to apply index to self or elements.\nself exception: %s\nelements exception: %s' % (str(first_exception), str(second_exception)))
# Allow chaining of set ops when using apply('__setitem__', k, v) and apply(operators.__setitem__, k, v)
return self
def __setslice__(self, i, j, sequence):
"""Delegates to `__setitem__` whenever possible. For compatibility with python 2.7.
Avoid using `__setslice__` whenever possible in python 2.7, as the bytecode compiler
assumes that the slice is for the given object on the stack, and modifies negative
indices relative to that object's length. In `plist`s and other dynamic apis like
`numpy`, that assumption can cause undetectable and unrecoverable errors.
To avoid the errors caused by this api in python 2.7, simply use three argument
slices instead of two; e.g., `plist[::1]`.
Examples:
The following examples are safe uses of slicing with `plist`s:
```python
pl = plist['abc', 'def', 'ghi']
pl[:2:1] = plist['dec', 'abf']
assert (pl.aslist() ==
['dec', 'abf', 'ghi'])
# Turn strings into mutable lists:
pl = pl.apply(list)
# Change slices of the lists:
pl._[:2:1] = pl._[1:3:1]
# Turn the lists back into strings
pl = pl.apply(''.join)
assert (pl.aslist() ==
['ecc', 'bff', 'hii'])
```
The following example will log a warning -- even though it appears to work, the
underlying bytecode is incorrect:
```python
pl = pl.apply(list)
pl._[:2] = plist['ab', 'de', 'gh']
pl = pl.apply(''.join)
assert (pl.aslist() ==
['abc', 'def', 'ghi'])
# Logs:
# qj: <pstar> __setslice__: WARNING! <1711>: (multiline log follows)
# Slicing of inner plist elements with negative indices in python 2.7 does not work, and the error cannot be detected or corrected!
# Instead of slicing with one or two arguments: `plist._[-2:]`, use the three argument slice: `plist._[-2::1]`.
# This avoids the broken code path in the python compiler.
```
Args:
i, j: Beginning and ending indices of `slice`.
sequence: `iterable` object to assign to the slice.
Returns:
`self`, to permit chaining through direct calls to `plist.__setslice__`.
"""
if self.__pepth__ != 0:
if '__warned__' not in plist.__setslice__.__dict__:
qj('Slicing of inner plist elements with negative indices in python 2.7 does not work, and the error cannot be detected or corrected!\n'
'Instead of slicing with one or two arguments: `plist._[-2:]`, use the three argument slice: `plist._[-2::1]`.\n'
'This avoids the broken code path in the python compiler.', 'WARNING!')
plist.__setslice__.__dict__['__warned__'] = True
return plist.__getattr__(self, '__setslice__')(i, j, sequence)
try:
list.__setslice__(self, i, j, sequence)
except Exception:
plist.__setitem__(self, slice(i, j), sequence)
return self
##############################################################################
# __del*__
##############################################################################
def __delattr__(self, name):
"""Deletes an attribute on elements of `self`.
This delegates entirely to the elements of `self`, allowing natural
deletion of attributes.
Examples:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
del foos.foo
assert (foos.aslist() ==
[{'bar': 0}, {'bar': 1}, {'bar': 0}])
# Deletion works on grouped plists as well:
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
by_bar = foos.bar.groupby()
# Assignment to an existing attribute:
del by_bar.foo
assert (by_bar.aslist() ==
[[{'bar': 0}, {'bar': 0}], [{'bar': 1}]])
```
Args:
name: Name of the attribute to delete.
Returns:
`self`, in order to allow chaining through `plist.__delattr__(name)`.
"""
if self.__pepth__ != 0:
return plist.__getattr__(self, '__delattr__')(name)
for x in self:
x.__delattr__(name)
return self
def __delitem__(self, key):
"""Deletes items of `self` using a variety of indexing styles.
Examples:
Indexing into the `plist` itself:
```python
# Basic scalar indexing:
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
del foos[0]
assert (foos.aslist() ==
[dict(foo=1, bar=1), dict(foo=2, bar=0)])
# plist slice indexing:
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
del foos[:2]
assert (foos.aslist() ==
[dict(foo=2, bar=0)])
# plist int list indexing:
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
del foos[[0, 2]]
assert (foos.aslist() ==
[dict(foo=1, bar=1)])
```
Indexing into the elements of the `plist`:
```python
# Basic scalar indexing:
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
del foos['foo']
assert (foos.aslist() ==
[dict(bar=0), dict(bar=1), dict(bar=0)])
# tuple indexing
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
del foos[('foo', 'bar')]
assert (foos.aslist() ==
[dict(), dict(), dict()])
# list indexing
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
del foos[['foo', 'bar', 'bar']]
assert (foos.aslist() ==
[dict(bar=0), dict(foo=1), dict(foo=2)])
```
Indexing into the elements of the `plist` when the elements are indexed by
`int`s, `slice`s, or other means that conflict with `plist` indexing:
```python
# Basic scalar indexing:
pl = plist[[1, 2, 3], [4, 5, 6], [7, 8, 9]]
del pl._[0]
assert (pl.aslist() ==
[[2, 3], [5, 6], [8, 9]])
# slice indexing (note the use of the 3-argument version of slicing):
pl = plist[[1, 2, 3], [4, 5, 6], [7, 8, 9]]
del pl._[:2:1]
assert (pl.aslist() ==
[[3], [6], [9]])
```
Args:
key: The key to index by.
`key` can be applied to `self` directly as:
A `list` of `int`s: Deletes from `self` using those `int`s as indices.
A `slice`: Deletes from `self` based on the `slice`.
An `int`: Deletes the value at that index.
`key` can be applied to elements of `self` individually:
A generic `list`:
Deletes from the elements of `self` using the elements of `key`,
applied in order.
A `tuple` when the elements of `self` can be indexed by `tuple`:
Deletes from the elements of `self` by applying that `tuple` to each
element of `self`.
A `tuple`, otherwise:
Deletes from the elements of `self` where each element gets each
element in the `key` `tuple` deleted. E.g., `del foo[('bar', 'baz')]`
deletes all `'bar'` and `'baz'` keys from each element of `foo`.
Anything else:
Deletes `key` from each element of `self`.
Returns:
`self`, in order to allow chaining through `plist.__delitem__(key)`.
Raises:
TypeError: If `key` fails to be applied directly to `self` and fails to be
applied to its elements individually.
"""
if self.__pepth__ != 0:
return plist.__getattr__(self, '__delitem__')(key)
try:
if (isinstance(key, list)
and plist(key).all(isinstance, int)):
for k in sorted(key, reverse=True):
operator.__delitem__(self, k)
else:
# Handles slices and ints. Other key types will fail.
list.__delitem__(self, key)
except Exception as first_exception:
try:
if isinstance(key, list):
for i, k in enumerate(key):
operator.__delitem__(self[i], k)
elif isinstance(key, tuple):
try:
for x in self:
operator.__delitem__(x, key)
except Exception:
for x in self:
for k in key:
operator.__delitem__(x, k)
else:
for x in self:
operator.__delitem__(x, key)
except Exception as second_exception:
raise TypeError('Failed to apply index to self or elements.\nself exception: %s\nelements exception: %s' % (str(first_exception), str(second_exception)))
# Allow chaining of set ops when using apply('__delitem__', k) and apply(operators.__delitem__, k)
return self
def __delslice__(self, i, j):
"""Delegates to `__delitem__` whenever possible. For compatibility with python 2.7.
Avoid using `__delslice__` whenever possible in python 2.7, as the bytecode compiler
assumes that the slice is for the given object on the stack, and modifies negative
indices relative to that object's length. In `plist`s and other dynamic apis like
`numpy`, that assumption can cause undetectable and unrecoverable errors.
To avoid the errors caused by this api in python 2.7, simply use three argument
slices instead of two; e.g., `plist[::1]`.
Examples:
The following examples are safe uses of slicing with `plist`s:
```python
pl = plist['abc', 'def', 'ghi']
del pl[:2:1]
assert (pl.aslist() ==
['ghi'])
# Change slices of the lists:
pl = plist['abc', 'def', 'ghi']
# Turn strings into mutable lists:
pl = pl.apply(list)
del pl._[:2:1]
# Turn lists back into strings:
pl = pl.apply(''.join)
assert (pl.aslist() ==
['c', 'f', 'i'])
```
The following example will log a warning -- even though it appears to work, the
underlying bytecode is incorrect:
```python
pl = plist['abc', 'def', 'ghi']
# Turn strings into mutable lists:
pl = pl.apply(list)
del pl._[:2]
# Turn lists back into strings:
pl = pl.apply(''.join)
assert (pl.aslist() ==
['c', 'f', 'i'])
# Logs:
# qj: <pstar> __delslice__: WARNING! <1711>: (multiline log follows)
# Slicing of inner plist elements with negative indices in python 2.7 does not work, and the error cannot be detected or corrected!
# Instead of slicing with one or two arguments: `plist._[-2:]`, use the three argument slice: `plist._[-2::1]`.
# This avoids the broken code path in the python compiler.
```
Args:
i, j: Beginning and ending indices of `slice`.
Returns:
`self`, to permit chaining through direct calls to `plist.__delslice__`.
"""
if self.__pepth__ != 0:
if '__warned__' not in plist.__delslice__.__dict__:
qj('Slicing of inner plist elements with negative indices in python 2.7 does not work, and the error cannot be detected or corrected!\n'
'Instead of slicing with one or two arguments: `plist._[-2:]`, use the three argument slice: `plist._[-2::1]`.\n'
'This avoids the broken code path in the python compiler.', 'WARNING!')
plist.__delslice__.__dict__['__warned__'] = True
return plist.__getattr__(self, '__delslice__')(i, j)
try:
list.__delslice__(self, i, j)
except Exception:
plist.__delitem__(self, slice(i, j))
return self
##############################################################################
# __call__
##############################################################################
def __call__(self, *args, **kwargs):
"""Call each element of self, possibly recusively.
Any arguments passed to `__call__` that are `plist`s and have the same
length as `self` will be passed one-at-a-time to the each of the `callable`s
in `self`. Otherwise, arguments are passed in unmodified.
Examples:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
# A plist of callables, one for each pdict:
foos_peys = foos.peys
assert (foos_peys.all(callable))
# The actual call to plist.__call__ (separated out for demonstration):
assert (foos_peys().aslist() ==
[['bar', 'foo'], ['bar', 'foo'], ['bar', 'foo']])
# Of course, you would normally do the above like this, which is the same:
assert (foos.peys().aslist() ==
[['bar', 'foo'], ['bar', 'foo'], ['bar', 'foo']])
by_bar = foos.bar.groupby()
# There's rarely any need to pass pepth, as the call gets routed to the
# correct object by default in almost all situations, even with grouped
# plists:
assert (by_bar.peys().aslist() ==
[[['bar', 'foo'], ['bar', 'foo']], [['bar', 'foo']]])
```
All argument calling conventions are possible:
```python
pl = plist['foo {}', 'bar {}', 'baz {}']
# Basic positional argument passing:
assert (pl.format(0).aslist() ==
['foo 0', 'bar 0', 'baz 0'])
# Passing a plist in a positional argument:
assert (pl.format(pl._[:3:1]).aslist() ==
['foo foo', 'bar bar', 'baz baz'])
# Basic keyword argument passing:
pl = pl.replace('{}', '{foo}')
assert (pl.format(foo=0).aslist() ==
['foo 0', 'bar 0', 'baz 0'])
# Passing a plist as a keyword argument:
assert (pl.format(foo=pl._[:3:1]).aslist() ==
['foo foo', 'bar bar', 'baz baz'])
```
They work the same way on grouped plists:
```python
pl = plist['foo {}', 'bar {}', 'baz {}']
by = pl._[0].groupby() # Group by first character.
assert (by.aslist() ==
[['foo {}'], ['bar {}', 'baz {}']])
# Basic positional argument passing:
assert (by.format(0).aslist() ==
[['foo 0'], ['bar 0', 'baz 0']])
# Passing a plist in a positional argument:
assert (by.format(by._[:3:1]).aslist() ==
[['foo foo'], ['bar bar', 'baz baz']])
# Basic keyword argument passing:
by = by.replace('{}', '{foo}')
assert (by.format(foo=0).aslist() ==
[['foo 0'], ['bar 0', 'baz 0']])
# Passing a plist as a keyword argument:
assert (by.format(foo=by._[:3:1]).aslist() ==
[['foo foo'], ['bar bar', 'baz baz']])
```
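The `psplit` keyword parallelizes the per-element calls. A minimal sketch
(results match the serial call; only the execution strategy changes):
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
# psplit=1 requests one parallel execution per element of self.
assert (foos.peys(psplit=1).aslist() ==
[['bar', 'foo'], ['bar', 'foo'], ['bar', 'foo']])
```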
Args:
*args: Arguments to pass to elements of `self`.
**kwargs: Keyword arguments to pass to elements of `self`, after extracting:
pepth: Integer (default `0`). If greater than `0`, calls occur at that
depth in the `plist`. Equivalent to appending '_'s at the end of the
name of the attribute (see `plist.__getattribute__`). If less than
`0`, calls occur as deep in the `plist` as possible. Equivalent to
calling `plist._` before calling the attribute.
psplit: Integer (default `0`). If greater than `0`, calls to elements of
`self` are applied in parallel. If `psplit` is `1`, the number of
parallel executions is equal to the length of `self`.
Otherwise, `psplit` is the number of parallel executions.
call_pepth: *Private -- do not pass.* Internal state variable for tracking
how deep the call stack is in `plist` code, for use with
internal methods that need access to the original caller's
stack frame.
Returns:
New `plist` resulting from calling each element of `self`.
"""
pepth = kwargs.pop('pepth', self.__pepth__)
self.__pepth__ = 0
call_pepth = kwargs.pop('call_pepth', 0)
psplit = kwargs.pop('psplit', 0)
args = [_ensure_len(len(self), a, strict=True) for a in args]
kwargs = {
k: _ensure_len(len(self), v, strict=True) for k, v in kwargs.items()
}
if pepth != 0:
try:
return plist([x(pepth=pepth - 1,
call_pepth=call_pepth + PLIST_CALL_ATTR_CALL_PEPTH_DELTA,
*[a[i] for a in args],
**{k: v[i] for k, v in kwargs.items()})
for i, x in enumerate(self)],
root=self.__root__)
except Exception as e:
if pepth > 0:
raise e
if psplit > 0:
pool = _get_thread_pool(psplit, len(self))
call_args = [pdict(x=x, i=i) for i, x in enumerate(self)]
map_func = lambda ca: ca.x(*[a[ca.i] for a in args],
**{k: v[ca.i] for k, v in kwargs.items()})
pl = plist(pool.map(map_func, call_args, chunksize=_get_thread_chunksize(psplit, len(self))), root=self.__root__)
pool.close()
return pl
return plist([x(*[a[i] for a in args],
**{k: v[i] for k, v in kwargs.items()})
for i, x in enumerate(self)],
root=self.__root__)
##############################################################################
# __contains__
##############################################################################
def __contains__(self, other):
"""Implements the `in` operator to avoid inappropriate use of `plist` comparators.
Examples:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (2 in foos.foo)
assert (dict(foo=0, bar=0) in foos)
by_bar = foos.bar.groupby()
assert (2 in by_bar.foo)
assert (dict(foo=0, bar=0) in by_bar)
```
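Non-membership follows directly (a small sketch using the same `foos`):
```python
assert (3 not in foos.foo)
assert (dict(foo=3, bar=1) not in foos)
```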
Returns:
`bool` value indicating whether `other` was found in `self`.
"""
if self is other:
return False
found = False
try:
found = self.any(plist.__contains__, other)
except Exception:
pass
return (found
or any([x is other
or (not isinstance(x, plist)
and not isinstance(other, plist)
and x == other) for x in self]))
##############################################################################
# Comparison operators -- ALL PERFORM FILTERING!
##############################################################################
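# A minimal sketch of comparison-as-filtering (see `plist.root` for details):
#   foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
#   assert ((foos.foo > 0).aslist() ==
#           [dict(foo=1, bar=1), dict(foo=2, bar=0)])  # Returns matching roots, not bools.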
__cmp__ = _build_comparator(
operator.__eq__,
operator.__or__,
lambda self, return_inds: (
self.lfill(pepth=-1)
if return_inds else self),
False)
__eq__ = __cmp__
__ne__ = _build_comparator(
operator.__ne__,
operator.__and__,
lambda self, return_inds: ([] if return_inds else plist()),
True)
__gt__ = _build_comparator(
operator.__gt__,
operator.__and__,
lambda self, return_inds: ([] if return_inds else plist()),
True)
__ge__ = _build_comparator(
operator.__ge__,
operator.__and__,
lambda self, return_inds: (
self.lfill(pepth=-1)
if return_inds else self),
True)
__lt__ = _build_comparator(
operator.__lt__,
operator.__and__,
lambda self, return_inds: ([] if return_inds else plist()),
True)
__le__ = _build_comparator(
operator.__le__,
operator.__and__,
lambda self, return_inds: (
self.lfill(pepth=-1)
if return_inds else self),
True)
##############################################################################
# Logical operators -- ALL PERFORM SET OPERATIONS!
##############################################################################
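# Hedged sketch: on filtered plists that share a root, `&`, `|`, and `^` act as
# set operations over the root's elements:
#   foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
#   assert (((foos.bar == 0) & (foos.foo > 0)).aslist() ==
#           [dict(foo=2, bar=0)])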
__and__ = _build_logical_op(operator.__and__)
__rand__ = _build_binary_rop(operator.__and__)
__iand__ = _build_binary_op(operator.__iand__)
__or__ = _build_logical_op(operator.__or__)
__ror__ = _build_binary_rop(operator.__or__)
__ior__ = _build_binary_op(operator.__ior__)
__xor__ = _build_logical_op(operator.__xor__)
__rxor__ = _build_binary_rop(operator.__xor__)
__ixor__ = _build_binary_op(operator.__ixor__)
##############################################################################
# Binary operators
##############################################################################
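# Binary ops broadcast element-wise; same-length plist operands pair up
# one-to-one (a minimal sketch):
#   pl = plist[1, 2, 3]
#   assert ((pl + 1).aslist() == [2, 3, 4])
#   assert ((pl + pl).aslist() == [2, 4, 6])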
__add__, __radd__, __iadd__ = _build_binary_ops(operator.__add__, operator.__iadd__)
__sub__, __rsub__, __isub__ = _build_binary_ops(operator.__sub__, operator.__isub__)
__mul__, __rmul__, __imul__ = _build_binary_ops(operator.__mul__, operator.__imul__)
__truediv__, __rtruediv__, __itruediv__ = _build_binary_ops(operator.__truediv__, operator.__itruediv__)
if sys.version_info[0] < 3:
__div__, __rdiv__, __idiv__ = _build_binary_ops(operator.__div__, operator.__idiv__)
__pow__, __rpow__, __ipow__ = _build_binary_ops(operator.__pow__, operator.__ipow__)
__mod__, __rmod__, __imod__ = _build_binary_ops(operator.__mod__, operator.__imod__)
__divmod__ = _build_binary_op(divmod)
__rdivmod__ = _build_binary_rop(divmod)
__floordiv__, __rfloordiv__, __ifloordiv__ = _build_binary_ops(operator.__floordiv__, operator.__ifloordiv__)
__lshift__, __rlshift__, __ilshift__ = _build_binary_ops(operator.__lshift__, operator.__ilshift__)
__rshift__, __rrshift__, __irshift__ = _build_binary_ops(operator.__rshift__, operator.__irshift__)
##############################################################################
# Unary operators
##############################################################################
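# Unary ops also apply element-wise (a minimal sketch):
#   pl = plist[1, -2, 3]
#   assert ((-pl).aslist() == [-1, 2, -3])
#   assert (abs(pl).aslist() == [1, 2, 3])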
__neg__ = _build_unary_op(operator.__neg__)
__pos__ = _build_unary_op(operator.__pos__)
__abs__ = _build_unary_op(abs)
__invert__ = _build_unary_op(operator.__invert__)
__complex__ = _build_unary_op(complex)
__int__ = _build_unary_op(int)
if sys.version_info[0] < 3:
__long__ = _build_unary_op(long)
__float__ = _build_unary_op(float)
__oct__ = _build_unary_op(oct)
__hex__ = _build_unary_op(hex)
# This makes python2.7 tests very unhappy, but doesn't break any tests on python3.6.
# def __nonzero__(self):
# return bool(list(self.nonempty(-1)))
##############################################################################
# Ensure plists can't be hashed.
##############################################################################
__hash__ = None
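# E.g., `hash(plist())` raises TypeError, so plists can't be dict keys or set
# members -- mirroring the mutability semantics of `list`.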
# Nope. Crashes when trying to index by plists of lists of ints.
# def __index__(self):
# return plist([x.__index__() for x in self], root=self.__root__)
##############################################################################
# Allow plist use as context managers.
##############################################################################
def __enter__(self):
"""Allow the use of plists in `with` statements.
Examples:
```python
import glob, os
path = os.path.dirname(__file__)
filenames = plist(glob.glob(os.path.join(path, '*.py')))
with filenames.apply(open, 'r') as f:
texts = f.read()
assert (len(texts) >= 1)
assert (len(texts.all(isinstance, str)) >= 1)
```
Returns:
`plist` of results of calling `__enter__` on each element of `self`.
"""
return plist([x.__enter__() for x in self], root=self.__root__)
def __exit__(self, exc_type, exc_value, traceback):
"""Allow the use of plists in `with` statements.
See `plist.__enter__`.
Returns:
`plist` of results of calling `__exit__` on each element of `self`, reduced
by `all(bool)` so that an exception is suppressed only if every element's
`__exit__` suppressed it.
"""
return plist([x.__exit__(exc_type, exc_value, traceback) for x in self], root=self.__root__).all(bool)
##############################################################################
# Sensible tab completion.
##############################################################################
def __dir__(self):
"""Allow natural tab-completion on `self` and its contents.
Examples:
```python
pl = plist['a', 'b', 'c']
assert ('capitalize' in dir(pl))
assert ('groupby' in dir(pl))
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert ('foo' in dir(foos))
assert ('groupby' in dir(foos))
assert ('foo' in dir(foos.bar.groupby()))
```
Returns:
Combined `plist` of methods and properties available on `self` and its contents.
"""
# list.__dir__ doesn't exist on 2.7, so we can't use it to get our own dir() results.
# Instead, we'll do what the python c runtime does, and just collect the keys from
# the plist class's __dict__ and up its superclass chain (which is just list, ignoring
# object).
return plist(
[plist(list(list.__dict__.keys()) + list(plist.__dict__.keys()))]
+ [plist(dir(x)) for x in self]).ungroup(-1).puniq()
##############################################################################
##############################################################################
##############################################################################
# Public methods.
##############################################################################
##############################################################################
##############################################################################
def _(self):
"""Causes the next call to `self` to be performed as deep as possible in the `plist`.
This is a convenience method primarily for easy subscripting of the values of
a `plist`.
Examples:
```python
pl = plist([np.arange(10) for _ in range(3)])
assert (pl._[2].aslist() ==
[2, 2, 2])
import operator as op
assert (pl._[2:4:1].apply(op.eq,
[np.array([2, 3]), np.array([2, 3]), np.array([2, 3])])
.apply(np.all).aslist() ==
[True, True, True])
```
It can be used to call any method on the values of a `plist` as well:
```python
pl = plist([['foo'], ['bar']])
pl._.append('baz')
assert (pl.apply(type).aslist() ==
[list, list])
assert (pl.aslist() ==
[['foo', 'baz'], ['bar', 'baz']])
```
Returns:
`self`, but in a state such that the next access to a property or method of
`self` occurs at the maximum depth.
"""
self.__pepth__ = -1
return self
def __(self):
"""Causes the next call to `self` to be performed on the innermost `plist`.
This is a convenience method primarily for easy subscripting of the innermost `plist`.
Examples:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
by_bar = foos.bar.groupby()
assert (by_bar.__[0].aslist() ==
[{'bar': 0, 'foo': 0}, {'bar': 1, 'foo': 1}])
# This makes slicing the innermost plist easy as well, but note the three-argument slice:
assert (by_bar.__[:1:].aslist() ==
[[{'bar': 0, 'foo': 0}], [{'bar': 1, 'foo': 1}]])
```
It can be used to call any method on the values of a `plist` as well:
```python
pl = plist * [['foo'], ['bar']]
pl.__.append('baz')
assert (pl.apply(type).aslist() ==
[plist, plist])
assert (pl.aslist() ==
[['foo', 'baz'], ['bar', 'baz']])
```
Compare the use of `__` with the use of `_`, which will work on the leaf values if they
support the property being accessed:
```python
# Get the first two characters from the strings in the innermost plist.
assert (pl._[:2:].aslist() ==
[['fo', 'ba'], ['ba', 'ba']])
# Get the first two elements from the innermost plist (which in this case is the entire plist).
assert (pl.__[:2:].aslist() ==
[['foo', 'baz'], ['bar', 'baz']])
```
Returns:
`self`, but in a state such that the next access to a property or method of
`self` occurs at the innermost `plist`.
"""
self.__pepth__ = self.pdepth(True)
return self
##############################################################################
# __root__ pointer management.
##############################################################################
def root(self):
"""Returns the root of the `plist`.
Examples:
When a `plist` is created, by default its root is `self`:
```python
pl = plist([1, 2, 3])
assert (pl.root() is pl)
```
Subsequent calls to the `plist` will return new `plist`s, but most of those
calls will retain the original root:
```python
pl2 = pl + 3
assert (pl2.aslist() ==
[4, 5, 6])
assert (pl2.root() is pl)
assert (pl2.pstr().root() is pl)
```
Some methods create a new root `plist` in order to keep the values and the root
synchronized:
```python
assert (pl2[0:2].aslist() ==
[4, 5])
assert (pl2[0:2].root().aslist() ==
[1, 2])
assert (pl2.sortby(reverse=True).aslist() ==
[6, 5, 4])
assert (pl2.sortby(reverse=True).root().aslist() ==
[3, 2, 1])
```
`plist` filtering also always returns the root, in order to make the filter easily chainable:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 1, 'bar': 1},
{'foo': 2, 'bar': 0}])
filtered = foos.bar == 0
assert (filtered.aslist() ==
[dict(foo=0, bar=0), dict(foo=2, bar=0)])
assert (filtered.root() is filtered)
(foos.bar == 0).baz = 6
(foos.bar == 1).baz = foos.foo * 2
assert (foos.aslist() ==
[dict(foo=0, bar=0, baz=6), dict(foo=1, bar=1, baz=2), dict(foo=2, bar=0, baz=6)])
```
Grouping also always returns the root:
```python
by_bar = foos.bar.groupby()
assert (by_bar.aslist() ==
[[{'bar': 0, 'baz': 6, 'foo': 0}, {'bar': 0, 'baz': 6, 'foo': 2}],
[{'bar': 1, 'baz': [0, 2, 4], 'foo': 1}]])
assert (by_bar.aslist() == by_bar.root().aslist())
```
Returns:
The root `plist` of `self`.
"""
return self.__root__
def uproot(self):
"""Sets the root to `self` so future `root()` calls return this `plist`.
Examples:
In some cases it is better to reset the root. For example, after applying
a number of operations to a `plist` to get the data into the desired form,
resetting the root to `self` often makes sense, as future filtering
should not return the original data:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
(foos.bar == 0).baz = 6
(foos.bar == 1).baz = foos.foo * 2
floos = foos.rekey(dict(foo='floo'))
assert (floos.root() is foos)
assert (floos.peys()[0].aslist() ==
['bar', 'baz', 'floo'])
assert ((floos.floo < 2).aslist() ==
[dict(foo=0, bar=0, baz=6), dict(foo=1, bar=1, baz=2)])
floos = floos.uproot()
assert ((floos.floo < 2).aslist() ==
[dict(floo=0, bar=0, baz=6), dict(floo=1, bar=1, baz=2)])
```
See `plist.root` for more details.
Returns:
`self`.
"""
self.__root__ = self
return self
##############################################################################
# Conversion methods.
##############################################################################
def copy(self):
"""Copy `self` to new `plist`. Performs a shallow copy.
`self.root()` is copied as well and used to root the copy if
`self.root() is not self`.
If `self.root() is self`, the root is not maintained.
Examples:
```python
pl1 = plist[1, 2, 3]
pl2 = pl1.copy()
assert (pl1 is not pl2)
assert (pl1.root() is pl1 and pl2.root() is pl2)
pl3 = pl2 + 1
pl4 = pl3.copy()
assert (pl4.root().aslist() == pl3.root().aslist())
assert (pl4.root() is not pl3.root())
assert (pl4.root().aslist() == pl2.aslist())
assert (pl4.root() is not pl2)
```
Returns:
Copy of `self` with `self.root()` handled appropriately.
"""
if self.__root__ is self:
return plist(self)
return plist(self, root=self.__root__.copy())
def aslist(self):
"""Recursively convert all nested `plist`s from `self` to `list`s, inclusive.
Examples:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
by_bar = foos.bar.groupby()
assert (by_bar.apply(type).aslist() == [plist, plist])
assert ([type(x) for x in by_bar.aslist()] == [list, list])
```
Returns:
`list` with the same structure and contents as `self`.
"""
try:
return [x.aslist() for x in self]
except Exception:
pass
return [x for x in self]
def astuple(self):
"""Recursively convert all nested `plist`s from `self` to `tuple`s, inclusive.
Examples:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
by_bar = foos.bar.groupby()
assert (by_bar.apply(type).aslist() == [plist, plist])
assert ([type(x) for x in by_bar.astuple()] == [tuple, tuple])
```
Returns:
`tuple` with the same structure and contents as `self`.
"""
try:
return tuple([x.astuple() for x in self])
except Exception:
pass
return tuple([x for x in self])
def aspset(self):
"""Recursively convert all nested `plist`s from `self` to `pset`s, inclusive.
All values must be hashable for the conversion to succeed. Grouped `plist`s
necessarily return `frozenpset`s at all non-root nodes.
Examples:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.bar.aspset() == pset([0, 1]))
by_bar = foos.bar.groupby()
assert (by_bar.bar.apply(type).aslist() == [plist, plist])
assert (type(by_bar.bar.aspset()) == pset)
assert ([type(x) for x in by_bar.bar.aspset()] == [frozenpset, frozenpset])
```
Returns:
`pset` with the same structure and contents as `self`.
"""
try:
return pset([x.aspset() for x in self])
except Exception:
try:
return frozenpset([x.aspset() for x in self])
except Exception:
pass
return frozenpset([x for x in self])
def aspdict(self):
"""Convert `self` to a `pdict` if there is a natural mapping of keys to values in `self`.
Recursively creates a `pdict` from `self`. Experimental, likely to change.
Examples:
```python
pl = plist['foo', 'bar', 'baz']
assert (pl.pdict() ==
dict(foo='foo', bar='bar', baz='baz'))
assert (pl.replace('a', '').replace('o', '').pdict() ==
dict(foo='f', bar='br', baz='bz'))
foos = plist([pdict(foo=0, bar=0, baz=3), pdict(foo=1, bar=1, baz=2), pdict(foo=2, bar=0, baz=1)])
by_bar = foos.bar.groupby()
assert (by_bar.bar.ungroup().puniq().zip(by_bar).uproot().aspdict() ==
{0: [{'bar': 0, 'baz': 3, 'foo': 0}, {'bar': 0, 'baz': 1, 'foo': 2}],
1: [{'bar': 1, 'baz': 2, 'foo': 1}]})
```
Returns:
New `pdict` based on the contents of `self`.
"""
pd = self.pdict()
try:
pd.update({k: v.aspdict() for k, v in pd.pitems()})
except Exception:
pass
return pd
if np is None:
def np(self, *args, **kwargs):
"""If `numpy` were installed on your system, `plist.np()` would convert your `plist` to a `numpy.array`.
Please install `numpy`:
```bash
pip install numpy
```
Raises:
NotImplementedError: We can't give you `numpy.array`s if you don't give us `numpy`.
"""
raise NotImplementedError('numpy is unavailable on your system. Please install numpy before calling plist.np().')
else:
def np(self, *args, **kwargs):
"""Converts the elements of `self` to `numpy.array`s, forwarding passed args.
Examples:
```python
foos = plist([pdict(foo=i, bar=i % 2) for i in range(5)])
(foos.bar == 0).baz = 3 + (foos.bar == 0).foo
(foos.bar == 1).baz = 6
foos.bin = -1
assert (foos.aslist() ==
[{'bar': 0, 'baz': 3, 'bin': -1, 'foo': 0},
{'bar': 1, 'baz': 6, 'bin': -1, 'foo': 1},
{'bar': 0, 'baz': 5, 'bin': -1, 'foo': 2},
{'bar': 1, 'baz': 6, 'bin': -1, 'foo': 3},
{'bar': 0, 'baz': 7, 'bin': -1, 'foo': 4}])
assert (foos.foo.wrap().np().sum().aslist() ==
[10])
by_bar = foos.bar.sortby(reverse=True).groupby()
baz = by_bar.baz
# Filters for the max per group, which includes the two-way tie in the first group.
(baz == baz.np().max()).bin = 13
assert (by_bar.aslist() ==
[[{'bar': 1, 'baz': 6, 'bin': 13, 'foo': 1},
{'bar': 1, 'baz': 6, 'bin': 13, 'foo': 3}],
[{'bar': 0, 'baz': 3, 'bin': -1, 'foo': 0},
{'bar': 0, 'baz': 5, 'bin': -1, 'foo': 2},
{'bar': 0, 'baz': 7, 'bin': 13, 'foo': 4}]])
assert ((by_bar.foo.np() * by_bar.baz.np() - by_bar.bin.np()).sum().aslist() ==
[-2, 27])
```
Args:
*args: Positional arguments passed to `np.array`.
**kwargs: Keyword arguments passed to `np.array`.
Returns:
New `plist` with values from `self` converted to `np.array`s.
"""
return plist([np.array(x, *args, **kwargs) for x in self], root=self.__root__)
if pd is None:
def pd(self, *args, **kwargs):
"""If `pandas` were installed on your system, `plist.pd()` would convert your `plist` to a `pandas.DataFrame`.
Please install `pandas`:
```bash
pip install pandas
```
Raises:
NotImplementedError: We can't give you `pandas.DataFrame`s if you don't give us `pandas`.
"""
raise NotImplementedError('pandas is unavailable on your system. Please install pandas before calling plist.pd().')
else:
def pd(self, *args, **kwargs):
r"""Converts `self` into a `pandas.DataFrame`, forwarding passed args.
Examples:
```python
foos = plist([pdict(foo=i, bar=i % 2) for i in range(5)])
(foos.bar == 0).baz = 3 + (foos.bar == 0).foo
(foos.bar == 1).baz = 6
foos.bin = -1
assert (foos.aslist() ==
[{'bar': 0, 'baz': 3, 'bin': -1, 'foo': 0},
{'bar': 1, 'baz': 6, 'bin': -1, 'foo': 1},
{'bar': 0, 'baz': 5, 'bin': -1, 'foo': 2},
{'bar': 1, 'baz': 6, 'bin': -1, 'foo': 3},
{'bar': 0, 'baz': 7, 'bin': -1, 'foo': 4}])
by_bar = foos.bar.sortby(reverse=True).groupby()
baz = by_bar.baz
(baz == baz.np().max()).bin = 13
assert (by_bar.aslist() ==
[[{'bar': 1, 'baz': 6, 'bin': 13, 'foo': 1},
{'bar': 1, 'baz': 6, 'bin': 13, 'foo': 3}],
[{'bar': 0, 'baz': 3, 'bin': -1, 'foo': 0},
{'bar': 0, 'baz': 5, 'bin': -1, 'foo': 2},
{'bar': 0, 'baz': 7, 'bin': 13, 'foo': 4}]])
assert (str(foos.pd()) ==
' bar baz bin foo\n'
'0 1 6 13 1\n'
'1 1 6 13 3\n'
'2 0 3 -1 0\n'
'3 0 5 -1 2\n'
'4 0 7 13 4')
assert (str(foos.pd(index='foo')) ==
' bar baz bin\n'
'foo \n'
'1 1 6 13\n'
'3 1 6 13\n'
'0 0 3 -1\n'
'2 0 5 -1\n'
'4 0 7 13')
assert (by_bar.pd_().pstr().aslist() ==
[' bar baz bin foo\n'
'0 1 6 13 1\n'
'1 1 6 13 3',
' bar baz bin foo\n'
'0 0 3 -1 0\n'
'1 0 5 -1 2\n'
'2 0 7 13 4'])
```
Note the use of `pd_()` on the grouped `plist`. This allows you to get a separate `pandas.DataFrame` for
each group in your `plist`, and then do normal `DataFrame` manipulations with them individually.
If you want a `pandas.GroupBy` object, you should convert the `plist` to a `DataFrame` first, and then
call `DataFrame.groupby`. Also see `plist.remix` for alternative ways of converting `plist`s to
`DataFrame`s.
Args:
*args: Positional arguments passed to `pandas.DataFrame.from_records`.
**kwargs: Keyword arguments passed to `pandas.DataFrame.from_records`.
Returns:
A `pandas.DataFrame` object constructed from `self`.
"""
return pd.DataFrame.from_records(self.aslist(), *args, **kwargs)
def pdict(self, *args, **kwargs):
"""Convert `self` to a `pdict` if there is a natural mapping of keys to values in `self`.
If `self is self.root()`, attempts to treat the contents of `self` as key-value pairs in order
to create the `pdict` (i.e., `[(key, value), (key, value), ...]`). If that fails, attempts to build
the `pdict` assuming `self.root()` is a `plist` of `KeyValue` `namedtuple`s, using `self.root().key`
for the keys, and the values in `self` for the values. If that fails, creates a `pdict` pairing
values from `self.root()` with values from `self`. In that case, if `self is self.root()`, the
`pdict` will be of the form: `pdict(v1=v1, v2=v2, ...)`, as in the first example below.
Examples:
```python
pl = plist['foo', 'bar', 'baz']
assert (pl.pdict() ==
dict(foo='foo', bar='bar', baz='baz'))
assert (pl.replace('a', '').replace('o', '').pdict() ==
dict(foo='f', bar='br', baz='bz'))
pd = pdict(foo=1, bar=2, floo=0)
assert (pd.pitems().pdict() == pd)
assert (pd.palues().pdict() == pd)
assert ((pd.palues() + 2).pdict() ==
dict(foo=3, bar=4, floo=2))
assert (pd.peys()._[0].pdict() ==
pdict(foo='f', bar='b', floo='f'))
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.foo.pstr().zip(foos.bar).uproot().pdict() ==
{'0': 0, '1': 1, '2': 0})
assert (plist[('foo', 1), ('foo', 2)].pdict() ==
dict(foo=2))
```
If you created this `plist` from a `pdict`, but you want to use pairs in `self` to create the
new `pdict`, you will need to `uproot` first:
```python
pd = pdict(foo=1, bar=2.0, baz=3.3)
pl = pd.palues().apply(lambda x: (str(x), x))
pd2 = pl.pdict()
pd3 = pl.uproot().pdict()
assert (pl.aslist() ==
[('2.0', 2.0), ('3.3', 3.3), ('1', 1)])
assert (pd2 ==
dict(foo=('1', 1), bar=('2.0', 2.0), baz=('3.3', 3.3)))
assert (pd3 ==
{'1': 1, '2.0': 2.0, '3.3': 3.3})
```
Args:
*args: Passed to `pdict.update` after the new `pdict` is created.
**kwargs: Passed to `pdict.update` after the new `pdict` is created.
Returns:
New `pdict` based on the contents of `self`.
"""
if self is self.__root__:
try:
if self.all(lambda x: len(x) == 2):
return pdict({k: v for k, v in self}).update(*args, **kwargs)
except Exception:
pass
try:
return pdict({k: v for k, v in zip(self.__root__.key, self)}).update(*args, **kwargs)
except Exception:
pass
return pdict({k: v for k, v in zip(self.__root__, self)}).update(*args, **kwargs)
def pset(self):
"""Converts the elements of self into pset objects.
Useful for creating `set`s from grouped `plist`s.
Examples:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.pitems().pset().aslist() ==
[{('foo', 0), ('bar', 0)}, {('foo', 1), ('bar', 1)}, {('foo', 2), ('bar', 0)}])
by_bar = foos.bar.groupby()
assert (by_bar.foo.pset().aslist() ==
[{0, 2}, {1}])
```
Returns:
New `plist` of `pset`s for each value in `self`.
"""
return plist([pset(x) for x in self], root=self.__root__)
def pstr(self):
"""Returns a plist with leaf elements converted to strings.
Calls `str` on each leaf element of self.
Examples:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.foo.pstr().aslist() ==
['0', '1', '2'])
by_bar = foos.bar.groupby()
assert (by_bar.foo.pstr().aslist() ==
[['0', '2'], ['1']])
```
Note that string concatenation works naturally with `plist`s, so it is easy to build
up a desired string using `plist.pstr`:
```python
assert (('foo: ' + by_bar.foo.pstr() + ', bar: ' + by_bar.bar.pstr()).aslist() ==
[['foo: 0, bar: 0', 'foo: 2, bar: 0'], ['foo: 1, bar: 1']])
```
If you want the string representation of a layer of a grouped `plist`, instead use
`plist.apply(str)` at the desired depth:
```python
assert (by_bar.foo.apply(str).aslist() ==
['[0, 2]', '[1]'])
```
Returns:
`plist` of strings.
"""
try:
return plist([x.pstr() for x in self], root=self.__root__)
except Exception:
return plist([str(x) for x in self], root=self.__root__)
##############################################################################
# Matplotlib pyplot convenience methods.
##############################################################################
if plt is None:
def plt(self, *args, **kwargs):
"""If `matplotlib.pyplot` were installed on your system, `plist.plt()` would make your use of `pyplot` easier.
Please install `matplotlib.pyplot`:
```bash
pip install matplotlib
```
Raises:
NotImplementedError: We can't make `pyplot` easier to use if you don't give us `pyplot`.
"""
raise NotImplementedError('matplotlib.pyplot is unavailable on your system. Please install matplotlib.pyplot before calling plist.plt().')
else:
def plt(self, **kwargs):
"""Convenience method for managing `matplotlib.pyplot` state within a `plist` chain.
`plt()` serves two purposes:
1. It returns a delegation object that allows calling `pyplot` functions without having to call `apply` -- e.g.,
`plist.plt().plot()` instead of `plist.apply(plt.plot)`.
1. It allows calling of multiple `pyplot` functions in a single call just by passing `**kwargs`. This
makes it easier to set up plotting contexts and to control when plots are shown, without adding
lots of one-line `plt` calls before and after the data processing and plotting code.
Neither of these use cases provides anything that can't be done directly with normal calls to `plt`
functions and `plist.apply`. This method is just to make your life easier if you do a lot of
plotting.
When passing `kwargs` to `plt()`, they are executed in alphabetical order. If that is inappropriate
(e.g., when creating a figure and setting other parameters), you can break up the call into two or
more `plt()` calls to enforce any desired ordering, but you should probably just do that kind of
complicated setup outside of the `plist` context.
Examples:
```python
foos = plist([pdict(foo=i, bar=i % 2) for i in range(5)])
(foos.bar == 0).baz = 3 - ((foos.bar == 0).foo % 3)
(foos.bar == 1).baz = 6
foos.foo.plt().scatter(foos.bar).plt(show=None)
# Equivalent to:
foos.foo.apply(plt.scatter, foos.bar)
plt.show()
by_bar = foos.bar.groupby()
by_bar.foo.plt().plot().plt(show=None)
# Equivalent to:
by_bar.foo.apply(plt.plot)
plt.show()
# Create a figure of size 12x8, set the x and y limits, add x and y axis labels,
# draw a scatter plot with custom colors and labels per group, add the legend, and show the figure.
by_bar.foo.plt(
figure=dict(figsize=(12, 8)), xlim=(-1, 5), ylim=(-1, 7), xlabel='foo', ylabel='baz'
).scatter(
by_bar.baz, c=plist['r', 'g'], label='bar: ' + by_bar.bar.puniq().ungroup().pstr()
).plt(legend=dict(loc=0), show=None)
# Equivalent to:
plt.figure(figsize=(12, 8))
plt.xlabel('foo')
plt.xlim((-1, 5))
plt.ylabel('baz')
plt.ylim((-1, 7))
by_bar.foo.apply(plt.scatter, by_bar.baz, c=plist['r', 'g'], label='bar: ' + by_bar.bar.puniq().ungroup().pstr())
plt.legend(loc=0)
plt.show()
```
Args:
**kwargs: Key/value pairs where the key is a function name on `plt`, and the value is the arguments
to call that function with, or `None` for an empty call.
Returns:
Delegation object that can call `pyplot` functions like `plt.plot`, as well as accessing whatever
properties are available to elements of `self`.
"""
def call_plt_fn(attr, args):
attr = getattr(plt, attr)
if args is None:
attr()
elif isinstance(args, list):
if isinstance(args[-1], dict):
attr(*args[:-1], **args[-1])
else:
attr(*args)
elif isinstance(args, dict):
attr(**args)
else:
attr(args)
kwargs = pdict(kwargs)
kwargs.pitems().apply(call_plt_fn, psplat=True)
class Plt(object):
"""Wrapper class for calling `plt` functions in a `plist` context.
Permits dynamic access to `plt` functions while maintaining the current
`plist` values.
"""
def __init__(self, first_arg):
self.first_arg = first_arg
self.last_plt = None
def __getattr__(self, name):
first_arg = self.first_arg
if isinstance(first_arg, Plt):
first_arg = first_arg.first_arg
try:
attr = getattr(plt, name)
def call_plt_fn(*a, **kw):
self.last_plt = attr(first_arg, *a, **kw)
return self
return call_plt_fn
except Exception:
return getattr(first_arg, name)
return plist([Plt(x) for x in self], root=self.__root__)
##############################################################################
# Shortcutting boolean test methods.
##############################################################################
def all(self, *args, **kwargs):
"""Returns `self` if `args[0]` evaluates to `True` for all elements of `self`.
Shortcuts if `args[0]` ever evaluates to `False`.
If `args` are not passed, the function evaluated is `bool`.
Useful as an implicit `if` condition in chaining, but can be used explicitly
in `if` statements as well.
Examples:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.all(isinstance, pdict).aslist() ==
[pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.foo.all(lambda x: x > 0).aslist() == [])
```
`all` does not recurse into grouped `plist`s, so you must specify the
desired level of evaluation:
```python
by_bar = foos.bar.groupby()
assert (by_bar.foo.all_(lambda x: x > 0).aslist() ==
[[], [1]])
assert (by_bar.foo.all_(lambda x: x > 0).nonempty().root().aslist() ==
[[{'bar': 1, 'foo': 1}]])
```
Args:
*args: Optional. If present, the first entry must be a function to evaluate.
All other args are passed through to that function. If absent, the
function is set to `bool`.
**kwargs: Passed through to the function specified in `*args`.
Returns:
`self` or an empty `plist` (which evaluates to `False`).
"""
if len(args):
func = args[0]
args = args[1:]
else:
func = bool
for x in self:
if not func(x, *args, **kwargs):
return plist()
return self
def any(self, *args, **kwargs):
"""Returns `self` if `args[0]` evaluates to `True` for any elements of `self`.
Shortcuts if `args[0]` ever evaluates to `True`.
If `args` are not passed, the function evaluated is `bool`.
Useful as an implicit `if` condition in chaining, but can be used explicitly
in `if` statements as well.
Examples:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.any(isinstance, pdict).aslist() ==
[pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.foo.any(lambda x: x < 0).aslist() == [])
```
`any` does not recurse into grouped `plist`s, so you must specify the
desired level of evaluation:
```python
by_bar = foos.bar.groupby()
assert (by_bar.foo.any_(lambda x: x > 1).aslist() ==
[[0, 2], []])
assert (by_bar.foo.any_(lambda x: x > 1).nonempty().root().aslist() ==
[[{'bar': 0, 'foo': 0}, {'bar': 0, 'foo': 2}]])
```
Args:
*args: Optional. If present, the first entry must be a function to evaluate.
All other args are passed through to that function. If absent, the
function is set to `bool`.
**kwargs: Passed through to the function specified in `*args`.
Returns:
`self` or an empty `plist` (which evaluates to `False`).
"""
if len(args):
func = args[0]
args = args[1:]
else:
func = bool
for x in self:
if func(x, *args, **kwargs):
return self
return plist()
def none(self, *args, **kwargs):
"""Returns `self` if `args[0]` evaluates to `False` for all elements of `self`.
Shortcuts if `args[0]` ever evaluates to `True`.
If `args` are not passed, the function evaluated is `bool`.
Useful as an implicit `if` condition in chaining, but can be used explicitly
in `if` statements as well.
Examples:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.none(isinstance, pset).aslist() ==
[pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.foo.none(lambda x: x > 1).aslist() == [])
```
`none` does not recurse into grouped `plist`s, so you must specify the
desired level of evaluation:
```python
by_bar = foos.bar.groupby()
assert (by_bar.foo.none_(lambda x: x > 1).aslist() ==
[[], [1]])
assert (by_bar.foo.none_(lambda x: x > 1).nonempty().root().aslist() ==
[[{'bar': 1, 'foo': 1}]])
```
Args:
*args: Optional. If present, the first entry must be a function to evaluate.
All other args are passed through to that function. If absent, the
function is set to `bool`.
**kwargs: Passed through to the function specified in `*args`.
Returns:
`self` or an empty `plist` (which evaluates to `False`).
"""
if len(args):
func = args[0]
args = args[1:]
else:
func = bool
for x in self:
if func(x, *args, **kwargs):
return plist()
return self
##############################################################################
# Equality checking that returns bool instead of plist.
##############################################################################
def pequal(self, other):
"""Shortcutting recursive equality function.
`pequal` always returns `True` or `False` rather than a `plist`. This is a
convenience method for cases when the filtering that happens with `==` is
undesirable or inconvenient.
Examples:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 1, 'bar': 1},
{'foo': 2, 'bar': 0}])
assert (foos.pequal(foos) == True)
zero_bars = foos.bar == 0
assert (zero_bars.aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 2, 'bar': 0}])
assert ((foos == zero_bars).aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 2, 'bar': 0}])
assert (foos.pequal(zero_bars) == False)
```
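Comparing against anything that is not a `plist` returns `False`, even when the
contents match (a small sketch grounded in the type check below):
```python
assert (foos.pequal(foos.aslist()) == False)
```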
Args:
other: Object to check equality against.
Returns:
True if all elements of self and other are recursively equal.
False otherwise.
"""
if not isinstance(other, plist):
return False
if len(self) != len(other):
return False
try:
for x, y in zip(self, other):
if not x.pequal(y):
return False
except Exception:
for x, y in zip(self, other):
if x != y:
return False
return True
##############################################################################
# Function application methods.
##############################################################################
def apply(self, func, *args, **kwargs):
"""Apply an arbitrary function to elements of self, forwarding arguments.
Any arguments passed to `apply` that are `plist`s and have the same
length as `self` will be passed one-at-a-time to `func` with each
element of `self`. Otherwise, arguments are passed in unmodified.
Examples:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.foo.apply('foo: {}'.format).aslist() ==
['foo: 0', 'foo: 1', 'foo: 2'])
assert (foos.foo.apply('foo: {}, bar: {}'.format, foos.bar).aslist() ==
['foo: 0, bar: 0', 'foo: 1, bar: 1', 'foo: 2, bar: 0'])
assert (foos.foo.apply('foo: {}, bar: {bar}'.format, bar=foos.bar).aslist() ==
['foo: 0, bar: 0', 'foo: 1, bar: 1', 'foo: 2, bar: 0'])
# The same as above, but in parallel:
assert (foos.foo.apply('foo: {}, bar: {}'.format, foos.bar, psplit=1).aslist() ==
['foo: 0, bar: 0', 'foo: 1, bar: 1', 'foo: 2, bar: 0'])
by_bar = foos.bar.groupby()
assert (by_bar.foo.apply('bar: {bar} => {}'.format, bar=foos.bar.puniq()).aslist() ==
['bar: 0 => [0, 2]', 'bar: 1 => [1]'])
assert (by_bar.foo.apply_('bar: {bar} => {}'.format, bar=by_bar.bar).aslist() ==
[['bar: 0 => 0', 'bar: 0 => 2'], ['bar: 1 => 1']])
```
Using `paslist` and `psplat`:
```python
foos = plist([pdict(foo=i, bar=i % 2) for i in range(5)])
(foos.bar == 0).baz = 3 - ((foos.bar == 0).foo % 3)
(foos.bar == 1).baz = 6
by_bar_baz = foos.bar.groupby().baz.groupby()
by_bar_baz_apply_paslist = by_bar_baz.foo.apply(
lambda x, *a, **kw: {'{x}: {a} ({kw})'.format(x=x, a=a, kw=kw)}, by_bar_baz.baz, bar=by_bar_baz.bar, paslist=True)
by_bar_baz_apply_paslist_psplat = by_bar_baz.foo.apply(
lambda *a, **kw: {'{a} ({kw})'.format(a=a, kw=kw)}, by_bar_baz.baz, bar=by_bar_baz.bar, paslist=True, psplat=True)
assert (by_bar_baz_apply_paslist.aslist() ==
[["[[0], [2], [4]]: ([[3], [1], [2]],) ({'bar': [[0], [0], [0]]})"],
["[[1, 3]]: ([[6, 6]],) ({'bar': [[1, 1]]})"]])
assert (by_bar_baz_apply_paslist_psplat.aslist() ==
[["([0], [2], [4], [[3], [1], [2]]) ({'bar': [[0], [0], [0]]})"],
["([1, 3], [[6, 6]]) ({'bar': [[1, 1]]})"]])
```
Args:
func: `callable`, `list` of `callable`s, or string name of method in `plist`.
*args: Arguments to pass to `func`.
**kwargs: Keyword arguments to pass to `func`, after extracting:
paslist: Boolean (default `False`). If `True`, converts
elements of `self` to `list` using `plist.aslist()`
before passing them to `func`, and reconverts the
result of each call to a `plist`. Note that this does
not guarantee that the returned `plist` has the same
shape as `self`, as `plist.aslist()` recursively
converts all contained `plist`s to `list`s, but `func`
might return any arbitrary result, so the same
conversion cannot be inverted automatically.
psplat: Boolean (default `False`). If `True`, expands the
arguments provided by `self` with the `*` operator
(sometimes called the 'splat' operator).
psplit: Integer (default `0`). If greater than `0`, `func` is
applied in parallel. If `psplit` is `1`, the number of
parallel executions is equal to the length of `self`.
Otherwise, `psplit` is the number of parallel executions.
Returns:
`plist` resulting from applying `func` to each element of `self`.
"""
paslist = kwargs.pop('paslist', False)
psplat = kwargs.pop('psplat', False)
psplit = kwargs.pop('psplit', 0)
args = [_ensure_len(len(self), a, strict=True) for a in args]
kwargs = {
k: _ensure_len(len(self), v, strict=True) for k, v in kwargs.items()
}
if isinstance(func, str):
func = plist.__getattribute__(self, func)
if hasattr(func, '__len__') and len(func) == len(self):
return plist([func[i](*[a[i] for a in args], **{k: v[i] for k, v in kwargs.items()}) for i, x in enumerate(self)], root=self.__root__)
else:
# We should be calling a single function of a plist object. If that's not the case, something odd is happening, and the crash is appropriate.
return func(*[a[0] for a in args], **{k: v[0] for k, v in kwargs.items()})
funcs = plist(_ensure_len(len(self), func))
if plist.all(funcs, isinstance, STRING_TYPES):
funcs = plist.__getattribute__(self, funcs)
return plist([funcs[i](*[a[i] for a in args], **{k: v[i] for k, v in kwargs.items()}) for i, x in enumerate(self)], root=self.__root__)
if psplit > 0:
pool = _get_thread_pool(psplit, len(self))
call_args = [pdict(x=x, i=i) for i, x in enumerate(self)]
if paslist:
if psplat:
map_func = lambda ca: plist(funcs[ca.i](*ca.x.aslist() + [a[ca.i] for a in args], **{k: v[ca.i] for k, v in kwargs.items()}), root=ca.x.__root__)
else:
map_func = lambda ca: plist(funcs[ca.i](ca.x.aslist(), *[a[ca.i] for a in args], **{k: v[ca.i] for k, v in kwargs.items()}), root=ca.x.__root__)
else:
if psplat:
map_func = lambda ca: funcs[ca.i](*list(ca.x) + [a[ca.i] for a in args], **{k: v[ca.i] for k, v in kwargs.items()})
else:
map_func = lambda ca: funcs[ca.i](ca.x, *[a[ca.i] for a in args], **{k: v[ca.i] for k, v in kwargs.items()})
pl = plist(pool.map(map_func, call_args, chunksize=_get_thread_chunksize(psplit, len(self))), root=self.__root__)
pool.close()
return pl
if paslist:
if psplat:
return plist([plist(funcs[i](*x.aslist() + [a[i] for a in args], **{k: v[i] for k, v in kwargs.items()}), root=x.__root__) for i, x in enumerate(self)], root=self.__root__)
return plist([plist(funcs[i](x.aslist(), *[a[i] for a in args], **{k: v[i] for k, v in kwargs.items()}), root=x.__root__) for i, x in enumerate(self)], root=self.__root__)
else:
if psplat:
return plist([funcs[i](*list(x) + [a[i] for a in args], **{k: v[i] for k, v in kwargs.items()}) for i, x in enumerate(self)], root=self.__root__)
return plist([funcs[i](x, *[a[i] for a in args], **{k: v[i] for k, v in kwargs.items()}) for i, x in enumerate(self)], root=self.__root__)
def reduce(self, func, *args, **kwargs):
"""Apply a function repeatedly to its own result, returning a plist of length at most 1.
`reduce` can be initialized either by using the `initial_value` keyword argument,
by the first value in `args` if anything is passed to `args`, or by the first
value in `self` if neither of the other options is present.
Examples:
This is an example of passing `initial_value` as the first value in `args`:
```python
s = 'foo bar was a baz of bin'
pl = plist['foo', 'bar', 'baz', 'bin']
reduced = pl.reduce(lambda s, x, y: qj(s).replace(x, y), s, pl._[::-1])
# Logs:
# qj: <pstar> reduce: s <3451>: foo bar was a baz of bin
# qj: <pstar> reduce: s <3451>: oof bar was a baz of bin
# qj: <pstar> reduce: s <3451>: oof rab was a baz of bin
# qj: <pstar> reduce: s <3451>: oof rab was a zab of bin
assert (reduced.aslist() ==
['oof rab was a zab of nib'])
assert (reduced.root().aslist() ==
['foo bar was a baz of bin'])
assert (reduced.root().root() is pl)
```
The same thing, but using the `initial_value` keyword argument:
```python
reduced = pl.reduce(lambda s, x, y: qj(s).replace(x, y), pl._[::-1], initial_value=s)
assert (reduced.aslist() ==
['oof rab was a zab of nib'])
assert (reduced.root().aslist() ==
['foo bar was a baz of bin'])
assert (reduced.root().root() is pl)
```
Using the first value in `self` for the initial value:
```python
pl = plist[1, 2, 3, 4, 5]
reduced = pl.reduce(lambda x, y, z: (x + y) * z, z=pl[::-1])
assert (reduced.aslist() ==
[466])
```
Any additional `args` or `kwargs` values will be passed through to `func` at each call,
in parallel to values of `self`. Note that `plist` arguments of the same length as `self`
get passed through starting at the 0th element, and going until there are no more elements
of `self`. If no value was passed for `initial_value`, this means that any additional
arguments will only use `n-1` values. For example, in the code above, `z` ranges from 5 to
2, producing the following computation:
```python
assert ((((((1 + 2) * 5 + 3) * 4 + 4) * 3 + 5) * 2) ==
466)
```
When `self` is a grouped `plist`, `pepth` determines which groups are reduced over:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0), pdict(foo=3, bar=1), pdict(foo=4, bar=0)])
(foos.bar == 0).baz = 3 + (foos.bar == 0).foo
(foos.bar == 1).baz = 6
foos.bin = (foos.baz + foos.bar) * foos.foo
by_bar_baz_bin = foos.bar.groupby().baz.groupby().bin.groupby()
assert (by_bar_baz_bin.aslist() ==
[[[[{'bar': 0, 'baz': 3, 'bin': 0, 'foo': 0}]],
[[{'bar': 0, 'baz': 5, 'bin': 10, 'foo': 2}]],
[[{'bar': 0, 'baz': 7, 'bin': 28, 'foo': 4}]]],
[[[{'bar': 1, 'baz': 6, 'bin': 7, 'foo': 1}],
[{'bar': 1, 'baz': 6, 'bin': 21, 'foo': 3}]]]])
import operator as op
assert (by_bar_baz_bin.foo.reduce(op.add, initial_value=0).aslist() ==
[10])
assert (by_bar_baz_bin.foo.reduce_(op.add, initial_value=0).aslist() ==
[[6], [4]])
assert (by_bar_baz_bin.foo.reduce__(op.add, initial_value=0).aslist() ==
[[[0], [2], [4]], [[4]]])
assert (by_bar_baz_bin.foo.reduce___(op.add, initial_value=0).aslist() ==
[[[[0]], [[2]], [[4]]], [[[1], [3]]]])
```
Notice that the deepest reduction actually returns a reconstruction of the input plist,
`by_bar_baz_bin.foo`, because at that level every element is in its own plist.
You can also pass a list of functions to apply. The first function is applied to the
broadest layer of the plist. Each additional layer consumes a function. If the functions
run out before the last layer, all of the deeper layers use the last function in the list.
If the layers run out before the functions do, the remaining functions are not used in the
reduction.
```python
assert (by_bar_baz_bin.foo.reduce_(op.add, 0).reduce(op.mul, 1).aslist() ==
[24])
assert (by_bar_baz_bin.foo.reduce([op.mul, op.add], 0).aslist() ==
[24])
```
Note how `op.add` is applied to the deepest layers in both examples, and `op.mul` is only applied
to the outermost plist, performing `6 * 4`.
You can also set `initial_value` using a plist of the same structure:
```python
assert (by_bar_baz_bin.foo.reduce(op.add, by_bar_baz_bin.baz).aslist() ==
[37])
assert (by_bar_baz_bin.foo.reduce([op.mul, op.add, op.mul, op.add], initial_value=by_bar_baz_bin.baz).aslist() ==
[1323])
```
Note that `reduce` does not currently provide a good mechanism for using a function of more than two arguments
while reducing a deep plist, as that would require a reduction operation to be provided for each additional argument.
Therefore, attempting to reduce a deep plist with a multiargument function is likely to crash or give unexpected
results, and is not recommended.
Args:
func: function to call. Must take at least two positional arguments of the same type as `self`,
and return a value of that type.
*args: Additional arguments to pass to func at each step. If `initial_value` is not in
`kwargs`, then the first value in `args` is used as `initial_value`.
**kwargs: Additional kwargs to pass to `func`. If `initial_value` is passed, it is
removed from `kwargs` and used as the first argument passed to `func` on
the first call.
Returns:
New `plist` of length at most 1 containing the final reduced value. Its root
holds the initial value, and that root's root is the original `plist`, as
shown in the examples above.
"""
sentinel = object()
initial_value = sentinel
start = 0
try:
rargs = [_ensure_len(len(self), a, strict=True) for a in args]
rkwargs = {
k: _ensure_len(len(self), v, strict=True) for k, v in kwargs.items()
}
next_funcs = func
if isinstance(next_funcs, list) and len(next_funcs):
if len(next_funcs) == 1:
func = next_funcs = next_funcs[0]
else:
func, next_funcs = next_funcs[0], next_funcs[1:]
new_plist = plist([x.reduce(next_funcs, *[a[i] for a in rargs], **{k: v[i] for k, v in rkwargs.items()}) for i, x in enumerate(self)], root=self.__root__)
initial_value = new_plist[0]
start = 1
if kwargs.pop('initial_value', sentinel) is sentinel:
if args:
args = args[1:]
except Exception as e:
new_plist = self
initial_value = kwargs.pop('initial_value', initial_value)
if initial_value is sentinel:
if args:
initial_value, args = args[0], args[1:]
elif len(new_plist):
initial_value = new_plist[0]
start = 1
else:
raise ValueError('plist.reduce must be called with either an initial value or on a non-empty plist.')
args = [_ensure_len(len(new_plist), a, strict=True)[:-start or None] for a in args]
kwargs = {
k: _ensure_len(len(new_plist), v, strict=True)[:-start or None] for k, v in kwargs.items()
}
cur_val = initial_value
for i, x in enumerate(new_plist[start:]):
cur_val = func(cur_val, x, *[a[i] for a in args], **{k: v[i] for k, v in kwargs.items()})
return plist.ungroup(plist([cur_val], root=plist([initial_value], root=new_plist.__root__)))
def filter(self, func=bool, *args, **kwargs):
"""Filter `self` by an arbitrary function on elements of `self`, forwarding arguments.
`filter` always returns the root of the filtered `plist`.
Examples:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.foo.filter().aslist() ==
[dict(foo=1, bar=1), dict(foo=2, bar=0)])
assert (foos.foo.filter(lambda x: x < 2).aslist() ==
[dict(foo=0, bar=0), dict(foo=1, bar=1)])
(foos.bar == 0).bin = 'zero'
(foos.bar == 1).bin = 1
assert (foos.bin.filter(isinstance, str).aslist() ==
[{'bar': 0, 'bin': 'zero', 'foo': 0}, {'bar': 0, 'bin': 'zero', 'foo': 2}])
```
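Additional positional and keyword arguments are forwarded to `func`, just as with `plist.apply`. A minimal sketch (hypothetical lambda):
```python
pl = plist[1, 2, 3, 4]
assert (pl.filter(lambda x, n: x % n, n=2).aslist() ==
[1, 3])
```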
Args:
func: callable. Defaults to `bool`. Return value will be cast to `bool`.
*args: Arguments to pass to `func`.
**kwargs: Keyword arguments to pass to `func`, after extracting the same arguments as `plist.apply`.
Returns:
`plist` resulting from filtering out elements of `self` for which `func` evaluated to a falsy value.
"""
return self.apply(func, *args, **kwargs).apply(bool) == True
def qj(self, *args, **kwargs):
"""Applies logging function qj to self for easy in-chain logging.
`qj` is a debug logging function. Calling `plist.qj()` is often the fastest way
to begin debugging an issue.
See [qj](https://github.com/iansf/qj) for detailed information on using `qj`.
Examples:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.foo.qj('foo').aslist() ==
[0, 1, 2])
# Logs:
# qj: <calling_module> calling_func: foo <3869>: [0, 1, 2]
```
Args:
*args: Arguments to pass to `qj`.
**kwargs: Keyword arguments to pass to `qj`.
Returns:
`self`
"""
call_pepth = kwargs.pop('call_pepth', 0)
return qj(self, _depth=4 + call_pepth, *args, **kwargs)
##############################################################################
# Grouping and sorting methods.
##############################################################################
def groupby(self):
"""Group `self.root()` by the values in `self` and return `self.root()`.
Examples:
Given a plist:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 1, 'bar': 1},
{'foo': 2, 'bar': 0}])
foo_by_bar = foos.bar.groupby()
assert (foo_by_bar.aslist() ==
[[{'foo': 0, 'bar': 0},
{'foo': 2, 'bar': 0}],
[{'foo': 1, 'bar': 1}]])
```
Note that foo_by_bar now has two nested plists. The first inner plist has
the two pdicts where `foo.bar == 0`. The second inner plist has the
remaining pdict where `foo.bar == 1`.
Calling groupby again:
```python
by_bar_foo = foos.bar.groupby().foo.groupby()
assert (by_bar_foo.aslist() ==
[[[{'foo': 0, 'bar': 0}],
[{'foo': 2, 'bar': 0}]],
[[{'foo': 1, 'bar': 1}]]])
```
Now by_bar_foo has two nested layers of inner plists. The outer nest
groups the values by `bar`, and the inner nest groups them by `foo`.
groupby always operates on the leaf children of the plist, and it always adds
new groups as subgroups of the current innermost group.
Grouping relies on the values being hashable. If, for some reason, you need
to group by a non-hashable value, you should convert it to a hashable
representation first, for example using `plist.pstr()` or `plist.apply(id)`:
```python
foos = plist([{'bar': [1, 2, 3]}, {'bar': [1, 2, 3]}])
try:
by_bar_crash = foos.bar.groupby() # CRASHES!
except Exception as e:
assert (isinstance(e, TypeError))
by_bar_pstr = foos.bar.pstr().groupby()
assert (by_bar_pstr.aslist() ==
[[{'bar': [1, 2, 3]},
{'bar': [1, 2, 3]}]])
by_bar_id = foos.bar.apply(id).groupby()
assert (by_bar_id.aslist() ==
[[{'bar': [1, 2, 3]}],
[{'bar': [1, 2, 3]}]])
```
Note that in the example above, using `pstr()` probably gives the intended
result of grouping both elements together, whereas `apply(id)` gives the
unsurprising result of putting each element into its own group.
Returns:
plist with one additional layer of internal plists, where each such plist
groups together the root elements based on the values in this plist.
"""
try:
return plist([x.groupby() for x in self])
except Exception:
groups = collections.OrderedDict()
for i, x in enumerate(self):
if x not in groups:
groups[x] = plist()
groups[x].append(self.__root__[i])
return plist(groups.values())
def enum(self):
"""Wrap the current `plist` values in tuples where the first item is the index.
Examples:
```python
pl = plist['a', 'b', 'c']
assert (pl.enum().aslist() ==
[(0, 'a'), (1, 'b'), (2, 'c')])
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
by_bar = foos.bar.groupby()
assert (by_bar.foo.enum_().aslist() ==
[[(0, 0), (1, 2)], [(0, 1)]])
```
Returns:
`plist` of `(i, x)` pairs from calling `enumerate` on `self`.
"""
return plist(enumerate(self), root=self.__root__)
def wrap(self):
"""Adds and returns an outer `plist` around `self`.
Examples:
`wrap` is useful when you wish to call a function on the top-level plist,
but you don't want to stop your call chain:
```python
foos = plist([{'bar': [1, 2, 3]}, {'bar': [4, 5, 6]}])
assert (foos.aslist() ==
[{'bar': [1, 2, 3]},
{'bar': [4, 5, 6]}])
arr1 = np.array(foos.bar.pstr().groupby().bar)
assert (np.all(arr1 ==
np.array([[[1, 2, 3]],
[[4, 5, 6]]])))
arr2 = foos.bar.pstr().groupby().bar.np()
assert (np.all(np.array(arr2.aslist()) ==
np.array([np.array([[1, 2, 3]]),
np.array([[4, 5, 6]])])))
arr3 = foos.bar.pstr().groupby().bar.wrap().np()
assert (np.all(np.array(arr3.aslist()) ==
np.array([np.array([[[1, 2, 3]],
[[4, 5, 6]]])])))
assert (np.any(arr1 != arr2[0]))
assert (np.all(arr1 == arr3[0]))
```
In the example above, calling `np.array` on the grouped plist gives a
particular array structure, but it does not return a `plist`, so you can't as
naturally use that array in ongoing computations while keeping track of
the correspondence of the array with the original data in `foos`.
Calling `plist.np()` directly on the grouped `plist` gives a different result,
however, as shown in `arr2`. The array is missing one dimension relative to
the call that generated `arr1`.
Instead, it is easy to call `plist.wrap()` before calling `plist.np()` in
this case in order to get the same result of passing `self` to `np.array()`,
but the advantage is that the `numpy.array` is still wrapped in a `plist`, so it
can be used in follow-on computations.
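On its own, `wrap` just adds one level of nesting. A minimal sketch:
```python
assert (foos.wrap().aslist() ==
[[{'bar': [1, 2, 3]}, {'bar': [4, 5, 6]}]])
```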
Returns:
`plist` with one additional level of nesting.
"""
return plist([self])
def sortby(self, key=None, reverse=False):
"""Sorts `self` and `self.root()` in-place and returns `self`.
`sortby` and `groupby` work together nicely to create sorted, nested plists.
Note that `sortby` modifies and returns `self`, whereas `groupby` returns a
new `plist` with a new root. This is because `sortby` doesn't change the
structure of the plist, only the order of its (or its children's) elements.
Examples:
A basic sort:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 1, 'bar': 1},
{'foo': 2, 'bar': 0}])
bar_sorted = foos.bar.sortby()
assert (bar_sorted.aslist() ==
[0, 0, 1])
foos_sorted_by_bar = bar_sorted.root()
assert (foos_sorted_by_bar.aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 2, 'bar': 0},
{'foo': 1, 'bar': 1}])
```
Sorting with groups works in the same way -- the sort is applied to each
group of `self`:
```python
by_bar = foos.bar.groupby()
assert (by_bar.aslist() ==
[[{'foo': 0, 'bar': 0},
{'foo': 2, 'bar': 0}],
[{'foo': 1, 'bar': 1}]])
by_bar_sorted = by_bar.bar.sortby(reverse=True)
assert (by_bar_sorted.aslist() ==
[[1], [0, 0]])
by_bar_sorted = by_bar_sorted.root()
assert (by_bar_sorted.aslist() ==
[[{'foo': 1, 'bar': 1}],
[{'foo': 0, 'bar': 0},
{'foo': 2, 'bar': 0}]])
```
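The `key` function behaves as it does in `sorted`. A minimal sketch (hypothetical data):
```python
pl = plist['bbb', 'a', 'cc']
assert (pl.sortby(key=len).aslist() ==
['a', 'cc', 'bbb'])
```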
Args:
key: Key function to pass to `sorted`. Defaults to the identity function.
See the python documentation for `sorted` for more information.
reverse: Boolean specifying whether to sort in reverse order or not.
Returns:
`self`, sorted.
"""
key = key or (lambda x: x)
sorted_inds = [i for i, _ in sorted(enumerate(self), key=lambda x: key(x[1]), reverse=reverse)]
self.__root__[:] = self.__root__[sorted_inds]
if self is not self.__root__:
self[:] = self[sorted_inds]
return self
def ungroup(self, r=1, s=None):
"""Inverts the last grouping operation applied and returns a new plist.
`ungroup` undoes the last `groupby` operation by default. It removes
groupings in the inverse order in which they were applied -- `groupby`
always adds new groups at the innermost layer, so `ungroup` removes
groups from the innermost layer first. It does not undo any implicit sorting
caused by the `groupby` operation, however.
Examples:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
by_bar = foos.bar.sortby().groupby()
assert (by_bar.ungroup().aslist() ==
foos.aslist())
by_bar[0].baz = 6
by_bar[1].baz = by_bar[1].foo * 2
by_bar_baz = by_bar.baz.groupby()
assert (by_bar_baz.ungroup().aslist() ==
by_bar.aslist())
assert (by_bar_baz.ungroup(2).aslist() ==
foos.aslist())
assert (by_bar_baz.ungroup(-1).aslist() ==
by_bar.ungroup(-1).aslist())
```
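Passing `r == 0` is a no-op, and `r < 0` removes all grouping. A minimal sketch:
```python
assert (by_bar_baz.ungroup(0) is by_bar_baz)
assert (by_bar_baz.ungroup(-1).pdepth(s=True) == 0)
```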
Args:
r: Integer value for the number of groups to remove. If `r == 0`, no
groups are removed. If it is positive, that many groups must be
removed, or `ungroup` raises a `ValueError`. If `r < 0`, all groups in
this plist are removed, returning a flat plist.
s: Successor object. Do not pass -- used to track how many ungroupings
have happened so that `ungroup` knows when to stop.
Returns:
New plist with one or more fewer inner groups, if there were any.
Raises:
ValueError: If there are fewer groups to ungroup than requested.
"""
s = _successor(r) if s is None else s
if s.v == 0:
return self
new_items = []
try:
cs = s
new_xs = []
for x in self:
cs = _successor(s.v)
new_xs.append(x.ungroup(cs.v, cs))
# Assumes that all children have the same depth.
# The plist is malformed if that isn't the case, and things will crash at some point.
s.v = cs.v
if s.v == 0:
return plist(new_xs)
for x in new_xs:
new_items.extend(x)
except Exception:
if s.v == 0:
raise ValueError('Called ungroup on a plist that has non-group children')
return self
s.p()
return plist(new_items)
def zip(self, *others):
"""Zips `self` with `others`, recursively.
Examples:
```python
pl1 = plist['a', 'b', 'c']
pl2 = plist[1, 2, 3]
pl3 = plist['nother', 'ig', 'odebase']
assert (pl2.zip(pl1, pl3).aslist() ==
[(1, 'a', 'nother'), (2, 'b', 'ig'), (3, 'c', 'odebase')])
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
by_bar = foos.bar.groupby()
assert (by_bar.bar.zip(by_bar.foo).aslist() ==
[[(0, 0), (0, 2)], [(1, 1)]])
```
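Mismatched lengths fail immediately. A minimal sketch:
```python
try:
pl = plist[1, 2, 3].zip(plist[4, 5])  # CRASHES!
except Exception as e:
assert (isinstance(e, ValueError))
```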
Args:
*others: `iterable`s that have the same length as `self`.
Returns:
New `plist` with the same structure as `self`.
Raises:
`ValueError`: If `self` and the `iterable`s in `others` don't all have the same
length at the level at which `zip` is initially called.
"""
plothers = plist(others)
if plothers.any(lambda x: len(x) != len(self)):
raise ValueError('plist.zip arguments must all have the same length as self (%d)' % len(self))
try:
return plist([x.zip(*plothers.__getitem___(i)) for i, x in enumerate(self)], root=self.__root__)
except Exception:
pass
zipped = [x for x in zip(self, *others)] # 3.6 compatibility
return plist(zipped, root=self.__root__[0:len(zipped):1])
##############################################################################
# Additional filtering methods.
##############################################################################
def nonempty(self, r=0):
"""Returns a new `plist` with empty sublists removed.
Examples:
`nonempty` is useful in combination with grouping and filtering:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 1, 'bar': 1},
{'foo': 2, 'bar': 0}])
foo_by_bar = foos.bar.groupby()
assert (foo_by_bar.aslist() ==
[[{'foo': 0, 'bar': 0},
{'foo': 2, 'bar': 0}],
[{'foo': 1, 'bar': 1}]])
filtered = foo_by_bar.foo != 1
assert (filtered.aslist() ==
[[{'foo': 0, 'bar': 0},
{'foo': 2, 'bar': 0}],
[]])
filtered_nonempty = filtered.nonempty()
assert (filtered_nonempty.aslist() ==
[[{'foo': 0, 'bar': 0},
{'foo': 2, 'bar': 0}]])
```
If the plist is deep, multiple levels of empty sublists can be removed at
the same time:
```python
by_bar_foo = foos.bar.groupby().foo.groupby()
assert (by_bar_foo.aslist() ==
[[[{'foo': 0, 'bar': 0}],
[{'foo': 2, 'bar': 0}]],
[[{'foo': 1, 'bar': 1}]]])
filtered = by_bar_foo.foo != 1
assert (filtered.aslist() ==
[[[{'foo': 0, 'bar': 0}],
[{'foo': 2, 'bar': 0}]],
[[]]])
filtered_nonempty_0 = filtered.nonempty()
assert (filtered_nonempty_0.aslist() ==
[[[{'foo': 0, 'bar': 0}],
[{'foo': 2, 'bar': 0}]],
[[]]])
filtered_nonempty_1 = filtered.nonempty(1)
assert (filtered_nonempty_1.aslist() ==
[[[{'foo': 0, 'bar': 0}],
[{'foo': 2, 'bar': 0}]]])
filtered_nonempty_n1 = filtered.nonempty(-1)
assert (filtered_nonempty_n1.aslist() ==
[[[{'foo': 0, 'bar': 0}],
[{'foo': 2, 'bar': 0}]]])
```
Note that `filtered_nonempty_0` is identical to `filtered`, since there are
no empty sublists at the top level. In this example, `filtered_nonempty_1`
and `filtered_nonempty_n1` give the same result -- the deepest empty sublist
is removed, and then the next deepest empty sublist is removed.
It is also possible to remove empty sublists only at deeper levels, using
the two ways to call functions on sublists -- passing `pepth` and adding `_`
to the method name:
```python
filtered_nonempty_p1 = filtered.nonempty(pepth=1)
assert (filtered_nonempty_p1.aslist() ==
[[[{'foo': 0, 'bar': 0}],
[{'foo': 2, 'bar': 0}]],
[]])
filtered_nonempty_u1 = filtered.nonempty_()
assert (filtered_nonempty_u1.aslist() ==
[[[{'foo': 0, 'bar': 0}],
[{'foo': 2, 'bar': 0}]],
[]])
```
`filtered_nonempty_p1` and `filtered_nonempty_u1` both remove a single layer
of empty sublists starting from one layer into `filtered`.
Args:
r: Integer value for the number of times to recurse. Defaults to 0, which
causes only empty direct children of `self` to be removed. If `r > 0`,
`nonempty` recurses `r` times, and then removes empty sublists at that
depth and empty sublists back up the recursive call chain. If `r < 0`,
`nonempty` recurses as deep as it can, and then removes empty sublists
back up the recursive call chain.
Returns:
New plist with empty sublists removed.
"""
if r != 0:
try:
new_plist = plist([x.nonempty(r=r - 1) for x in self if len(x)])
except Exception:
new_plist = self
else:
new_plist = self
return plist([x for x in new_plist if len(x)],
root=plist([self.__root__[i] for i, x in enumerate(new_plist) if len(x)]))
def puniq(self):
"""Returns a new `plist` with only a single element of each value in `self`.
Examples:
`puniq` reduces the values of the groups of self using an equality check:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 1, 'bar': 1},
{'foo': 2, 'bar': 0}])
reduced = foos.bar.puniq()
assert (reduced.aslist() ==
[0, 1])
assert (reduced.root().aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 1, 'bar': 1}])
```
Grouped plists work the same way:
```python
foo_by_bar = foos.bar.groupby()
assert (foo_by_bar.aslist() ==
[[{'foo': 0, 'bar': 0},
{'foo': 2, 'bar': 0}],
[{'foo': 1, 'bar': 1}]])
reduced = foo_by_bar.bar.puniq()
assert (reduced.aslist() ==
[[0], [1]])
assert (reduced.root().aslist() ==
[[{'foo': 0, 'bar': 0}],
[{'foo': 1, 'bar': 1}]])
```
The equality check respects the subgroups of self:
```python
by_bar_foo = foos.bar.groupby().foo.groupby()
assert (by_bar_foo.aslist() ==
[[[{'foo': 0, 'bar': 0}],
[{'foo': 2, 'bar': 0}]],
[[{'foo': 1, 'bar': 1}]]])
reduced_no_effect = by_bar_foo.bar.puniq()
assert (reduced_no_effect.aslist() ==
[[[0], [0]], [[1]]])
assert (reduced_no_effect.root().aslist() ==
[[[{'foo': 0, 'bar': 0}],
[{'foo': 2, 'bar': 0}]],
[[{'foo': 1, 'bar': 1}]]])
```
As with `plist.groupby`, `puniq` relies on the values being hashable.
If, for some reason, you need to reduce by a non-hashable value, you should
convert it to a hashable representation first, for example using
`plist.pstr()` or `plist.apply(id)`:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=0, bar=0)])
assert (foos.aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 1, 'bar': 1},
{'foo': 0, 'bar': 0}])
try:
reduced_crash = foos.puniq() # CRASHES!
except Exception as e:
assert (isinstance(e, TypeError))
reduced_pstr = foos.pstr().puniq()
assert (reduced_pstr.aslist() ==
["{'bar': 0, 'foo': 0}",
"{'bar': 1, 'foo': 1}"])
assert (reduced_pstr.root().aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 1, 'bar': 1}])
reduced_id = foos.apply(id).puniq()
assert (reduced_id.root().aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 1, 'bar': 1},
{'foo': 0, 'bar': 0}])
```
In the last case, since each of the elements of `foos` is a unique pdict,
reducing by `plist.apply(id)` has no useful effect, but if there had been
any duplicates in the elements of `foos`, they would have been removed.
Returns:
New `plist` with a new `root` where there is only one example of each value
in each sublist. The corresponding root element is the first element in
`self.root()` that has that value.
"""
try:
if self.enum().all(lambda x: x[1].__root__.pequal(self.__root__[x[0]])):
new_plist = plist([x.puniq() for x in self])
new_plist.__root__ = plist([x.__root__ for x in new_plist])
return new_plist
return plist([x.puniq() for x in self], root=self.__root__)
except Exception:
pass
vals = set()
new_items = []
new_roots = []
not_root = (self is not self.__root__)
for i, x in enumerate(self):
if x in vals:
continue
vals.add(x)
new_items.append(x)
if not_root:
new_roots.append(self.__root__[i])
if not_root:
return plist(new_items, root=plist(new_roots))
return plist(new_items)
def remix(self, *args, **kwargs):
r"""Returns a new `plist` of `pdicts` based on selected data from `self`.
Examples:
`remix` allows you to easily restructure your data into a manageable form:
```python
foos = plist([{'foo': 0, 'bar': {'baz': 13, 'bam': 0, 'bin': 'not'}},
{'foo': 1, 'bar': {'baz': 42, 'bam': 1, 'bin': 'good'}},
{'foo': 2, 'bar': {'baz': -9, 'bam': 0, 'bin': 'data'}}])
rmx = foos.remix('foo', baz=foos.bar.baz)
assert (rmx.aslist() ==
[{'foo': 0, 'baz': 13},
{'foo': 1, 'baz': 42},
{'foo': 2, 'baz': -9}])
```
Note that `rmx.baz` gets its values from `foo.bar.baz` in a natural manner.
If `remix` is called on a grouped `plist`, the result is still a flat `plist`
of flat `pdict`s, but the values in the `pdict`s are themselves `plist`s:
```python
foo_by_bam = foos.bar.bam.groupby()
assert (foo_by_bam.aslist() ==
[[{'foo': 0, 'bar': {'bam': 0, 'baz': 13, 'bin': 'not'}},
{'foo': 2, 'bar': {'bam': 0, 'baz': -9, 'bin': 'data'}}],
[{'foo': 1, 'bar': {'bam': 1, 'baz': 42, 'bin': 'good'}}]])
rmx_by_bam = foo_by_bam.remix('foo', baz=foo_by_bam.bar.baz)
assert (rmx_by_bam.aslist() ==
[{'foo': [0, 2], 'baz': [13, -9]},
{'foo': [1], 'baz': [42]}])
```
This behavior can be useful when integrating with `pandas`, for example:
```python
df = rmx_by_bam.pd()
assert (str(df) ==
' baz foo\n'
'0 [13, -9] [0, 2]\n'
'1 [42] [1]')
```
If you instead want `remix` to return grouped `pdict`s, just pass `pepth=-1`
to have it execute on the deepest `plist`s, as with any other call to a `plist`:
```python
rmx_by_bam = foo_by_bam.remix('foo', baz=foo_by_bam.bar.baz, pepth=-1)
assert (rmx_by_bam.aslist() ==
[[{'foo': 0, 'baz': 13},
{'foo': 2, 'baz': -9}],
[{'foo': 1, 'baz': 42}]])
```
Args:
*args: Property names of items in `self` to include in the remix.
**kwargs: Key/value pairs where the key will be a new property on items in
the remix and the value is deepcast and assigned to that key.
Returns:
Flat `plist` of flat `pdicts` based on data from `self` and the passed
arguments and keyword arguments.
"""
kwargs = {
k: _ensure_len(len(self), v, strict=True) for k, v in kwargs.items()
}
new_items = []
for i, x in enumerate(self):
y = pdict(
**{
a: (hasattr(x, a) and getattr(x, a)) or x[a]
for a in args
}
)
y.update({k: v[i] for k, v in kwargs.items()})
new_items.append(y)
return plist(new_items)
##############################################################################
# Structure-relevant methods
##############################################################################
# Depth, length, shape, and structure.
def pdepth(self, s=False):
"""Returns a `plist` of the recursive depth of each leaf element, from 0.
Examples:
`pdepth` returns a plist of the same plist structure as self:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 1, 'bar': 1},
{'foo': 2, 'bar': 0}])
assert (foos.pdepth().aslist() ==
[0])
by_bar_foo = foos.bar.groupby().foo.groupby()
assert (by_bar_foo.aslist() ==
[[[{'foo': 0, 'bar': 0}],
[{'foo': 2, 'bar': 0}]],
[[{'foo': 1, 'bar': 1}]]])
assert (by_bar_foo.pdepth().aslist() ==
[[[2], [2]], [[2]]])
filtered = by_bar_foo.bar == 0
assert (filtered.aslist() ==
[[[{'bar': 0, 'foo': 0}],
[{'bar': 0, 'foo': 2}]],
[[]]])
assert (filtered.pdepth().aslist() ==
[[[2], [2]], [[]]])
```
Since the depth values are always equal or empty in well-formed plists, it
is sometimes more convenient to get the depth as a scalar value. Pass a True
value to the first parameter (`s` for 'scalar'):
```python
assert (foos.pdepth(s=1) == 0)
assert (by_bar_foo.pdepth(1) == 2)
assert (filtered.pdepth(True) == 2)
```
Args:
s: Boolean that controls whether a scalar is returned (when `True`) or a
plist of the same structure as self (when `False`, the default).
Returns:
plist whose elements are the recursive depth of the leaf children, or a
scalar representing the maximum depth encountered in self if `s` is
`True`.
"""
try:
d = plist([x.pdepth() + 1 for x in self], root=self.__root__)
except Exception:
d = plist([0], root=self.__root__)
if s:
d = d.ungroup(-1).puniq()
if d:
return max(d)
return 0
return d
def plen(self, r=0, s=False):
"""Returns a `plist` of the length of a recursively-selected layer of `self`.
Examples:
`plen` returns a plist of the same depth as self, up to `r`:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 1, 'bar': 1},
{'foo': 2, 'bar': 0}])
assert (foos.plen().aslist() ==
[3])
assert (foos.plen(1).aslist() ==
[3])
by_bar_foo = foos.bar.groupby().foo.groupby()
assert (by_bar_foo.aslist() ==
[[[{'foo': 0, 'bar': 0}],
[{'foo': 2, 'bar': 0}]],
[[{'foo': 1, 'bar': 1}]]])
assert (by_bar_foo.plen().aslist() ==
[2])
assert (by_bar_foo.plen(r=1).aslist() ==
[[3]])
assert (by_bar_foo.plen(2).aslist() ==
[[[3]]])
assert (by_bar_foo.plen(-1).aslist() ==
[[[3]]])
filtered = by_bar_foo.bar == 0
assert (filtered.aslist() ==
[[[{'bar': 0, 'foo': 0}],
[{'bar': 0, 'foo': 2}]],
[[]]])
assert (filtered.plen().aslist() ==
[2])
assert (filtered.plen(-1).aslist() ==
[[[2]]])
```
Since the length values are always equal or empty in well-formed plists, it
is sometimes more convenient to get the length as a scalar value. Pass a True
value to the `s` parameter (`s` for 'scalar'):
```python
assert (foos.plen(s=1) == 3)
assert (by_bar_foo.plen(r=2, s=1) == 3)
assert (filtered.plen(-1, s=True) == 2)
```
Args:
r: Target recursion depth. Defaults to 0. Set to -1 to recurse as deep as
possible.
s: Boolean that controls whether a scalar is returned (when `True`) or a
`plist` of the same depth as `self` (when `False`, the default).
Returns:
`plist` whose depth equals the requested recursion depth (or less, if
`r > self.pdepth()`), containing a single value which is the number of
`plist` elements at that depth, or that value as a scalar if `s` is `True`.
"""
l = None
if r != 0:
try:
l = plist([sum(x.plen(r - 1) for x in self)], root=self.__root__)
except Exception:
pass
if l is None:
l = plist([len(self)], root=self.__root__)
if s:
l = l.ungroup(-1).puniq()
if l:
return max(l)
return 0
return l
def pshape(self):
"""Returns a `plist` of the same structure as `self`, filled with leaf lengths.
Examples:
`pshape` returns a plist of the same structure as `self`:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 1, 'bar': 1},
{'foo': 2, 'bar': 0}])
assert (foos.pshape().aslist() ==
[3])
foo_by_bar = foos.bar.groupby()
assert (foo_by_bar.aslist() ==
[[{'bar': 0, 'foo': 0},
{'bar': 0, 'foo': 2}],
[{'bar': 1, 'foo': 1}]])
assert (foo_by_bar.pshape().aslist() ==
[[2], [1]])
by_bar_foo = foos.bar.groupby().foo.groupby()
assert (by_bar_foo.aslist() ==
[[[{'foo': 0, 'bar': 0}],
[{'foo': 2, 'bar': 0}]],
[[{'foo': 1, 'bar': 1}]]])
assert (by_bar_foo.pshape().aslist() ==
[[[1], [1]], [[1]]])
filtered = by_bar_foo.bar == 0
assert (filtered.aslist() ==
[[[{'bar': 0, 'foo': 0}],
[{'bar': 0, 'foo': 2}]],
[[]]])
assert (filtered.pshape().aslist() ==
[[[1], [1]], [[]]])
```
Returns:
New `plist` of the same structure as `self`, where each leaf `plist` has a
single element, which is the length of the corresponding leaf `plist` in
`self`.
"""
try:
return plist([x.pshape() for x in self], root=self.__root__)
except Exception:
return plist([len(self)], root=self.__root__)
def pstructure(self):
"""Returns a `list` of the number of elements in each layer of `self`.
Gives a snapshot view of the structure of `self`. The length of the returned
list is the depth of `self`. Each value in the list is the result of calling
`self.plen(r)`, where `r` ranges from 0 to `self.pdepth()`. `plen(r)` gives
the sum of the lengths of all plists at layer `r`.
Examples:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 1, 'bar': 1},
{'foo': 2, 'bar': 0}])
assert (foos.pstructure().aslist() ==
[3])
by_bar_foo = foos.bar.groupby().foo.groupby()
assert (by_bar_foo.aslist() ==
[[[{'foo': 0, 'bar': 0}],
[{'foo': 2, 'bar': 0}]],
[[{'foo': 1, 'bar': 1}]]])
assert (by_bar_foo.pstructure().aslist() ==
[2, 3, 3])
filtered = by_bar_foo.bar == 0
assert (filtered.aslist() ==
[[[{'bar': 0, 'foo': 0}],
[{'bar': 0, 'foo': 2}]],
[[]]])
assert (filtered.pstructure().aslist() ==
[2, 3, 2])
```
Returns:
A `list` (not a `plist`) of `self.pdepth()` integers, where each integer is
the number of elements in all `plist`s at that layer, 0-indexed according to
depth.
"""
s = []
for r in range(self.pdepth(True) + 1):
s.extend(self.plen(r).ungroup(-1))
return plist(s, root=self.__root__)
# Fill with different values.
def lfill(self, v=0, s=None):
"""Returns a **`list`** with the structure of `self` filled in order from `v`.
Identical to `plist.pfill()`, but returns a **`list`** instead of a `plist`.
Examples:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 1, 'bar': 1},
{'foo': 2, 'bar': 0}])
assert (foos.lfill() ==
[0, 1, 2])
assert (foos.lfill(-7) ==
[-7, -6, -5])
by_bar_foo = foos.bar.groupby().foo.groupby()
assert (by_bar_foo.aslist() ==
[[[{'foo': 0, 'bar': 0}],
[{'foo': 2, 'bar': 0}]],
[[{'foo': 1, 'bar': 1}]]])
assert (by_bar_foo.lfill() ==
[[[0], [1]], [[2]]])
assert (by_bar_foo.lfill_() ==
[[[0], [1]], [[0]]])
assert (by_bar_foo.lfill(pepth=2) ==
[[[0], [0]], [[0]]])
filtered = by_bar_foo.bar == 0
assert (filtered.aslist() ==
[[[{'bar': 0, 'foo': 0}],
[{'bar': 0, 'foo': 2}]],
[[]]])
assert (filtered.lfill(3) ==
[[[3], [4]], [[]]])
```
Args:
v: Integer. The value to start filling from. Defaults to 0.
s: Successor object. Do not pass -- used to track the count of calls
across the recursive traversal of `self`.
Returns:
A **`list`** (not a `plist`) of possibly nested `list`s where each leaf element is
an integer, starting with the value of `v` in the 'top left' element of
the structure.
"""
s = _successor(v - 1) if s is None else s
try:
return [x.lfill(s=s) for x in self]
except Exception:
return [s.s() for _ in range(len(self))]
def pfill(self, v=0, s=None):
"""Returns a `plist` with the structure of `self` filled in order from `v`.
Identical to `plist.lfill()`, but returns a **`plist`** instead of a `list`.
Examples:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 1, 'bar': 1},
{'foo': 2, 'bar': 0}])
assert (foos.pfill().aslist() ==
[0, 1, 2])
assert (foos.pfill(-7).aslist() ==
[-7, -6, -5])
by_bar_foo = foos.bar.groupby().foo.groupby()
assert (by_bar_foo.aslist() ==
[[[{'foo': 0, 'bar': 0}],
[{'foo': 2, 'bar': 0}]],
[[{'foo': 1, 'bar': 1}]]])
assert (by_bar_foo.pfill().aslist() ==
[[[0], [1]], [[2]]])
assert (by_bar_foo.pfill_().aslist() ==
[[[0], [1]], [[0]]])
assert (by_bar_foo.pfill(pepth=2).aslist() ==
[[[0], [0]], [[0]]])
filtered = by_bar_foo.bar == 0
assert (filtered.aslist() ==
[[[{'bar': 0, 'foo': 0}],
[{'bar': 0, 'foo': 2}]],
[[]]])
assert (filtered.pfill(3).aslist() ==
[[[3], [4]], [[]]])
```
Args:
v: Integer. The value to start filling from. Defaults to 0.
s: Successor object. Do not pass -- used to track the count of calls
across the recursive traversal of `self`.
Returns:
A `plist` of possibly nested `plist`s where each leaf element is an integer,
starting with the value of `v` in the 'top left' element of the structure.
"""
s = _successor(v - 1) if s is None else s
try:
return plist([x.pfill(s=s) for x in self], root=self.__root__)
except Exception:
return plist([s.s() for _ in range(len(self))], root=self.__root__)
def pleft(self):
"""Returns a `plist` with the structure of `self` filled `plen(-1)` to 0.
Convenience method identical to `-self.pfill(1) + self.plen(-1, s=True)`.
Examples:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 1, 'bar': 1},
{'foo': 2, 'bar': 0}])
assert (foos.pleft().aslist() ==
[2, 1, 0])
by_bar_foo = foos.bar.groupby().foo.groupby()
assert (by_bar_foo.aslist() ==
[[[{'foo': 0, 'bar': 0}],
[{'foo': 2, 'bar': 0}]],
[[{'foo': 1, 'bar': 1}]]])
assert (by_bar_foo.pleft().aslist() ==
[[[2], [1]], [[0]]])
assert (by_bar_foo.pleft_().aslist() ==
[[[1], [0]], [[0]]])
assert (by_bar_foo.pleft(pepth=2).aslist() ==
[[[0], [0]], [[0]]])
filtered = by_bar_foo.bar == 0
assert (filtered.aslist() ==
[[[{'bar': 0, 'foo': 0}],
[{'bar': 0, 'foo': 2}]],
[[]]])
assert (filtered.pleft().aslist() ==
[[[1], [0]], [[]]])
```
This is useful for calling functions that have some global state that should
change each time a new grouping is started, such as generating many plots
from a single grouped plist using `pyplot`, where the function would need to
call `plt.show()` after each group was completed:
```python
def plot(x, remaining):
plt.plot(x)
if remaining == 0:
plt.show()
(foos.bar == 0).baz = 3 + (foos.bar == 0).foo
(foos.bar == 1).baz = 6
foos.bin = (foos.baz + foos.bar) * foos.foo
by_bar_baz_bin = foos.bar.groupby().baz.groupby().bin.groupby()
by_bar_baz_bin.foo.apply(plot, by_bar_baz_bin.pleft(pepth=2), pepth=2)
```
Returns:
A `plist` of possibly nested `plist`s where each leaf element is an integer,
starting with `self.plen(-1)` in the 'top left' element of the structure
and counting down to 0.
"""
return -self.pfill(1) + self.plen(-1, s=True)
def values_like(self, value=0):
"""Returns a `plist` with the structure of `self` filled with `value`.
Examples:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
assert (foos.aslist() ==
[{'foo': 0, 'bar': 0},
{'foo': 1, 'bar': 1},
{'foo': 2, 'bar': 0}])
assert (foos.values_like(1).aslist() ==
[1, 1, 1])
by_bar_foo = foos.bar.groupby().foo.groupby()
assert (by_bar_foo.aslist() ==
[[[{'foo': 0, 'bar': 0}],
[{'foo': 2, 'bar': 0}]],
[[{'foo': 1, 'bar': 1}]]])
assert (by_bar_foo.values_like('foo').aslist() ==
[[['foo'], ['foo']], [['foo']]])
all_the_same_dict = by_bar_foo.values_like({}, pepth=2)
assert (all_the_same_dict.aslist() ==
[[[{}], [{}]], [[{}]]])
all_the_same_dict.ungroup(-1)[0].update(foo=1)
assert (all_the_same_dict.aslist() ==
[[[{'foo': 1}], [{'foo': 1}]], [[{'foo': 1}]]])
filtered = by_bar_foo.bar == 0
assert (filtered.aslist() ==
[[[{'bar': 0, 'foo': 0}],
[{'bar': 0, 'foo': 2}]],
[[]]])
tuples = filtered.values_like((1, 2, 3))
assert (tuples.aslist() ==
[[[(1, 2, 3)], [(1, 2, 3)]], [[]]])
```
Note in the example above that filling with a mutable object like a `dict` gives
a `plist` filled with that single object, which might be surprising, but it is the
same as other common python idioms, such as:
```python
all_the_same_dict = [{}] * 3
assert (all_the_same_dict ==
[{}, {}, {}])
all_the_same_dict[0].update(foo=1)
assert (all_the_same_dict ==
[{'foo': 1}, {'foo': 1}, {'foo': 1}])
```
Args:
value: Value to fill the returned `plist` with. Can be any python object.
Returns:
A `plist` with the structure of `self` filled with `value`.
"""
values = _ensure_len(len(self), value, strict=True)
try:
return plist([x.values_like(v) for x, v in zip(self, values)], root=self.__root__)
except Exception:
pass
return plist([v for v in values], root=self.__root__)
##############################################################################
# Calling-frame-modifying utility methods.
##############################################################################
def me(self, name_or_plist='me', call_pepth=0):
"""Sets the current plist as a variable available in the caller's context.
`me` is a convenience method to naturally enable long chaining to prepare
the data in the `plist` for a future call to `apply` or some other call. It
attempts to add the current `plist` to the caller's context, either as a
local variable, or as a global (module-level) variable. Because it modifies
the caller's frame, it is not recommended for production code, but can be
useful in jupyter notebooks and colabs during exploration of datasets.
Examples:
Using `me` with a local variable requires that the variable already exist in
the local context, and that it be a `plist`:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
foos.baz = 3 * foos.foo + foos.bar
assert (foos.aslist() ==
[{'foo': 0, 'bar': 0, 'baz': 0},
{'foo': 1, 'bar': 1, 'baz': 4},
{'foo': 2, 'bar': 0, 'baz': 6}])
def new_context():
me = plist()
foos.bar.groupby().baz.sortby_().groupby().me().foo.plt().plot(me.bar)
new_context()
```
The same can work with a name of your choice:
```python
def new_context():
baz = plist()
foos.bar.groupby().baz.sortby_().groupby().me('baz').foo.plt().plot(baz.baz)
new_context()
```
You can pass the `plist` you want to use instead:
```python
def new_context():
me2 = plist()
foos.bar.groupby().baz.sortby_().groupby().me(me2).foo.plt().plot(me2.foo + 1)
new_context()
```
If there isn't a local variable of that name, `me()` will put the `plist` into
the caller's `globals()` `dict` under the requested name. The following both
work if there are no local or global variables named `me` or `baz`:
```python
def new_context():
foos.bar.groupby().baz.sortby_().groupby().me().foo.plt().plot(me.baz)
foos.bar.groupby().baz.sortby_().groupby().me('baz').foo.plt().plot(baz.baz)
del globals()['me']
del globals()['baz']
new_context()
```
Args:
name_or_plist: String naming a variable in the caller's context or the
global (module-level) context, or an existing plist. In
both cases, the variable will be overwritten with a plist
that is a shallow copy of `self`. Defaults to `'me'`.
call_pepth: Do not pass. Used by `plist.__call__` to keep track of how
many stack frames occur between the caller and `me()`.
Returns:
`self`, permitting continued chaining.
Raises:
ValueError: If `name_or_plist` is a string, and that name appears in the
caller's local variables, but does not evaluate to a `plist`.
ValueError: If something other than a string or a `plist` is passed to
`name_or_plist`.
"""
try:
call_pepth += 3
f = inspect.currentframe()
for _ in range(call_pepth):
f = f.f_back
if isinstance(name_or_plist, str):
frame_locals = f.f_locals
if name_or_plist in frame_locals:
me = frame_locals[name_or_plist]
if not isinstance(me, plist):
raise ValueError('To use plist.me(name_or_plist) with a local variable named %s, it must be a plist object. Got %r.' % (name_or_plist, me))
else:
me = plist()
f.f_globals[name_or_plist] = me
elif isinstance(name_or_plist, plist):
me = name_or_plist
else:
raise ValueError('plist.me(name_or_plist) requires that name_or_plist be either a str or a plist. Got %r.' % name_or_plist)
if hasattr(list, 'clear'):
list.clear(me)
else:
del me[:]
list.extend(me, self)
me.__root__ = self.__root__
finally:
# Delete the stack frame to ensure there are no memory leaks, as suggested
# by https://docs.python.org/2/library/inspect.html#the-interpreter-stack
del f
return self
def pand(self, name='__plist_and_var__', call_pepth=0):
"""Stores `self` into a `plist` of `tuple`s that gets extended with each call.
`pand` is meant to facilitate building up `tuple`s of values to be sent as
a single block to a chained call to `apply`, or as `*args` when calling
`plist.apply(psplat=True)`. The name is `pand` to evoke conjunction: the
caller wants a `plist` with this *and* this *and* this.
`pand` stores a variable in the caller's frame that isn't visible to the
caller, but is visible to future calls to `pand` due to how `locals()`
works.
Examples:
```python
foos = plist([pdict(foo=0, bar=0), pdict(foo=1, bar=1), pdict(foo=2, bar=0)])
foos.baz = 3 * foos.foo + foos.bar
assert (foos.aslist() ==
[{'foo': 0, 'bar': 0, 'baz': 0},
{'foo': 1, 'bar': 1, 'baz': 4},
{'foo': 2, 'bar': 0, 'baz': 6}])
def new_context():
assert (foos.bar.groupby().baz.groupby().foo.pand().root().bar.pand().ungroup()
.apply_(qj, '(foo, bar)') ==
[[[(0, 0)],
[(2, 0)]],
[[(1, 1)]]])
new_context()
# Logs:
# qj: <pstar> apply: (foo, bar) <1249>: (0, 0)
# qj: <pstar> apply: (foo, bar) <1249>: (2, 0)
# qj: <pstar> apply: (foo, bar) <1249>: (1, 1)
```
The same construction can be used with methods that expect the arguments
individually, requiring the `tuple` to be expanded:
```python
def new_context():
(foos.bar.groupby().baz.groupby().foo.pand().root().bar.pstr().pand()
.ungroup().apply_(qj, psplat=True, b=0))
new_context()
# Logs:
# qj: <pstar> apply: (foo, bar) <2876>: (0, 0)
# qj: <pstar> apply: (foo, bar) <2876>: (2, 0)
# qj: <pstar> apply: (foo, bar) <2876>: (1, 1)
# qj: <pstar> apply: (0, 0) <2876>: (0, 0)
# qj: <pstar> apply: (2, 0) <2876>: (2, 0)
# qj: <pstar> apply: (1, 1) <2876>: (1, 1)
```
Building multiple `tuple`s in the same context requires passing `name` to keep
them separate:
```python
def new_context():
me = plist()
assert (foos.bar.groupby().baz.groupby().me().foo.pand().root().bar.pand().ungroup()
.apply_(qj,
me.foo.pand('strs').root().bar.pand('strs').ungroup().pstr()) ==
[[(0, 0),
(2, 0)],
[(1, 1)]])
new_context()
# Logs:
# qj: <pstar> apply: (0, 0) <1249>: (0, 0)
# qj: <pstar> apply: (2, 0) <1249>: (2, 0)
# qj: <pstar> apply: (1, 1) <1249>: (1, 1)
```
Note that the construction above is hard to understand, and probably
shouldn't be used.
Args:
name: String naming an available variable in the caller's context. Should
only be passed if the calling frame needs to create multiple
different `tuple`s. Defaults to '__plist_and_var__'. If a variable of
the same name exists in the caller's context, `pand` will fail to
write to it.
call_pepth: Do not pass. Used by `plist.__call__` to keep track of how
many stack frames occur between the caller and `pand()`.
Returns:
The current `plist` of `tuple`s, with `self` added.
Raises:
ValueError: If the variable named by `name` is already present in the
caller's frame and is not a `plist`, or has different `pshape()`
than `self`.
"""
try:
call_pepth += 3
f = inspect.currentframe()
for _ in range(call_pepth):
f = f.f_back
frame_locals = f.f_locals
if name in frame_locals:
and_var = frame_locals[name]
if not isinstance(and_var, plist):
raise ValueError('plist.pand() expected a plist object with the name %s in the calling frame. Got %r.' % (name, and_var))
if not self.pshape().pequal(and_var.pshape()):
raise ValueError('plist.pand() found a previous plist object with an incompatible shape.\n'
'\tMake sure that all calls to plist.pand() in the same stack frame operate on plists with the same shape,'
' or are called with different `name` arguments.\n'
'\tExpected %r, got %r.' % (self.pshape(), and_var.pshape()))
else:
and_var = self.values_like(tuple())
and_var = and_var.apply(list, pepth=-1).apply(lambda x, y: x.append(y) or x, self, pepth=-1).apply(tuple, pepth=-1)
frame_locals[name] = and_var
return and_var
finally:
# Delete the stack frame to ensure there are no memory leaks, as suggested
# by https://docs.python.org/2/library/inspect.html#the-interpreter-stack
del f
################################################################################
################################################################################
################################################################################
# Conversion
################################################################################
################################################################################
################################################################################
class _Converter(type):
_cls_map = pdict({
defaultpdict.__mro__[1]: defaultpdict,
frozenpset.__mro__[1]: frozenpset,
pdict.__mro__[1]: pdict,
plist.__mro__[1]: plist,
pset.__mro__[1]: pset,
ptuple.__mro__[1]: ptuple,
defaultpdict: defaultpdict,
frozenpset: frozenpset,
pdict: pdict,
plist: plist,
pset: pset,
ptuple: ptuple,
})
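# Calling a class that uses this metaclass (e.g., `pstar(obj)` or `pstar * obj`)
# dispatches here: `obj` is converted recursively down to `depth` levels using
# `cls_map`, which maps source types to target types; objects of unmapped types
# are returned unchanged.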
def __call__(self, obj, cls_map=None, depth=-1, dbg=0):
if depth == 0:
return obj
if cls_map is None:
cls_map = self._cls_map
if isinstance(obj, (pstar, _SyntaxSugar)):
raise ValueError('pstar conversion functions cannot operate on each other. '
'I.e., you can\'t do things like:\n`plist * pdict * data`.\n'
'Use grouping or ordering to avoid this:\n'
'`plist * (pdict * data)` or `plist * data * pdict`.')
target_type = cls_map.get(type(obj), None)
if target_type:
if hasattr(target_type, '__mro__') and defaultdict in target_type.__mro__:
try:
return target_type(obj.default_factory, **{k: self(obj[k], cls_map, depth - 1) for k in obj})
except Exception as e:
qj(str(e), 'First defaultdict conversion failed for %s' % str(obj), b=dbg)
try:
return target_type(obj.default_factory, {k: self(obj[k], cls_map, depth - 1) for k in obj})
except Exception as e:
qj(str(e), 'Second defaultdict conversion failed for %s' % str(obj), b=dbg)
try:
return target_type(**{k: self(obj[k], cls_map, depth - 1) for k in obj})
except Exception as e:
qj(str(e), 'First dict-style conversion failed for %s' % str(obj), b=dbg)
try:
return target_type([self(x, cls_map, depth - 1) for x in obj])
except Exception as e:
qj(str(e), 'List-style conversion failed for %s' % str(obj), b=dbg)
try:
return target_type({k: self(obj[k], cls_map, depth - 1) for k in obj})
except Exception as e:
qj(str(e), 'Second dict-style conversion failed for %s' % str(obj), b=dbg)
return obj
def __mul__(self, other):
return self(other)
def __rmul__(self, other):
return self(other)
def __truediv__(self, other):
return self.__rtruediv__(other) # Right division is the principled one.
def __rtruediv__(self, other, depth=-1):
# Get non-pstar types mapping to pstar types.
python_types = plist(self._cls_map.items())._[0] != [defaultpdict, frozenpset, pdict, plist, pset, ptuple]
# Swap order of python and pstar types, then merge them with python types mapped to themselves.
cls_map = python_types._[::-1].uproot().pdict(python_types._[0].zip(python_types._[0]).uproot().pdict())
assert len(self._cls_map) == len(cls_map) # We better not have dropped any classes
return self(other, cls_map, depth)
def __add__(self, other):
return self(other, depth=1)
def __radd__(self, other):
return self(other, depth=1)
def __sub__(self, other):
return self.__rsub__(other) # Right subtraction is the principled one.
def __rsub__(self, other):
return self.__rtruediv__(other, depth=1)
if sys.version_info[0] < 3:
__div__, __rdiv__ = __truediv__, __rtruediv__
def cls_map(self):
return self._cls_map.copy()
class pstar(_compatible_metaclass(_Converter, object)):
"""Recursively converts between standard python types and pstar types.
Examples:
Converting python types to `pstar` types:
```python
data = [dict(foo=[0, 1, 2], bar=dict(bin=0), baz=defaultdict(int, a=1, b=2, c=3)),
dict(foo=[1, 2, 3], bar=dict(bin=1), baz=frozenset([3, 4, 5])),
dict(foo=[2, 3, 4], bar=dict(bin=0), baz=set([7, 8, 9]))]
# Recursively convert all pstar-compatible types:
pl = pstar(data)
assert (isinstance(pl, plist))
assert (pl.apply(type).aslist() == [pdict, pdict, pdict])
assert (pl.foo.apply(type).aslist() == [plist, plist, plist])
assert (pl.bar.apply(type).aslist() == [pdict, pdict, pdict])
assert (pl.baz.apply(type).aslist() == [defaultpdict, frozenpset, pset])
# An alternative way to do the same conversion:
pl = pstar * data
assert (isinstance(pl, plist))
assert (pl.apply(type).aslist() == [pdict, pdict, pdict])
assert (pl.foo.apply(type).aslist() == [plist, plist, plist])
assert (pl.bar.apply(type).aslist() == [pdict, pdict, pdict])
assert (pl.baz.apply(type).aslist() == [defaultpdict, frozenpset, pset])
# Only convert the outermost object:
pl = pstar + data
assert (isinstance(pl, plist))
assert (pl.apply(type).aslist() == [dict, dict, dict])
assert (pl.foo.apply(type).aslist() == [list, list, list])
assert (pl.bar.apply(type).aslist() == [dict, dict, dict])
assert (pl.baz.apply(type).aslist() == [defaultdict, frozenset, set])
# The same outer conversion, as a function call:
pl = pstar(data, depth=1)
assert (isinstance(pl, plist))
assert (pl.apply(type).aslist() == [dict, dict, dict])
assert (pl.foo.apply(type).aslist() == [list, list, list])
assert (pl.bar.apply(type).aslist() == [dict, dict, dict])
assert (pl.baz.apply(type).aslist() == [defaultdict, frozenset, set])
# Convert two layers:
pl = pstar(data, depth=2)
assert (isinstance(pl, plist))
assert (pl.apply(type).aslist() == [pdict, pdict, pdict])
assert (pl.foo.apply(type).aslist() == [list, list, list])
assert (pl.bar.apply(type).aslist() == [dict, dict, dict])
assert (pl.baz.apply(type).aslist() == [defaultdict, frozenset, set])
pl = pstar * data
# Convert from pstar types back to python types:
data2 = pl / pstar
assert (data2 == data)
assert (type(data2) == list)
assert ([type(x) for x in data2] == [dict, dict, dict])
assert ([type(x['foo']) for x in data2] == [list, list, list])
assert ([type(x['bar']) for x in data2] == [dict, dict, dict])
assert ([type(x['baz']) for x in data2] == [defaultdict, frozenset, set])
# Only convert the outermost object:
data2 = pl - pstar
assert (data2 == data)
assert (type(data2) == list)
assert ([type(x) for x in data2] == [pdict, pdict, pdict])
assert ([type(x['foo']) for x in data2] == [plist, plist, plist])
assert ([type(x['bar']) for x in data2] == [pdict, pdict, pdict])
assert ([type(x['baz']) for x in data2] == [defaultpdict, frozenpset, pset])
# Convert inner objects even when outer objects have already been converted:
data3 = data2 / pstar
assert (data3 == data)
assert (type(data3) == list)
assert ([type(x) for x in data3] == [dict, dict, dict])
assert ([type(x['foo']) for x in data3] == [list, list, list])
assert ([type(x['bar']) for x in data3] == [dict, dict, dict])
assert ([type(x['baz']) for x in data3] == [defaultdict, frozenset, set])
```
You can also convert from each `pstar` class to its python equivalent and back using
arithmetic operations on the `class` itself, for convenience:
```python
d1 = {'foo': 1, 'bar': 2}
pd = pdict * d1
assert (type(d1) == dict)
assert (type(pd) == pdict)
assert (pd == d1)
d2 = pd / pdict
assert (type(d2) == dict)
assert (d2 == d1)
pl = plist * data
assert (isinstance(pl, plist))
assert (pl.apply(type).aslist() == [dict, dict, dict])
assert (pl.foo.apply(type).aslist() == [plist, plist, plist])
assert (pl.bar.apply(type).aslist() == [dict, dict, dict])
assert (pl.baz.apply(type).aslist() == [defaultdict, frozenset, set])
data2 = data * pdict
assert (type(data2) == list)
assert (plist(data2).apply(type).aslist() == [pdict, pdict, pdict])
assert (plist(data2).foo.apply(type).aslist() == [list, list, list])
assert (plist(data2).bar.apply(type).aslist() == [pdict, pdict, pdict])
assert (plist(data2).baz.apply(type).aslist() == [defaultdict, frozenset, set])
pl = plist + data * pdict
assert (type(pl) == plist)
assert (pl.apply(type).aslist() == [pdict, pdict, pdict])
assert (pl.foo.apply(type).aslist() == [list, list, list])
assert (pl.bar.apply(type).aslist() == [pdict, pdict, pdict])
assert (pl.baz.apply(type).aslist() == [defaultdict, frozenset, set])
```
You can't do arbitrary arithmetic with the conversion methods, though.
One conversion method can't directly operate on another:
```python
try:
plist * pdict * data
except Exception as e:
assert (isinstance(e, ValueError))
```
If you want to combine multiple conversions, order of operations matters:
```python
pl = plist + pdict * data
assert (type(pl) == plist)
assert (pl.apply(type).aslist() == [pdict, pdict, pdict])
assert (pl.foo.apply(type).aslist() == [list, list, list])
assert (pl.bar.apply(type).aslist() == [pdict, pdict, pdict])
pl = plist * (pdict * data)
assert (type(pl) == plist)
assert (pl.apply(type).aslist() == [pdict, pdict, pdict])
assert (pl.foo.apply(type).aslist() == [plist, plist, plist])
assert (pl.bar.apply(type).aslist() == [pdict, pdict, pdict])
```
You can combine `pstar.pstar` and the `pstar` classes together to do partial conversion:
```python
pl = pstar * data / pset
assert (isinstance(pl, plist))
assert (pl.apply(type).aslist() == [pdict, pdict, pdict])
assert (pl.foo.apply(type).aslist() == [plist, plist, plist])
assert (pl.bar.apply(type).aslist() == [pdict, pdict, pdict])
assert (pl.baz.apply(type).aslist() == [defaultpdict, frozenpset, set])
```
The semantics of the operators are:
- `+` and `-`: Non-recursive conversions (only the operand itself is converted).
- `*` and `/`: Recursive conversions (the operand and any children are converted).
- `+` and `*` on the left or right: Convert python classes to `pstar` classes; e.g., `dict` to `pdict`.
- `-` and `/` on the right: Convert this `pstar` class to its equivalent python class; e.g., `plist` to `list`.
- `-` and `/` on the left: Convert all but this `pstar` type to their python equivalents;
e.g., all but `pdict` get converted -- equivalent to `obj - pstar + pdict`
or `obj / pstar * pdict`.
Below are examples focused on `pdict`s, but the same is true for all of the operators:
```python
# Starting from a nested pstar object, you may want to convert pdicts to dicts.
pd = pdict(foo=plist[1, 2, 3], bar=pset[4, 5, 6], baz=pdict(a=7, b=8, d=9))
# Subtracting by pdict will convert a top-level pdict to dict, but will leave other objects alone.
d = pd - pdict
assert (type(d) == dict)
assert (type(d['foo']) == plist)
assert (type(d['bar']) == pset)
assert (type(d['baz']) == pdict) # Note that the child is still a pdict!
pl = pd.foo - pdict
assert (type(pl) == plist) # The type is unchanged, since pd.foo is not a pdict
assert (pl is not pd.foo) # Conversion still creates a new copy, though!
assert (pl == pd.foo) # But the contents are identical, of course.
# Dividing by pdict will convert any pdict values to dicts, but leave others unchanged.
d = pd / pdict
assert (type(d) == dict)
assert (type(d['foo']) == plist)
assert (type(d['bar']) == pset)
assert (type(d['baz']) == dict) # Note that the child is a dict!
# You probably shouldn't left-subtract by pdict, but you can. It converts any other pstar classes
# to their python equivalents, but leaves pdicts alone.
pd2 = pdict - pd
assert (type(pd2) == pdict)
l = pdict - pd.foo
assert (type(l) == list)
assert (type(pd.foo) == plist)
assert (l == pd.foo)
# Left division is also not recommended, but it works. It converts all other pstar classes
# to their python equivalents, but leaves pdicts alone.
pd2 = pdict / pd
assert (type(pd2) == pdict)
assert (type(pd2.foo) == list)
assert (type(pd2.bar) == set)
assert (type(pd2.baz) == pdict)
```
The only exceptions are for the `pstar` left subtraction and left division, which are identical
to right subtraction and right division:
```python
d = pd - pstar
assert (type(d) == dict)
assert (type(d['foo']) == plist)
assert (type(d['bar']) == pset)
assert (type(d['baz']) == pdict)
d = pstar - pd
assert (type(d) == dict)
assert (type(d['foo']) == plist)
assert (type(d['bar']) == pset)
assert (type(d['baz']) == pdict)
d = pd / pstar
assert (type(d) == dict)
assert (type(d['foo']) == list)
assert (type(d['bar']) == set)
assert (type(d['baz']) == dict)
d = pstar / pd
assert (type(d) == dict)
assert (type(d['foo']) == list)
assert (type(d['bar']) == set)
assert (type(d['baz']) == dict)
```
You can also access the core `pstar` classes from the `pstar` conversion object:
```python
foos = pstar.plist([pstar.pdict(foo=0, bar=0), pstar.pdict(foo=1, bar=1), pstar.pdict(foo=2, bar=0)])
```
This is convenient if you only imported as `from pstar import pstar`.
"""
defaultpdict = defaultpdict
frozenpset = frozenpset
pdict = pdict
plist = plist
pset = pset
ptuple = ptuple
# pylint: enable=line-too-long,invalid-name,g-explicit-length-test
# pylint: enable=broad-except,g-long-lambda
|
jczaplew/busMSN
|
refs/heads/github
|
node_modules/grunt-contrib-nodeunit/node_modules/nodeunit/deps/ejs/node_modules/expresso/deps/jscoverage/js/build/win32/pgomerge.py
|
79
|
#!/usr/bin/python
# Usage: pgomerge.py <binary basename> <dist/bin>
# Gathers .pgc files from dist/bin and merges them into
# $PWD/$basename.pgd using pgomgr, then deletes them.
# No errors if any of these files don't exist.
import sys, os, os.path, subprocess
if not sys.platform == "win32":
raise Exception("This script was only meant for Windows.")
def MergePGOFiles(basename, pgddir, pgcdir):
"""Merge pgc files produced from an instrumented binary
into the pgd file for the second pass of profile-guided optimization
with MSVC. |basename| is the name of the DLL or EXE without the
extension. |pgddir| is the path that contains <basename>.pgd
(should be the objdir it was built in). |pgcdir| is the path
containing basename!N.pgc files, which is probably dist/bin.
Calls pgomgr to merge each pgc file into the pgd, then deletes
the pgc files."""
if not os.path.isdir(pgddir) or not os.path.isdir(pgcdir):
return
pgdfile = os.path.abspath(os.path.join(pgddir, basename + ".pgd"))
if not os.path.isfile(pgdfile):
return
for file in os.listdir(pgcdir):
if file.startswith(basename) and file.endswith(".pgc"):
try:
pgcfile = os.path.normpath(os.path.join(pgcdir, file))
subprocess.call(['pgomgr', '-merge',
pgcfile,
pgdfile])
os.remove(pgcfile)
except OSError:
pass
if __name__ == '__main__':
if len(sys.argv) != 3:
print >>sys.stderr, "Usage: pgomerge.py <binary basename> <dist/bin>"
sys.exit(1)
MergePGOFiles(sys.argv[1], os.getcwd(), sys.argv[2])
|
ychen820/microblog
|
refs/heads/master
|
flask/lib/python2.7/site-packages/pbr/tests/testpackage/pbr_testpackage/cmd.py
|
142
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
def main():
print("PBR Test Command")
class Foo(object):
@classmethod
    def bar(cls):
print("PBR Test Command - with class!")
|
powdahound/ec2instances.info
|
refs/heads/master
|
setup.py
|
1
|
#!/usr/bin/env python
from __future__ import unicode_literals
from setuptools import setup
setup(
name='ec2instances.info',
packages=['ec2instances.info'],
version='0.0.2',
description='The community-maintained dataset of aws instance types'
' and pricing',
author='Garret Heaton',
author_email='github@garretheaton.com',
url='https://github.com/powdahound/ec2instances.info',
license="MIT",
)
|
Freso/listenbrainz-server
|
refs/heads/master
|
listenbrainz/domain/spotify.py
|
1
|
import base64
import requests
import six
import time
from flask import current_app
import spotipy.oauth2
from listenbrainz.db import spotify as db_spotify
from datetime import datetime, timezone
SPOTIFY_API_RETRIES = 5
SPOTIFY_IMPORT_PERMISSIONS = (
'user-read-currently-playing',
'user-read-recently-played',
)
SPOTIFY_LISTEN_PERMISSIONS = (
'streaming',
'user-read-email',
'user-read-private',
'playlist-modify-public',
'playlist-modify-private',
)
OAUTH_TOKEN_URL = 'https://accounts.spotify.com/api/token'
class Spotify:
def __init__(self, user_id, musicbrainz_id, musicbrainz_row_id, user_token, token_expires,
refresh_token, last_updated, record_listens, error_message, latest_listened_at,
permission):
self.user_id = user_id
self.user_token = user_token
self.token_expires = token_expires
self.refresh_token = refresh_token
self.last_updated = last_updated
self.record_listens = record_listens
self.error_message = error_message
self.musicbrainz_id = musicbrainz_id
self.latest_listened_at = latest_listened_at
self.musicbrainz_row_id = musicbrainz_row_id
self.permission = permission
def get_spotipy_client(self):
return spotipy.Spotify(auth=self.user_token)
@property
def last_updated_iso(self):
if self.last_updated is None:
return None
return self.last_updated.isoformat() + "Z"
@property
def latest_listened_at_iso(self):
if self.latest_listened_at is None:
return None
return self.latest_listened_at.isoformat() + "Z"
@property
def token_expired(self):
now = datetime.utcnow()
now = now.replace(tzinfo=timezone.utc)
return now >= self.token_expires
@staticmethod
def from_dbrow(row):
return Spotify(
user_id=row['user_id'],
user_token=row['user_token'],
token_expires=row['token_expires'],
refresh_token=row['refresh_token'],
last_updated=row['last_updated'],
record_listens=row['record_listens'],
error_message=row['error_message'],
musicbrainz_id=row['musicbrainz_id'],
musicbrainz_row_id=row['musicbrainz_row_id'],
latest_listened_at=row['latest_listened_at'],
permission=row['permission'],
)
def __str__(self):
return "<Spotify(user:%s): %s>" % (self.user_id, self.musicbrainz_id)
def refresh_user_token(spotify_user: Spotify):
""" Refreshes the user token for the given spotify user.
Args:
spotify_user (domain.spotify.Spotify): the user whose token is to be refreshed
Returns:
user (domain.spotify.Spotify): the same user with updated tokens
Raises:
SpotifyAPIError: if unable to refresh spotify user token
SpotifyInvalidGrantError: if the user has revoked authorization to spotify
    Note: spotipy swallows the JSON body in case of an error, but we need it to
    check whether the user has revoked our authorization. Hence, we use our own
    code instead of spotipy to refresh the token.
"""
retries = SPOTIFY_API_RETRIES
response = None
while retries > 0:
response = _get_spotify_token("refresh_token", spotify_user.refresh_token)
if response.status_code == 200:
break
elif response.status_code == 400:
error_body = response.json()
if "error" in error_body and error_body["error"] == "invalid_grant":
raise SpotifyInvalidGrantError(error_body)
response = None # some other error occurred
retries -= 1
if response is None:
raise SpotifyAPIError('Could not refresh API Token for Spotify user')
response = response.json()
access_token = response['access_token']
if "refresh_token" in response:
refresh_token = response['refresh_token']
else:
refresh_token = spotify_user.refresh_token
expires_at = int(time.time()) + response['expires_in']
db_spotify.update_token(spotify_user.user_id, access_token, refresh_token, expires_at)
return get_user(spotify_user.user_id)
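# Illustrative caller pattern for refresh_user_token (a sketch using only names
# defined in this module; the surrounding control flow is assumed, not actual
# ListenBrainz code):
#   try:
#       spotify_user = refresh_user_token(spotify_user)
#   except SpotifyInvalidGrantError:
#       remove_user(spotify_user.user_id)  # the user revoked authorization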
def get_spotify_oauth(permissions=None):
""" Returns a spotipy OAuth instance that can be used to authenticate with spotify.
Args: permissions ([str]): List of permissions needed by the OAuth instance
"""
client_id = current_app.config['SPOTIFY_CLIENT_ID']
client_secret = current_app.config['SPOTIFY_CLIENT_SECRET']
scope = ' '.join(permissions) if permissions else None
redirect_url = current_app.config['SPOTIFY_CALLBACK_URL']
return spotipy.oauth2.SpotifyOAuth(client_id, client_secret, redirect_uri=redirect_url, scope=scope)
def get_user(user_id):
""" Returns a Spotify instance corresponding to the specified LB row ID.
If the user_id is not present in the spotify table, returns None
Args:
user_id (int): the ListenBrainz row ID of the user
"""
row = db_spotify.get_user(user_id)
if row:
return Spotify.from_dbrow(row)
return None
def remove_user(user_id):
""" Delete user entry for user with specified ListenBrainz user ID.
Args:
user_id (int): the ListenBrainz row ID of the user
"""
db_spotify.delete_spotify(user_id)
def add_new_user(user_id, spot_access_token):
"""Create a spotify row for a user based on OAuth access tokens
Args:
user_id: A flask auth `current_user.id`
spot_access_token: A spotipy access token from SpotifyOAuth.get_access_token
"""
access_token = spot_access_token['access_token']
refresh_token = spot_access_token['refresh_token']
expires_at = int(time.time()) + spot_access_token['expires_in']
permissions = spot_access_token['scope']
active = SPOTIFY_IMPORT_PERMISSIONS[0] in permissions and SPOTIFY_IMPORT_PERMISSIONS[1] in permissions
db_spotify.create_spotify(user_id, access_token, refresh_token, expires_at, active, permissions)
def get_active_users_to_process():
""" Returns a list of Spotify user instances that need their Spotify listens imported.
"""
return [Spotify.from_dbrow(row) for row in db_spotify.get_active_users_to_process()]
def update_last_updated(user_id, success=True, error_message=None):
""" Update the last_update field for user with specified user ID.
Also, set the user as active or inactive depending on whether their listens
were imported without error.
If there was an error, add the error to the db.
Args:
user_id (int): the ListenBrainz row ID of the user
success (bool): flag representing whether the last import was successful or not.
error_message (str): the user-friendly error message to be displayed.
"""
if error_message:
db_spotify.add_update_error(user_id, error_message)
else:
db_spotify.update_last_updated(user_id, success)
def update_latest_listened_at(user_id, timestamp):
""" Update the latest_listened_at field for user with specified ListenBrainz user ID.
Args:
user_id (int): the ListenBrainz row ID of the user
timestamp (int): the unix timestamp of the latest listen imported for the user
"""
db_spotify.update_latest_listened_at(user_id, timestamp)
def get_access_token(code: str):
""" Get a valid Spotify Access token given the code.
Returns:
a dict with the following keys
{
'access_token',
'token_type',
'scope',
'expires_in',
'refresh_token',
}
Note: We use this function instead of spotipy's implementation because there
is a bug in the spotipy code which leads to loss of the scope received from the
Spotify API.
"""
r = _get_spotify_token("authorization_code", code)
if r.status_code != 200:
raise SpotifyListenBrainzError(r.reason)
return r.json()
def _get_spotify_token(grant_type: str, token: str) -> requests.Response:
""" Fetch access token or refresh token from spotify auth api
Args:
        grant_type (str): "authorization_code" to retrieve an access token, or "refresh_token" to refresh tokens
        token (str): the authorization code when first retrieving an access token, or the refresh token when refreshing access tokens
Returns:
response from the spotify authentication endpoint
"""
client_id = current_app.config['SPOTIFY_CLIENT_ID']
client_secret = current_app.config['SPOTIFY_CLIENT_SECRET']
auth_header = base64.b64encode(six.text_type(client_id + ':' + client_secret).encode('ascii'))
headers = {'Authorization': 'Basic %s' % auth_header.decode('ascii')}
token_key = "refresh_token" if grant_type == "refresh_token" else "code"
payload = {
'redirect_uri': current_app.config['SPOTIFY_CALLBACK_URL'],
token_key: token,
'grant_type': grant_type,
}
return requests.post(OAUTH_TOKEN_URL, data=payload, headers=headers, verify=True)
def get_user_dict(user_id):
""" Get spotify user details in the form of a dict
Args:
user_id (int): the row ID of the user in ListenBrainz
"""
user = get_user(user_id)
if not user:
return {}
return {
'access_token': user.user_token,
'permission': user.permission,
}
class SpotifyInvalidGrantError(Exception):
""" Raised if spotify API returns invalid_grant during authorization. This usually means that the user has revoked
authorization to the ListenBrainz application through Spotify UI."""
pass
class SpotifyImporterException(Exception):
pass
class SpotifyListenBrainzError(Exception):
pass
class SpotifyAPIError(Exception):
pass
|
NUKnightLab/StoryMapJS
|
refs/heads/master
|
stagedev.py
|
1
|
def build():
"""Build lib version"""
_setup_env()
# Get build config
if not 'build' in _config:
abort('Could not find "build" in config file')
# Check version
if not 'version' in _config:
_config['version'] = datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S')
warn('Using development version value "%(version)s"' % _config)
notice('Building version %(version)s...' % _config)
# Clean build directory
clean(_config['build_path'])
# Build it
for key, param in _config['build'].iteritems():
getattr(static, key)(_config, param)
def stage_dev():
"""
    Build lib and copy to local cdn repository as 'dev' version.
    No tagging/committing/etc.
"""
_setup_env()
if not 'stage' in _config:
abort('Could not find "stage" in config file')
# Make sure cdn exists
exists(dirname(env.cdn_path), required=True)
# Build version
build()
# Copy to local CDN repository
cdn_path = join(env.cdn_path, 'dev')
clean(cdn_path)
for r in _config['stage']:
static.copy(_config, [{
"src": r['src'],
"dst": cdn_path, "regex": r['regex']}])
# Create zip file in local CDN repository
_make_zip(join(cdn_path, '%(name)s.zip' % _config))
|
jstoxrocky/statsmodels
|
refs/heads/master
|
statsmodels/iolib/stata_summary_examples.py
|
39
|
""". regress totemp gnpdefl gnp unemp armed pop year
Source | SS df MS Number of obs = 16
-------------+------------------------------ F( 6, 9) = 330.29
Model | 184172402 6 30695400.3 Prob > F = 0.0000
Residual | 836424.129 9 92936.0144 R-squared = 0.9955
-------------+------------------------------ Adj R-squared = 0.9925
Total | 185008826 15 12333921.7 Root MSE = 304.85
------------------------------------------------------------------------------
totemp | Coef. Std. Err. t P>|t| [95% Conf. Interval]
-------------+----------------------------------------------------------------
gnpdefl | 15.06167 84.91486 0.18 0.863 -177.0291 207.1524
gnp | -.0358191 .033491 -1.07 0.313 -.111581 .0399428
unemp | -2.020229 .4883995 -4.14 0.003 -3.125065 -.9153928
armed | -1.033227 .2142741 -4.82 0.001 -1.517948 -.5485049
pop | -.0511045 .2260731 -0.23 0.826 -.5625173 .4603083
year | 1829.151 455.4785 4.02 0.003 798.7873 2859.515
_cons | -3482258 890420.3 -3.91 0.004 -5496529 -1467987
------------------------------------------------------------------------------
"""
# From Stata using the Longley dataset as in the test and example for GLM
"""
. glm totemp gnpdefl gnp unemp armed pop year
Iteration 0: log likelihood = -109.61744
Generalized linear models No. of obs = 16
Optimization : ML Residual df = 9
Scale parameter = 92936.01
Deviance = 836424.1293 (1/df) Deviance = 92936.01
Pearson = 836424.1293 (1/df) Pearson = 92936.01
Variance function: V(u) = 1 [Gaussian]
Link function : g(u) = u [Identity]
AIC = 14.57718
Log likelihood = -109.6174355 BIC = 836399.2
------------------------------------------------------------------------------
| OIM
totemp | Coef. Std. Err. z P>|z| [95% Conf. Interval]
-------------+----------------------------------------------------------------
gnpdefl | 15.06167 84.91486 0.18 0.859 -151.3684 181.4917
gnp | -.0358191 .033491 -1.07 0.285 -.1014603 .029822
unemp | -2.020229 .4883995 -4.14 0.000 -2.977475 -1.062984
armed | -1.033227 .2142741 -4.82 0.000 -1.453196 -.6132571
pop | -.0511045 .2260731 -0.23 0.821 -.4941996 .3919906
year | 1829.151 455.4785 4.02 0.000 936.4298 2721.873
_cons | -3482258 890420.3 -3.91 0.000 -5227450 -1737066
------------------------------------------------------------------------------
"""
# RLM Example
"""
. rreg stackloss airflow watertemp acidconc
Huber iteration 1: maximum difference in weights = .48402478
Huber iteration 2: maximum difference in weights = .07083248
Huber iteration 3: maximum difference in weights = .03630349
Biweight iteration 4: maximum difference in weights = .2114744
Biweight iteration 5: maximum difference in weights = .04709559
Biweight iteration 6: maximum difference in weights = .01648123
Biweight iteration 7: maximum difference in weights = .01050023
Biweight iteration 8: maximum difference in weights = .0027233
Robust regression Number of obs = 21
F( 3, 17) = 74.15
Prob > F = 0.0000
------------------------------------------------------------------------------
stackloss | Coef. Std. Err. t P>|t| [95% Conf. Interval]
-------------+----------------------------------------------------------------
airflow | .8526511 .1223835 6.97 0.000 .5944446 1.110858
watertemp | .8733594 .3339811 2.61 0.018 .1687209 1.577998
acidconc | -.1224349 .1418364 -0.86 0.400 -.4216836 .1768139
_cons | -41.6703 10.79559 -3.86 0.001 -64.447 -18.89361
------------------------------------------------------------------------------
"""
|
guyromm/greencouriers
|
refs/heads/master
|
greencouriers/lib/app_globals.py
|
1
|
"""The application's Globals object"""
class Globals(object):
"""Globals acts as a container for objects available throughout the
life of the application
"""
    def __init__(self, config):
"""One instance of Globals is created during application
initialization and is available during requests via the
'app_globals' variable
"""
pass
|
thedep2/CouchPotatoServer
|
refs/heads/develop
|
libs/requests/exceptions.py
|
21
|
# -*- coding: utf-8 -*-
"""
requests.exceptions
~~~~~~~~~~~~~~~~~~~
This module contains the set of Requests' exceptions.
"""
from .packages.urllib3.exceptions import HTTPError as BaseHTTPError
class RequestException(IOError):
"""There was an ambiguous exception that occurred while handling your
request."""
def __init__(self, *args, **kwargs):
"""
Initialize RequestException with `request` and `response` objects.
"""
response = kwargs.pop('response', None)
self.response = response
self.request = kwargs.pop('request', None)
if (response is not None and not self.request and
hasattr(response, 'request')):
self.request = self.response.request
super(RequestException, self).__init__(*args, **kwargs)
class HTTPError(RequestException):
"""An HTTP error occurred."""
class ConnectionError(RequestException):
"""A Connection error occurred."""
class ProxyError(ConnectionError):
"""A proxy error occurred."""
class SSLError(ConnectionError):
"""An SSL error occurred."""
class Timeout(RequestException):
"""The request timed out.
Catching this error will catch both :exc:`ConnectTimeout` and
:exc:`ReadTimeout` errors.
"""
class ConnectTimeout(ConnectionError, Timeout):
"""The request timed out while trying to connect to the server.
Requests that produce this error are safe to retry
"""
class ReadTimeout(Timeout):
"""The server did not send any data in the allotted amount of time."""
class URLRequired(RequestException):
"""A valid URL is required to make a request."""
class TooManyRedirects(RequestException):
"""Too many redirects."""
class MissingSchema(RequestException, ValueError):
"""The URL schema (e.g. http or https) is missing."""
class InvalidSchema(RequestException, ValueError):
"""See defaults.py for valid schemas."""
class InvalidURL(RequestException, ValueError):
""" The URL provided was somehow invalid. """
class ChunkedEncodingError(RequestException):
"""The server declared chunked encoding but sent an invalid chunk."""
class ContentDecodingError(RequestException, BaseHTTPError):
"""Failed to decode response content"""
|
JuliBakagianni/CEF-ELRC
|
refs/heads/master
|
lib/python2.7/site-packages/haystack/query.py
|
9
|
import logging
import operator
import warnings
from haystack import connections, connection_router
from haystack.backends import SQ
from haystack.constants import REPR_OUTPUT_SIZE, ITERATOR_LOAD_PER_QUERY, DEFAULT_OPERATOR
from haystack.exceptions import NotHandled
from haystack.inputs import Raw, Clean, AutoQuery
class SearchQuerySet(object):
"""
Provides a way to specify search parameters and lazily load results.
Supports chaining (a la QuerySet) to narrow the search.
"""
def __init__(self, using=None, query=None):
# ``_using`` should only ever be a value other than ``None`` if it's
# been forced with the ``.using`` method.
self._using = using
self.query = None
self._determine_backend()
# If ``query`` is present, it should override even what the routers
# think.
if query is not None:
self.query = query
self._result_cache = []
self._result_count = None
self._cache_full = False
self._load_all = False
self._ignored_result_count = 0
self.log = logging.getLogger('haystack')
def _determine_backend(self):
# A backend has been manually selected. Use it instead.
if self._using is not None:
return self._using
# No backend, so rely on the routers to figure out what's right.
from haystack import connections
hints = {}
if self.query:
hints['models'] = self.query.models
backend_alias = connection_router.for_read(**hints)
# The ``SearchQuery`` might swap itself out for a different variant
# here.
if self.query:
self.query = self.query.using(backend_alias)
else:
self.query = connections[backend_alias].get_query()
def __getstate__(self):
"""
For pickling.
"""
len(self)
obj_dict = self.__dict__.copy()
obj_dict['_iter'] = None
obj_dict['log'] = None
return obj_dict
def __setstate__(self, data_dict):
"""
For unpickling.
"""
self.__dict__ = data_dict
self.log = logging.getLogger('haystack')
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE])
if len(self) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
def __len__(self):
if not self._result_count:
self._result_count = self.query.get_count()
# Some backends give weird, false-y values here. Convert to zero.
if not self._result_count:
self._result_count = 0
# This needs to return the actual number of hits, not what's in the cache.
return self._result_count - self._ignored_result_count
def __iter__(self):
if self._cache_is_full():
# We've got a fully populated cache. Let Python do the hard work.
return iter(self._result_cache)
return self._manual_iter()
def __and__(self, other):
if isinstance(other, EmptySearchQuerySet):
return other._clone()
combined = self._clone()
combined.query.combine(other.query, SQ.AND)
return combined
def __or__(self, other):
combined = self._clone()
if isinstance(other, EmptySearchQuerySet):
return combined
combined.query.combine(other.query, SQ.OR)
return combined
def _cache_is_full(self):
if not self.query.has_run():
return False
if len(self) <= 0:
return True
try:
self._result_cache.index(None)
return False
except ValueError:
# No ``None``s found in the results. Check the length of the cache.
return len(self._result_cache) > 0
def _manual_iter(self):
# If we're here, our cache isn't fully populated.
# For efficiency, fill the cache as we go if we run out of results.
# Also, this can't be part of the __iter__ method due to Python's rules
# about generator functions.
current_position = 0
current_cache_max = 0
while True:
if len(self._result_cache) > 0:
try:
current_cache_max = self._result_cache.index(None)
except ValueError:
current_cache_max = len(self._result_cache)
while current_position < current_cache_max:
yield self._result_cache[current_position]
current_position += 1
if self._cache_is_full():
raise StopIteration
# We've run out of results and haven't hit our limit.
# Fill more of the cache.
if not self._fill_cache(current_position, current_position + ITERATOR_LOAD_PER_QUERY):
raise StopIteration
def _fill_cache(self, start, end, **kwargs):
# Tell the query where to start from and how many we'd like.
self.query._reset()
self.query.set_limits(start, end)
results = self.query.get_results(**kwargs)
if results == None or len(results) == 0:
return False
# Setup the full cache now that we know how many results there are.
# We need the ``None``s as placeholders to know what parts of the
# cache we have/haven't filled.
# Using ``None`` like this takes up very little memory. In testing,
        # an array of 100,000 ``None``s consumed less than 0.5 MB, which ought
# to be an acceptable loss for consistent and more efficient caching.
if len(self._result_cache) == 0:
self._result_cache = [None for i in xrange(self.query.get_count())]
if start is None:
start = 0
if end is None:
end = self.query.get_count()
to_cache = self.post_process_results(results)
# Assign by slice.
self._result_cache[start:start + len(to_cache)] = to_cache
return True
def post_process_results(self, results):
to_cache = []
# Check if we wish to load all objects.
if self._load_all:
original_results = []
models_pks = {}
loaded_objects = {}
# Remember the search position for each result so we don't have to resort later.
for result in results:
original_results.append(result)
models_pks.setdefault(result.model, []).append(result.pk)
# Load the objects for each model in turn.
for model in models_pks:
try:
ui = connections[self.query._using].get_unified_index()
index = ui.get_index(model)
objects = index.read_queryset()
loaded_objects[model] = objects.in_bulk(models_pks[model])
except NotHandled:
self.log.warning("Model '%s.%s' not handled by the routers.", self.app_label, self.model_name)
# Revert to old behaviour
loaded_objects[model] = model._default_manager.in_bulk(models_pks[model])
for result in results:
if self._load_all:
# We have to deal with integer keys being cast from strings
model_objects = loaded_objects.get(result.model, {})
if not result.pk in model_objects:
try:
result.pk = int(result.pk)
except ValueError:
pass
try:
result._object = model_objects[result.pk]
except KeyError:
# The object was either deleted since we indexed or should
# be ignored; fail silently.
self._ignored_result_count += 1
continue
to_cache.append(result)
return to_cache
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice, int, long)):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0))
or (isinstance(k, slice) and (k.start is None or k.start >= 0)
and (k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
# Remember if it's a slice or not. We're going to treat everything as
        # a slice to simplify the logic and will `.pop()` at the end as needed.
if isinstance(k, slice):
is_slice = True
start = k.start
if k.stop is not None:
bound = int(k.stop)
else:
bound = None
else:
is_slice = False
start = k
bound = k + 1
        # We need to check whether we need to populate more of the cache.
if len(self._result_cache) <= 0 or (None in self._result_cache[start:bound] and not self._cache_is_full()):
try:
self._fill_cache(start, bound)
except StopIteration:
# There's nothing left, even though the bound is higher.
pass
# Cache should be full enough for our needs.
if is_slice:
return self._result_cache[start:bound]
else:
return self._result_cache[start]
# Methods that return a SearchQuerySet.
def all(self):
"""Returns all results for the query."""
return self._clone()
def none(self):
"""Returns all results for the query."""
return self._clone(klass=EmptySearchQuerySet)
def filter(self, *args, **kwargs):
"""Narrows the search based on certain attributes and the default operator."""
if DEFAULT_OPERATOR == 'OR':
return self.filter_or(*args, **kwargs)
else:
return self.filter_and(*args, **kwargs)
def exclude(self, *args, **kwargs):
"""Narrows the search by ensuring certain attributes are not included."""
clone = self._clone()
clone.query.add_filter(~SQ(*args, **kwargs))
return clone
def filter_and(self, *args, **kwargs):
"""Narrows the search by looking for (and including) certain attributes."""
clone = self._clone()
clone.query.add_filter(SQ(*args, **kwargs))
return clone
def filter_or(self, *args, **kwargs):
"""Narrows the search by ensuring certain attributes are not included."""
clone = self._clone()
clone.query.add_filter(SQ(*args, **kwargs), use_or=True)
return clone
def order_by(self, *args):
"""Alters the order in which the results should appear."""
clone = self._clone()
for field in args:
clone.query.add_order_by(field)
return clone
def order_by_distance(self, **kwargs):
"""Alters the order in which the results should appear."""
clone = self._clone()
clone.query.add_order_by_distance(**kwargs)
return clone
def highlight(self):
"""Adds highlighting to the results."""
clone = self._clone()
clone.query.add_highlight()
return clone
def models(self, *models):
"""Accepts an arbitrary number of Model classes to include in the search."""
clone = self._clone()
for model in models:
if not model in connections[self.query._using].get_unified_index().get_indexed_models():
warnings.warn('The model %r is not registered for search.' % model)
clone.query.add_model(model)
return clone
def result_class(self, klass):
"""
Allows specifying a different class to use for results.
Overrides any previous usages. If ``None`` is provided, Haystack will
revert back to the default ``SearchResult`` object.
"""
clone = self._clone()
clone.query.set_result_class(klass)
return clone
def boost(self, term, boost):
"""Boosts a certain aspect of the query."""
clone = self._clone()
clone.query.add_boost(term, boost)
return clone
def facet(self, field):
"""Adds faceting to a query for the provided field."""
clone = self._clone()
clone.query.add_field_facet(field)
return clone
def within(self, field, point_1, point_2):
"""Spatial: Adds a bounding box search to the query."""
clone = self._clone()
clone.query.add_within(field, point_1, point_2)
return clone
def dwithin(self, field, point, distance):
"""Spatial: Adds a distance-based search to the query."""
clone = self._clone()
clone.query.add_dwithin(field, point, distance)
return clone
def distance(self, field, point):
"""
Spatial: Denotes results must have distance measurements from the
provided point.
"""
clone = self._clone()
clone.query.add_distance(field, point)
return clone
def date_facet(self, field, start_date, end_date, gap_by, gap_amount=1):
"""Adds faceting to a query for the provided field by date."""
clone = self._clone()
clone.query.add_date_facet(field, start_date, end_date, gap_by, gap_amount=gap_amount)
return clone
def query_facet(self, field, query):
"""Adds faceting to a query for the provided field with a custom query."""
clone = self._clone()
clone.query.add_query_facet(field, query)
return clone
def narrow(self, query):
"""Pushes existing facet choices into the search."""
clone = self._clone()
clone.query.add_narrow_query(query)
return clone
def raw_search(self, query_string, **kwargs):
"""Passes a raw query directly to the backend."""
return self.filter(content=Raw(query_string, **kwargs))
def load_all(self):
"""Efficiently populates the objects in the search results."""
clone = self._clone()
clone._load_all = True
return clone
def auto_query(self, query_string, fieldname='content'):
"""
        Performs a best-guess construction of the search query.
This method is somewhat naive but works well enough for the simple,
common cases.
"""
kwargs = {
fieldname: AutoQuery(query_string)
}
return self.filter(**kwargs)
def autocomplete(self, **kwargs):
"""
A shortcut method to perform an autocomplete search.
Must be run against fields that are either ``NgramField`` or
``EdgeNgramField``.
"""
clone = self._clone()
query_bits = []
for field_name, query in kwargs.items():
for word in query.split(' '):
bit = clone.query.clean(word.strip())
kwargs = {
field_name: bit,
}
query_bits.append(SQ(**kwargs))
return clone.filter(reduce(operator.__and__, query_bits))
def using(self, connection_name):
"""
Allows switching which connection the ``SearchQuerySet`` uses to
search in.
"""
clone = self._clone()
clone.query = self.query.using(connection_name)
clone._using = connection_name
return clone
# Methods that do not return a SearchQuerySet.
def count(self):
"""Returns the total number of matching results."""
return len(self)
def best_match(self):
"""Returns the best/top search result that matches the query."""
return self[0]
def latest(self, date_field):
"""Returns the most recent search result that matches the query."""
clone = self._clone()
clone.query.clear_order_by()
clone.query.add_order_by("-%s" % date_field)
return clone.best_match()
def more_like_this(self, model_instance):
"""Finds similar results to the object passed in."""
clone = self._clone()
clone.query.more_like_this(model_instance)
return clone
def facet_counts(self):
"""
Returns the facet counts found by the query.
This will cause the query to execute and should generally be used when
presenting the data.
"""
if self.query.has_run():
return self.query.get_facet_counts()
else:
clone = self._clone()
return clone.query.get_facet_counts()
def spelling_suggestion(self, preferred_query=None):
"""
Returns the spelling suggestion found by the query.
To work, you must set ``INCLUDE_SPELLING`` within your connection's
settings dictionary to ``True``. Otherwise, ``None`` will be returned.
This will cause the query to execute and should generally be used when
presenting the data.
"""
if self.query.has_run():
return self.query.get_spelling_suggestion(preferred_query)
else:
clone = self._clone()
return clone.query.get_spelling_suggestion(preferred_query)
def values(self, *fields):
"""
Returns a list of dictionaries, each containing the key/value pairs for
the result, exactly like Django's ``ValuesQuerySet``.
"""
qs = self._clone(klass=ValuesSearchQuerySet)
qs._fields.extend(fields)
return qs
def values_list(self, *fields, **kwargs):
"""
Returns a list of field values as tuples, exactly like Django's
        ``QuerySet.values_list``.
Optionally accepts a ``flat=True`` kwarg, which in the case of a
single field being provided, will return a flat list of that field
rather than a list of tuples.
"""
flat = kwargs.pop("flat", False)
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
qs = self._clone(klass=ValuesListSearchQuerySet)
qs._fields.extend(fields)
qs._flat = flat
return qs
# Utility methods.
def _clone(self, klass=None):
if klass is None:
klass = self.__class__
query = self.query._clone()
clone = klass(query=query)
clone._load_all = self._load_all
return clone
class EmptySearchQuerySet(SearchQuerySet):
"""
A stubbed SearchQuerySet that behaves as normal but always returns no
results.
"""
def __len__(self):
return 0
def _cache_is_full(self):
# Pretend the cache is always full with no results.
return True
def _clone(self, klass=None):
clone = super(EmptySearchQuerySet, self)._clone(klass=klass)
clone._result_cache = []
return clone
def _fill_cache(self, start, end):
return False
def facet_counts(self):
return {}
class ValuesListSearchQuerySet(SearchQuerySet):
"""
A ``SearchQuerySet`` which returns a list of field values as tuples, exactly
like Django's ``ValuesListQuerySet``.
"""
def __init__(self, *args, **kwargs):
super(ValuesListSearchQuerySet, self).__init__(*args, **kwargs)
self._flat = False
self._fields = []
# Removing this dependency would require refactoring much of the backend
# code (_process_results, etc.) and these aren't large enough to make it
# an immediate priority:
self._internal_fields = ['id', 'django_ct', 'django_id', 'score']
def _clone(self, klass=None):
clone = super(ValuesListSearchQuerySet, self)._clone(klass=klass)
clone._fields = self._fields
clone._flat = self._flat
return clone
def _fill_cache(self, start, end):
query_fields = set(self._internal_fields)
query_fields.update(self._fields)
kwargs = {
'fields': query_fields
}
return super(ValuesListSearchQuerySet, self)._fill_cache(start, end, **kwargs)
def post_process_results(self, results):
to_cache = []
if self._flat:
accum = to_cache.extend
else:
accum = to_cache.append
for result in results:
accum([getattr(result, i, None) for i in self._fields])
return to_cache
class ValuesSearchQuerySet(ValuesListSearchQuerySet):
"""
A ``SearchQuerySet`` which returns a list of dictionaries, each containing
the key/value pairs for the result, exactly like Django's
``ValuesQuerySet``.
"""
def _fill_cache(self, start, end):
query_fields = set(self._internal_fields)
query_fields.update(self._fields)
kwargs = {
'fields': query_fields
}
return super(ValuesListSearchQuerySet, self)._fill_cache(start, end, **kwargs)
def post_process_results(self, results):
to_cache = []
for result in results:
to_cache.append(dict((i, getattr(result, i, None)) for i in self._fields))
return to_cache
class RelatedSearchQuerySet(SearchQuerySet):
"""
A variant of the SearchQuerySet that can handle `load_all_queryset`s.
This is predominantly different in the `_fill_cache` method, as it is
    far less efficient but needs to fill the cache in order to maintain
    consistency.
"""
_load_all_querysets = {}
_result_cache = []
def _cache_is_full(self):
return len(self._result_cache) >= len(self)
def _manual_iter(self):
# If we're here, our cache isn't fully populated.
# For efficiency, fill the cache as we go if we run out of results.
# Also, this can't be part of the __iter__ method due to Python's rules
# about generator functions.
current_position = 0
current_cache_max = 0
while True:
current_cache_max = len(self._result_cache)
while current_position < current_cache_max:
yield self._result_cache[current_position]
current_position += 1
if self._cache_is_full():
raise StopIteration
# We've run out of results and haven't hit our limit.
# Fill more of the cache.
start = current_position + self._ignored_result_count
if not self._fill_cache(start, start + ITERATOR_LOAD_PER_QUERY):
raise StopIteration
def _fill_cache(self, start, end):
# Tell the query where to start from and how many we'd like.
self.query._reset()
self.query.set_limits(start, end)
results = self.query.get_results()
if len(results) == 0:
return False
if start is None:
start = 0
if end is None:
end = self.query.get_count()
# Check if we wish to load all objects.
if self._load_all:
original_results = []
models_pks = {}
loaded_objects = {}
# Remember the search position for each result so we don't have to resort later.
for result in results:
original_results.append(result)
models_pks.setdefault(result.model, []).append(result.pk)
# Load the objects for each model in turn.
for model in models_pks:
if model in self._load_all_querysets:
# Use the overriding queryset.
loaded_objects[model] = self._load_all_querysets[model].in_bulk(models_pks[model])
else:
# Check the SearchIndex for the model for an override.
try:
index = connections[self.query._using].get_unified_index().get_index(model)
qs = index.load_all_queryset()
loaded_objects[model] = qs.in_bulk(models_pks[model])
except NotHandled:
# The model returned doesn't seem to be handled by the
# routers. We should silently fail and populate
# nothing for those objects.
loaded_objects[model] = []
if len(results) + len(self._result_cache) < len(self) and len(results) < ITERATOR_LOAD_PER_QUERY:
self._ignored_result_count += ITERATOR_LOAD_PER_QUERY - len(results)
for result in results:
if self._load_all:
# We have to deal with integer keys being cast from strings; if this
# fails we've got a character pk.
try:
result.pk = int(result.pk)
except ValueError:
pass
try:
result._object = loaded_objects[result.model][result.pk]
except (KeyError, IndexError):
# The object was either deleted since we indexed or should
# be ignored; fail silently.
self._ignored_result_count += 1
continue
self._result_cache.append(result)
return True
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice, int, long)):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0))
or (isinstance(k, slice) and (k.start is None or k.start >= 0)
and (k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
# Remember if it's a slice or not. We're going to treat everything as
        # a slice to simplify the logic and will `.pop()` at the end as needed.
if isinstance(k, slice):
is_slice = True
start = k.start
if k.stop is not None:
bound = int(k.stop)
else:
bound = None
else:
is_slice = False
start = k
bound = k + 1
        # We need to check whether we need to populate more of the cache.
if len(self._result_cache) <= 0 or not self._cache_is_full():
try:
while len(self._result_cache) < bound and not self._cache_is_full():
current_max = len(self._result_cache) + self._ignored_result_count
self._fill_cache(current_max, current_max + ITERATOR_LOAD_PER_QUERY)
except StopIteration:
# There's nothing left, even though the bound is higher.
pass
# Cache should be full enough for our needs.
if is_slice:
return self._result_cache[start:bound]
else:
return self._result_cache[start]
def load_all_queryset(self, model, queryset):
"""
Allows for specifying a custom ``QuerySet`` that changes how ``load_all``
will fetch records for the provided model.
This is useful for post-processing the results from the query, enabling
things like adding ``select_related`` or filtering certain data.
"""
clone = self._clone()
clone._load_all_querysets[model] = queryset
return clone
def _clone(self, klass=None):
if klass is None:
klass = self.__class__
query = self.query._clone()
clone = klass(query=query)
clone._load_all = self._load_all
clone._load_all_querysets = self._load_all_querysets
return clone
|
zzz14/LOST-FOUND
|
refs/heads/master
|
userpage/jieba1/test/parallel/test_disable_hmm.py
|
5
|
#encoding=utf-8
from __future__ import print_function
import sys
sys.path.append("../../")
import jieba
jieba.enable_parallel(4)
def cuttest(test_sent):
result = jieba.cut(test_sent, HMM=False)
for word in result:
print(word, "/", end=' ')
print("")
if __name__ == "__main__":
cuttest("这是一个伸手不见五指的黑夜。我叫孙悟空,我爱北京,我爱Python和C++。")
cuttest("我不喜欢日本和服。")
cuttest("雷猴回归人间。")
cuttest("工信处女干事每月经过下属科室都要亲口交代24口交换机等技术性器件的安装工作")
cuttest("我需要廉租房")
cuttest("永和服装饰品有限公司")
cuttest("我爱北京天安门")
cuttest("abc")
cuttest("隐马尔可夫")
cuttest("雷猴是个好网站")
cuttest("“Microsoft”一词由“MICROcomputer(微型计算机)”和“SOFTware(软件)”两部分组成")
cuttest("草泥马和欺实马是今年的流行词汇")
cuttest("伊藤洋华堂总府店")
cuttest("中国科学院计算技术研究所")
cuttest("罗密欧与朱丽叶")
cuttest("我购买了道具和服装")
cuttest("PS: 我觉得开源有一个好处,就是能够敦促自己不断改进,避免敞帚自珍")
cuttest("湖北省石首市")
cuttest("湖北省十堰市")
cuttest("总经理完成了这件事情")
cuttest("电脑修好了")
cuttest("做好了这件事情就一了百了了")
cuttest("人们审美的观点是不同的")
cuttest("我们买了一个美的空调")
cuttest("线程初始化时我们要注意")
cuttest("一个分子是由好多原子组织成的")
cuttest("祝你马到功成")
cuttest("他掉进了无底洞里")
cuttest("中国的首都是北京")
cuttest("孙君意")
cuttest("外交部发言人马朝旭")
cuttest("领导人会议和第四届东亚峰会")
cuttest("在过去的这五年")
cuttest("还需要很长的路要走")
cuttest("60周年首都阅兵")
cuttest("你好人们审美的观点是不同的")
cuttest("买水果然后来世博园")
cuttest("买水果然后去世博园")
cuttest("但是后来我才知道你是对的")
cuttest("存在即合理")
cuttest("的的的的的在的的的的就以和和和")
cuttest("I love你,不以为耻,反以为rong")
cuttest("因")
cuttest("")
cuttest("hello你好人们审美的观点是不同的")
cuttest("很好但主要是基于网页形式")
cuttest("hello你好人们审美的观点是不同的")
cuttest("为什么我不能拥有想要的生活")
cuttest("后来我才")
cuttest("此次来中国是为了")
cuttest("使用了它就可以解决一些问题")
cuttest(",使用了它就可以解决一些问题")
cuttest("其实使用了它就可以解决一些问题")
cuttest("好人使用了它就可以解决一些问题")
cuttest("是因为和国家")
cuttest("老年搜索还支持")
cuttest("干脆就把那部蒙人的闲法给废了拉倒!RT @laoshipukong : 27日,全国人大常委会第三次审议侵权责任法草案,删除了有关医疗损害责任“举证倒置”的规定。在医患纠纷中本已处于弱势地位的消费者由此将陷入万劫不复的境地。 ")
cuttest("大")
cuttest("")
cuttest("他说的确实在理")
cuttest("长春市长春节讲话")
cuttest("结婚的和尚未结婚的")
cuttest("结合成分子时")
cuttest("旅游和服务是最好的")
cuttest("这件事情的确是我的错")
cuttest("供大家参考指正")
cuttest("哈尔滨政府公布塌桥原因")
cuttest("我在机场入口处")
cuttest("邢永臣摄影报道")
cuttest("BP神经网络如何训练才能在分类时增加区分度?")
cuttest("南京市长江大桥")
cuttest("应一些使用者的建议,也为了便于利用NiuTrans用于SMT研究")
cuttest('长春市长春药店')
cuttest('邓颖超生前最喜欢的衣服')
cuttest('胡锦涛是热爱世界和平的政治局常委')
cuttest('程序员祝海林和朱会震是在孙健的左面和右面, 范凯在最右面.再往左是李松洪')
cuttest('一次性交多少钱')
cuttest('两块五一套,三块八一斤,四块七一本,五块六一条')
cuttest('小和尚留了一个像大和尚一样的和尚头')
cuttest('我是中华人民共和国公民;我爸爸是共和党党员; 地铁和平门站')
|
davemerwin/blue-channel
|
refs/heads/master
|
external_apps/threadedcomments/forms.py
|
15
|
from django import forms
from threadedcomments.models import DEFAULT_MAX_COMMENT_LENGTH
from threadedcomments.models import FreeThreadedComment, ThreadedComment
from django.utils.translation import ugettext_lazy as _
class ThreadedCommentForm(forms.ModelForm):
"""
Form which can be used to validate data for a new ThreadedComment.
    It consists of just two fields: ``comment`` and ``markup``.
The ``comment`` field is the only one which is required.
"""
comment = forms.CharField(
label = _('comment'),
max_length = DEFAULT_MAX_COMMENT_LENGTH,
widget = forms.Textarea
)
class Meta:
model = ThreadedComment
fields = ('comment', 'markup')
class FreeThreadedCommentForm(forms.ModelForm):
"""
Form which can be used to validate data for a new FreeThreadedComment.
It consists of just a few fields: ``comment``, ``name``, ``website``,
``email``, and ``markup``.
    The fields ``comment`` and ``name`` are the only ones which are required.
"""
comment = forms.CharField(
label = _('comment'),
max_length = DEFAULT_MAX_COMMENT_LENGTH,
widget = forms.Textarea
)
class Meta:
model = FreeThreadedComment
fields = ('comment', 'name', 'website', 'email', 'markup')
|
uwdata/termite-visualizations
|
refs/heads/master
|
web2py/gluon/contrib/pg8000/interface.py
|
24
|
# vim: sw=4:expandtab:foldmethod=marker
#
# Copyright (c) 2007-2009, Mathieu Fenniak
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = "Mathieu Fenniak"
import socket
import protocol
import threading
from errors import *
def conninfo_parse(conninfo):
"Conninfo parser routine based on libpq conninfo_parse"
options = {}
buf = conninfo + " "
tmp = pname = ""
quoted_string = False
cp = 0
while cp < len(buf):
# Skip blanks before the parameter name
c = buf[cp]
if c.isspace() and tmp and not quoted_string and pname:
options[pname] = tmp
tmp = pname = ""
elif c == "'":
quoted_string = not quoted_string
elif c == '\\':
cp += 1
tmp += buf[cp]
elif c == "=":
if not tmp:
raise RuntimeError("missing parameter name (conninfo:%s)" % cp)
pname = tmp
tmp = ""
elif not c.isspace() or quoted_string:
tmp += c
cp += 1
if quoted_string:
raise RuntimeError("unterminated quoted string (conninfo:%s)" % cp)
return options
class DataIterator(object):
def __init__(self, obj, func):
self.obj = obj
self.func = func
def __iter__(self):
return self
def next(self):
retval = self.func(self.obj)
if retval == None:
raise StopIteration()
return retval
statement_number_lock = threading.Lock()
statement_number = 0
##
# This class represents a prepared statement. A prepared statement is
# pre-parsed on the server, which reduces the need to parse the query every
# time it is run. The statement can have parameters in the form of $1, $2, $3,
# etc. When parameters are used, the types of the parameters need to be
# specified when creating the prepared statement.
# <p>
# As of v1.01, instances of this class are thread-safe. This means that a
# single PreparedStatement can be accessed by multiple threads without the
# internal consistency of the statement being altered. However, the
# responsibility is on the client application to ensure that one thread reading
# from a statement isn't affected by another thread starting a new query with
# the same statement.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
#
# @param connection An instance of {@link Connection Connection}.
#
# @param statement The SQL statement to be represented, often containing
# parameters in the form of $1, $2, $3, etc.
#
# @param types Python type objects for each parameter in the SQL
# statement. For example, int, float, str.
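# A usage sketch (illustrative only, not from the original source; ``conn`` is
# an assumed open Connection):
#   s = PreparedStatement(conn, "SELECT * FROM users WHERE id = $1", int)
#   s.execute(42)
#   for row in s.iterate_dict():
#       print row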
class PreparedStatement(object):
##
# Determines the number of rows to read from the database server at once.
# Reading more rows increases performance at the cost of memory. The
    # default value is 100 rows. The effect of this parameter is transparent.
    # That is, the library automatically reads more rows when the cache is
    # empty.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx. It is
# possible that implementation changes in the future could cause this
# parameter to be ignored.
row_cache_size = 100
def __init__(self, connection, statement, *types, **kwargs):
global statement_number
if connection == None or connection.c == None:
raise InterfaceError("connection not provided")
try:
statement_number_lock.acquire()
self._statement_number = statement_number
statement_number += 1
finally:
statement_number_lock.release()
self.c = connection.c
self._portal_name = None
self._statement_name = kwargs.get("statement_name", "pg8000_statement_%s" % self._statement_number)
self._row_desc = None
self._cached_rows = []
self._ongoing_row_count = 0
self._command_complete = True
self._parse_row_desc = self.c.parse(self._statement_name, statement, types)
self._lock = threading.RLock()
def close(self):
if self._statement_name != "": # don't close unnamed statement
self.c.close_statement(self._statement_name)
if self._portal_name != None:
self.c.close_portal(self._portal_name)
self._portal_name = None
row_description = property(lambda self: self._getRowDescription())
def _getRowDescription(self):
if self._row_desc == None:
return None
return self._row_desc.fields
##
# Run the SQL prepared statement with the given parameters.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
def execute(self, *args, **kwargs):
self._lock.acquire()
try:
if not self._command_complete:
# cleanup last execute
self._cached_rows = []
self._ongoing_row_count = 0
if self._portal_name != None:
self.c.close_portal(self._portal_name)
self._command_complete = False
self._portal_name = "pg8000_portal_%s" % self._statement_number
self._row_desc, cmd = self.c.bind(self._portal_name, self._statement_name, args, self._parse_row_desc, kwargs.get("stream"))
if self._row_desc:
# We execute our cursor right away to fill up our cache. This
# prevents the cursor from being destroyed, apparently, by a rogue
# Sync between Bind and Execute. Since it is quite likely that
# data will be read from us right away anyways, this seems a safe
# move for now.
self._fill_cache()
else:
self._command_complete = True
self._ongoing_row_count = -1
if cmd != None and cmd.rows != None:
self._ongoing_row_count = cmd.rows
finally:
self._lock.release()
def _fill_cache(self):
self._lock.acquire()
try:
if self._cached_rows:
raise InternalError("attempt to fill cache that isn't empty")
end_of_data, rows = self.c.fetch_rows(self._portal_name, self.row_cache_size, self._row_desc)
self._cached_rows = rows
if end_of_data:
self._command_complete = True
finally:
self._lock.release()
def _fetch(self):
if not self._row_desc:
raise ProgrammingError("no result set")
self._lock.acquire()
try:
if not self._cached_rows:
if self._command_complete:
return None
self._fill_cache()
if self._command_complete and not self._cached_rows:
# fill cache tells us the command is complete, but yet we have
# no rows after filling our cache. This is a special case when
# a query returns no rows.
return None
row = self._cached_rows.pop(0)
self._ongoing_row_count += 1
return tuple(row)
finally:
self._lock.release()
##
# Return a count of the number of rows relevant to the executed statement.
# For a SELECT, this is the number of rows returned. For UPDATE or DELETE,
# this the number of rows affected. For INSERT, the number of rows
# inserted. This property may have a value of -1 to indicate that there
# was no row count.
# <p>
# During a result-set query (eg. SELECT, or INSERT ... RETURNING ...),
# accessing this property requires reading the entire result-set into
# memory, as reading the data to completion is the only way to determine
# the total number of rows. Avoid using this property with
# result-set queries, as it may cause unexpected memory usage.
# <p>
# Stability: Added in v1.03, stability guaranteed for v1.xx.
row_count = property(lambda self: self._get_row_count())
def _get_row_count(self):
self._lock.acquire()
try:
if not self._command_complete:
end_of_data, rows = self.c.fetch_rows(self._portal_name, 0, self._row_desc)
self._cached_rows += rows
if end_of_data:
self._command_complete = True
else:
raise InternalError("fetch_rows(0) did not hit end of data")
return self._ongoing_row_count + len(self._cached_rows)
finally:
self._lock.release()
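# A hedged illustration of the row_count note above (the connection db and
# table t are hypothetical):
#
#   upd = PreparedStatement(db, "UPDATE t SET x = 1")
#   upd.execute()
#   print upd.row_count  # affected-row count; -1 if the server reported none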
##
# Read a row from the database server, and return it in a dictionary
# indexed by column name/alias. This method will raise an error if two
# columns have the same name. Returns None after the last row.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
def read_dict(self):
row = self._fetch()
if row == None:
return row
retval = {}
for i in range(len(self._row_desc.fields)):
col_name = self._row_desc.fields[i]['name']
if retval.has_key(col_name):
raise InterfaceError("cannot return dict of row when two columns have the same name (%r)" % (col_name,))
retval[col_name] = row[i]
return retval
##
# Read a row from the database server, and return it as a tuple of values.
# Returns None after the last row.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
def read_tuple(self):
return self._fetch()
##
# Return an iterator for the output of this statement. The iterator will
# return a tuple for each row, in the same manner as {@link
# #PreparedStatement.read_tuple read_tuple}.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
def iterate_tuple(self):
return DataIterator(self, PreparedStatement.read_tuple)
##
# Return an iterator for the output of this statement. The iterator will
# return a dict for each row, in the same manner as {@link
# #PreparedStatement.read_dict read_dict}.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
def iterate_dict(self):
return DataIterator(self, PreparedStatement.read_dict)
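# A minimal usage sketch of the class above (the open Connection db, the
# table and the parameter value are all hypothetical):
#
#   stmt = PreparedStatement(db, "SELECT name FROM users WHERE id = $1",
#                            {"type": int, "value": 1})
#   stmt.execute(1)
#   for row in stmt.iterate_tuple():
#       print row
#   stmt.close()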
class SimpleStatement(PreparedStatement):
"Internal wrapper to Simple Query protocol emulating a PreparedStatement"
# This should be used internally only for trivial queries
# (not a true Prepared Statement, in fact it can have multiple statements)
# See Simple Query Protocol limitations and trade-offs (send_simple_query)
row_cache_size = None
def __init__(self, connection, statement):
if connection == None or connection.c == None:
raise InterfaceError("connection not provided")
self.c = connection.c
self._row_desc = None
self._cached_rows = []
self._ongoing_row_count = -1
self._command_complete = True
self.statement = statement
self._lock = threading.RLock()
def close(self):
# simple query doesn't have portals
pass
def execute(self, *args, **kwargs):
"Run the SQL simple query stataments"
self._lock.acquire()
try:
self._row_desc, cmd_complete, self._cached_rows = \
self.c.send_simple_query(self.statement, kwargs.get("stream"))
self._command_complete = True
self._ongoing_row_count = -1
if cmd_complete is not None and cmd_complete.rows is not None:
self._ongoing_row_count = cmd_complete.rows
finally:
self._lock.release()
def _fill_cache(self):
# data rows are already fetched in _cached_rows
pass
def _fetch(self):
if not self._row_desc:
raise ProgrammingError("no result set")
self._lock.acquire()
try:
if not self._cached_rows:
return None
row = self._cached_rows.pop(0)
return tuple(row)
finally:
self._lock.release()
def _get_row_count(self):
return self._ongoing_row_count
##
# The Cursor class allows multiple queries to be performed concurrently with a
# single PostgreSQL connection. The Cursor object is implemented internally by
# using a {@link PreparedStatement PreparedStatement} object, so if you plan to
# use a statement multiple times, you might as well create a PreparedStatement
# and save a small amount of reparsing time.
# <p>
# As of v1.01, instances of this class are thread-safe. See {@link
# PreparedStatement PreparedStatement} for more information.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
#
# @param connection An instance of {@link Connection Connection}.
class Cursor(object):
def __init__(self, connection):
self.connection = connection
self._stmt = None
def require_stmt(func):
def retval(self, *args, **kwargs):
if self._stmt == None:
raise ProgrammingError("attempting to use unexecuted cursor")
return func(self, *args, **kwargs)
return retval
row_description = property(lambda self: self._getRowDescription())
def _getRowDescription(self):
if self._stmt == None:
return None
return self._stmt.row_description
##
# Run an SQL statement using this cursor. The SQL statement can have
# parameters in the form of $1, $2, $3, etc., which will be filled in by
# the additional arguments passed to this function.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
# @param query The SQL statement to execute.
def execute(self, query, *args, **kwargs):
if self.connection.is_closed:
raise ConnectionClosedError()
self.connection._unnamed_prepared_statement_lock.acquire()
try:
if kwargs.get("simple_query"):
# no arguments and no statement name,
# use PostgreSQL Simple Query Protocol
## print "SimpleQuery:", query
self._stmt = SimpleStatement(self.connection, query)
else:
# use PostgreSQL Extended Query Protocol
self._stmt = PreparedStatement(self.connection, query, statement_name="", *[{"type": type(x), "value": x} for x in args])
self._stmt.execute(*args, **kwargs)
finally:
self.connection._unnamed_prepared_statement_lock.release()
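# A hedged example of the $n parameter style described above (the open
# Connection db, table and value are hypothetical):
#
#   cursor = Cursor(db)
#   cursor.execute("SELECT * FROM events WHERE id = $1", 42)
#   print cursor.read_dict()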
##
# Return a count of the number of rows currently being read. If possible,
# please avoid using this function. It requires reading the entire result
# set from the database to determine the number of rows being returned.
# <p>
# Stability: Added in v1.03, stability guaranteed for v1.xx.
# Implementation currently requires caching entire result set into memory,
# avoid using this property.
row_count = property(lambda self: self._get_row_count())
@require_stmt
def _get_row_count(self):
return self._stmt.row_count
##
# Read a row from the database server, and return it in a dictionary
# indexed by column name/alias. This method will raise an error if two
# columns have the same name. Returns None after the last row.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
@require_stmt
def read_dict(self):
return self._stmt.read_dict()
##
# Read a row from the database server, and return it as a tuple of values.
# Returns None after the last row.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
@require_stmt
def read_tuple(self):
return self._stmt.read_tuple()
##
# Return an iterator for the output of this statement. The iterator will
# return a tuple for each row, in the same manner as {@link
# #PreparedStatement.read_tuple read_tuple}.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
@require_stmt
def iterate_tuple(self):
return self._stmt.iterate_tuple()
##
# Return an iterator for the output of this statement. The iterator will
# return a dict for each row, in the same manner as {@link
# #PreparedStatement.read_dict read_dict}.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
@require_stmt
def iterate_dict(self):
return self._stmt.iterate_dict()
def close(self):
if self._stmt != None:
self._stmt.close()
self._stmt = None
##
# Return the fileno of the underlying socket for this cursor's connection.
# <p>
# Stability: Added in v1.07, stability guaranteed for v1.xx.
def fileno(self):
return self.connection.fileno()
##
# Poll the underlying socket for this cursor and sync if there is data waiting
# to be read. This has the effect of flushing asynchronous messages from the
# backend. Returns True if messages were read, False otherwise.
# <p>
# Stability: Added in v1.07, stability guaranteed for v1.xx.
def isready(self):
return self.connection.isready()
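# e.g. the fileno()/isready() pair lets a caller multiplex with select
# (a sketch; the cursor object is hypothetical):
#
#   import select
#   readable, _, _ = select.select([cursor], [], [], 1.0)
#   if readable:
#       cursor.isready()  # flushes pending NOTIFY/notice messages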
##
# This class represents a connection to a PostgreSQL database.
# <p>
# The database connection is derived from the {@link #Cursor Cursor} class,
# which provides a default cursor for running queries. It also provides
# transaction control via the 'begin', 'commit', and 'rollback' methods.
# Without beginning a transaction explicitly, all statements will autocommit to
# the database.
# <p>
# As of v1.01, instances of this class are thread-safe. See {@link
# PreparedStatement PreparedStatement} for more information.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
#
# @param user The username to connect to the PostgreSQL server with. This
# parameter is required.
#
# @keyparam host The hostname of the PostgreSQL server to connect with.
# Providing this parameter is necessary for TCP/IP connections. One of either
# host, or unix_sock, must be provided.
#
# @keyparam unix_sock The path to the UNIX socket to access the database
# through, for example, '/tmp/.s.PGSQL.5432'. One of either unix_sock or host
# must be provided. The port parameter will have no effect if unix_sock is
# provided.
#
# @keyparam port The TCP/IP port of the PostgreSQL server instance. This
# parameter defaults to 5432, the registered and common port of PostgreSQL
# TCP/IP servers.
#
# @keyparam database The name of the database instance to connect with. This
# parameter is optional, if omitted the PostgreSQL server will assume the
# database name is the same as the username.
#
# @keyparam password The user password to connect to the server with. This
# parameter is optional. If omitted, and the database server requests password
# based authentication, the connection will fail. On the other hand, if this
# parameter is provided and the database does not request password
# authentication, then the password will not be used.
#
# @keyparam socket_timeout Socket connect timeout measured in seconds.
# Defaults to 60 seconds.
#
# @keyparam ssl Use SSL encryption for TCP/IP socket. Defaults to False.
class Connection(Cursor):
def __init__(self, dsn="", user=None, host=None, unix_sock=None, port=5432, database=None, password=None, socket_timeout=60, ssl=False):
self._row_desc = None
if dsn:
# update connection parameters parsed from the conninfo dsn
opts = conninfo_parse(dsn)
database = opts.get("dbname", database)
user = opts.get("user", user)
password = opts.get("password", password)
host = opts.get("host", host)
port = int(opts.get("port", port))
ssl = opts.get("sslmode", 'disable') != 'disable'
try:
self.c = protocol.Connection(unix_sock=unix_sock, host=host, port=port, socket_timeout=socket_timeout, ssl=ssl)
self.c.authenticate(user, password=password, database=database)
except socket.error, e:
raise InterfaceError("communication error", e)
Cursor.__init__(self, self)
self._begin = PreparedStatement(self, "BEGIN TRANSACTION")
self._commit = PreparedStatement(self, "COMMIT TRANSACTION")
self._rollback = PreparedStatement(self, "ROLLBACK TRANSACTION")
self._unnamed_prepared_statement_lock = threading.RLock()
self.in_transaction = False
self.autocommit = False
##
# An event handler that is fired when NOTIFY occurs for a notification that
# has been LISTEN'd for. The value of this property is a
# util.MulticastDelegate. A callback can be added by using
# connection.NotificationReceived += SomeMethod. The method will be called
# with a single argument, an object that has properties: backend_pid,
# condition, and additional_info. Callbacks can be removed with the -=
# operator.
# <p>
# Stability: Added in v1.03, stability guaranteed for v1.xx.
NotificationReceived = property(
lambda self: getattr(self.c, "NotificationReceived"),
lambda self, value: setattr(self.c, "NotificationReceived", value)
)
##
# An event handler that is fired when the database server issues a notice.
# The value of this property is a util.MulticastDelegate. A callback can
# be added by using connection.NoticeReceived += SomeMethod. The
# method will be called with a single argument, an object that has
# properties: severity, code, msg, and possibly others (detail, hint,
# position, where, file, line, and routine). Callbacks can be removed with
# the -= operator.
# <p>
# Stability: Added in v1.03, stability guaranteed for v1.xx.
NoticeReceived = property(
lambda self: getattr(self.c, "NoticeReceived"),
lambda self, value: setattr(self.c, "NoticeReceived", value)
)
##
# An event handler that is fired when a runtime configuration option is
# changed on the server. The value of this property is a
# util.MulticastDelegate. A callback can be added by using
# connection.ParameterStatusReceived += SomeMethod. Callbacks can be removed
# with the -= operator. The method will be called with a single argument,
# an object that has properties "key" and "value".
# <p>
# Stability: Added in v1.03, stability guaranteed for v1.xx.
ParameterStatusReceived = property(
lambda self: getattr(self.c, "ParameterStatusReceived"),
lambda self, value: setattr(self.c, "ParameterStatusReceived", value)
)
##
# Begins a new transaction.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
def begin(self):
if self.is_closed:
raise ConnectionClosedError()
if self.autocommit:
return
self._begin.execute()
self.in_transaction = True
##
# Commits the running transaction.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
def commit(self):
if self.is_closed:
raise ConnectionClosedError()
self._commit.execute()
self.in_transaction = False
##
# Rolls back the running transaction.
# <p>
# Stability: Added in v1.00, stability guaranteed for v1.xx.
def rollback(self):
if self.is_closed:
raise ConnectionClosedError()
self._rollback.execute()
self.in_transaction = False
##
# Closes an open connection.
def close(self):
if self.is_closed:
raise ConnectionClosedError()
self.c.close()
self.c = None
is_closed = property(lambda self: self.c == None)
##
# Return the fileno of the underlying socket for this connection.
# <p>
# Stability: Added in v1.07, stability guaranteed for v1.xx.
def fileno(self):
return self.c.fileno()
##
# Poll the underlying socket for this connection and sync if there is data
# waiting to be read. This has the effect of flushing asynchronous
# messages from the backend. Returns True if messages were read, False
# otherwise.
# <p>
# Stability: Added in v1.07, stability guaranteed for v1.xx.
def isready(self):
return self.c.isready()
##
# Return the server_version as reported from the connected server.
# Raises InterfaceError if no version has been reported from the server.
def server_version(self):
return self.c.server_version()
def encoding(self, encoding=None):
"Returns the client_encoding as reported from the connected server"
return self.c.encoding()
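# A minimal end-to-end sketch of the connection API above (credentials,
# host and statements are hypothetical; error handling omitted):
#
#   db = Connection(user="postgres", host="localhost", database="test")
#   db.begin()
#   db.execute("UPDATE accounts SET balance = balance - $1 WHERE id = $2", 10, 1)
#   db.commit()
#   db.close()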
|
havard024/prego
|
refs/heads/master
|
venv/lib/python2.7/site-packages/jinja2/testsuite/api.py
|
15
|
# -*- coding: utf-8 -*-
"""
jinja2.testsuite.api
~~~~~~~~~~~~~~~~~~~~
Tests the public API and related stuff.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import time
import tempfile
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Environment, Undefined, DebugUndefined, \
StrictUndefined, UndefinedError, Template, meta, \
is_undefined, Template, DictLoader
from jinja2.utils import Cycler
env = Environment()
class ExtendedAPITestCase(JinjaTestCase):
def test_item_and_attribute(self):
from jinja2.sandbox import SandboxedEnvironment
for env in Environment(), SandboxedEnvironment():
# the |list is necessary for python3
tmpl = env.from_string('{{ foo.items()|list }}')
assert tmpl.render(foo={'items': 42}) == "[('items', 42)]"
tmpl = env.from_string('{{ foo|attr("items")()|list }}')
assert tmpl.render(foo={'items': 42}) == "[('items', 42)]"
tmpl = env.from_string('{{ foo["items"] }}')
assert tmpl.render(foo={'items': 42}) == '42'
def test_finalizer(self):
def finalize_none_empty(value):
if value is None:
value = u''
return value
env = Environment(finalize=finalize_none_empty)
tmpl = env.from_string('{% for item in seq %}|{{ item }}{% endfor %}')
assert tmpl.render(seq=(None, 1, "foo")) == '||1|foo'
tmpl = env.from_string('<{{ none }}>')
assert tmpl.render() == '<>'
def test_cycler(self):
items = 1, 2, 3
c = Cycler(*items)
for item in items + items:
assert c.current == item
assert c.next() == item
c.next()
assert c.current == 2
c.reset()
assert c.current == 1
def test_expressions(self):
expr = env.compile_expression("foo")
assert expr() is None
assert expr(foo=42) == 42
expr2 = env.compile_expression("foo", undefined_to_none=False)
assert is_undefined(expr2())
expr = env.compile_expression("42 + foo")
assert expr(foo=42) == 84
def test_template_passthrough(self):
t = Template('Content')
assert env.get_template(t) is t
assert env.select_template([t]) is t
assert env.get_or_select_template([t]) is t
assert env.get_or_select_template(t) is t
def test_autoescape_autoselect(self):
def select_autoescape(name):
if name is None or '.' not in name:
return False
return name.endswith('.html')
env = Environment(autoescape=select_autoescape,
loader=DictLoader({
'test.txt': '{{ foo }}',
'test.html': '{{ foo }}'
}))
t = env.get_template('test.txt')
assert t.render(foo='<foo>') == '<foo>'
t = env.get_template('test.html')
assert t.render(foo='<foo>') == '<foo>'
t = env.from_string('{{ foo }}')
assert t.render(foo='<foo>') == '<foo>'
class MetaTestCase(JinjaTestCase):
def test_find_undeclared_variables(self):
ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
x = meta.find_undeclared_variables(ast)
assert x == set(['bar'])
ast = env.parse('{% set foo = 42 %}{{ bar + foo }}'
'{% macro meh(x) %}{{ x }}{% endmacro %}'
'{% for item in seq %}{{ muh(item) + meh(seq) }}{% endfor %}')
x = meta.find_undeclared_variables(ast)
assert x == set(['bar', 'seq', 'muh'])
def test_find_referenced_templates(self):
ast = env.parse('{% extends "layout.html" %}{% include helper %}')
i = meta.find_referenced_templates(ast)
assert i.next() == 'layout.html'
assert i.next() is None
assert list(i) == []
ast = env.parse('{% extends "layout.html" %}'
'{% from "test.html" import a, b as c %}'
'{% import "meh.html" as meh %}'
'{% include "muh.html" %}')
i = meta.find_referenced_templates(ast)
assert list(i) == ['layout.html', 'test.html', 'meh.html', 'muh.html']
def test_find_included_templates(self):
ast = env.parse('{% include ["foo.html", "bar.html"] %}')
i = meta.find_referenced_templates(ast)
assert list(i) == ['foo.html', 'bar.html']
ast = env.parse('{% include ("foo.html", "bar.html") %}')
i = meta.find_referenced_templates(ast)
assert list(i) == ['foo.html', 'bar.html']
ast = env.parse('{% include ["foo.html", "bar.html", foo] %}')
i = meta.find_referenced_templates(ast)
assert list(i) == ['foo.html', 'bar.html', None]
ast = env.parse('{% include ("foo.html", "bar.html", foo) %}')
i = meta.find_referenced_templates(ast)
assert list(i) == ['foo.html', 'bar.html', None]
class StreamingTestCase(JinjaTestCase):
def test_basic_streaming(self):
tmpl = env.from_string("<ul>{% for item in seq %}<li>{{ loop.index "
"}} - {{ item }}</li>{%- endfor %}</ul>")
stream = tmpl.stream(seq=range(4))
self.assert_equal(stream.next(), '<ul>')
self.assert_equal(stream.next(), '<li>1 - 0</li>')
self.assert_equal(stream.next(), '<li>2 - 1</li>')
self.assert_equal(stream.next(), '<li>3 - 2</li>')
self.assert_equal(stream.next(), '<li>4 - 3</li>')
self.assert_equal(stream.next(), '</ul>')
def test_buffered_streaming(self):
tmpl = env.from_string("<ul>{% for item in seq %}<li>{{ loop.index "
"}} - {{ item }}</li>{%- endfor %}</ul>")
stream = tmpl.stream(seq=range(4))
stream.enable_buffering(size=3)
self.assert_equal(stream.next(), u'<ul><li>1 - 0</li><li>2 - 1</li>')
self.assert_equal(stream.next(), u'<li>3 - 2</li><li>4 - 3</li></ul>')
def test_streaming_behavior(self):
tmpl = env.from_string("")
stream = tmpl.stream()
assert not stream.buffered
stream.enable_buffering(20)
assert stream.buffered
stream.disable_buffering()
assert not stream.buffered
class UndefinedTestCase(JinjaTestCase):
def test_stopiteration_is_undefined(self):
def test():
raise StopIteration()
t = Template('A{{ test() }}B')
assert t.render(test=test) == 'AB'
t = Template('A{{ test().missingattribute }}B')
self.assert_raises(UndefinedError, t.render, test=test)
def test_default_undefined(self):
env = Environment(undefined=Undefined)
self.assert_equal(env.from_string('{{ missing }}').render(), u'')
self.assert_raises(UndefinedError,
env.from_string('{{ missing.attribute }}').render)
self.assert_equal(env.from_string('{{ missing|list }}').render(), '[]')
self.assert_equal(env.from_string('{{ missing is not defined }}').render(), 'True')
self.assert_equal(env.from_string('{{ foo.missing }}').render(foo=42), '')
self.assert_equal(env.from_string('{{ not missing }}').render(), 'True')
def test_debug_undefined(self):
env = Environment(undefined=DebugUndefined)
self.assert_equal(env.from_string('{{ missing }}').render(), '{{ missing }}')
self.assert_raises(UndefinedError,
env.from_string('{{ missing.attribute }}').render)
self.assert_equal(env.from_string('{{ missing|list }}').render(), '[]')
self.assert_equal(env.from_string('{{ missing is not defined }}').render(), 'True')
self.assert_equal(env.from_string('{{ foo.missing }}').render(foo=42),
u"{{ no such element: int object['missing'] }}")
self.assert_equal(env.from_string('{{ not missing }}').render(), 'True')
def test_strict_undefined(self):
env = Environment(undefined=StrictUndefined)
self.assert_raises(UndefinedError, env.from_string('{{ missing }}').render)
self.assert_raises(UndefinedError, env.from_string('{{ missing.attribute }}').render)
self.assert_raises(UndefinedError, env.from_string('{{ missing|list }}').render)
self.assert_equal(env.from_string('{{ missing is not defined }}').render(), 'True')
self.assert_raises(UndefinedError, env.from_string('{{ foo.missing }}').render, foo=42)
self.assert_raises(UndefinedError, env.from_string('{{ not missing }}').render)
def test_indexing_gives_undefined(self):
t = Template("{{ var[42].foo }}")
self.assert_raises(UndefinedError, t.render, var=0)
def test_none_gives_proper_error(self):
try:
Environment().getattr(None, 'split')()
except UndefinedError, e:
assert e.message == "'None' has no attribute 'split'"
else:
assert False, 'expected exception'
def test_object_repr(self):
try:
Undefined(obj=42, name='upper')()
except UndefinedError, e:
assert e.message == "'int object' has no attribute 'upper'"
else:
assert False, 'expected exception'
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ExtendedAPITestCase))
suite.addTest(unittest.makeSuite(MetaTestCase))
suite.addTest(unittest.makeSuite(StreamingTestCase))
suite.addTest(unittest.makeSuite(UndefinedTestCase))
return suite
|
Eluvatar/zombie-radar
|
refs/heads/master
|
location_monitor.py
|
1
|
# Simple module to track the location of nations
# Copyright (C) 2013-2015 Eluvatar
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from transmission.reception import subscribe
def on_departure(reg, nat):
pass
def on_arrival(reg, nat):
pass
def on_founded(nat):
pass
def on_refounded(nat):
pass
def handle_event(nat, reg_from, reg_to):
on_departure(reg_from, nat)
on_arrival(reg_to, nat)
@subscribe(pattern="@@(nation)@@ relocated from %%(region)%% to %%(region)%%.")
def handle_movement(event):
nat = event.group(1)
reg_from = event.group(2)
reg_to = event.group(3)
handle_event(nat, reg_from, reg_to)
@subscribe(pattern="@@(nation)@@ was founded in %%(region)%%.")
def handle_founded(event):
_handle_founded(event)
@subscribe(pattern="@@(nation)@@ was refounded in %%(region)%%.")
def handle_refounded(event):
_handle_founded(event)
def _handle_founded(event):
nat = event.group(1)
reg = event.group(2)
entry = {"name":nat}
on_founded(nat)
handle_event(nat, None, reg)
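# The no-op callbacks above are meant to be rebound by a consumer; since
# handle_event resolves them through module globals at call time, a sketch
# like this is enough (names hypothetical):
#
#   import location_monitor
#   def log_arrival(reg, nat):
#       print "%s arrived in %s" % (nat, reg)
#   location_monitor.on_arrival = log_arrival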
|
FabriceSalvaire/tex-calendar
|
refs/heads/master
|
doc/sunset-sunrise.py
|
1
|
import ephem
import datetime
obs = ephem.Observer()
obs.lat = '38.8'
obs.long= '-75.2'
start_date = datetime.datetime(2008, 1, 1)
end_date = datetime.datetime(2008, 12, 31)
td = datetime.timedelta(days=1)
sun = ephem.Sun()
sunrises = []
sunsets = []
dates = []
date = start_date
while date < end_date:
date += td
dates.append(date)
obs.date = date
rise_time = obs.next_rising(sun).datetime()
sunrises.append(rise_time)
set_time = obs.next_setting(sun).datetime()
sunsets.append(set_time)
# To plot day length in hours over the course of a year, first run the
# above code. Then (assuming you have matplotlib):
from pylab import *
daylens = []
for i in range(len(sunrises)):
timediff = sunsets[i] - sunrises[i]
hours = timediff.seconds / 60. / 60. # to get it in hours
daylens.append(hours)
plot(dates, daylens)
# if you have an older version of matplotlib, you may need
# to convert dates into numbers before plotting:
# dates = [date2num(i) for i in dates]
xlabel('Date')
ylabel('Hours')
title('Day length in 2008')
show()
|
omprakasha/odoo
|
refs/heads/8.0
|
addons/stock/report/stock_graph.py
|
326
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from pychart import *
import pychart.legend
import time
from openerp.report.misc import choice_colors
from openerp import tools
#
# Draw a graph for stocks
#
class stock_graph(object):
def __init__(self, io):
self._datas = {}
self._canvas = canvas.init(fname=io, format='pdf')
self._canvas.set_author("Odoo")
self._canvas.set_title("Stock Level Forecast")
self._names = {}
self.val_min = ''
self.val_max = ''
def add(self, product_id, product_name, datas):
if hasattr(product_name, 'replace'):
product_name=product_name.replace('/', '//')
if product_id not in self._datas:
self._datas[product_id] = {}
self._names[product_id] = tools.ustr(product_name)
for (dt,stock) in datas:
if not dt in self._datas[product_id]:
self._datas[product_id][dt]=0
self._datas[product_id][dt]+=stock
if self.val_min:
self.val_min = min(self.val_min,dt)
else:
self.val_min = dt
self.val_max = max(self.val_max,dt)
def draw(self):
colors = choice_colors(len(self._datas.keys()))
user_color = {}
for user in self._datas.keys():
user_color[user] = colors.pop()
val_min = int(time.mktime(time.strptime(self.val_min,'%Y-%m-%d')))
val_max = int(time.mktime(time.strptime(self.val_max,'%Y-%m-%d')))
plots = []
for product_id in self._datas:
f = fill_style.Plain()
f.bgcolor = user_color[product_id]
datas = self._datas[product_id].items()
datas = map(lambda x: (int(time.mktime(time.strptime(x[0],'%Y-%m-%d'))),x[1]), datas)
datas.sort()
datas2 = []
val = 0
for d in datas:
val+=d[1]
if len(datas2):
d2 = d[0]-60*61*24
if datas2[-1][0]<d2-1000:
datas2.append((d2,datas2[-1][1]))
datas2.append((d[0],val))
if len(datas2) and datas2[-1][0]<val_max-100:
datas2.append((val_max, datas2[-1][1]))
if len(datas2)==1:
datas2.append( (datas2[0][0]+100, datas2[0][1]) )
st = line_style.T()
st.color = user_color[product_id]
st.width = 1
st.cap_style=1
st.join_style=1
plot = line_plot.T(label=self._names[product_id], data=datas2, line_style=st)
plots.append(plot)
interval = max((val_max-val_min)/15, 86400)
x_axis = axis.X(format=lambda x:'/a60{}'+time.strftime('%Y-%m-%d',time.gmtime(x)), tic_interval=interval, label=None)
# Add the report header at the top of the report.
tb = text_box.T(loc=(300, 500), text="/hL/15/bStock Level Forecast", line_style=None)
tb.draw()
ar = area.T(size = (620,435), x_range=(val_min,val_max+1), y_axis = axis.Y(format="%d", label="Virtual Stock (Unit)"), x_axis=x_axis)
for plot in plots:
ar.add_plot(plot)
ar.draw(self._canvas)
def close(self):
self._canvas.close()
if __name__ == '__main__':
gt = stock_graph('test.pdf')
gt.add(1, 'Pomme', [('2005-07-29', 6), ('2005-07-30', -2), ('2005-07-31', 4)])
gt.add(2, 'Cailloux', [('2005-07-29', 9), ('2005-07-30', -4), ('2005-07-31', 2)])
gt.draw()
gt.close()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mcsalgado/ansible
|
refs/heads/devel
|
lib/ansible/parsing/splitter.py
|
118
|
# (c) 2014 James Cammarata, <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import codecs
from ansible.errors import AnsibleError
# Decode escapes adapted from rspeer's answer here:
# http://stackoverflow.com/questions/4020539/process-escape-sequences-in-a-string-in-python
_HEXCHAR = '[a-fA-F0-9]'
_ESCAPE_SEQUENCE_RE = re.compile(r'''
( \\U{0} # 8-digit hex escapes
| \\u{1} # 4-digit hex escapes
| \\x{2} # 2-digit hex escapes
| \\N\{{[^}}]+\}} # Unicode characters by name
| \\[\\'"abfnrtv] # Single-character escapes
)'''.format(_HEXCHAR*8, _HEXCHAR*4, _HEXCHAR*2), re.UNICODE | re.VERBOSE)
def _decode_escapes(s):
def decode_match(match):
return codecs.decode(match.group(0), 'unicode-escape')
return _ESCAPE_SEQUENCE_RE.sub(decode_match, s)
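# For example, only the escape sequences matched above are decoded; anything
# else passes through untouched (illustrative):
#
#   _decode_escapes(u'a\\tb') == u'a\tb'          # \t becomes a real tab
#   _decode_escapes(u'50\\% off') == u'50\\% off' # \% is not an escape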
def parse_kv(args, check_raw=False):
'''
Convert a string of key/value items to a dict. Any free-form params
(tokens without an '=') are collected into a special '_raw_params'
entry. If check_raw is set to True, key=value tokens other than the
known shell/command options are treated as free-form as well.
'''
### FIXME: args should already be a unicode string
from ansible.utils.unicode import to_unicode
args = to_unicode(args, nonstring='passthru')
options = {}
if args is not None:
try:
vargs = split_args(args)
except ValueError as ve:
if 'no closing quotation' in str(ve).lower():
raise AnsibleError("error parsing argument string, try quoting the entire line.")
else:
raise
raw_params = []
for x in vargs:
x = _decode_escapes(x)
if "=" in x:
pos = 0
try:
while True:
pos = x.index('=', pos + 1)
if pos > 0 and x[pos - 1] != '\\':
break
except ValueError:
# ran out of string, but we must have some escaped equals,
# so replace those and append this to the list of raw params
raw_params.append(x.replace('\\=', '='))
continue
k = x[:pos]
v = x[pos + 1:]
# only internal variables can start with an underscore, so
# we don't allow users to set them directly in arguments
if k.startswith('_'):
raise AnsibleError("invalid parameter specified: '%s'" % k)
# FIXME: make the retrieval of this list of shell/command
# options a function, so the list is centralized
if check_raw and k not in ('creates', 'removes', 'chdir', 'executable', 'warn'):
raw_params.append(x)
else:
options[k.strip()] = unquote(v.strip())
else:
raw_params.append(x)
# recombine the free-form params, if any were found, and assign
# them to a special option for use later by the shell/command module
if len(raw_params) > 0:
options[u'_raw_params'] = ' '.join(raw_params)
return options
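# A hedged example of the key=value form (paths and mode are hypothetical):
#
#   parse_kv(u'src=/tmp/src dest=/tmp/dest mode=0644')
#   # -> {u'src': u'/tmp/src', u'dest': u'/tmp/dest', u'mode': u'0644'}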
def _get_quote_state(token, quote_char):
'''
the goal of this block is to determine if the quoted string
is unterminated in which case it needs to be put back together
'''
# the char before the current one, used to see if
# the current character is escaped
prev_char = None
for idx, cur_char in enumerate(token):
if idx > 0:
prev_char = token[idx-1]
if cur_char in '"\'' and prev_char != '\\':
if quote_char:
if cur_char == quote_char:
quote_char = None
else:
quote_char = cur_char
return quote_char
def _count_jinja2_blocks(token, cur_depth, open_token, close_token):
'''
this function counts the number of opening/closing blocks for a
given opening/closing type and adjusts the current depth for that
block based on the difference
'''
num_open = token.count(open_token)
num_close = token.count(close_token)
if num_open != num_close:
cur_depth += (num_open - num_close)
if cur_depth < 0:
cur_depth = 0
return cur_depth
def split_args(args):
'''
Splits args on whitespace, but intelligently reassembles
those that may have been split over a jinja2 block or quotes.
When used in a remote module, we won't ever have to be concerned about
jinja2 blocks, however this function is/will be used in the
core portions as well before the args are templated.
example input: a=b c="foo bar"
example output: ['a=b', 'c="foo bar"']
# Basically this is a variation of shlex that has some more intelligence for
how Ansible needs to use it.
'''
# the list of params parsed out of the arg string
# this is going to be the result value when we are done
params = []
# Initial split on white space
args = args.strip()
items = args.strip().split('\n')
# iterate over the tokens, and reassemble any that may have been
# split on a space inside a jinja2 block.
# ex if tokens are "{{", "foo", "}}" these go together
# These variables are used
# to keep track of the state of the parsing, since blocks and quotes
# may be nested within each other.
quote_char = None
inside_quotes = False
print_depth = 0 # used to count nested jinja2 {{ }} blocks
block_depth = 0 # used to count nested jinja2 {% %} blocks
comment_depth = 0 # used to count nested jinja2 {# #} blocks
# now we loop over each split chunk, coalescing tokens if the white space
# split occurred within quotes or a jinja2 block of some kind
for itemidx,item in enumerate(items):
# we split on spaces and newlines separately, so that we
# can tell which character we split on for reassembly
# inside quotation characters
tokens = item.strip().split(' ')
line_continuation = False
for idx,token in enumerate(tokens):
# if we hit a line continuation character, but
# we're not inside quotes, ignore it and continue
# on to the next token while setting a flag
if token == '\\' and not inside_quotes:
line_continuation = True
continue
# store the previous quoting state for checking later
was_inside_quotes = inside_quotes
quote_char = _get_quote_state(token, quote_char)
inside_quotes = quote_char is not None
# multiple conditions may append a token to the list of params,
# so we keep track with this flag to make sure it only happens once
# append means add to the end of the list, don't append means concatenate
# it to the end of the last token
appended = False
# if we're inside quotes now, but weren't before, append the token
# to the end of the list, since we'll tack on more to it later
# otherwise, if we're inside any jinja2 block, inside quotes, or we were
# inside quotes (but aren't now) concat this token to the last param
if inside_quotes and not was_inside_quotes:
params.append(token)
appended = True
elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes:
if idx == 0 and was_inside_quotes:
params[-1] = "%s%s" % (params[-1], token)
elif len(tokens) > 1:
spacer = ''
if idx > 0:
spacer = ' '
params[-1] = "%s%s%s" % (params[-1], spacer, token)
else:
params[-1] = "%s\n%s" % (params[-1], token)
appended = True
# if the number of paired block tags is not the same, the depth has changed, so we calculate that here
# and may append the current token to the params (if we haven't previously done so)
prev_print_depth = print_depth
print_depth = _count_jinja2_blocks(token, print_depth, "{{", "}}")
if print_depth != prev_print_depth and not appended:
params.append(token)
appended = True
prev_block_depth = block_depth
block_depth = _count_jinja2_blocks(token, block_depth, "{%", "%}")
if block_depth != prev_block_depth and not appended:
params.append(token)
appended = True
prev_comment_depth = comment_depth
comment_depth = _count_jinja2_blocks(token, comment_depth, "{#", "#}")
if comment_depth != prev_comment_depth and not appended:
params.append(token)
appended = True
# finally, if we're at zero depth for all blocks and not inside quotes, and have not
# yet appended anything to the list of params, we do so now
if not (print_depth or block_depth or comment_depth) and not inside_quotes and not appended and token != '':
params.append(token)
# if this item is not the last one in the list, and we have more than
# one item (meaning we split on newlines), add a newline back here
# to preserve the original structure
if len(items) > 1 and itemidx != len(items) - 1 and not line_continuation:
params[-1] += '\n'
# always clear the line continuation flag
line_continuation = False
# If we're done and things are not at zero depth or we're still inside quotes,
# raise an error to indicate that the args were unbalanced
if print_depth or block_depth or comment_depth or inside_quotes:
raise Exception("error while splitting arguments, either an unbalanced jinja2 block or quotes")
return params
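# Illustrative cases for the reassembly logic above (inputs hypothetical):
#
#   split_args(u'a=b c="foo bar"')         # -> [u'a=b', u'c="foo bar"']
#   split_args(u'msg={{ greeting }} x=1')  # -> [u'msg={{ greeting }}', u'x=1']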
def is_quoted(data):
return len(data) > 1 and data[0] == data[-1] and data[0] in ('"', "'") and data[-2] != '\\'
def unquote(data):
''' removes first and last quotes from a string, if the string starts and ends with the same quotes '''
if is_quoted(data):
return data[1:-1]
return data
|
StefanRijnhart/odoo
|
refs/heads/master
|
addons/document/report/document_report.py
|
341
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp import tools
class report_document_user(osv.osv):
_name = "report.document.user"
_description = "Files details by Users"
_auto = False
_columns = {
'name': fields.char('Year', size=64,readonly=True),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month',readonly=True),
'user_id': fields.many2one('res.users', 'Owner', readonly=True),
'user': fields.related('user_id', 'name', type='char', size=64, readonly=True),
'directory': fields.char('Directory',size=64,readonly=True),
'datas_fname': fields.char('File Name',size=64,readonly=True),
'create_date': fields.datetime('Date Created', readonly=True),
'change_date': fields.datetime('Modified Date', readonly=True),
'file_size': fields.integer('File Size', readonly=True),
'nbr':fields.integer('# of Files', readonly=True),
'type':fields.char('Directory Type',size=64,readonly=True),
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'report_document_user')
cr.execute("""
CREATE OR REPLACE VIEW report_document_user as (
SELECT
min(f.id) as id,
to_char(f.create_date, 'YYYY') as name,
to_char(f.create_date, 'MM') as month,
f.user_id as user_id,
count(*) as nbr,
d.name as directory,
f.datas_fname as datas_fname,
f.create_date as create_date,
f.file_size as file_size,
min(d.type) as type,
f.write_date as change_date
FROM ir_attachment f
left join document_directory d on (f.parent_id=d.id and d.name<>'')
group by to_char(f.create_date, 'YYYY'), to_char(f.create_date, 'MM'),d.name,f.parent_id,d.type,f.create_date,f.user_id,f.file_size,d.type,f.write_date,f.datas_fname
)
""")
class report_document_file(osv.osv):
_name = "report.document.file"
_description = "Files details by Directory"
_auto = False
_columns = {
'file_size': fields.integer('File Size', readonly=True),
'nbr':fields.integer('# of Files', readonly=True),
'month': fields.char('Month', size=24, readonly=True),
}
_order = "month"
def init(self, cr):
tools.drop_view_if_exists(cr, 'report_document_file')
cr.execute("""
create or replace view report_document_file as (
select min(f.id) as id,
count(*) as nbr,
min(EXTRACT(MONTH FROM f.create_date)||'-'||to_char(f.create_date,'Month')) as month,
sum(f.file_size) as file_size
from ir_attachment f
group by EXTRACT(MONTH FROM f.create_date)
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
anand-c-goog/tensorflow
|
refs/heads/master
|
tensorflow/tools/pip_package/simple_console.py
|
603
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Start a simple interactive console with TensorFlow available."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import code
import sys
def main(_):
"""Run an interactive console."""
code.interact()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
dpetzold/django
|
refs/heads/master
|
django/apps/config.py
|
224
|
import os
from importlib import import_module
from django.core.exceptions import AppRegistryNotReady, ImproperlyConfigured
from django.utils._os import upath
from django.utils.module_loading import module_has_submodule
MODELS_MODULE_NAME = 'models'
class AppConfig(object):
"""
Class representing a Django application and its configuration.
"""
def __init__(self, app_name, app_module):
# Full Python path to the application eg. 'django.contrib.admin'.
self.name = app_name
# Root module for the application eg. <module 'django.contrib.admin'
# from 'django/contrib/admin/__init__.pyc'>.
self.module = app_module
# The following attributes could be defined at the class level in a
# subclass, hence the test-and-set pattern.
# Last component of the Python path to the application eg. 'admin'.
# This value must be unique across a Django project.
if not hasattr(self, 'label'):
self.label = app_name.rpartition(".")[2]
# Human-readable name for the application eg. "Admin".
if not hasattr(self, 'verbose_name'):
self.verbose_name = self.label.title()
# Filesystem path to the application directory eg.
# u'/usr/lib/python2.7/dist-packages/django/contrib/admin'. Unicode on
# Python 2 and a str on Python 3.
if not hasattr(self, 'path'):
self.path = self._path_from_module(app_module)
# Module containing models eg. <module 'django.contrib.admin.models'
# from 'django/contrib/admin/models.pyc'>. Set by import_models().
# None if the application doesn't have a models module.
self.models_module = None
# Mapping of lower case model names to model classes. Initially set to
# None to prevent accidental access before import_models() runs.
self.models = None
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.label)
def _path_from_module(self, module):
"""Attempt to determine app's filesystem path from its module."""
# See #21874 for extended discussion of the behavior of this method in
# various cases.
# Convert paths to list because Python 3's _NamespacePath does not
# support indexing.
paths = list(getattr(module, '__path__', []))
if len(paths) != 1:
filename = getattr(module, '__file__', None)
if filename is not None:
paths = [os.path.dirname(filename)]
else:
# For unknown reasons, sometimes the list returned by __path__
# contains duplicates that must be removed (#25246).
paths = list(set(paths))
if len(paths) > 1:
raise ImproperlyConfigured(
"The app module %r has multiple filesystem locations (%r); "
"you must configure this app with an AppConfig subclass "
"with a 'path' class attribute." % (module, paths))
elif not paths:
raise ImproperlyConfigured(
"The app module %r has no filesystem location, "
"you must configure this app with an AppConfig subclass "
"with a 'path' class attribute." % (module,))
return upath(paths[0])
@classmethod
def create(cls, entry):
"""
Factory that creates an app config from an entry in INSTALLED_APPS.
"""
try:
# If import_module succeeds, entry is a path to an app module,
# which may specify an app config class with default_app_config.
# Otherwise, entry is a path to an app config class or an error.
module = import_module(entry)
except ImportError:
# Track that importing as an app module failed. If importing as an
# app config class fails too, we'll trigger the ImportError again.
module = None
mod_path, _, cls_name = entry.rpartition('.')
# Raise the original exception when entry cannot be a path to an
# app config class.
if not mod_path:
raise
else:
try:
# If this works, the app module specifies an app config class.
entry = module.default_app_config
except AttributeError:
# Otherwise, it simply uses the default app config class.
return cls(entry, module)
else:
mod_path, _, cls_name = entry.rpartition('.')
# If we're reaching this point, we must attempt to load the app config
# class located at <mod_path>.<cls_name>
mod = import_module(mod_path)
try:
cls = getattr(mod, cls_name)
except AttributeError:
if module is None:
# If importing as an app module failed, that error probably
# contains the most informative traceback. Trigger it again.
import_module(entry)
else:
raise
# Check for obvious errors. (This check prevents duck typing, but
# it could be removed if it became a problem in practice.)
if not issubclass(cls, AppConfig):
raise ImproperlyConfigured(
"'%s' isn't a subclass of AppConfig." % entry)
# Obtain app name here rather than in AppClass.__init__ to keep
# all error checking for entries in INSTALLED_APPS in one place.
try:
app_name = cls.name
except AttributeError:
raise ImproperlyConfigured(
"'%s' must supply a name attribute." % entry)
# Ensure app_name points to a valid module.
app_module = import_module(app_name)
# Entry is a path to an app config class.
return cls(app_name, app_module)
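# Both INSTALLED_APPS entry styles end up here, e.g. (hypothetical app):
#
#   INSTALLED_APPS = [
#       'polls',                   # app module; may name a default_app_config
#       'polls.apps.PollsConfig',  # dotted path straight to an AppConfig subclass
#   ]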
def check_models_ready(self):
"""
Raises an exception if models haven't been imported yet.
"""
if self.models is None:
raise AppRegistryNotReady(
"Models for app '%s' haven't been imported yet." % self.label)
def get_model(self, model_name):
"""
Returns the model with the given case-insensitive model_name.
Raises LookupError if no model exists with this name.
"""
self.check_models_ready()
try:
return self.models[model_name.lower()]
except KeyError:
raise LookupError(
"App '%s' doesn't have a '%s' model." % (self.label, model_name))
def get_models(self, include_auto_created=False,
include_deferred=False, include_swapped=False):
"""
Returns an iterable of models.
By default, the following models aren't included:
- auto-created models for many-to-many relations without
an explicit intermediate table,
- models created to satisfy deferred attribute queries,
- models that have been swapped out.
Set the corresponding keyword argument to True to include such models.
Keyword arguments aren't documented; they're a private API.
"""
self.check_models_ready()
for model in self.models.values():
if model._deferred and not include_deferred:
continue
if model._meta.auto_created and not include_auto_created:
continue
if model._meta.swapped and not include_swapped:
continue
yield model
def import_models(self, all_models):
# Dictionary of models for this app, primarily maintained in the
# 'all_models' attribute of the Apps this AppConfig is attached to.
# Injected as a parameter because it gets populated when models are
# imported, which might happen before populate() imports models.
self.models = all_models
if module_has_submodule(self.module, MODELS_MODULE_NAME):
models_module_name = '%s.%s' % (self.name, MODELS_MODULE_NAME)
self.models_module = import_module(models_module_name)
def ready(self):
"""
Override this method in subclasses to run code when Django starts.
"""
|
tcheehow/MissionPlanner
|
refs/heads/master
|
Lib/encodings/cp037.py
|
93
|
""" Python Character Mapping Codec cp037 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP037.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp037',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
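# Example round trip through the tables below (the values follow directly
# from the mapping, e.g. 0xC1 -> 'A'):
#
#   u'ABC'.encode('cp037') == '\xc1\xc2\xc3'
#   '\xc1\xc2\xc3'.decode('cp037') == u'ABC'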
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> CONTROL
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> CONTROL
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> CONTROL
u'\x8d' # 0x09 -> CONTROL
u'\x8e' # 0x0A -> CONTROL
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> CONTROL
u'\x85' # 0x15 -> CONTROL
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> CONTROL
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> CONTROL
u'\x8f' # 0x1B -> CONTROL
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> CONTROL
u'\x81' # 0x21 -> CONTROL
u'\x82' # 0x22 -> CONTROL
u'\x83' # 0x23 -> CONTROL
u'\x84' # 0x24 -> CONTROL
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> CONTROL
u'\x89' # 0x29 -> CONTROL
u'\x8a' # 0x2A -> CONTROL
u'\x8b' # 0x2B -> CONTROL
u'\x8c' # 0x2C -> CONTROL
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> CONTROL
u'\x91' # 0x31 -> CONTROL
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> CONTROL
u'\x94' # 0x34 -> CONTROL
u'\x95' # 0x35 -> CONTROL
u'\x96' # 0x36 -> CONTROL
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> CONTROL
u'\x99' # 0x39 -> CONTROL
u'\x9a' # 0x3A -> CONTROL
u'\x9b' # 0x3B -> CONTROL
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> CONTROL
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\xa0' # 0x41 -> NO-BREAK SPACE
u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
u'\xa2' # 0x4A -> CENT SIGN
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'|' # 0x4F -> VERTICAL LINE
u'&' # 0x50 -> AMPERSAND
u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
u'!' # 0x5A -> EXCLAMATION MARK
u'$' # 0x5B -> DOLLAR SIGN
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'\xac' # 0x5F -> NOT SIGN
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xa6' # 0x6A -> BROKEN BAR
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
u'`' # 0x79 -> GRAVE ACCENT
u':' # 0x7A -> COLON
u'#' # 0x7B -> NUMBER SIGN
u'@' # 0x7C -> COMMERCIAL AT
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'"' # 0x7F -> QUOTATION MARK
u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
u'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
u'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
u'\xb1' # 0x8F -> PLUS-MINUS SIGN
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
u'\xb8' # 0x9D -> CEDILLA
u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
u'\xa4' # 0x9F -> CURRENCY SIGN
u'\xb5' # 0xA0 -> MICRO SIGN
u'~' # 0xA1 -> TILDE
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
u'\xbf' # 0xAB -> INVERTED QUESTION MARK
u'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
u'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
u'\xae' # 0xAF -> REGISTERED SIGN
u'^' # 0xB0 -> CIRCUMFLEX ACCENT
u'\xa3' # 0xB1 -> POUND SIGN
u'\xa5' # 0xB2 -> YEN SIGN
u'\xb7' # 0xB3 -> MIDDLE DOT
u'\xa9' # 0xB4 -> COPYRIGHT SIGN
u'\xa7' # 0xB5 -> SECTION SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
u'[' # 0xBA -> LEFT SQUARE BRACKET
u']' # 0xBB -> RIGHT SQUARE BRACKET
u'\xaf' # 0xBC -> MACRON
u'\xa8' # 0xBD -> DIAERESIS
u'\xb4' # 0xBE -> ACUTE ACCENT
u'\xd7' # 0xBF -> MULTIPLICATION SIGN
u'{' # 0xC0 -> LEFT CURLY BRACKET
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
u'}' # 0xD0 -> RIGHT CURLY BRACKET
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb9' # 0xDA -> SUPERSCRIPT ONE
u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\\' # 0xE0 -> REVERSE SOLIDUS
u'\xf7' # 0xE1 -> DIVISION SIGN
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
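# A minimal decode/encode sketch (illustrative only, not part of the generated
# codec file; the Codec classes gencodec normally emits are the real entry points):
#     codecs.charmap_decode(b'\xc1', 'strict', decoding_table)   # -> (u'A', 1)
#     codecs.charmap_encode(u'A', 'strict', encoding_table)      # -> (b'\xc1', 1)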
|
minhphung171093/OpenERP_V7
|
refs/heads/master
|
openerp/addons/mrp_repair/wizard/cancel_repair.py
|
52
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
class repair_cancel(osv.osv_memory):
_name = 'mrp.repair.cancel'
_description = 'Cancel Repair'
def cancel_repair(self, cr, uid, ids, context=None):
""" Cancels the repair
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return:
"""
if context is None:
context = {}
record_id = context and context.get('active_id', False) or False
assert record_id, _('Active ID not Found')
repair_order_obj = self.pool.get('mrp.repair')
repair_line_obj = self.pool.get('mrp.repair.line')
repair_order = repair_order_obj.browse(cr, uid, record_id, context=context)
if repair_order.invoiced or repair_order.invoice_method == 'none':
repair_order_obj.action_cancel(cr, uid, [record_id], context=context)
else:
raise osv.except_osv(_('Warning!'),_('Repair order is not invoiced.'))
return {'type': 'ir.actions.act_window_close'}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
""" Changes the view dynamically
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
@return: New arch of view.
"""
if context is None:
context = {}
res = super(repair_cancel, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=False)
record_id = context and context.get('active_id', False) or False
active_model = context.get('active_model')
if not record_id or (active_model and active_model != 'mrp.repair'):
return res
repair_order = self.pool.get('mrp.repair').browse(cr, uid, record_id, context=context)
if not repair_order.invoiced:
res['arch'] = """
<form string="Cancel Repair" version="7.0">
<header>
<button name="cancel_repair" string="_Yes" type="object" class="oe_highlight"/>
or
<button string="Cancel" class="oe_link" special="cancel"/>
</header>
<label string="Do you want to continue?"/>
</form>
"""
return res
repair_cancel()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
darkleons/BE
|
refs/heads/master
|
addons/payment_transfer/controllers/main.py
|
395
|
# -*- coding: utf-8 -*-
import logging
import pprint
import werkzeug
from openerp import http, SUPERUSER_ID
from openerp.http import request
_logger = logging.getLogger(__name__)
class OgoneController(http.Controller):
_accept_url = '/payment/transfer/feedback'
@http.route([
'/payment/transfer/feedback',
], type='http', auth='none')
def transfer_form_feedback(self, **post):
cr, uid, context = request.cr, SUPERUSER_ID, request.context
_logger.info('Beginning form_feedback with post data %s', pprint.pformat(post)) # debug
request.registry['payment.transaction'].form_feedback(cr, uid, post, 'transfer', context)
return werkzeug.utils.redirect(post.pop('return_url', '/'))
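# Illustrative request flow (summary comment, not part of the original file):
# the wire-transfer payment form posts its result to /payment/transfer/feedback;
# the handler above records it on payment.transaction via form_feedback() and
# then redirects the browser to post['return_url'], falling back to '/'.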
|
40223247/2015cd_midterm2
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/unittest/test/_test_warnings.py
|
858
|
# helper module for test_runner.Test_TextTestRunner.test_warnings
"""
This module has a number of tests that raise different kinds of warnings.
When the tests are run, the warnings are caught and their messages are printed
to stdout. This module also accepts an arg that is then passed to
unittest.main to affect the behavior of warnings.
Test_TextTestRunner.test_warnings executes this script with different
combinations of warnings args and -W flags and checks that the output is correct.
See #10535.
"""
import sys
import unittest
import warnings
def warnfun():
warnings.warn('rw', RuntimeWarning)
class TestWarnings(unittest.TestCase):
# unittest warnings will be printed at most once per type (max one message
# for the fail* methods, and one for the assert* methods)
def test_assert(self):
self.assertEquals(2+2, 4)
self.assertEquals(2*2, 4)
self.assertEquals(2**2, 4)
def test_fail(self):
self.failUnless(1)
self.failUnless(True)
def test_other_unittest(self):
self.assertAlmostEqual(2+2, 4)
self.assertNotAlmostEqual(4+4, 2)
# these warnings are normally silenced, but they are printed in unittest
def test_deprecation(self):
warnings.warn('dw', DeprecationWarning)
warnings.warn('dw', DeprecationWarning)
warnings.warn('dw', DeprecationWarning)
def test_import(self):
warnings.warn('iw', ImportWarning)
warnings.warn('iw', ImportWarning)
warnings.warn('iw', ImportWarning)
# user warnings should always be printed
def test_warning(self):
warnings.warn('uw')
warnings.warn('uw')
warnings.warn('uw')
# these warnings come from the same place; they will be printed
# only once by default or three times if the 'always' filter is used
def test_function(self):
warnfun()
warnfun()
warnfun()
if __name__ == '__main__':
with warnings.catch_warnings(record=True) as ws:
# if an arg is provided pass it to unittest.main as 'warnings'
if len(sys.argv) == 2:
unittest.main(exit=False, warnings=sys.argv.pop())
else:
unittest.main(exit=False)
# print all the warning messages collected
for w in ws:
print(w.message)
|
wzbozon/statsmodels
|
refs/heads/master
|
statsmodels/datasets/stackloss/data.py
|
25
|
"""Stack loss data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain. """
TITLE = __doc__
SOURCE = """
Brownlee, K. A. (1965), "Statistical Theory and Methodology in
Science and Engineering", 2nd edition, New York: Wiley.
"""
DESCRSHORT = """Stack loss plant data of Brownlee (1965)"""
DESCRLONG = """The stack loss plant data of Brownlee (1965) contains
21 days of measurements from a plant's oxidation of ammonia to nitric acid.
The nitric oxide pollutants are captured in an absorption tower."""
NOTE = """::
Number of Observations - 21
Number of Variables - 4
Variable name definitions::
STACKLOSS - 10 times the percentage of ammonia going into the plant
that escapes from the absorption column
AIRFLOW - Rate of operation of the plant
WATERTEMP - Cooling water temperature in the absorption tower
ACIDCONC - Acid concentration of circulating acid, minus 50, times 10.
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the stack loss data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray(data, endog_idx=0, dtype=float)
def load_pandas():
"""
Load the stack loss data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=0, dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/stackloss.csv',"rb"), delimiter=",",
names=True, dtype=float)
return data
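# Minimal usage sketch (assumes the usual statsmodels Dataset attributes
# ``endog``/``exog``; illustrative, not part of the module):
#     >>> from statsmodels.datasets import stackloss
#     >>> data = stackloss.load()
#     >>> data.endog[:3]            # STACKLOSS values, per endog_idx=0 above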
|
viaict/viaduct
|
refs/heads/develop
|
migrations/versions/2017_07_24_b8cea80e0a3a_add_requires_direct_payment_to_.py
|
1
|
"""Add requires_direct_payment to CustomForm.
Revision ID: b8cea80e0a3a
Revises: 5bc8d6e5633b
Create Date: 2017-07-24 14:26:57.045590
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b8cea80e0a3a'
down_revision = '5bc8d6e5633b'
def upgrade():
op.add_column('custom_form',
sa.Column('requires_direct_payment', sa.Boolean(),
nullable=False))
def downgrade():
op.drop_column('custom_form', 'requires_direct_payment')
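# Design note (sketch, not the committed migration): on a table that already
# contains rows, adding a NOT NULL column needs a server_default to succeed,
# e.g.
#     sa.Column('requires_direct_payment', sa.Boolean(),
#               nullable=False, server_default=sa.false())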
|
j-marjanovic/myhdl
|
refs/heads/master
|
myhdl/test/conversion/toVHDL/test_signed.py
|
1
|
from __future__ import absolute_import
import os
path = os.path
import random
from random import randrange
from myhdl import *
from myhdl.conversion import verify
NRTESTS = 10
def binaryOps(
Bitand,
## Bitor,
## Bitxor,
## FloorDiv,
LeftShift,
Modulo,
Mul,
## Pow,
RightShift,
Sub,
Sum, Sum1, Sum2, Sum3,
EQ,
NE,
LT,
GT,
LE,
GE,
BoolAnd,
BoolOr,
left, right, aBit):
@instance
def logic():
while 1:
yield left, right, aBit
## Bitand.next = left & right
## Bitor.next = left | right
## Bitxor.next = left ^ right
## if right != 0:
## FloorDiv.next = left // right
# Keep left shifts smaller than 2** 31 for VHDL's to_integer
if left < 256 and right < 22 and right >= 0:
LeftShift.next = left << right
## if right != 0:
## Modulo.next = left % right
Mul.next = left * right
## # Icarus doesn't support ** yet
## #if left < 256 and right < 22:
## # Pow.next = left ** right
## Pow.next = 0
## if right >= -0:
## RightShift.next = left >> right
## RightShift.next = left
Sub.next = left - right
Sum.next = left + right
Sum1.next = left + right[2:]
Sum2.next = left + right[1]
Sum3.next = left + aBit
EQ.next = left == right
NE.next = left != right
LT.next = left < right
GT.next = left > right
LE.next = left <= right
GE.next = left >= right
BoolAnd.next = bool(left) and bool(right)
BoolOr.next = bool(left) or bool(right)
return logic
def binaryBench(Ll, Ml, Lr, Mr):
seqL = []
seqR = []
for i in range(NRTESTS):
seqL.append(randrange(Ll, Ml))
seqR.append(randrange(Lr, Mr))
for j, k in ((Ll, Lr), (Ml-1, Mr-1), (Ll, Mr-1), (Ml-1, Lr)):
seqL.append(j)
seqR.append(k)
seqL = tuple(seqL)
seqR = tuple(seqR)
aBit = Signal(bool(0))
left = Signal(intbv(Ll, min=Ll, max=Ml))
right = Signal(intbv(Lr, min=Lr, max=Mr))
M = 2**14
Bitand = Signal(intbv(0, min=-2**17, max=2**17))
## Bitand_v = Signal(intbv(0, min=-2**17, max=2**17))
## Bitor = Signal(intbv(0)[max(m, n):])
## Bitor_v = Signal(intbv(0)[max(m, n):])
## Bitxor = Signal(intbv(0)[max(m, n):])
## Bitxor_v = Signal(intbv(0)[max(m, n):])
## FloorDiv = Signal(intbv(0)[m:])
## FloorDiv_v = Signal(intbv(0)[m:])
LeftShift = Signal(intbv(0, min=-2**64, max=2**64))
Modulo = Signal(intbv(0)[M:])
Mul = Signal(intbv(0, min=-2**17, max=2**17))
## Pow = Signal(intbv(0)[64:])
RightShift = Signal(intbv(0, min=-M, max=M))
Sub, Sub1, Sub2, Sub3 = [Signal(intbv(min=-M, max=M)) for i in range(4)]
Sum, Sum1, Sum2, Sum3 = [Signal(intbv(min=-M, max=M)) for i in range(4)]
EQ, NE, LT, GT, LE, GE = [Signal(bool()) for i in range(6)]
BoolAnd, BoolOr = [Signal(bool()) for i in range(2)]
binops = binaryOps(
Bitand,
## Bitor,
## Bitxor,
## FloorDiv,
LeftShift,
Modulo,
Mul,
## Pow,
RightShift,
Sub,
Sum, Sum1, Sum2, Sum3,
EQ,
NE,
LT,
GT,
LE,
GE,
BoolAnd,
BoolOr,
left, right, aBit)
@instance
def stimulus():
for i in range(len(seqL)):
left.next = seqL[i]
right.next = seqR[i]
yield delay(10)
@instance
def check():
while 1:
yield left, right
aBit.next = not aBit
yield delay(1)
#print "%s %s %s %s" % (left, right, Mul, Mul_v)
#print "%s %s %s %s" % (left, right, bin(Mul), bin(Mul_v))
#print "%s %s %s %s" % (left, right, Sum, Sum_v)
#print "%s %s %s %s" % (left, right, bin(Sum), bin(Sum_v))
## print left
## print right
## print bin(left)
## print bin(right)
## print bin(Bitand)
## print bin(Bitand_v)
## print Bitand
## print Bitand_v
## self.assertEqual(Bitand, Bitand_v)
#w = len(Bitand)
#self.assertEqual(bin(Bitand, w), bin(Bitand_v,w ))
## self.assertEqual(Bitor, Bitor_v)
## self.assertEqual(Bitxor, Bitxor_v)
## ## self.assertEqual(FloorDiv, FloorDiv_v)
print(LeftShift)
# print Modulo
print(Mul)
# self.assertEqual(Pow, Pow_v)
print(RightShift)
print(Sub)
print(Sum)
print(Sum1)
print(Sum2)
print(Sum3)
print(int(EQ))
print(int(NE))
print(int(LT))
print(int(GT))
print(int(LE))
print(int(GE))
print(int(BoolAnd))
print(int(BoolOr))
return binops, stimulus, check
def checkBinaryOps( Ll, Ml, Lr, Mr):
assert verify(binaryBench, Ll, Ml, Lr, Mr ) == 0
def testBinaryOps():
for Ll, Ml, Lr, Mr in (
(-254, 236, 0, 4),
(-128, 128, -128, 128),
(-53, 25, -23, 123),
(-23, 145, -66, 12),
(23, 34, -34, -16),
(-54, -20, 45, 73),
(-25, -12, -123, -66),
):
yield checkBinaryOps, Ll, Ml, Lr, Mr
def unaryOps(
BoolNot,
Invert,
UnaryAdd,
UnarySub,
arg):
@instance
def logic():
while 1:
yield arg
# BoolNot.next = not arg
Invert.next = ~arg
# UnaryAdd.next = +arg
UnarySub.next = --arg
return logic
def unaryBench( m):
M = 2**m
seqM = tuple([i for i in range(-M, M)])
arg = Signal(intbv(0, min=-M, max=+M))
BoolNot = Signal(bool(0))
Invert = Signal(intbv(0, min=-M, max=+M))
UnaryAdd = Signal(intbv(0, min=-M, max=+M))
UnarySub = Signal(intbv(0, min=-M, max=+M))
unaryops = unaryOps(
BoolNot,
Invert,
UnaryAdd,
UnarySub,
arg)
@instance
def stimulus():
for i in range(len(seqM)):
arg.next = seqM[i]
yield delay(10)
raise StopSimulation
@instance
def check():
while 1:
yield arg
yield delay(1)
# print BoolNot
print(Invert)
# print UnaryAdd
print(UnarySub)
return unaryops, stimulus, check
def checkUnaryOps(m):
assert verify(unaryBench, m) == 0
def testUnaryOps():
for m in (4, 7):
yield checkUnaryOps, m
def augmOps(
## Bitand,
## Bitor,
## Bitxor,
## FloorDiv,
LeftShift,
## Modulo,
Mul,
RightShift,
Sub,
Sum,
left, right):
M = 2**17
N = 2**64
@instance
def logic():
var = intbv(0, min=-M, max=+M)
var2 = intbv(0, min=-N, max=+N)
while 1:
yield left, right
## var[:] = left
## var &= right
## Bitand.next = var
## var[:] = left
## var |= right
## Bitor.next = var
## var[:] = left
## var ^= left
## Bitxor.next = var
## if right != 0:
## var[:] = left
## var //= right
## FloorDiv.next = var
if left < 256 and right < 22 and right >= 0:
var2[:] = left
var2 <<= right
LeftShift.next = var2
## if right != 0:
## var[:] = left
## var %= right
## Modulo.next = var
var[:] = left
var *= right
Mul.next = var
var[:] = left
if right >= 0:
var >>= right
RightShift.next = var
var[:] = left
var -= right
Sub.next = var
var[:] = left
var += right
Sum.next = var
return logic
def augmBench( Ll, Ml, Lr, Mr):
M = 2**17
seqL = []
seqR = []
for i in range(NRTESTS):
seqL.append(randrange(Ll, Ml))
seqR.append(randrange(Lr, Mr))
for j, k in ((Ll, Lr), (Ml-1, Mr-1), (Ll, Mr-1), (Ml-1, Lr)):
seqL.append(j)
seqR.append(k)
seqL = tuple(seqL)
seqR = tuple(seqR)
left = Signal(intbv(Ll, min=Ll, max=Ml))
right = Signal(intbv(Lr, min=Lr, max=Mr))
## Bitand = Signal(intbv(0)[max(m, n):])
## Bitor = Signal(intbv(0)[max(m, n):])
## Bitxor = Signal(intbv(0)[max(m, n):])
## FloorDiv = Signal(intbv(0)[m:])
LeftShift = Signal(intbv(0, min=-2**64, max=2**64))
## Modulo = Signal(intbv(0)[m:])
Mul = Signal(intbv(0, min=-M, max=+M))
RightShift = Signal(intbv(0, min=-M, max=+M))
Sub = Signal(intbv(0, min=-M, max=+M))
Sum = Signal(intbv(0, min=-M, max=+M))
augmops = augmOps(
## Bitand,
## Bitor,
## Bitxor,
## FloorDiv,
LeftShift,
## Modulo,
Mul,
RightShift,
Sub,
Sum,
left, right)
@instance
def stimulus():
for i in range(len(seqL)):
left.next = seqL[i]
right.next = seqR[i]
yield delay(10)
@instance
def check():
while 1:
yield left, right
yield delay(1)
# print "%s %s %s %s" % (left, right, Or, Or_v)
## self.assertEqual(Bitand, Bitand_v)
## self.assertEqual(Bitor, Bitor_v)
## self.assertEqual(Bitxor, Bitxor_v)
## self.assertEqual(FloorDiv, FloorDiv_v)
print(LeftShift)
## self.assertEqual(Modulo, Modulo_v)
print(Mul)
print(RightShift)
print(Sub)
print(Sum)
return augmops, stimulus, check
def checkAugmOps( Ll, Ml, Lr, Mr):
assert verify(augmBench, Ll, Ml, Lr, Mr) == 0
def testAugmOps():
for Ll, Ml, Lr, Mr in (
(-254, 236, 0, 4),
(-128, 128, -128, 128),
(-53, 25, -23, 123),
(-23, 145, -66, 12),
(23, 34, -34, -16),
(-54, -20, 45, 73),
(-25, -12, -123, -66),
):
yield checkAugmOps, Ll, Ml, Lr, Mr
def expressions(a, b, clk):
c = Signal(intbv(0, min=0, max=47))
e = Signal(bool())
@instance
def logic():
d = intbv(0, min=-23, max=43)
d[:] = -17
c.next = 5
yield clk.posedge
a.next = c + 1
b.next = c + 1
yield clk.posedge
a.next = c + -10
b.next = c + -1
yield clk.posedge
a.next = c < -10
b.next = c < -1
yield clk.posedge
a.next = d + c
b.next = d >= c
yield clk.posedge
## a.next = d & c
## b.next = c + (d & c)
yield clk.posedge
a.next = d + -c
b.next = c + (-d)
yield clk.posedge
a.next = -d
yield clk.posedge
a.next = -c
yield clk.posedge
c.next = 46
yield clk.posedge
a.next = ~d + 1
b.next = ~c + 1
yield clk.posedge
a.next = ~c + 1
b.next = ~d + 1
yield clk.posedge
raise StopSimulation
return logic
def expressionsBench():
a = Signal(intbv(0, min=-34, max=47))
b = Signal(intbv(0, min=0, max=47))
clk = Signal(bool())
expr = expressions(a, b, clk)
@instance
def check():
while 1:
yield clk.posedge
yield delay(1)
print(int(a))
print(int(b))
@instance
def clkgen():
while True:
yield delay(10)
clk.next = not clk
return expr, check, clkgen
def testExpressions():
assert verify(expressionsBench) == 0
|
home-assistant/home-assistant
|
refs/heads/dev
|
tests/components/plugwise/test_binary_sensor.py
|
2
|
"""Tests for the Plugwise binary_sensor integration."""
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import STATE_OFF, STATE_ON
from tests.components.plugwise.common import async_init_integration
async def test_anna_climate_binary_sensor_entities(hass, mock_smile_anna):
"""Test creation of climate related binary_sensor entities."""
entry = await async_init_integration(hass, mock_smile_anna)
assert entry.state is ConfigEntryState.LOADED
state = hass.states.get("binary_sensor.auxiliary_slave_boiler_state")
assert str(state.state) == STATE_OFF
state = hass.states.get("binary_sensor.auxiliary_dhw_state")
assert str(state.state) == STATE_OFF
async def test_anna_climate_binary_sensor_change(hass, mock_smile_anna):
"""Test change of climate related binary_sensor entities."""
entry = await async_init_integration(hass, mock_smile_anna)
assert entry.state is ConfigEntryState.LOADED
hass.states.async_set("binary_sensor.auxiliary_dhw_state", STATE_ON, {})
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.auxiliary_dhw_state")
assert str(state.state) == STATE_ON
await hass.helpers.entity_component.async_update_entity(
"binary_sensor.auxiliary_dhw_state"
)
state = hass.states.get("binary_sensor.auxiliary_dhw_state")
assert str(state.state) == STATE_OFF
async def test_adam_climate_binary_sensor_change(hass, mock_smile_adam):
"""Test change of climate related binary_sensor entities."""
entry = await async_init_integration(hass, mock_smile_adam)
assert entry.state is ConfigEntryState.LOADED
state = hass.states.get("binary_sensor.adam_plugwise_notification")
assert str(state.state) == STATE_ON
assert "unreachable" in state.attributes.get("warning_msg")[0]
assert not state.attributes.get("error_msg")
assert not state.attributes.get("other_msg")
|
leeon/annotated-django
|
refs/heads/note
|
django/contrib/formtools/tests/wizard/storage.py
|
6
|
from datetime import datetime
from importlib import import_module
import os
import tempfile
from django.http import HttpRequest
from django.conf import settings
from django.contrib.auth.models import User
from django.core.files.storage import FileSystemStorage
from django.core.files.uploadedfile import SimpleUploadedFile
temp_storage_location = tempfile.mkdtemp(dir=os.environ.get('DJANGO_TEST_TEMP_DIR'))
temp_storage = FileSystemStorage(location=temp_storage_location)
def get_request():
request = HttpRequest()
engine = import_module(settings.SESSION_ENGINE)
request.session = engine.SessionStore(None)
return request
class TestStorage(object):
def setUp(self):
self.testuser, created = User.objects.get_or_create(username='testuser1')
def test_current_step(self):
request = get_request()
storage = self.get_storage()('wizard1', request, None)
my_step = 2
self.assertEqual(storage.current_step, None)
storage.current_step = my_step
self.assertEqual(storage.current_step, my_step)
storage.reset()
self.assertEqual(storage.current_step, None)
storage.current_step = my_step
storage2 = self.get_storage()('wizard2', request, None)
self.assertEqual(storage2.current_step, None)
def test_step_data(self):
request = get_request()
storage = self.get_storage()('wizard1', request, None)
step1 = 'start'
step_data1 = {'field1': 'data1',
'field2': 'data2',
'field3': datetime.now(),
'field4': self.testuser}
self.assertEqual(storage.get_step_data(step1), None)
storage.set_step_data(step1, step_data1)
self.assertEqual(storage.get_step_data(step1), step_data1)
storage.reset()
self.assertEqual(storage.get_step_data(step1), None)
storage.set_step_data(step1, step_data1)
storage2 = self.get_storage()('wizard2', request, None)
self.assertEqual(storage2.get_step_data(step1), None)
def test_extra_context(self):
request = get_request()
storage = self.get_storage()('wizard1', request, None)
extra_context = {'key1': 'data1',
'key2': 'data2',
'key3': datetime.now(),
'key4': self.testuser}
self.assertEqual(storage.extra_data, {})
storage.extra_data = extra_context
self.assertEqual(storage.extra_data, extra_context)
storage.reset()
self.assertEqual(storage.extra_data, {})
storage.extra_data = extra_context
storage2 = self.get_storage()('wizard2', request, None)
self.assertEqual(storage2.extra_data, {})
def test_extra_context_key_persistence(self):
request = get_request()
storage = self.get_storage()('wizard1', request, None)
self.assertFalse('test' in storage.extra_data)
storage.extra_data['test'] = True
self.assertTrue('test' in storage.extra_data)
def test_reset_deletes_tmp_files(self):
request = get_request()
storage = self.get_storage()('wizard1', request, temp_storage)
step = 'start'
file_ = SimpleUploadedFile('file.txt', b'content')
storage.set_step_files(step, {'file': file_})
tmp_name = storage.get_step_files(step)['file'].name
self.assertTrue(storage.file_storage.exists(tmp_name))
storage.reset()
self.assertFalse(storage.file_storage.exists(tmp_name))
|
smilezino/shadowsocks
|
refs/heads/master
|
tests/graceful_cli.py
|
977
|
#!/usr/bin/python
import socks
import time
SERVER_IP = '127.0.0.1'
SERVER_PORT = 8001
if __name__ == '__main__':
s = socks.socksocket()
s.set_proxy(socks.SOCKS5, SERVER_IP, 1081)
s.connect((SERVER_IP, SERVER_PORT))
s.send(b'test')
time.sleep(30)
s.close()
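# What this exercises (summary, not in the original script): the client opens a
# TCP connection through the local SOCKS5 proxy on port 1081, sends b'test' to
# 127.0.0.1:8001, then idles for 30 seconds so a graceful server shutdown can
# be observed while a connection is still live.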
|
libvirt/autotest
|
refs/heads/master
|
client/bin/net/net_tc_unittest.py
|
1
|
#!/usr/bin/python
# TODO(chavey) complete all the unit tests in this file
import unittest, os, socket, time, sys
try:
import autotest.common as common
except ImportError:
import common
from autotest_lib.client.bin import utils
from autotest_lib.client.bin.net import net_tc, net_utils, net_utils_mock
from autotest_lib.client.common_lib.test_utils import mock
from autotest_lib.client.common_lib import error
class TestNetUtils(unittest.TestCase):
def setUp(self):
self.god = mock.mock_god()
self.god.stub_function(utils, "system")
self.god.stub_function(utils, "system_output")
os.environ['AUTODIR'] = "autodir"
def tearDown(self):
self.god.unstub_all()
del os.environ['AUTODIR']
#
# test tcclass
#
def test_tcclass_get_leaf_qdisc(self):
pass
def test_tcclass_get_parent_class(self):
pass
def test_tcclass_set_parent_class(self):
pass
def test_tcclass_get_minor(self):
pass
def test_tcclass_id(self):
pass
def test_tcclass_add_child(self):
pass
def test_tcclass_setup(self):
pass
def test_tcclass_restore(self):
pass
#
# test tcfilter
#
def test_tcfilter_get_parent_qdisc(self):
pass
def test_tcfilter_set_parent_qdisc(self):
pass
def test_tcfilter_get_dest_qdisc(self):
pass
def test_tcfilter_set_dest_qdisc(self):
pass
def test_tcfilter_get_protocol(self):
pass
def test_tcfilter_set_protocol(self):
pass
def test_tcfilter_get_priority(self):
pass
def test_tcfilter_set_priority(self):
pass
def test_tcfilter_get_handle(self):
pass
def test_tcfilter_set_handle(self):
pass
def test_tcfilter_tc_cmd(self):
pass
def test_tcfilter_setup(self):
pass
def test_tcfilter_restore(self):
pass
#
# test u32filter
#
def test_u32filter_add_rule(self):
pass
def test_u32filter_setup(self):
pass
def test_u32filter_restore(self):
pass
#
# test qdisc
#
def test_qdisc_add_class(self):
pass
def test_qdisc_add_filter(self):
pass
def test_qdisc_setup(self):
pass
def test_qdisc_restore(self):
pass
#
# test prio
#
def test_prio_setup(self):
pass
def test_prio_get_class(self):
pass
#
# test pfifo
#
def test_pfifo_setup(self):
pass
#
# test netem
#
def test_netem_add_param(self):
pass
def test_netem_setup(self):
pass
if __name__ == "__main__":
unittest.main()
|
6112/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/wptserve/tests/functional/test_server.py
|
299
|
import os
import unittest
import urllib2
import json
import wptserve
from base import TestUsingServer, doc_root
class TestFileHandler(TestUsingServer):
def test_not_handled(self):
with self.assertRaises(urllib2.HTTPError) as cm:
resp = self.request("/not_existing")
self.assertEquals(cm.exception.code, 404)
class TestRewriter(TestUsingServer):
def test_rewrite(self):
@wptserve.handlers.handler
def handler(request, response):
return request.request_path
route = ("GET", "/test/rewritten", handler)
self.server.rewriter.register("GET", "/test/original", route[1])
self.server.router.register(*route)
resp = self.request("/test/original")
self.assertEquals(200, resp.getcode())
self.assertEquals("/test/rewritten", resp.read())
class TestRequestHandler(TestUsingServer):
def test_exception(self):
@wptserve.handlers.handler
def handler(request, response):
raise Exception
route = ("GET", "/test/raises", handler)
self.server.router.register(*route)
with self.assertRaises(urllib2.HTTPError) as cm:
resp = self.request("/test/raises")
self.assertEquals(cm.exception.code, 500)
if __name__ == "__main__":
unittest.main()
|
eicher31/compassion-switzerland
|
refs/heads/10.0
|
sbc_switzerland/reports/translation_yearly_report.py
|
3
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2018 Compassion CH (http://www.compassion.ch)
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import models
class TranslationYearlyReport(models.Model):
_inherit = "translation.daily.report" # pylint: disable=R7980
_name = "translation.yearly.report"
_table = "translation_yearly_report"
_description = "Yearly translations report"
def _date_format(self):
"""
Used to aggregate data in various formats (in subclasses).
:return: (date_trunc value, date format)
"""
return 'year', 'YYYY'
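# Illustrative subclass override (hypothetical, not in the repository): a
# monthly variant of this report could return ('month', 'MM YYYY'), feeding
# PostgreSQL's date_trunc()/to_char() so rows aggregate per month instead of
# per year.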
|
neuroidss/nupic
|
refs/heads/master
|
src/nupic/support/decorators.py
|
28
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import functools
import logging
import sys
import time
import traceback
# TODO Need unit tests
def logExceptions(logger=None):
""" Returns a closure suitable for use as function/method decorator for
logging exceptions that leave the scope of the decorated function. Exceptions
are logged at ERROR level.
logger: user-supplied logger instance. Defaults to logging.getLogger(__name__).
Usage Example:
NOTE: logging must be initialized *before* any loggers are created, else
there will be no output; see nupic.support.initLogging()
@logExceptions()
def myFunctionFoo():
...
raise RuntimeError("something bad happened")
...
"""
logger = (logger if logger is not None else logging.getLogger(__name__))
def exceptionLoggingDecorator(func):
@functools.wraps(func)
def exceptionLoggingWrap(*args, **kwargs):
try:
return func(*args, **kwargs)
except:
logger.exception(
"Unhandled exception %r from %r. Caller stack:\n%s",
sys.exc_info()[1], func, ''.join(traceback.format_stack()), )
raise
return exceptionLoggingWrap
return exceptionLoggingDecorator
def logEntryExit(getLoggerCallback=logging.getLogger,
entryExitLogLevel=logging.DEBUG, logArgs=False,
logTraceback=False):
""" Returns a closure suitable for use as function/method decorator for
logging entry/exit of function/method.
getLoggerCallback: user-supplied callback function that takes no args and
returns the logger instance to use for logging.
entryExitLogLevel: Log level for logging entry/exit of decorated function;
e.g., logging.DEBUG; pass None to disable entry/exit
logging.
logArgs: If True, also log args
logTraceback: If True, also log Traceback information
Usage Examples:
NOTE: logging must be initialized *before* any loggers are created, else
there will be no output; see nupic.support.initLogging()
@logEntryExit()
def myFunctionBar():
...
@logEntryExit(logTraceback=True)
@logExceptions()
def myFunctionGamma():
...
raise RuntimeError("something bad happened")
...
"""
def entryExitLoggingDecorator(func):
@functools.wraps(func)
def entryExitLoggingWrap(*args, **kwargs):
if entryExitLogLevel is None:
enabled = False
else:
logger = getLoggerCallback()
enabled = logger.isEnabledFor(entryExitLogLevel)
if not enabled:
return func(*args, **kwargs)
funcName = str(func)
if logArgs:
argsRepr = ', '.join(
[repr(a) for a in args] +
['%s=%r' % (k,v,) for k,v in kwargs.iteritems()])
else:
argsRepr = ''
logger.log(
entryExitLogLevel, "ENTERING: %s(%s)%s", funcName, argsRepr,
'' if not logTraceback else '; ' + repr(traceback.format_stack()))
try:
return func(*args, **kwargs)
finally:
logger.log(
entryExitLogLevel, "LEAVING: %s(%s)%s", funcName, argsRepr,
'' if not logTraceback else '; ' + repr(traceback.format_stack()))
return entryExitLoggingWrap
return entryExitLoggingDecorator
def retry(timeoutSec, initialRetryDelaySec, maxRetryDelaySec,
retryExceptions=(Exception,),
retryFilter=lambda e, args, kwargs: True,
logger=None, clientLabel=""):
""" Returns a closure suitable for use as function/method decorator for
retrying a function being decorated.
timeoutSec: How many seconds from time of initial call to stop
retrying (floating point); 0 = no retries
initialRetryDelaySec: Number of seconds to wait for first retry.
Subsequent retries will occur at geometrically
doubling intervals up to a maximum interval of
maxRetryDelaySec (floating point)
maxRetryDelaySec: Maximum amount of seconds to wait between retries
(floating point)
retryExceptions: A tuple (must be a tuple) of exception classes that,
including their subclasses, should trigger retries;
Default: any Exception-based exception will trigger
retries
retryFilter: Optional filter function used to further filter the
exceptions in the retryExceptions tuple; called if the
current exception meets the retryExceptions criteria:
takes the current exception instance, args, and kwargs
that were passed to the decorated function, and returns
True to retry, False to allow the exception to be
re-raised without retrying. Default: permits any
exception that matches retryExceptions to be retried.
logger: User-supplied logger instance to use for logging.
None=defaults to logging.getLogger(__name__).
Usage Example:
NOTE: logging must be initialized *before* any loggers are created, else
there will be no output; see nupic.support.initLogging()
_retry = retry(timeoutSec=300, initialRetryDelaySec=0.2,
maxRetryDelaySec=10, retryExceptions=[socket.error])
@_retry
def myFunctionFoo():
...
raise RuntimeError("something bad happened")
...
"""
assert initialRetryDelaySec > 0, str(initialRetryDelaySec)
assert timeoutSec >= 0, str(timeoutSec)
assert maxRetryDelaySec >= initialRetryDelaySec, \
"%r < %r" % (maxRetryDelaySec, initialRetryDelaySec)
assert isinstance(retryExceptions, tuple), (
"retryExceptions must be tuple, but got %r") % (type(retryExceptions),)
if logger is None:
logger = logging.getLogger(__name__)
def retryDecorator(func):
@functools.wraps(func)
def retryWrap(*args, **kwargs):
numAttempts = 0
delaySec = initialRetryDelaySec
startTime = time.time()
# Make sure it gets called at least once
while True:
numAttempts += 1
try:
result = func(*args, **kwargs)
except retryExceptions, e:
if not retryFilter(e, args, kwargs):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
'[%s] Failure in %r; retries aborted by custom retryFilter. '
'Caller stack:\n%s', clientLabel, func,
''.join(traceback.format_stack()), exc_info=True)
raise
now = time.time()
# Compensate for negative time adjustment so we don't get stuck
# waiting way too long (python doesn't provide monotonic time yet)
if now < startTime:
startTime = now
if (now - startTime) >= timeoutSec:
logger.exception(
'[%s] Exhausted retry timeout (%s sec.; %s attempts) for %r. '
'Caller stack:\n%s', clientLabel, timeoutSec, numAttempts, func,
''.join(traceback.format_stack()))
raise
if numAttempts == 1:
logger.warning(
'[%s] First failure in %r; initial retry in %s sec.; '
'timeoutSec=%s. Caller stack:\n%s', clientLabel, func, delaySec,
timeoutSec, ''.join(traceback.format_stack()), exc_info=True)
else:
logger.debug(
'[%s] %r failed %s times; retrying in %s sec.; timeoutSec=%s. '
'Caller stack:\n%s',
clientLabel, func, numAttempts, delaySec, timeoutSec,
''.join(traceback.format_stack()), exc_info=True)
time.sleep(delaySec)
delaySec = min(delaySec*2, maxRetryDelaySec)
else:
if numAttempts > 1:
logger.info('[%s] %r succeeded on attempt # %d',
clientLabel, func, numAttempts)
return result
return retryWrap
return retryDecorator
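# Illustrative backoff schedule (worked example, not output of this module):
# with initialRetryDelaySec=0.2 and maxRetryDelaySec=10 as in the docstring,
# retries are attempted after 0.2, 0.4, 0.8, 1.6, 3.2, 6.4, then 10, 10, ...
# seconds, until timeoutSec has elapsed since the first call.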
|
Agana/MyBlogAgain
|
refs/heads/master
|
django/contrib/auth/models.py
|
95
|
import datetime
import urllib
from django.contrib import auth
from django.contrib.auth.signals import user_logged_in
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db.models.manager import EmptyManager
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import smart_str
from django.utils.hashcompat import md5_constructor, sha_constructor
from django.utils.translation import ugettext_lazy as _
from django.utils.crypto import constant_time_compare
UNUSABLE_PASSWORD = '!' # This will never be a valid hash
def get_hexdigest(algorithm, salt, raw_password):
"""
Returns a string of the hexdigest of the given plaintext password and salt
using the given algorithm ('md5', 'sha1' or 'crypt').
"""
raw_password, salt = smart_str(raw_password), smart_str(salt)
if algorithm == 'crypt':
try:
import crypt
except ImportError:
raise ValueError('"crypt" password algorithm not supported in this environment')
return crypt.crypt(raw_password, salt)
if algorithm == 'md5':
return md5_constructor(salt + raw_password).hexdigest()
elif algorithm == 'sha1':
return sha_constructor(salt + raw_password).hexdigest()
raise ValueError("Got unknown password algorithm type in password.")
def check_password(raw_password, enc_password):
"""
Returns a boolean of whether the raw_password was correct. Handles
encryption formats behind the scenes.
"""
algo, salt, hsh = enc_password.split('$')
return constant_time_compare(hsh, get_hexdigest(algo, salt, raw_password))
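# Illustrative stored value handled above (placeholder digest, real format):
#     'sha1$a1976$<40-hex-char sha1 digest>'
# splits into algo='sha1', salt='a1976', hsh='<hexdigest>'.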
def update_last_login(sender, user, **kwargs):
"""
A signal receiver which updates the last_login date for
the user logging in.
"""
user.last_login = datetime.datetime.now()
user.save()
user_logged_in.connect(update_last_login)
class SiteProfileNotAvailable(Exception):
pass
class PermissionManager(models.Manager):
def get_by_natural_key(self, codename, app_label, model):
return self.get(
codename=codename,
content_type=ContentType.objects.get_by_natural_key(app_label, model)
)
class Permission(models.Model):
"""The permissions system provides a way to assign permissions to specific users and groups of users.
The permission system is used by the Django admin site, but may also be useful in your own code. The Django admin site uses permissions as follows:
- The "add" permission limits the user's ability to view the "add" form and add an object.
- The "change" permission limits a user's ability to view the change list, view the "change" form and change an object.
- The "delete" permission limits the ability to delete an object.
Permissions are set globally per type of object, not per specific object instance. It is possible to say "Mary may change news stories," but it's not currently possible to say "Mary may change news stories, but only the ones she created herself" or "Mary may only change news stories that have a certain status or publication date."
Three basic permissions -- add, change and delete -- are automatically created for each Django model.
"""
name = models.CharField(_('name'), max_length=50)
content_type = models.ForeignKey(ContentType)
codename = models.CharField(_('codename'), max_length=100)
objects = PermissionManager()
class Meta:
verbose_name = _('permission')
verbose_name_plural = _('permissions')
unique_together = (('content_type', 'codename'),)
ordering = ('codename',)
def __unicode__(self):
return u"%s | %s | %s" % (
unicode(self.content_type.app_label),
unicode(self.content_type),
unicode(self.name))
def natural_key(self):
return (self.codename,) + self.content_type.natural_key()
natural_key.dependencies = ['contenttypes.contenttype']
class Group(models.Model):
"""Groups are a generic way of categorizing users to apply permissions, or some other label, to those users. A user can belong to any number of groups.
A user in a group automatically has all the permissions granted to that group. For example, if the group Site editors has the permission can_edit_home_page, any user in that group will have that permission.
Beyond permissions, groups are a convenient way to categorize users to apply some label, or extended functionality, to them. For example, you could create a group 'Special users', and you could write code that would do special things to those users -- such as giving them access to a members-only portion of your site, or sending them members-only e-mail messages.
"""
name = models.CharField(_('name'), max_length=80, unique=True)
permissions = models.ManyToManyField(Permission, verbose_name=_('permissions'), blank=True)
class Meta:
verbose_name = _('group')
verbose_name_plural = _('groups')
def __unicode__(self):
return self.name
class UserManager(models.Manager):
def create_user(self, username, email, password=None):
"""
Creates and saves a User with the given username, e-mail and password.
"""
now = datetime.datetime.now()
# Normalize the address by lowercasing the domain part of the email
# address.
try:
email_name, domain_part = email.strip().split('@', 1)
except ValueError:
pass
else:
email = '@'.join([email_name, domain_part.lower()])
user = self.model(username=username, email=email, is_staff=False,
is_active=True, is_superuser=False, last_login=now,
date_joined=now)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, username, email, password):
u = self.create_user(username, email, password)
u.is_staff = True
u.is_active = True
u.is_superuser = True
u.save(using=self._db)
return u
def make_random_password(self, length=10, allowed_chars='abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789'):
"Generates a random password with the given length and given allowed_chars"
# Note that default value of allowed_chars does not have "I" or letters
# that look like it -- just to avoid confusion.
from random import choice
return ''.join([choice(allowed_chars) for i in range(length)])
# A few helper functions for common logic between User and AnonymousUser.
def _user_get_all_permissions(user, obj):
permissions = set()
anon = user.is_anonymous()
for backend in auth.get_backends():
if not anon or backend.supports_anonymous_user:
if hasattr(backend, "get_all_permissions"):
if obj is not None:
if backend.supports_object_permissions:
permissions.update(
backend.get_all_permissions(user, obj)
)
else:
permissions.update(backend.get_all_permissions(user))
return permissions
def _user_has_perm(user, perm, obj):
anon = user.is_anonymous()
active = user.is_active
for backend in auth.get_backends():
if (not active and not anon and backend.supports_inactive_user) or \
(not anon or backend.supports_anonymous_user):
if hasattr(backend, "has_perm"):
if obj is not None:
if (backend.supports_object_permissions and
backend.has_perm(user, perm, obj)):
return True
else:
if backend.has_perm(user, perm):
return True
return False
def _user_has_module_perms(user, app_label):
anon = user.is_anonymous()
active = user.is_active
for backend in auth.get_backends():
if (not active and not anon and backend.supports_inactive_user) or \
(not anon or backend.supports_anonymous_user):
if hasattr(backend, "has_module_perms"):
if backend.has_module_perms(user, app_label):
return True
return False
class User(models.Model):
"""
Users within the Django authentication system are represented by this model.
Username and password are required. Other fields are optional.
"""
username = models.CharField(_('username'), max_length=30, unique=True, help_text=_("Required. 30 characters or fewer. Letters, numbers and @/./+/-/_ characters"))
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
email = models.EmailField(_('e-mail address'), blank=True)
password = models.CharField(_('password'), max_length=128, help_text=_("Use '[algo]$[salt]$[hexdigest]' or use the <a href=\"password/\">change password form</a>."))
is_staff = models.BooleanField(_('staff status'), default=False, help_text=_("Designates whether the user can log into this admin site."))
is_active = models.BooleanField(_('active'), default=True, help_text=_("Designates whether this user should be treated as active. Unselect this instead of deleting accounts."))
is_superuser = models.BooleanField(_('superuser status'), default=False, help_text=_("Designates that this user has all permissions without explicitly assigning them."))
last_login = models.DateTimeField(_('last login'), default=datetime.datetime.now)
date_joined = models.DateTimeField(_('date joined'), default=datetime.datetime.now)
groups = models.ManyToManyField(Group, verbose_name=_('groups'), blank=True,
help_text=_("In addition to the permissions manually assigned, this user will also get all permissions granted to each group he/she is in."))
user_permissions = models.ManyToManyField(Permission, verbose_name=_('user permissions'), blank=True)
objects = UserManager()
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
def __unicode__(self):
return self.username
def get_absolute_url(self):
return "/users/%s/" % urllib.quote(smart_str(self.username))
def is_anonymous(self):
"""
Always returns False. This is a way of comparing User objects to
anonymous users.
"""
return False
def is_authenticated(self):
"""
Always return True. This is a way to tell if the user has been
authenticated in templates.
"""
return True
def get_full_name(self):
"Returns the first_name plus the last_name, with a space in between."
full_name = u'%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def set_password(self, raw_password):
if raw_password is None:
self.set_unusable_password()
else:
import random
algo = 'sha1'
salt = get_hexdigest(algo, str(random.random()), str(random.random()))[:5]
hsh = get_hexdigest(algo, salt, raw_password)
self.password = '%s$%s$%s' % (algo, salt, hsh)
def check_password(self, raw_password):
"""
Returns a boolean of whether the raw_password was correct. Handles
encryption formats behind the scenes.
"""
# Backwards-compatibility check. Older passwords won't include the
# algorithm or salt.
if '$' not in self.password:
is_correct = (self.password == get_hexdigest('md5', '', raw_password))
if is_correct:
# Convert the password to the new, more secure format.
self.set_password(raw_password)
self.save()
return is_correct
return check_password(raw_password, self.password)
def set_unusable_password(self):
# Sets a value that will never be a valid hash
self.password = UNUSABLE_PASSWORD
def has_usable_password(self):
if self.password is None \
or self.password == UNUSABLE_PASSWORD:
return False
else:
return True
def get_group_permissions(self, obj=None):
"""
Returns a list of permission strings that this user has through
his/her groups. This method queries all available auth backends.
If an object is passed in, only permissions matching this object
are returned.
"""
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_group_permissions"):
if obj is not None:
if backend.supports_object_permissions:
permissions.update(
backend.get_group_permissions(self, obj)
)
else:
permissions.update(backend.get_group_permissions(self))
return permissions
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj)
def has_perm(self, perm, obj=None):
"""
Returns True if the user has the specified permission. This method
queries all available auth backends, but returns immediately if any
backend returns True. Thus, a user who has permission from a single
auth backend is assumed to have permission in general. If an object
is provided, permissions for this specific object are checked.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
# Otherwise we need to check the backends.
return _user_has_perm(self, perm, obj)
def has_perms(self, perm_list, obj=None):
"""
Returns True if the user has each of the specified permissions.
If object is passed, it checks if the user has all required perms
for this object.
"""
for perm in perm_list:
if not self.has_perm(perm, obj):
return False
return True
def has_module_perms(self, app_label):
"""
Returns True if the user has any permissions in the given app
label. Uses pretty much the same logic as has_perm, above.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
return _user_has_module_perms(self, app_label)
def get_and_delete_messages(self):
messages = []
for m in self.message_set.all():
messages.append(m.message)
m.delete()
return messages
def email_user(self, subject, message, from_email=None):
"Sends an e-mail to this User."
from django.core.mail import send_mail
send_mail(subject, message, from_email, [self.email])
def get_profile(self):
"""
Returns site-specific profile for this user. Raises
SiteProfileNotAvailable if this site does not allow profiles.
"""
if not hasattr(self, '_profile_cache'):
from django.conf import settings
if not getattr(settings, 'AUTH_PROFILE_MODULE', False):
raise SiteProfileNotAvailable('You need to set AUTH_PROFILE_MO'
'DULE in your project settings')
try:
app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
except ValueError:
raise SiteProfileNotAvailable('app_label and model_name should'
' be separated by a dot in the AUTH_PROFILE_MODULE set'
'ting')
try:
model = models.get_model(app_label, model_name)
if model is None:
raise SiteProfileNotAvailable('Unable to load the profile '
'model, check AUTH_PROFILE_MODULE in your project sett'
'ings')
self._profile_cache = model._default_manager.using(self._state.db).get(user__id__exact=self.id)
self._profile_cache.user = self
except (ImportError, ImproperlyConfigured):
raise SiteProfileNotAvailable
return self._profile_cache
def _get_message_set(self):
import warnings
warnings.warn('The user messaging API is deprecated. Please update'
' your code to use the new messages framework.',
category=DeprecationWarning)
return self._message_set
message_set = property(_get_message_set)
class Message(models.Model):
"""
The message system is a lightweight way to queue messages for given
users. A message is associated with a User instance (so it is only
applicable for registered users). There's no concept of expiration or
timestamps. Messages are created by the Django admin after successful
actions. For example, "The poll Foo was created successfully." is a
message.
"""
user = models.ForeignKey(User, related_name='_message_set')
message = models.TextField(_('message'))
def __unicode__(self):
return self.message
class AnonymousUser(object):
id = None
username = ''
is_staff = False
is_active = False
is_superuser = False
_groups = EmptyManager()
_user_permissions = EmptyManager()
def __init__(self):
pass
def __unicode__(self):
return 'AnonymousUser'
def __str__(self):
return unicode(self).encode('utf-8')
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return 1 # instances always return the same hash value
def save(self):
raise NotImplementedError
def delete(self):
raise NotImplementedError
def set_password(self, raw_password):
raise NotImplementedError
def check_password(self, raw_password):
raise NotImplementedError
def _get_groups(self):
return self._groups
groups = property(_get_groups)
def _get_user_permissions(self):
return self._user_permissions
user_permissions = property(_get_user_permissions)
def get_group_permissions(self, obj=None):
return set()
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj=obj)
def has_perm(self, perm, obj=None):
return _user_has_perm(self, perm, obj=obj)
def has_perms(self, perm_list, obj=None):
for perm in perm_list:
if not self.has_perm(perm, obj):
return False
return True
def has_module_perms(self, module):
return _user_has_module_perms(self, module)
def get_and_delete_messages(self):
return []
def is_anonymous(self):
return True
def is_authenticated(self):
return False
|
bensk/CS11
|
refs/heads/gh-pages
|
Code Examples/Classes.py
|
3
|
class Pet(object):
"""Represents a pet"""
my_pet_1 = Pet()
my_pet_1.type = 'direwolf'
my_pet_1.noise = 'howl'
my_pet_1.full_name = 'Ghost'
my_pet_2 = Pet()
my_pet_2.type = 'direwolf'
my_pet_2.noise = 'howl'
my_pet_2.full_name = 'Nymeria'
my_pet_3 = Pet()
my_pet_3.type = 'direwolf'
my_pet_3.noise = 'howl'
my_pet_3.full_name = 'Summer'
my_pets = [my_pet_1, my_pet_2, my_pet_3]
print type(my_pets)
print type(my_pet_1)
print type('Summer')
for pet in my_pets:
print pet.full_name
|
Batterfii/zulip
|
refs/heads/master
|
zerver/management/commands/check_apns_tokens.py
|
125
|
from __future__ import absolute_import
from django.core.management.base import BaseCommand
from zerver.lib.push_notifications import check_apns_feedback
class Command(BaseCommand):
help = """Checks the Apple Push Notifications Service for any tokens that have been
invalidated, and removes them from the database.
Usage: ./manage.py check_apns_tokens"""
def handle(self, *args, **options):
check_apns_feedback()
|
SerCeMan/intellij-community
|
refs/heads/master
|
python/testData/quickFixes/PyMakeFunctionFromMethodQuickFixTest/updateUsage_after.py
|
79
|
__author__ = 'ktisha'
def foo():
print("Hello Pycharm!")
class A:
pass
foo()
|
dlazz/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/network/edgeswitch/edgeswitch_interface.py
|
52
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2018 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re
class InterfaceConfiguration:
def __init__(self):
self.commands = []
self.merged = False
def has_same_commands(self, interface):
len1 = len(self.commands)
len2 = len(interface.commands)
return len1 == len2 and len1 == len(frozenset(self.commands).intersection(interface.commands))
def merge_interfaces(interfaces):
""" to reduce commands generated by an edgeswitch module
we take interfaces one by one and we try to merge them with neighbors if everyone has same commands to run
"""
merged = {}
for i, interface in interfaces.items():
if interface.merged:
continue
interface.merged = True
match = re.match(r'(\d+)\/(\d+)', i)
group = int(match.group(1))
start = int(match.group(2))
end = start
while True:
try:
start = start - 1
key = '{0}/{1}'.format(group, start)
neighbor = interfaces[key]
if not neighbor.merged and interface.has_same_commands(neighbor):
neighbor.merged = True
else:
break
except KeyError:
break
start = start + 1
while True:
try:
end = end + 1
key = '{0}/{1}'.format(group, end)
neighbor = interfaces[key]
if not neighbor.merged and interface.has_same_commands(neighbor):
neighbor.merged = True
else:
break
except KeyError:
break
end = end - 1
if end == start:
key = '{0}/{1}'.format(group, start)
else:
key = '{0}/{1}-{2}/{3}'.format(group, start, group, end)
merged[key] = interface
return merged
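# Usage sketch (illustrative): three consecutive ports with identical
# commands collapse into a single range key.
#
#     a, b, c = InterfaceConfiguration(), InterfaceConfiguration(), InterfaceConfiguration()
#     for itf in (a, b, c):
#         itf.commands = ['shutdown']
#     merge_interfaces({'0/1': a, '0/2': b, '0/3': c})
#     # -> {'0/1-0/3': a}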
|
joelpinheiro/safebox-smartcard-auth
|
refs/heads/master
|
Server/veserver/lib/python2.7/site-packages/setuptools/tests/test_sdist.py
|
332
|
# -*- coding: utf-8 -*-
"""sdist tests"""
import locale
import os
import shutil
import sys
import tempfile
import unittest
import unicodedata
import re
from setuptools.tests import environment, test_svn
from setuptools.tests.py26compat import skipIf
from setuptools.compat import StringIO, unicode
from setuptools.command.sdist import sdist, walk_revctrl
from setuptools.command.egg_info import manifest_maker
from setuptools.dist import Distribution
from setuptools import svn_utils
SETUP_ATTRS = {
'name': 'sdist_test',
'version': '0.0',
'packages': ['sdist_test'],
'package_data': {'sdist_test': ['*.txt']}
}
SETUP_PY = """\
from setuptools import setup
setup(**%r)
""" % SETUP_ATTRS
if sys.version_info >= (3,):
LATIN1_FILENAME = 'smörbröd.py'.encode('latin-1')
else:
LATIN1_FILENAME = 'sm\xf6rbr\xf6d.py'
# Cannot use context manager because of Python 2.4
def quiet():
global old_stdout, old_stderr
old_stdout, old_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = StringIO(), StringIO()
def unquiet():
sys.stdout, sys.stderr = old_stdout, old_stderr
# Fake byte literals for Python <= 2.5
def b(s, encoding='utf-8'):
if sys.version_info >= (3,):
return s.encode(encoding)
return s
# Convert to POSIX path
def posix(path):
if sys.version_info >= (3,) and not isinstance(path, str):
return path.replace(os.sep.encode('ascii'), b('/'))
else:
return path.replace(os.sep, '/')
# HFS Plus uses decomposed UTF-8
def decompose(path):
if isinstance(path, unicode):
return unicodedata.normalize('NFD', path)
try:
path = path.decode('utf-8')
path = unicodedata.normalize('NFD', path)
path = path.encode('utf-8')
except UnicodeError:
pass # Not UTF-8
return path
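# e.g. decompose(u'smörbröd.py') returns the NFD form in which u'ö'
# becomes u'o' + u'\u0308' (combining diaeresis), matching how HFS Plus
# stores file names on darwin (illustrative).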
class TestSdistTest(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
f = open(os.path.join(self.temp_dir, 'setup.py'), 'w')
f.write(SETUP_PY)
f.close()
# Set up the rest of the test package
test_pkg = os.path.join(self.temp_dir, 'sdist_test')
os.mkdir(test_pkg)
# *.rst was not included in package_data, so c.rst should not be
# automatically added to the manifest when not under version control
for fname in ['__init__.py', 'a.txt', 'b.txt', 'c.rst']:
# Just touch the files; their contents are irrelevant
open(os.path.join(test_pkg, fname), 'w').close()
self.old_cwd = os.getcwd()
os.chdir(self.temp_dir)
def tearDown(self):
os.chdir(self.old_cwd)
shutil.rmtree(self.temp_dir)
def test_package_data_in_sdist(self):
"""Regression test for pull request #4: ensures that files listed in
package_data are included in the manifest even if they're not added to
version control.
"""
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
# squelch output
quiet()
try:
cmd.run()
finally:
unquiet()
manifest = cmd.filelist.files
self.assertTrue(os.path.join('sdist_test', 'a.txt') in manifest)
self.assertTrue(os.path.join('sdist_test', 'b.txt') in manifest)
self.assertTrue(os.path.join('sdist_test', 'c.rst') not in manifest)
def test_manifest_is_written_with_utf8_encoding(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
mm = manifest_maker(dist)
mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
os.mkdir('sdist_test.egg-info')
# UTF-8 filename
filename = os.path.join('sdist_test', 'smörbröd.py')
# Add UTF-8 filename and write manifest
quiet()
try:
mm.run()
mm.filelist.files.append(filename)
mm.write_manifest()
finally:
unquiet()
manifest = open(mm.manifest, 'rbU')
contents = manifest.read()
manifest.close()
# The manifest should be UTF-8 encoded
try:
u_contents = contents.decode('UTF-8')
except UnicodeDecodeError:
e = sys.exc_info()[1]
self.fail(e)
# The manifest should contain the UTF-8 filename
if sys.version_info >= (3,):
self.assertTrue(posix(filename) in u_contents)
else:
self.assertTrue(posix(filename) in contents)
# Python 3 only
if sys.version_info >= (3,):
def test_write_manifest_allows_utf8_filenames(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
mm = manifest_maker(dist)
mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
os.mkdir('sdist_test.egg-info')
# UTF-8 filename
filename = os.path.join(b('sdist_test'), b('smörbröd.py'))
# Add filename and write manifest
quiet()
try:
mm.run()
u_filename = filename.decode('utf-8')
mm.filelist.files.append(u_filename)
# Re-write manifest
mm.write_manifest()
finally:
unquiet()
manifest = open(mm.manifest, 'rbU')
contents = manifest.read()
manifest.close()
# The manifest should be UTF-8 encoded
try:
contents.decode('UTF-8')
except UnicodeDecodeError:
e = sys.exc_info()[1]
self.fail(e)
# The manifest should contain the UTF-8 filename
self.assertTrue(posix(filename) in contents)
# The filelist should have been updated as well
self.assertTrue(u_filename in mm.filelist.files)
def test_write_manifest_skips_non_utf8_filenames(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
mm = manifest_maker(dist)
mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
os.mkdir('sdist_test.egg-info')
# Latin-1 filename
filename = os.path.join(b('sdist_test'), LATIN1_FILENAME)
# Add filename with surrogates and write manifest
quiet()
try:
mm.run()
u_filename = filename.decode('utf-8', 'surrogateescape')
mm.filelist.files.append(u_filename)
# Re-write manifest
mm.write_manifest()
finally:
unquiet()
manifest = open(mm.manifest, 'rbU')
contents = manifest.read()
manifest.close()
# The manifest should be UTF-8 encoded
try:
contents.decode('UTF-8')
except UnicodeDecodeError:
e = sys.exc_info()[1]
self.fail(e)
# The Latin-1 filename should have been skipped
self.assertFalse(posix(filename) in contents)
# The filelist should have been updated as well
self.assertFalse(u_filename in mm.filelist.files)
def test_manifest_is_read_with_utf8_encoding(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
# Create manifest
quiet()
try:
cmd.run()
finally:
unquiet()
# Add UTF-8 filename to manifest
filename = os.path.join(b('sdist_test'), b('smörbröd.py'))
cmd.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
manifest = open(cmd.manifest, 'ab')
manifest.write(b('\n')+filename)
manifest.close()
# The file must exist to be included in the filelist
open(filename, 'w').close()
# Re-read manifest
cmd.filelist.files = []
quiet()
try:
cmd.read_manifest()
finally:
unquiet()
# The filelist should contain the UTF-8 filename
if sys.version_info >= (3,):
filename = filename.decode('utf-8')
self.assertTrue(filename in cmd.filelist.files)
# Python 3 only
if sys.version_info >= (3,):
def test_read_manifest_skips_non_utf8_filenames(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
# Create manifest
quiet()
try:
cmd.run()
finally:
unquiet()
# Add Latin-1 filename to manifest
filename = os.path.join(b('sdist_test'), LATIN1_FILENAME)
cmd.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
manifest = open(cmd.manifest, 'ab')
manifest.write(b('\n')+filename)
manifest.close()
# The file must exist to be included in the filelist
open(filename, 'w').close()
# Re-read manifest
cmd.filelist.files = []
quiet()
try:
try:
cmd.read_manifest()
except UnicodeDecodeError:
e = sys.exc_info()[1]
self.fail(e)
finally:
unquiet()
# The Latin-1 filename should have been skipped
filename = filename.decode('latin-1')
self.assertFalse(filename in cmd.filelist.files)
@skipIf(sys.version_info >= (3,) and locale.getpreferredencoding() != 'UTF-8',
'Unit test fails if the locale is not UTF-8, even though the manifest is recorded correctly')
def test_sdist_with_utf8_encoded_filename(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
# UTF-8 filename
filename = os.path.join(b('sdist_test'), b('smörbröd.py'))
open(filename, 'w').close()
quiet()
try:
cmd.run()
finally:
unquiet()
if sys.platform == 'darwin':
filename = decompose(filename)
if sys.version_info >= (3,):
fs_enc = sys.getfilesystemencoding()
if sys.platform == 'win32':
if fs_enc == 'cp1252':
# Python 3 mangles the UTF-8 filename
filename = filename.decode('cp1252')
self.assertTrue(filename in cmd.filelist.files)
else:
filename = filename.decode('mbcs')
self.assertTrue(filename in cmd.filelist.files)
else:
filename = filename.decode('utf-8')
self.assertTrue(filename in cmd.filelist.files)
else:
self.assertTrue(filename in cmd.filelist.files)
def test_sdist_with_latin1_encoded_filename(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
# Latin-1 filename
filename = os.path.join(b('sdist_test'), LATIN1_FILENAME)
open(filename, 'w').close()
self.assertTrue(os.path.isfile(filename))
quiet()
try:
cmd.run()
finally:
unquiet()
if sys.version_info >= (3,):
# not all Windows systems have a default FS encoding of cp1252
if sys.platform == 'win32':
# Latin-1 is similar to Windows-1252, but on an mbcs
# filesystem the name is not stored in Latin-1 encoding
fs_enc = sys.getfilesystemencoding()
if fs_enc == 'mbcs':
filename = filename.decode('mbcs')
else:
filename = filename.decode('latin-1')
self.assertTrue(filename in cmd.filelist.files)
else:
# The Latin-1 filename should have been skipped
filename = filename.decode('latin-1')
self.assertFalse(filename in cmd.filelist.files)
else:
# No conversion takes place under Python 2 and the file
# is included. We shall keep it that way for BBB.
self.assertTrue(filename in cmd.filelist.files)
class TestDummyOutput(environment.ZippedEnvironment):
def setUp(self):
self.datafile = os.path.join('setuptools', 'tests',
'svn_data', "dummy.zip")
self.dataname = "dummy"
super(TestDummyOutput, self).setUp()
def _run(self):
code, data = environment.run_setup_py(["sdist"],
pypath=self.old_cwd,
data_stream=0)
if code:
info = "DIR: " + os.path.abspath('.')
info += "\n SDIST RETURNED: %i\n\n" % code
info += data
raise AssertionError(info)
datalines = data.splitlines()
possible = (
"running sdist",
"running egg_info",
"creating dummy\.egg-info",
"writing dummy\.egg-info",
"writing top-level names to dummy\.egg-info",
"writing dependency_links to dummy\.egg-info",
"writing manifest file 'dummy\.egg-info",
"reading manifest file 'dummy\.egg-info",
"reading manifest template 'MANIFEST\.in'",
"writing manifest file 'dummy\.egg-info",
"creating dummy-0.1.1",
"making hard links in dummy-0\.1\.1",
"copying files to dummy-0\.1\.1",
"copying \S+ -> dummy-0\.1\.1",
"copying dummy",
"copying dummy\.egg-info",
"hard linking \S+ -> dummy-0\.1\.1",
"hard linking dummy",
"hard linking dummy\.egg-info",
"Writing dummy-0\.1\.1",
"creating dist",
"creating 'dist",
"Creating tar archive",
"running check",
"adding 'dummy-0\.1\.1",
"tar .+ dist/dummy-0\.1\.1\.tar dummy-0\.1\.1",
"gzip .+ dist/dummy-0\.1\.1\.tar",
"removing 'dummy-0\.1\.1' \\(and everything under it\\)",
)
print(" DIR: " + os.path.abspath('.'))
for line in datalines:
found = False
for pattern in possible:
if re.match(pattern, line):
print(" READ: " + line)
found = True
break
if not found:
raise AssertionError("Unexpexected: %s\n-in-\n%s"
% (line, data))
return data
def test_sources(self):
self._run()
class TestSvn(environment.ZippedEnvironment):
def setUp(self):
version = svn_utils.SvnInfo.get_svn_version()
if not version: # None or Empty
return
self.base_version = tuple([int(x) for x in version.split('.')][:2])
if not self.base_version:
raise ValueError('No SVN tools installed')
elif self.base_version < (1, 3):
raise ValueError('Insufficient SVN Version %s' % version)
elif self.base_version >= (1, 9):
#trying the latest version
self.base_version = (1, 8)
self.dataname = "svn%i%i_example" % self.base_version
self.datafile = os.path.join('setuptools', 'tests',
'svn_data', self.dataname + ".zip")
super(TestSvn, self).setUp()
@skipIf(not test_svn._svn_check, "No SVN to text, in the first place")
def test_walksvn(self):
if self.base_version >= (1, 6):
folder2 = 'third party2'
folder3 = 'third party3'
else:
folder2 = 'third_party2'
folder3 = 'third_party3'
#TODO is this right
expected = set([
os.path.join('a file'),
os.path.join(folder2, 'Changes.txt'),
os.path.join(folder2, 'MD5SUMS'),
os.path.join(folder2, 'README.txt'),
os.path.join(folder3, 'Changes.txt'),
os.path.join(folder3, 'MD5SUMS'),
os.path.join(folder3, 'README.txt'),
os.path.join(folder3, 'TODO.txt'),
os.path.join(folder3, 'fin'),
os.path.join('third_party', 'README.txt'),
os.path.join('folder', folder2, 'Changes.txt'),
os.path.join('folder', folder2, 'MD5SUMS'),
os.path.join('folder', folder2, 'WatashiNiYomimasu.txt'),
os.path.join('folder', folder3, 'Changes.txt'),
os.path.join('folder', folder3, 'fin'),
os.path.join('folder', folder3, 'MD5SUMS'),
os.path.join('folder', folder3, 'oops'),
os.path.join('folder', folder3, 'WatashiNiYomimasu.txt'),
os.path.join('folder', folder3, 'ZuMachen.txt'),
os.path.join('folder', 'third_party', 'WatashiNiYomimasu.txt'),
os.path.join('folder', 'lalala.txt'),
os.path.join('folder', 'quest.txt'),
# The example will have a deleted file
# (or should) but shouldn't return it
])
self.assertEqual(set(x for x in walk_revctrl()), expected)
def test_suite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
|
OndrejIT/pyload
|
refs/heads/stable
|
module/plugins/hoster/DepositfilesCom.py
|
6
|
# -*- coding: utf-8 -*-
import re
import urllib
from ..captcha.SolveMedia import SolveMedia
from ..internal.SimpleHoster import SimpleHoster
class DepositfilesCom(SimpleHoster):
__name__ = "DepositfilesCom"
__type__ = "hoster"
__version__ = "0.63"
__status__ = "testing"
__pattern__ = r'https?://(?:www\.)?(depositfiles\.com|dfiles\.(eu|ru))(/\w{1,3})?/files/(?P<ID>\w+)'
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool",
"Fallback to free download if premium fails", True),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10)]
__description__ = """Depositfiles.com hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("spoob", "spoob@pyload.org"),
("zoidberg", "zoidberg@mujmail.cz"),
("Walter Purcaro", "vuolter@gmail.com"),
("GammaC0de", "nitzo2001[AT}yahoo[DOT]com")]
NAME_PATTERN = r'<script type="text/javascript">eval\( unescape\(\'(?P<N>.*?)\''
SIZE_PATTERN = r'>File size: <b>(?P<S>[\d.,]+) (?P<U>[\w^_]+)</b>'
OFFLINE_PATTERN = r'<span class="html_download_api-not_exists"></span>'
TEMP_OFFLINE_PATTERN = r'^unmatchable$'
NAME_REPLACEMENTS = [(r'\%u([0-9A-Fa-f]{4})', lambda m: unichr(int(m.group(1), 16))),
(r'.*<b title="(?P<N>.+?)".*', "\g<N>")]
URL_REPLACEMENTS = [(__pattern__ + ".*", "https://depositfiles.com/files/\g<ID>")]
COOKIES = [("depositfiles.com", "lang_current", "en")]
WAIT_PATTERN = r'(?:download_waiter_remain">|html_download_api-limit_interval">|>Please wait|>Try in).+'
ERROR_PATTERN = r'File is checked, please try again in a minute'
LINK_FREE_PATTERN = r'<form id="downloader_file_form" action="(https?://.+?)" method="post"'
LINK_PREMIUM_PATTERN = r'class="repeat"><a href="(.+?)"'
LINK_MIRROR_PATTERN = r'class="repeat_mirror"><a href="(.+?)"'
def handle_free(self, pyfile):
self.data = self.load(pyfile.url, post={'gateway_result': "1"})
self.check_errors()
m = re.search(r"var fid = '(\w+)';", self.data)
if m is None:
self.retry(wait=5)
params = {'fid': m.group(1)}
self.log_debug("FID: %s" % params['fid'])
self.data = self.load("https://depositfiles.com/get_file.php", get=params)
m = re.search(r'ACPuzzleKey = \'(.*?)\'', self.data)
if m is not None:
self.captcha = SolveMedia(pyfile)
captcha_key = m.group(1)
params['acpuzzle'] = 1
params['response'], params['challenge'] = self.captcha.challenge(captcha_key)
else:
self.log_error(_("Captcha pattern not found"))
self.fail(_("Captcha pattern not found"))
self.data = self.load("https://depositfiles.com/get_file.php", get=params)
m = re.search(self.LINK_FREE_PATTERN, self.data)
if m is not None:
self.link = urllib.unquote(m.group(1))
def handle_premium(self, pyfile):
if '<span class="html_download_api-gold_traffic_limit">' in self.data:
self.log_warning(_("Download limit reached"))
self.retry(25, 60 * 60, "Download limit reached")
elif 'onClick="show_gold_offer' in self.data:
self.account.relogin()
self.retry()
else:
link = re.search(self.LINK_PREMIUM_PATTERN, self.data)
mirror = re.search(self.LINK_MIRROR_PATTERN, self.data)
if link:
self.link = link.group(1)
elif mirror:
self.link = mirror.group(1)
|
bobsilverberg/oneanddone-sugardough
|
refs/heads/master
|
oneanddone/tasks/serializers.py
|
4
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from rest_framework import serializers
from oneanddone.tasks.models import Task, TaskKeyword, TaskAttempt
class TaskAttemptSerializer(serializers.ModelSerializer):
user = serializers.SlugRelatedField(many=False, slug_field='email')
class Meta:
model = TaskAttempt
fields = ('user', 'state')
class TaskKeywordSerializer(serializers.ModelSerializer):
class Meta:
model = TaskKeyword
fields = ('name',)
class TaskSerializer(serializers.ModelSerializer):
taskattempt_set = TaskAttemptSerializer(required=False, many=True)
keyword_set = TaskKeywordSerializer(required=False, many=True)
project = serializers.SlugRelatedField(many=False, slug_field='name')
team = serializers.SlugRelatedField(many=False, slug_field='name')
type = serializers.SlugRelatedField(many=False, slug_field='name')
owner = serializers.SlugRelatedField(many=False, slug_field='email')
class Meta:
model = Task
fields = ('id', 'name', 'short_description', 'instructions', 'owner',
'prerequisites', 'execution_time', 'start_date', 'end_date',
'is_draft', 'is_invalid', 'project', 'team', 'type', 'repeatable',
'difficulty', 'why_this_matters', 'keyword_set', 'taskattempt_set')
|
a25kk/ssm-buildout
|
refs/heads/master
|
src/ssm.sitecontent/ssm/sitecontent/interfaces.py
|
1
|
# -*- coding: utf-8 -*-
"""Module where all interfaces, events and exceptions live."""
from plone.theme.interfaces import IDefaultPloneLayer
class ISsmSitecontentLayer(IDefaultPloneLayer):
"""Marker interface that defines a Zope 3 browser layer."""
|
Maspear/odoo
|
refs/heads/8.0
|
addons/account/wizard/account_chart.py
|
271
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_chart(osv.osv_memory):
"""
For Chart of Accounts
"""
_name = "account.chart"
_description = "Account chart"
_columns = {
'fiscalyear': fields.many2one('account.fiscalyear', \
'Fiscal year', \
help='Keep empty for all open fiscal years'),
'period_from': fields.many2one('account.period', 'Start period'),
'period_to': fields.many2one('account.period', 'End period'),
'target_move': fields.selection([('posted', 'All Posted Entries'),
('all', 'All Entries'),
], 'Target Moves', required=True),
}
def _get_fiscalyear(self, cr, uid, context=None):
"""Return default Fiscalyear value"""
return self.pool.get('account.fiscalyear').find(cr, uid, context=context)
def onchange_fiscalyear(self, cr, uid, ids, fiscalyear_id=False, context=None):
res = {}
if fiscalyear_id:
start_period = end_period = False
cr.execute('''
SELECT * FROM (SELECT p.id
FROM account_period p
LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
WHERE f.id = %s
ORDER BY p.date_start ASC, p.special DESC
LIMIT 1) AS period_start
UNION ALL
SELECT * FROM (SELECT p.id
FROM account_period p
LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
WHERE f.id = %s
AND p.date_start < NOW()
ORDER BY p.date_stop DESC
LIMIT 1) AS period_stop''', (fiscalyear_id, fiscalyear_id))
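# The UNION ALL above yields at most two ids: the fiscal year's first
# period and the most recent period that has already started; they become
# the default start/end of the range below.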
periods = [i[0] for i in cr.fetchall()]
if periods:
start_period = periods[0]
if len(periods) > 1:
end_period = periods[1]
res['value'] = {'period_from': start_period, 'period_to': end_period}
else:
res['value'] = {'period_from': False, 'period_to': False}
return res
def account_chart_open_window(self, cr, uid, ids, context=None):
"""
Opens chart of Accounts
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of account chart’s IDs
@return: dictionary of Open account chart window on given fiscalyear and all Entries or posted entries
"""
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
period_obj = self.pool.get('account.period')
fy_obj = self.pool.get('account.fiscalyear')
if context is None:
context = {}
data = self.read(cr, uid, ids, context=context)[0]
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_account_tree')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
fiscalyear_id = data.get('fiscalyear', False) and data['fiscalyear'][0] or False
result['periods'] = []
if data['period_from'] and data['period_to']:
period_from = data.get('period_from', False) and data['period_from'][0] or False
period_to = data.get('period_to', False) and data['period_to'][0] or False
result['periods'] = period_obj.build_ctx_periods(cr, uid, period_from, period_to)
result['context'] = str({'fiscalyear': fiscalyear_id, 'periods': result['periods'], \
'state': data['target_move']})
if fiscalyear_id:
result['name'] += ':' + fy_obj.read(cr, uid, [fiscalyear_id], context=context)[0]['code']
return result
_defaults = {
'target_move': 'posted',
'fiscalyear': _get_fiscalyear,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
tvtsoft/odoo8
|
refs/heads/master
|
addons/l10n_multilang/__openerp__.py
|
2
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Multi Language Chart of Accounts',
'version': '1.1',
'author': 'OpenERP SA',
'category': 'Hidden/Dependency',
'description': """
* Multi language support for Chart of Accounts, Taxes, Tax Codes, Journals,
Accounting Templates, Analytic Chart of Accounts and Analytic Journals.
* Setup wizard changes
- Copy translations for COA, Tax, Tax Code and Fiscal Position from
templates to target objects.
""",
'website': 'http://www.openerp.com',
'depends' : ['account'],
'data': [],
'demo': [],
'installable': True,
'auto_install': False,
}
|
gautam1858/tensorflow
|
refs/heads/master
|
tensorflow/contrib/eager/python/examples/spinn/spinn_test.py
|
20
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gc
import glob
import os
import shutil
import tempfile
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# pylint: disable=g-bad-import-order
import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.spinn import data
from third_party.examples.eager.spinn import spinn
from tensorflow.contrib.summary import summary_test_util
from tensorflow.python.eager import test
from tensorflow.python.framework import test_util
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training.checkpointable import util as checkpointable_utils
# pylint: enable=g-bad-import-order
def _generate_synthetic_snli_data_batch(sequence_length,
batch_size,
vocab_size):
"""Generate a fake batch of SNLI data for testing."""
with tf.device("cpu:0"):
labels = tf.random_uniform([batch_size], minval=1, maxval=4, dtype=tf.int64)
prem = tf.random_uniform(
(sequence_length, batch_size), maxval=vocab_size, dtype=tf.int64)
prem_trans = tf.constant(np.array(
[[3, 3, 2, 3, 3, 3, 2, 2, 2, 3, 3, 3,
2, 3, 3, 2, 2, 3, 3, 3, 2, 2, 2, 2,
3, 2, 2]] * batch_size, dtype=np.int64).T)
hypo = tf.random_uniform(
(sequence_length, batch_size), maxval=vocab_size, dtype=tf.int64)
hypo_trans = tf.constant(np.array(
[[3, 3, 2, 3, 3, 3, 2, 2, 2, 3, 3, 3,
2, 3, 3, 2, 2, 3, 3, 3, 2, 2, 2, 2,
3, 2, 2]] * batch_size, dtype=np.int64).T)
if tfe.num_gpus():
labels = labels.gpu()
prem = prem.gpu()
prem_trans = prem_trans.gpu()
hypo = hypo.gpu()
hypo_trans = hypo_trans.gpu()
return labels, prem, prem_trans, hypo, hypo_trans
def _test_spinn_config(d_embed, d_out, logdir=None, inference_sentences=None):
"""Generate a config tuple for testing.
Args:
d_embed: Embedding dimensions.
d_out: Model output dimensions.
logdir: Optional logdir.
inference_sentences: A 2-tuple of strings representing the sentences (with
binary parsing result), e.g.,
("( ( The dog ) ( ( is running ) . ) )", "( ( The dog ) ( moves . ) )").
Returns:
A config tuple.
"""
config_tuple = collections.namedtuple(
"Config", ["d_hidden", "d_proj", "d_tracker", "predict",
"embed_dropout", "mlp_dropout", "n_mlp_layers", "d_mlp",
"d_out", "projection", "lr", "batch_size", "epochs",
"force_cpu", "logdir", "log_every", "dev_every", "save_every",
"lr_decay_every", "lr_decay_by", "inference_premise",
"inference_hypothesis"])
inference_premise = inference_sentences[0] if inference_sentences else None
inference_hypothesis = inference_sentences[1] if inference_sentences else None
return config_tuple(
d_hidden=d_embed,
d_proj=d_embed * 2,
d_tracker=8,
predict=False,
embed_dropout=0.1,
mlp_dropout=0.1,
n_mlp_layers=2,
d_mlp=32,
d_out=d_out,
projection=True,
lr=2e-2,
batch_size=2,
epochs=20,
force_cpu=False,
logdir=logdir,
log_every=1,
dev_every=2,
save_every=2,
lr_decay_every=1,
lr_decay_by=0.75,
inference_premise=inference_premise,
inference_hypothesis=inference_hypothesis)
class SpinnTest(test_util.TensorFlowTestCase):
def setUp(self):
super(SpinnTest, self).setUp()
self._test_device = "gpu:0" if tfe.num_gpus() else "cpu:0"
self._temp_data_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self._temp_data_dir)
super(SpinnTest, self).tearDown()
def testBundle(self):
with tf.device(self._test_device):
lstm_iter = [np.array([[0, 1], [2, 3]], dtype=np.float32),
np.array([[0, -1], [-2, -3]], dtype=np.float32),
np.array([[0, 2], [4, 6]], dtype=np.float32),
np.array([[0, -2], [-4, -6]], dtype=np.float32)]
out = spinn._bundle(lstm_iter)
self.assertEqual(2, len(out))
self.assertEqual(tf.float32, out[0].dtype)
self.assertEqual(tf.float32, out[1].dtype)
self.assertAllEqual(np.array([[0, 2, 0, -2, 0, 4, 0, -4]]).T,
out[0].numpy())
self.assertAllEqual(np.array([[1, 3, -1, -3, 2, 6, -2, -6]]).T,
out[1].numpy())
def testUnbundle(self):
with tf.device(self._test_device):
state = [np.array([[0, 1, 2], [3, 4, 5]], dtype=np.float32),
np.array([[0, -1, -2], [-3, -4, -5]], dtype=np.float32)]
out = spinn._unbundle(state)
self.assertEqual(2, len(out))
self.assertEqual(tf.float32, out[0].dtype)
self.assertEqual(tf.float32, out[1].dtype)
self.assertAllEqual(np.array([[0, 1, 2, 0, -1, -2]]),
out[0].numpy())
self.assertAllEqual(np.array([[3, 4, 5, -3, -4, -5]]),
out[1].numpy())
def testReducer(self):
with tf.device(self._test_device):
batch_size = 3
size = 10
tracker_size = 8
reducer = spinn.Reducer(size, tracker_size=tracker_size)
left_in = []
right_in = []
tracking = []
for _ in range(batch_size):
left_in.append(tf.random_normal((1, size * 2)))
right_in.append(tf.random_normal((1, size * 2)))
tracking.append(tf.random_normal((1, tracker_size * 2)))
out = reducer(left_in, right_in, tracking=tracking)
self.assertEqual(batch_size, len(out))
self.assertEqual(tf.float32, out[0].dtype)
self.assertEqual((1, size * 2), out[0].shape)
def testReduceTreeLSTM(self):
with tf.device(self._test_device):
size = 10
tracker_size = 8
reducer = spinn.Reducer(size, tracker_size=tracker_size)
lstm_in = np.array([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[0, -1, -2, -3, -4, -5, -6, -7, -8, -9]],
dtype=np.float32)
c1 = np.array([[0, 1], [2, 3]], dtype=np.float32)
c2 = np.array([[0, -1], [-2, -3]], dtype=np.float32)
h, c = reducer._tree_lstm(c1, c2, lstm_in)
self.assertEqual(tf.float32, h.dtype)
self.assertEqual(tf.float32, c.dtype)
self.assertEqual((2, 2), h.shape)
self.assertEqual((2, 2), c.shape)
def testTracker(self):
with tf.device(self._test_device):
batch_size = 2
size = 10
tracker_size = 8
buffer_length = 18
stack_size = 3
tracker = spinn.Tracker(tracker_size, False)
tracker.reset_state()
# Create dummy inputs for testing.
bufs = []
buf = []
for _ in range(buffer_length):
buf.append(tf.random_normal((batch_size, size * 2)))
bufs.append(buf)
self.assertEqual(1, len(bufs))
self.assertEqual(buffer_length, len(bufs[0]))
self.assertEqual((batch_size, size * 2), bufs[0][0].shape)
stacks = []
stack = []
for _ in range(stack_size):
stack.append(tf.random_normal((batch_size, size * 2)))
stacks.append(stack)
self.assertEqual(1, len(stacks))
self.assertEqual(3, len(stacks[0]))
self.assertEqual((batch_size, size * 2), stacks[0][0].shape)
for _ in range(2):
out1, out2 = tracker(bufs, stacks)
self.assertIsNone(out2)
self.assertEqual(batch_size, len(out1))
self.assertEqual(tf.float32, out1[0].dtype)
self.assertEqual((1, tracker_size * 2), out1[0].shape)
self.assertEqual(tf.float32, tracker.state.c.dtype)
self.assertEqual((batch_size, tracker_size), tracker.state.c.shape)
self.assertEqual(tf.float32, tracker.state.h.dtype)
self.assertEqual((batch_size, tracker_size), tracker.state.h.shape)
def testSPINN(self):
with tf.device(self._test_device):
embedding_dims = 10
d_tracker = 8
sequence_length = 15
num_transitions = 27
config_tuple = collections.namedtuple(
"Config", ["d_hidden", "d_proj", "d_tracker", "predict"])
config = config_tuple(
embedding_dims, embedding_dims * 2, d_tracker, False)
s = spinn.SPINN(config)
# Create some fake data.
buffers = tf.random_normal((sequence_length, 1, config.d_proj))
transitions = tf.constant(
[[3], [3], [2], [3], [3], [3], [2], [2], [2], [3], [3], [3],
[2], [3], [3], [2], [2], [3], [3], [3], [2], [2], [2], [2],
[3], [2], [2]], dtype=tf.int64)
self.assertEqual(tf.int64, transitions.dtype)
self.assertEqual((num_transitions, 1), transitions.shape)
out = s(buffers, transitions, training=True)
self.assertEqual(tf.float32, out.dtype)
self.assertEqual((1, embedding_dims), out.shape)
def testSNLIClassifierAndTrainer(self):
with tf.device(self._test_device):
vocab_size = 40
batch_size = 2
d_embed = 10
sequence_length = 15
d_out = 4
config = _test_spinn_config(d_embed, d_out)
# Create fake embedding matrix.
embed = tf.random_normal((vocab_size, d_embed))
model = spinn.SNLIClassifier(config, embed)
trainer = spinn.SNLIClassifierTrainer(model, config.lr)
(labels, prem, prem_trans, hypo,
hypo_trans) = _generate_synthetic_snli_data_batch(sequence_length,
batch_size,
vocab_size)
# Invoke model under non-training mode.
logits = model(prem, prem_trans, hypo, hypo_trans, training=False)
self.assertEqual(tf.float32, logits.dtype)
self.assertEqual((batch_size, d_out), logits.shape)
# Invoke model under training model.
logits = model(prem, prem_trans, hypo, hypo_trans, training=True)
self.assertEqual(tf.float32, logits.dtype)
self.assertEqual((batch_size, d_out), logits.shape)
# Calculate loss.
loss1 = trainer.loss(labels, logits)
self.assertEqual(tf.float32, loss1.dtype)
self.assertEqual((), loss1.shape)
loss2, logits = trainer.train_batch(
labels, prem, prem_trans, hypo, hypo_trans)
self.assertEqual(tf.float32, loss2.dtype)
self.assertEqual((), loss2.shape)
self.assertEqual(tf.float32, logits.dtype)
self.assertEqual((batch_size, d_out), logits.shape)
# Training on the batch should have led to a change in the loss value.
self.assertNotEqual(loss1.numpy(), loss2.numpy())
def _create_test_data(self, snli_1_0_dir):
fake_train_file = os.path.join(snli_1_0_dir, "snli_1.0_train.txt")
os.makedirs(snli_1_0_dir)
# Four sentences in total.
with open(fake_train_file, "wt") as f:
f.write("gold_label\tsentence1_binary_parse\tsentence2_binary_parse\t"
"sentence1_parse\tsentence2_parse\tsentence1\tsentence2\t"
"captionID\tpairID\tlabel1\tlabel2\tlabel3\tlabel4\tlabel5\n")
f.write("neutral\t( ( Foo bar ) . )\t( ( foo . )\t"
"DummySentence1Parse\tDummySentence2Parse\t"
"Foo bar.\tfoo baz.\t"
"4705552913.jpg#2\t4705552913.jpg#2r1n\t"
"neutral\tentailment\tneutral\tneutral\tneutral\n")
f.write("contradiction\t( ( Bar foo ) . )\t( ( baz . )\t"
"DummySentence1Parse\tDummySentence2Parse\t"
"Foo bar.\tfoo baz.\t"
"4705552913.jpg#2\t4705552913.jpg#2r1n\t"
"neutral\tentailment\tneutral\tneutral\tneutral\n")
f.write("entailment\t( ( Quux quuz ) . )\t( ( grault . )\t"
"DummySentence1Parse\tDummySentence2Parse\t"
"Foo bar.\tfoo baz.\t"
"4705552913.jpg#2\t4705552913.jpg#2r1n\t"
"neutral\tentailment\tneutral\tneutral\tneutral\n")
f.write("entailment\t( ( Quuz quux ) . )\t( ( garply . )\t"
"DummySentence1Parse\tDummySentence2Parse\t"
"Foo bar.\tfoo baz.\t"
"4705552913.jpg#2\t4705552913.jpg#2r1n\t"
"neutral\tentailment\tneutral\tneutral\tneutral\n")
glove_dir = os.path.join(self._temp_data_dir, "glove")
os.makedirs(glove_dir)
glove_file = os.path.join(glove_dir, "glove.42B.300d.txt")
words = [".", "foo", "bar", "baz", "quux", "quuz", "grault", "garply"]
with open(glove_file, "wt") as f:
for i, word in enumerate(words):
f.write("%s " % word)
for j in range(data.WORD_VECTOR_LEN):
f.write("%.5f" % (i * 0.1))
if j < data.WORD_VECTOR_LEN - 1:
f.write(" ")
else:
f.write("\n")
return fake_train_file
def testInferSpinnWorks(self):
"""Test inference with the spinn model."""
snli_1_0_dir = os.path.join(self._temp_data_dir, "snli/snli_1.0")
self._create_test_data(snli_1_0_dir)
vocab = data.load_vocabulary(self._temp_data_dir)
word2index, embed = data.load_word_vectors(self._temp_data_dir, vocab)
config = _test_spinn_config(
data.WORD_VECTOR_LEN, 4,
logdir=os.path.join(self._temp_data_dir, "logdir"),
inference_sentences=("( foo ( bar . ) )", "( bar ( foo . ) )"))
logits = spinn.train_or_infer_spinn(
embed, word2index, None, None, None, config)
self.assertEqual(tf.float32, logits.dtype)
self.assertEqual((3,), logits.shape)
def testInferSpinnThrowsErrorIfOnlyOneSentenceIsSpecified(self):
snli_1_0_dir = os.path.join(self._temp_data_dir, "snli/snli_1.0")
self._create_test_data(snli_1_0_dir)
vocab = data.load_vocabulary(self._temp_data_dir)
word2index, embed = data.load_word_vectors(self._temp_data_dir, vocab)
config = _test_spinn_config(
data.WORD_VECTOR_LEN, 4,
logdir=os.path.join(self._temp_data_dir, "logdir"),
inference_sentences=("( foo ( bar . ) )", None))
with self.assertRaises(ValueError):
spinn.train_or_infer_spinn(embed, word2index, None, None, None, config)
def testTrainSpinn(self):
"""Test with fake toy SNLI data and GloVe vectors."""
# 1. Create and load a fake SNLI data file and a fake GloVe embedding file.
snli_1_0_dir = os.path.join(self._temp_data_dir, "snli/snli_1.0")
fake_train_file = self._create_test_data(snli_1_0_dir)
vocab = data.load_vocabulary(self._temp_data_dir)
word2index, embed = data.load_word_vectors(self._temp_data_dir, vocab)
train_data = data.SnliData(fake_train_file, word2index)
dev_data = data.SnliData(fake_train_file, word2index)
test_data = data.SnliData(fake_train_file, word2index)
# 2. Create a fake config.
config = _test_spinn_config(
data.WORD_VECTOR_LEN, 4,
logdir=os.path.join(self._temp_data_dir, "logdir"))
# 3. Test training of a SPINN model.
trainer = spinn.train_or_infer_spinn(
embed, word2index, train_data, dev_data, test_data, config)
# 4. Load train loss values from the summary files and verify that they
# decrease with training.
summary_file = glob.glob(os.path.join(config.logdir, "events.out.*"))[0]
events = summary_test_util.events_from_file(summary_file)
train_losses = [event.summary.value[0].simple_value for event in events
if event.summary.value
and event.summary.value[0].tag == "train/loss"]
self.assertEqual(config.epochs, len(train_losses))
# 5. Verify that checkpoints exist and contains all the expected variables.
self.assertTrue(glob.glob(os.path.join(config.logdir, "ckpt*")))
object_graph = checkpointable_utils.object_metadata(
checkpoint_management.latest_checkpoint(config.logdir))
ckpt_variable_names = set()
for node in object_graph.nodes:
for attribute in node.attributes:
ckpt_variable_names.add(attribute.full_name)
self.assertIn("global_step", ckpt_variable_names)
for v in trainer.variables:
variable_name = v.name[:v.name.index(":")] if ":" in v.name else v.name
self.assertIn(variable_name, ckpt_variable_names)
class EagerSpinnSNLIClassifierBenchmark(test.Benchmark):
def benchmarkEagerSpinnSNLIClassifier(self):
test_device = "gpu:0" if tfe.num_gpus() else "cpu:0"
with tf.device(test_device):
burn_in_iterations = 2
benchmark_iterations = 10
vocab_size = 1000
batch_size = 128
sequence_length = 15
d_embed = 200
d_out = 4
embed = tf.random_normal((vocab_size, d_embed))
config = _test_spinn_config(d_embed, d_out)
model = spinn.SNLIClassifier(config, embed)
trainer = spinn.SNLIClassifierTrainer(model, config.lr)
(labels, prem, prem_trans, hypo,
hypo_trans) = _generate_synthetic_snli_data_batch(sequence_length,
batch_size,
vocab_size)
for _ in range(burn_in_iterations):
trainer.train_batch(labels, prem, prem_trans, hypo, hypo_trans)
gc.collect()
start_time = time.time()
for _ in xrange(benchmark_iterations):
trainer.train_batch(labels, prem, prem_trans, hypo, hypo_trans)
wall_time = time.time() - start_time
# Named "examples"_per_sec to conform with other benchmarks.
extras = {"examples_per_sec": benchmark_iterations / wall_time}
self.report_benchmark(
name="Eager_SPINN_SNLIClassifier_Benchmark",
iters=benchmark_iterations,
wall_time=wall_time,
extras=extras)
if __name__ == "__main__":
test.main()
|
cherrygirl/micronaet7
|
refs/heads/master
|
pricelist_model/wizard/print_report_wizard.py
|
1
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import openerp.netsvc
import logging
from openerp.osv import osv, fields
from datetime import datetime, timedelta
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP, float_compare
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class product_pricelist_report_wizard(osv.osv_memory):
""" Print pricelist wizard
"""
_name = "product.pricelist.report.wizard"
_description = "Print pricelist wizard"
_columns = {
'description': fields.char('Description', size=100),
'pricelist_id': fields.many2one('product.pricelist', 'Print pricelist',
required=True, help="Choose pricelist to print"),
'partner_id': fields.many2one('res.partner', 'Customer',
required=False, help="For type partner need to select"),
'type': fields.selection([
('product', 'Product selected'),
('category', 'Category list'),
('partner', 'Partner product'),
], 'Type', select=True),
'structured': fields.boolean('Structured',
help="List split into categories"),
'commented': fields.boolean('Commented',
help="Add a comment to the report describing the filters used"),
'with_category': fields.boolean('With category',
help="Include extra category info for each product"),
'with_cost': fields.boolean('With cost indication',
help="Print the cost of each product and its % of the total"),
'with_bom': fields.boolean('With BOM indication',
help="Print BOM for cost computation"),
'decimal': fields.integer('Decimal', required=True),
}
_defaults = {
'type': lambda *x: 'product',
'structured': lambda *x: False,
'commented': lambda *x: False,
'with_category': lambda *x: False,
'with_bom': lambda *x: False,
'decimal': lambda *x: 3,
}
def print_report(self, cr, uid, ids, context = None):
"""
Print with selected parameters
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of ids
@return: Action dictionary
"""
if context is None:
context = {}
datas = {}
wiz_browse = self.browse(cr, uid, ids[0], context=context)
datas['description'] = wiz_browse.description
datas['pricelist_id'] = wiz_browse.pricelist_id.id
datas['type'] = wiz_browse.type
datas['partner_id'] = wiz_browse.partner_id.id if wiz_browse.partner_id else False
datas['structured'] = wiz_browse.structured
datas['commented'] = wiz_browse.commented
datas['with_category'] = wiz_browse.with_category
datas['category_ids'] = []
datas['decimal'] = wiz_browse.decimal
datas['with_cost'] = wiz_browse.with_cost
datas['with_bom'] = wiz_browse.with_bom
for category in wiz_browse.category_ids:
datas['category_ids'].append(
(category.category_id.id, category.with_child, category.all))
return { # action report
'type': 'ir.actions.report.xml',
'report_name': "aeroo_pricelist_model_report",
'datas': datas,
}
class product_pricelist_report_category(osv.osv_memory):
""" List of category to print
"""
_name = "product.pricelist.report.category"
_description = "Category list for print"
_rec_name = 'wizard_id'
_columns = {
'wizard_id': fields.many2one('product.pricelist.report.wizard',
'Wizard'),
'category_id': fields.many2one('product.category', 'Category',
required=True, help="Category for print in pricelist"),
'with_child': fields.boolean('With child'),
'all': fields.boolean('All product'),
}
_defaults = {
'with_child': lambda *a: True,
'all': lambda *a: False,
}
class product_pricelist_report_wizard(osv.osv_memory):
""" Print pricelist wizard
"""
_inherit = "product.pricelist.report.wizard"
_columns = {
'category_ids': fields.one2many('product.pricelist.report.category',
'wizard_id', 'Category'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
teochenglim/ansible-modules-extras
|
refs/heads/devel
|
cloud/amazon/ec2_vpc_nacl_facts.py
|
41
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_vpc_nacl_facts
short_description: Gather facts about Network ACLs in an AWS VPC
description:
- Gather facts about Network ACLs in an AWS VPC
version_added: "2.2"
author: "Brad Davidson (@brandond)"
requires: [ boto3 ]
options:
nacl_ids:
description:
- A list of Network ACL IDs to retrieve facts about.
required: false
default: []
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See \
U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkAcls.html) for possible filters. Filter \
names and values are case sensitive.
required: false
default: {}
notes:
- By default, the module will return all Network ACLs.
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather facts about all Network ACLs:
- name: Get All NACLs
register: all_nacls
ec2_vpc_nacl_facts:
region: us-west-2
# Retrieve default Network ACLs:
- name: Get Default NACLs
register: default_nacls
ec2_vpc_nacl_facts:
region: us-west-2
filters:
'default': 'true'
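# Gather facts about NACLs attached to one VPC (illustrative values;
# 'vpc-id' is a standard DescribeNetworkAcls filter):
- name: Get NACLs for a VPC
  register: vpc_nacls
  ec2_vpc_nacl_facts:
    region: us-west-2
    filters:
      'vpc-id': 'vpc-112233'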
'''
RETURN = '''
nacl:
description: Returns an array of complex objects as described below.
returned: success
type: list of complex
contains:
nacl_id:
description: The ID of the Network Access Control List.
returned: always
type: string
vpc_id:
description: The ID of the VPC that the NACL is attached to.
returned: always
type: string
is_default:
description: True if the NACL is the default for its VPC.
returned: always
type: boolean
tags:
description: A dict of tags associated with the NACL.
returned: always
type: dict
subnets:
description: A list of subnet IDs that are associated with the NACL.
returned: always
type: list of string
ingress:
description: A list of NACL ingress rules.
returned: always
type: list of list
egress:
description: A list of NACL egress rules.
returned: always
type: list of list
'''
try:
import boto3
from botocore.exceptions import ClientError, NoCredentialsError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
# VPC-supported IANA protocol numbers
# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
PROTOCOL_NAMES = {'-1': 'all', '1': 'icmp', '6': 'tcp', '17': 'udp'}
def list_ec2_vpc_nacls(connection, module):
nacl_ids = module.params.get("nacl_ids")
filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
try:
nacls = connection.describe_network_acls(NetworkAclIds=nacl_ids, Filters=filters)
except (ClientError, NoCredentialsError) as e:
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
# Turn the boto3 result into ansible_friendly_snaked_names
snaked_nacls = []
for nacl in nacls['NetworkAcls']:
snaked_nacls.append(camel_dict_to_snake_dict(nacl))
# Turn the boto3 result into an ansible-friendly tag dictionary
for nacl in snaked_nacls:
if 'tags' in nacl:
nacl['tags'] = boto3_tag_list_to_ansible_dict(nacl['tags'])
if 'entries' in nacl:
nacl['egress'] = [nacl_entry_to_list(e) for e in nacl['entries']
if e['rule_number'] != 32767 and e['egress']]
nacl['ingress'] = [nacl_entry_to_list(e) for e in nacl['entries']
if e['rule_number'] != 32767 and not e['egress']]
del nacl['entries']
if 'associations' in nacl:
nacl['subnets'] = [a['subnet_id'] for a in nacl['associations']]
del nacl['associations']
if 'network_acl_id' in nacl:
nacl['nacl_id'] = nacl['network_acl_id']
del nacl['network_acl_id']
module.exit_json(nacls=snaked_nacls)
def nacl_entry_to_list(entry):
elist = [entry['rule_number'],
PROTOCOL_NAMES[entry['protocol']],
entry['rule_action'],
entry['cidr_block']
]
if entry['protocol'] == '1':
elist = elist + [-1, -1]
else:
elist = elist + [None, None, None, None]
if 'icmp_type_code' in entry:
elist[4] = entry['icmp_type_code']['type']
elist[5] = entry['icmp_type_code']['code']
if 'port_range' in entry:
elist[6] = entry['port_range']['from']
elist[7] = entry['port_range']['to']
return elist
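# Shape of the produced rows (illustrative): a TCP entry such as
#
#     {'rule_number': 100, 'protocol': '6', 'rule_action': 'allow',
#      'cidr_block': '0.0.0.0/0', 'port_range': {'from': 22, 'to': 22}}
#
# becomes [100, 'tcp', 'allow', '0.0.0.0/0', None, None, 22, 22].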
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
nacl_ids=dict(default=[], type='list'),
filters=dict(default={}, type='dict')
)
)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=[
['nacl_ids', 'filters']
]
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if region:
connection = boto3_conn(module, conn_type='client', resource='ec2',
region=region, endpoint=ec2_url, **aws_connect_params)
else:
module.fail_json(msg="region must be specified")
list_ec2_vpc_nacls(connection, module)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
|
GdZ/scriptfile
|
refs/heads/master
|
software/googleAppEngine/lib/yaml/lib/yaml/error.py
|
692
|
__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
class Mark(object):
def __init__(self, name, index, line, column, buffer, pointer):
self.name = name
self.index = index
self.line = line
self.column = column
self.buffer = buffer
self.pointer = pointer
def get_snippet(self, indent=4, max_length=75):
if self.buffer is None:
return None
head = ''
start = self.pointer
while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029':
start -= 1
if self.pointer-start > max_length/2-1:
head = ' ... '
start += 5
break
tail = ''
end = self.pointer
while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029':
end += 1
if end-self.pointer > max_length/2-1:
tail = ' ... '
end -= 5
break
snippet = self.buffer[start:end].encode('utf-8')
return ' '*indent + head + snippet + tail + '\n' \
+ ' '*(indent+self.pointer-start+len(head)) + '^'
def __str__(self):
snippet = self.get_snippet()
where = " in \"%s\", line %d, column %d" \
% (self.name, self.line+1, self.column+1)
if snippet is not None:
where += ":\n"+snippet
return where
class YAMLError(Exception):
pass
class MarkedYAMLError(YAMLError):
def __init__(self, context=None, context_mark=None,
problem=None, problem_mark=None, note=None):
self.context = context
self.context_mark = context_mark
self.problem = problem
self.problem_mark = problem_mark
self.note = note
def __str__(self):
lines = []
if self.context is not None:
lines.append(self.context)
if self.context_mark is not None \
and (self.problem is None or self.problem_mark is None
or self.context_mark.name != self.problem_mark.name
or self.context_mark.line != self.problem_mark.line
or self.context_mark.column != self.problem_mark.column):
lines.append(str(self.context_mark))
if self.problem is not None:
lines.append(self.problem)
if self.problem_mark is not None:
lines.append(str(self.problem_mark))
if self.note is not None:
lines.append(self.note)
return '\n'.join(lines)
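# How the pieces compose (illustrative):
#
#     mark = Mark('<string>', 5, 0, 5, u'key: [1, 2', 5)
#     err = MarkedYAMLError(context='while parsing a flow sequence',
#                           context_mark=mark,
#                           problem="expected ',' or ']'",
#                           problem_mark=mark)
#     str(err)  # context + problem + one mark with a caret snippet
#               # (the context mark is suppressed when it equals the
#               # problem mark)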
|
Ujjwal29/ansible
|
refs/heads/devel
|
test/units/utils/test_vars.py
|
155
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2015, Toshio Kuraotmi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from collections import defaultdict
from ansible.compat.tests import mock, unittest
from ansible.errors import AnsibleError
from ansible.utils.vars import combine_vars, merge_hash
class TestVariableUtils(unittest.TestCase):
test_merge_data = (
dict(
a=dict(a=1),
b=dict(b=2),
result=dict(a=1, b=2)
),
dict(
a=dict(a=1, c=dict(foo='bar')),
b=dict(b=2, c=dict(baz='bam')),
result=dict(a=1, b=2, c=dict(foo='bar', baz='bam'))
),
dict(
a=defaultdict(a=1, c=defaultdict(foo='bar')),
b=dict(b=2, c=dict(baz='bam')),
result=defaultdict(a=1, b=2, c=defaultdict(foo='bar', baz='bam'))
),
)
test_replace_data = (
dict(
a=dict(a=1),
b=dict(b=2),
result=dict(a=1, b=2)
),
dict(
a=dict(a=1, c=dict(foo='bar')),
b=dict(b=2, c=dict(baz='bam')),
result=dict(a=1, b=2, c=dict(baz='bam'))
),
dict(
a=defaultdict(a=1, c=dict(foo='bar')),
b=dict(b=2, c=defaultdict(baz='bam')),
result=defaultdict(a=1, b=2, c=defaultdict(baz='bam'))
),
)
def setUp(self):
pass
def tearDown(self):
pass
def test_merge_hash(self):
for test in self.test_merge_data:
self.assertEqual(merge_hash(test['a'], test['b']), test['result'])
def test_improper_args(self):
with mock.patch('ansible.constants.DEFAULT_HASH_BEHAVIOUR', 'replace'):
with self.assertRaises(AnsibleError):
combine_vars([1, 2, 3], dict(a=1))
with self.assertRaises(AnsibleError):
combine_vars(dict(a=1), [1, 2, 3])
with mock.patch('ansible.constants.DEFAULT_HASH_BEHAVIOUR', 'merge'):
with self.assertRaises(AnsibleError):
combine_vars([1, 2, 3], dict(a=1))
with self.assertRaises(AnsibleError):
combine_vars(dict(a=1), [1, 2, 3])
def test_combine_vars_replace(self):
with mock.patch('ansible.constants.DEFAULT_HASH_BEHAVIOUR', 'replace'):
for test in self.test_replace_data:
self.assertEqual(combine_vars(test['a'], test['b']), test['result'])
def test_combine_vars_merge(self):
with mock.patch('ansible.constants.DEFAULT_HASH_BEHAVIOUR', 'merge'):
for test in self.test_merge_data:
self.assertEqual(combine_vars(test['a'], test['b']), test['result'])
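# Behaviour at a glance (illustrative): with DEFAULT_HASH_BEHAVIOUR='merge',
# nested dicts are deep-merged,
#
#     combine_vars({'c': {'foo': 'bar'}}, {'c': {'baz': 'bam'}})
#     # -> {'c': {'foo': 'bar', 'baz': 'bam'}}
#
# while 'replace' keeps only the right-hand nested dict for 'c'.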
|
sodexis/odoo
|
refs/heads/8.0
|
addons/web_graph/__init__.py
|
1350
|
import controllers
|
abhishekmurthy/Calligra
|
refs/heads/master
|
plan/plugins/scripting/tests/resource_readwrite.py
|
7
|
#!/usr/bin/env kross
# -*- coding: utf-8 -*-
import traceback
import Kross
import Plan
import TestResult
TestResult.setResult( True )
asserttext1 = "Test of property '{0}' failed:\n Expected: '{2}'\n Result: '{1}'"
asserttext2 = "Failed to set property '{0}' to '{1}'. Result: {2}"
try:
project = Plan.project()
assert project is not None, "Project not found"
group1 = project.createResourceGroup()
assert group1 is not None, "Failed to create resource group"
property = 'Name'
data = "G1"
res = project.setData(group1, property, data)
text = asserttext2.format(property, data, res)
assert res == 'Success', text
result = project.data(group1, property)
text = asserttext1.format(property, result, data)
assert result == data, text
property = 'Type'
data = "Material"
res = project.setData(group1, property, data)
text = asserttext2.format(property, data, res)
assert res == 'Success', text
result = project.data(group1, property)
text = asserttext1.format(property, result, data)
assert result == data, text
group2 = project.createResourceGroup()
assert group2 is not None, "Failed to create resource group"
props = project.resourcePropertyList()
for p in props:
data = project.data(group1, p, 'ProgramRole')
res = project.setData(group2, p, data)
if res != 'ReadOnly':
            text = asserttext2.format(p, data, res)
assert res == 'Success', text
r1 = project.createResource( group1 )
assert r1 is not None, "Could not create resource"
property = 'Name'
data = "R1"
res = project.setData(r1, property, data)
text = asserttext2.format(property, data, res)
assert res == 'Success', text
property = 'Type'
data = "Material"
before = project.data(r1, property)
res = project.setData(r1, property, data)
text = asserttext2.format(property, data, res)
assert res == 'Success', text
property = 'Initials'
data = "RR"
res = project.setData(r1, property, data)
text = asserttext2.format(property, data, res)
assert res == 'Success', text
property = 'Email'
data = "R1@work.org"
res = project.setData(r1, property, data)
text = asserttext2.format(property, data, res)
assert res == 'Success', text
property = 'Limit'
data = 10
res = project.setData(r1, property, data)
text = asserttext2.format(property, data, res)
assert res == 'Success', text
property = 'AvailableFrom'
data = "2011-07-01T08:00:00"
res = project.setData(r1, property, data)
text = asserttext2.format(property, data, res)
assert res == 'Success', text
property = 'AvailableUntil'
data = "2011-07-02T08:00:00"
res = project.setData(r1, property, data)
text = asserttext2.format(property, data, res)
assert res == 'Success', text
property = 'NormalRate'
data = 111
res = project.setData(r1, property, data)
text = asserttext2.format(property, data, res)
assert res == 'Success', text
property = 'OvertimeRate'
data = 222
res = project.setData(r1, property, data)
text = asserttext2.format(property, data, res)
assert res == 'Success', text
account = project.createAccount(0)
    property = 'Name'
    data = 'A1'
    assert account is not None, "Failed to create account"
    res = project.setData(account, property, data)
    text = asserttext2.format(property, data, res)
assert res == 'Success', text
property = 'Account'
res = project.setData(r1, property, data)
text = asserttext2.format(property, data, res)
assert res == 'Success', text
r2 = project.createResource( group2 )
assert r2 is not None, "Could not create resource"
for p in props:
data = project.data(r1, p, 'ProgramRole')
res = project.setData(r2, p, data)
if res != 'ReadOnly':
            text = asserttext2.format(p, data, res)
assert res == 'Success', text
except:
TestResult.setResult( False )
TestResult.setMessage("\n" + traceback.format_exc(1))
|
zanderle/django
|
refs/heads/master
|
django/contrib/flatpages/forms.py
|
357
|
from django import forms
from django.conf import settings
from django.contrib.flatpages.models import FlatPage
from django.utils.translation import ugettext, ugettext_lazy as _
class FlatpageForm(forms.ModelForm):
url = forms.RegexField(label=_("URL"), max_length=100, regex=r'^[-\w/\.~]+$',
help_text=_("Example: '/about/contact/'. Make sure to have leading"
" and trailing slashes."),
error_messages={
"invalid": _("This value must contain only letters, numbers,"
" dots, underscores, dashes, slashes or tildes."),
},
)
class Meta:
model = FlatPage
fields = '__all__'
def clean_url(self):
url = self.cleaned_data['url']
if not url.startswith('/'):
raise forms.ValidationError(
ugettext("URL is missing a leading slash."),
code='missing_leading_slash',
)
if (settings.APPEND_SLASH and
'django.middleware.common.CommonMiddleware' in settings.MIDDLEWARE_CLASSES and
not url.endswith('/')):
raise forms.ValidationError(
ugettext("URL is missing a trailing slash."),
code='missing_trailing_slash',
)
return url
def clean(self):
url = self.cleaned_data.get('url')
sites = self.cleaned_data.get('sites')
same_url = FlatPage.objects.filter(url=url)
if self.instance.pk:
same_url = same_url.exclude(pk=self.instance.pk)
if sites and same_url.filter(sites__in=sites).exists():
for site in sites:
if same_url.filter(sites=site).exists():
raise forms.ValidationError(
_('Flatpage with url %(url)s already exists for site %(site)s'),
code='duplicate_url',
params={'url': url, 'site': site},
)
return super(FlatpageForm, self).clean()
|
yamateh/robotframework
|
refs/heads/master
|
src/robot/utils/unic.py
|
1
|
# Copyright 2008-2013 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# Need different unic implementations for different Pythons because:
# 1) Importing unicodedata module on Jython takes a very long time, and doesn't
# seem to be necessary as Java probably already handles normalization.
# Furthermore, Jython on Java 1.5 doesn't even have unicodedata.normalize.
# 2) IronPython 2.6 doesn't have unicodedata and probably doesn't need it.
# 3) CPython doesn't automatically normalize Unicode strings.
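# For example, under NFC the decomposed sequence u'e\u0301' ('e' plus a
# combining acute accent) normalizes to the single code point u'\xe9', so on
# CPython unic(u'e\u0301') == unic(u'\xe9').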
if sys.platform.startswith('java'):
from java.lang import Object, Class
def unic(item, *args):
# http://bugs.jython.org/issue1564
if isinstance(item, Object) and not isinstance(item, Class):
try:
item = item.toString() # http://bugs.jython.org/issue1563
except:
return _unrepresentable_object(item)
return _unic(item, *args)
elif sys.platform == 'cli':
def unic(item, *args):
return _unic(item, *args)
else:
from unicodedata import normalize
def unic(item, *args):
return normalize('NFC', _unic(item, *args))
def _unic(item, *args):
# Based on a recipe from http://code.activestate.com/recipes/466341
try:
return unicode(item, *args)
except UnicodeError:
try:
return u''.join(c if ord(c) < 128 else c.encode('string_escape')
for c in str(item))
except:
return _unrepresentable_object(item)
except:
return _unrepresentable_object(item)
def safe_repr(item):
try:
return unic(repr(item))
except UnicodeError:
return repr(unic(item))
except:
return _unrepresentable_object(item)
if sys.platform == 'cli':
# IronPython omits `u` prefix from `repr(u'foo')`. We add it back to have
# consistent and easier to test log messages.
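    # e.g. on IronPython repr(u'foo') yields "'foo'"; the wrapper below
    # restores the CPython-style "u'foo'".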
_safe_repr = safe_repr
def safe_repr(item):
if isinstance(item, list):
return '[%s]' % ', '.join(safe_repr(i) for i in item)
ret = _safe_repr(item)
if isinstance(item, unicode) and not ret.startswith('u'):
ret = 'u' + ret
return ret
def _unrepresentable_object(item):
from robot.utils.error import get_error_message
return u"<Unrepresentable object '%s'. Error: %s>" \
% (item.__class__.__name__, get_error_message())
|
aewallin/opencamlib
|
refs/heads/master
|
src/attic/oct_test3.py
|
1
|
import ocl as cam
import camvtk
import time
import vtk
import math
import datetime
red= (1,0,0)
green= (0,1,0)
blue= (0,0,1)
cyan= (0,1,1)
yellow= (1,1,0)
pink = ( float(255)/255,float(192)/255,float(203)/255)
grey = ( float(127)/255,float(127)/255,float(127)/255)
orange = ( float(255)/255,float(165)/255,float(0)/255)
#OCType = Enum('black', 'grey', 'white')
OCTMax = 8
def buildOCTree(volume, nodecenter=cam.Point(0,0,0), level=0):
# build octree of volume, return root node
node = OCTNode( level, center = nodecenter , type = 1, childlist=None)
flags = []
for n in range(0,9): # test all points
flags.append( volume.isInside( node.nodePoint(n) ) )
if (sum(flags) == 0): # nothing is inside
node.type = 0
#print "nothing inside!"
return node
if (sum(flags) == 9): # everything is inside
node.type = 2
#print "all inside!"
return node
if level== OCTMax: # reached max levels
return node #OCTNode(level, center= nodecenter, type = 2, childlist = None)
# have to subdivide:
childs = []
child_centers = []
for n in range(1,9):
child_center = node.childCenter(n)
childs.append( buildOCTree( volume , nodecenter = child_center, level= level+1) )
node.setChildren(childs)
return node
def searchOCTree(node, list):
# return list of nodes in the whole tree starting at node
if node.children is not None:
for chi in node.children:
searchOCTree(chi, list)
else:
list.append(node)
class Volume():
def __init__(self):
self.center = cam.Point(0,0,0)
self.radius = 0.45
def isInside(self, point):
p = point - self.center
if p.norm() < self.radius:
return 1
else:
return 0
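# Example: Volume() is a sphere of radius 0.45 centred at the origin, so
# Volume().isInside(cam.Point(0.1, 0, 0)) returns 1 and
# Volume().isInside(cam.Point(1, 0, 0)) returns 0.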
def nodeColor(oct):
offset = 2
n = oct.level-offset
return (float(n)/(OCTMax-offset), float(OCTMax-offset - n)/(OCTMax-offset), 0)
def drawNode(myscreen, node):
if node.type == cam.OCType.BLACK:
return # don't draw intermediate nodes
if node.type == cam.OCType.GREY:
return # don't draw intermediate nodes
p = []
for n in range(1,9):
p1 = node.nodePoint(n)
p.append(p1)
lines = []
lines.append ( camvtk.Line(p1=(p[0].x,p[0].y,p[0].z),p2=(p[1].x,p[1].y,p[1].z)) )
lines.append ( camvtk.Line(p1=(p[0].x,p[0].y,p[0].z),p2=(p[2].x,p[2].y,p[2].z)) )
lines.append ( camvtk.Line(p1=(p[0].x,p[0].y,p[0].z),p2=(p[3].x,p[3].y,p[3].z)) )
lines.append ( camvtk.Line(p1=(p[2].x,p[2].y,p[2].z),p2=(p[4].x,p[4].y,p[4].z)) )
lines.append ( camvtk.Line(p1=(p[1].x,p[1].y,p[1].z),p2=(p[5].x,p[5].y,p[5].z)) )
lines.append ( camvtk.Line(p1=(p[1].x,p[1].y,p[1].z),p2=(p[6].x,p[6].y,p[6].z)) )
lines.append ( camvtk.Line(p1=(p[2].x,p[2].y,p[2].z),p2=(p[6].x,p[6].y,p[6].z)) )
lines.append ( camvtk.Line(p1=(p[6].x,p[6].y,p[6].z),p2=(p[7].x,p[7].y,p[7].z)) )
lines.append ( camvtk.Line(p1=(p[4].x,p[4].y,p[4].z),p2=(p[7].x,p[7].y,p[7].z)) )
lines.append ( camvtk.Line(p1=(p[4].x,p[4].y,p[4].z),p2=(p[3].x,p[3].y,p[3].z)) )
lines.append ( camvtk.Line(p1=(p[5].x,p[5].y,p[5].z),p2=(p[3].x,p[3].y,p[3].z)) )
lines.append ( camvtk.Line(p1=(p[5].x,p[5].y,p[5].z),p2=(p[7].x,p[7].y,p[7].z)) )
if node.type == cam.OCType.WHITE:
color = nodeColor(node)
if node.type == cam.OCType.GREY:
color = camvtk.white
if node.type == cam.OCType.BLACK:
color = camvtk.grey
for li in lines:
li.SetColor( color )
if node.type==cam.OCType.BLACK:
li.SetOpacity(0.1)
if node.type==cam.OCType.GREY:
li.SetOpacity(0.2)
myscreen.addActor(li)
def drawNode2(myscreen, node):
if node.type == cam.OCType.BLACK:
return # don't draw intermediate nodes
if node.type == cam.OCType.GREY:
return # don't draw intermediate nodes
p = []
for n in range(1,9):
p1 = node.nodePoint(n)
p.append(p1)
lines = []
for n in range(0,8):
lines.append ( camvtk.Point(center=(p[n].x,p[n].y,p[n].z) ) )
if node.type == cam.OCType.WHITE:
color = nodeColor(node)
if node.type == cam.OCType.GREY:
color = camvtk.white
if node.type == cam.OCType.BLACK:
color = camvtk.grey
for li in lines:
li.SetColor( color )
if node.type==cam.OCType.BLACK:
li.SetOpacity(0.1)
if node.type==cam.OCType.GREY:
li.SetOpacity(0.2)
myscreen.addActor(li)
def drawNode3(myscreen, node):
if node.type == cam.OCType.BLACK:
return # don't draw intermediate nodes
if node.type == cam.OCType.GREY:
return # don't draw intermediate nodes
if node.type == cam.OCType.WHITE:
ccolor = nodeColor(node)
if node.type == cam.OCType.GREY:
ccolor = camvtk.white
if node.type == cam.OCType.BLACK:
ccolor = camvtk.grey
cen = node.nodePoint(0)
cube = camvtk.Cube(center=(cen.x, cen.y, cen.z), length= node.scale, color=camvtk.green)
#cube.SetWireframe()
#cube.SetOpacity(0.2)
myscreen.addActor( cube )
def drawOCT(myscreen, oct, color, opacity=1.0):
nodes = oct.get_white_nodes()
for node in nodes:
cen = node.nodePoint(0)
cube = camvtk.Cube(center=(cen.x, cen.y, cen.z), length= node.scale, color=color)
cube.SetOpacity(opacity)
#cube.SetWireframe()
myscreen.addActor( cube )
if __name__ == "__main__":
#exit()
#oct = cam.OCTNode()
myscreen = camvtk.VTKScreen()
myscreen.camera.SetPosition(20, 12, 2)
myscreen.camera.SetFocalPoint(0,0, 0)
#print oct.str()
"""
print("max scale=", oct.get_max_scale())
for n in range(0,9):
p1 = oct.nodePoint(n)
myscreen.addActor( camvtk.Sphere(center=(p1.x, p1.y, p1.z), radius=0.1, color=camvtk.red))
print("id=%i" % (n),)
print(p1.str())
print("child centers:")
for n in range(1,9):
p1 = oct.childCenter(n)
myscreen.addActor( camvtk.Sphere(center=(p1.x, p1.y, p1.z), radius=0.1, color=camvtk.yellow))
print("id=%i" % (n),)
print(p1.str())
"""
xar = camvtk.Arrow(color=red, rotXYZ=(0,0,0))
myscreen.addActor(xar)
yar = camvtk.Arrow(color=green, rotXYZ=(0,0,90))
myscreen.addActor(yar)
zar = camvtk.Arrow(color=blue, rotXYZ=(0,-90,0))
myscreen.addActor(zar)
oc2 = cam.OCTest()
oc2.set_max_depth(5)
svol = cam.SphereOCTVolume()
svol.radius=3.1415
svol.center = cam.Point(-1,2,-1)
oc2.setVol(svol)
oc2.build_octree()
oc3 = cam.OCTest()
svol3 = cam.SphereOCTVolume()
svol3.radius=2
svol3.center = cam.Point(-1,2,1)
cvol = cam.CubeOCTVolume()
cvol.side = 3
cvol.center = cam.Point(2.0,2,-1)
oc3.setVol(cvol)
oc3.set_max_depth(5)
oc3.build_octree()
iters = oc3.prune_all()
iters = oc2.prune_all()
nlist = oc2.get_all_nodes()
print(" oc2 got ", len(nlist), " nodes")
nlist = oc2.get_white_nodes()
print(" oc2 got ", len(nlist), " white nodes")
nlist = oc3.get_all_nodes()
print(" oc3 got ", len(nlist), " nodes")
print("calling balance")
oc2.balance(oc3)
print("after balance:")
nlist = oc2.get_all_nodes()
print(" oc2 got ", len(nlist), " nodes")
nlist = oc2.get_white_nodes()
print(" oc2 got ", len(nlist), " white nodes")
print("calling diff")
oc2.diff(oc3)
print("after diff:")
nlist = oc2.get_all_nodes()
print(" oc2 got ", len(nlist), " nodes")
nlist = oc2.get_white_nodes()
print(" oc2 got ", len(nlist), " white nodes")
drawOCT(myscreen, oc2, camvtk.green)
#drawOCT(myscreen, oc3, camvtk.red, opacity=0.1)
#exit()
#for node in nlist2:
# pass
#print node.str()
#p1 = node.nodePoint(0)
# drawNode3( myscreen, node )
#myscreen.addActor( camvtk.Sphere(center=(p1.x, p1.y, p1.z), radius=0.1, color=sph_color))
myscreen.render()
myscreen.iren.Start()
exit()
#oct = OCTNode(level=0)
testvol = Volume()
print("building tree...",)
tree = buildOCTree(testvol)
print("done.")
print(tree)
list =[]
searchOCTree(tree, list)
print(len(list), " nodes in tree")
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInput( w2if.GetOutput() )
w2if.Modified()
t = camvtk.Text()
t.SetPos( (myscreen.width-200, myscreen.height-30) )
myscreen.addActor( t)
t2 = camvtk.Text()
t2.SetPos( (myscreen.width-200, 30) )
myscreen.addActor( t2)
n = 0
for node in list:
addNodes(myscreen, node)
if (n%50) == 0:
nodetext = "Nodes: %5i" % (n)
t2.SetText(nodetext)
t.SetText("OpenCAMLib 10.03-beta " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
myscreen.render()
myscreen.camera.Azimuth( 3 )
print("frame %i of %i" % (n, len(list)))
w2if.Modified()
lwr.SetFileName("frames/oct"+ ('%05d' % n)+".png")
#lwr.Write()
n = n +1
#time.sleep(0.1)
print("done!")
#raw_input("Press Enter to terminate")
|
davidbgk/secateur
|
refs/heads/master
|
secateur/http.py
|
1
|
import json
import logging
import os
from http.client import ACCEPTED, CREATED, NOT_FOUND
from nameko.events import EventDispatcher
from nameko.rpc import rpc
from nameko.web.handlers import http
from .constants import RESULTS_FOLDER, STATUS_COMPLETE
from .logger import LoggingDependency
from .storages import RedisStorage
from .tools import generate_hash, send_file
log = logging.info
PRECONDITION_REQUIRED = 428
class HttpService(object):
name = 'http_server'
dispatch = EventDispatcher()
storage = RedisStorage()
logger = LoggingDependency(interval='ms')
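    # A hypothetical request illustrating the parameters parsed below
    # (the URL and filter names are made up for the example):
    #   GET /process?url=http://example.org/data.csv&column=year&value=2015
    # Each 'column' is paired with the matching 'value' and a download job
    # keyed by a hash of the query string is dispatched.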
@http('GET', '/process')
def process_url(self, request):
force = bool(int(request.args.get('force', 0)))
force_download = bool(int(request.args.get('force_download', 0)))
force_reduce = bool(int(request.args.get('force_reduce', 0)))
no_headers = bool(int(request.args.get('no_headers', 0)))
url = request.args.get('url')
filters = list(zip(request.args.getlist('column'),
request.args.getlist('value')))
log('Downloading url {url} to reduce with {filters}'.format(
url=url, filters=filters))
query_string = request.query_string.decode('utf-8')
job_hash = generate_hash(query_string)
url_hash = generate_hash(url)
if force or not self.storage.get_status(job_hash):
self.download({
'url': url,
'filters': filters,
'job_hash': job_hash,
'url_hash': url_hash,
'force_download': force_download,
'force_reduce': force_reduce,
'no_headers': no_headers
})
return ACCEPTED, json.dumps({'hash': job_hash}, indent=2)
@http('GET', '/status/<job_hash>')
def check_status_from_hash(self, request, job_hash):
log('Retrieving url hash {hash}'.format(hash=job_hash))
status = self.storage.get_status(job_hash)
if status is None:
return NOT_FOUND, ''
else:
status = int(status)
if status == STATUS_COMPLETE:
return CREATED, ''
else:
return PRECONDITION_REQUIRED, ''
@http('GET', '/file/<job_hash>')
def retrieve_file_from_hash(self, request, job_hash):
log('Retrieving file with hash {hash}'.format(hash=job_hash))
if not int(self.storage.get_status(job_hash)) == STATUS_COMPLETE:
return NOT_FOUND, ''
csvfile_out = os.path.join(RESULTS_FOLDER, job_hash)
attachment_filename = '{job_hash}.csv'.format(job_hash=job_hash)
return send_file(
request, csvfile_out, attachment_filename=attachment_filename)
@rpc
def download(self, job_data):
log('Dispatching download of {url}'.format(url=job_data['url']))
self.dispatch('url_to_download', job_data)
|
hand-iemura/lightpng
|
refs/heads/master
|
boost_1_53_0/libs/python/test/printer.py
|
46
|
# Copyright David Abrahams 2006. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
class _printer(object):
def __init__(self):
        self.results = []
def __call__(self, *stuff):
for x in stuff:
self.results.append(str(x))
def check(self, x):
if self.results[0] != str(x):
print ' Expected:\n %s\n but the C++ interface gave:\n %s' % (x, self.results[0])
del self.results[0]
|
Bluehorn/requests
|
refs/heads/develop
|
requests/packages/urllib3/util.py
|
65
|
# urllib3/util.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from base64 import b64encode
from collections import namedtuple
from socket import error as SocketError
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
from .packages import six
from .exceptions import LocationParseError
class Url(namedtuple('Url', ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'])):
"""
Datastructure for representing an HTTP URL. Used as a return value for
:func:`parse_url`.
"""
    __slots__ = ()
def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None, query=None, fragment=None):
return super(Url, cls).__new__(cls, scheme, auth, host, port, path, query, fragment)
@property
def hostname(self):
"""For backwards-compatibility with urlparse. We're nice like that."""
return self.host
@property
def request_uri(self):
"""Absolute path including the query string."""
uri = self.path or '/'
if self.query is not None:
uri += '?' + self.query
return uri
def split_first(s, delims):
"""
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example: ::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
Scales linearly with number of delims. Not ideal for large number of delims.
"""
min_idx = None
min_delim = None
for d in delims:
idx = s.find(d)
if idx < 0:
continue
if min_idx is None or idx < min_idx:
min_idx = idx
min_delim = d
if min_idx is None or min_idx < 0:
return s, '', None
return s[:min_idx], s[min_idx+1:], min_delim
def parse_url(url):
"""
Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
performed to parse incomplete urls. Fields not provided will be None.
Partly backwards-compatible with :mod:`urlparse`.
Example: ::
>>> parse_url('http://google.com/mail/')
Url(scheme='http', host='google.com', port=None, path='/', ...)
    >>> parse_url('google.com:80')
Url(scheme=None, host='google.com', port=80, path=None, ...)
    >>> parse_url('/foo?bar')
Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
"""
# While this code has overlap with stdlib's urlparse, it is much
# simplified for our needs and less annoying.
    # Additionally, this implementation does silly things to be optimal
# on CPython.
scheme = None
auth = None
host = None
port = None
path = None
fragment = None
query = None
# Scheme
if '://' in url:
scheme, url = url.split('://', 1)
# Find the earliest Authority Terminator
# (http://tools.ietf.org/html/rfc3986#section-3.2)
url, path_, delim = split_first(url, ['/', '?', '#'])
if delim:
# Reassemble the path
path = delim + path_
# Auth
if '@' in url:
auth, url = url.split('@', 1)
# IPv6
if url and url[0] == '[':
host, url = url[1:].split(']', 1)
# Port
if ':' in url:
_host, port = url.split(':', 1)
if not host:
host = _host
if not port.isdigit():
raise LocationParseError("Failed to parse: %s" % url)
port = int(port)
elif not host and url:
host = url
if not path:
return Url(scheme, auth, host, port, path, query, fragment)
# Fragment
if '#' in path:
path, fragment = path.split('#', 1)
# Query
if '?' in path:
path, query = path.split('?', 1)
return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
"""
Deprecated. Use :func:`.parse_url` instead.
"""
p = parse_url(url)
return p.scheme or 'http', p.hostname, p.port
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
Example: ::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = 'gzip,deflate'
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(six.b(basic_auth)).decode('utf-8')
return headers
def is_connection_dropped(conn):
"""
Returns True if the connection is dropped and should be closed.
:param conn:
:class:`httplib.HTTPConnection` object.
Note: For platforms like AppEngine, this will always return ``False`` to
let the platform handle connection recycling transparently for us.
"""
sock = getattr(conn, 'sock', False)
if not sock: # Platform-specific: AppEngine
return False
if not poll: # Platform-specific
if not select: # Platform-specific: AppEngine
return False
try:
return select([sock], [], [], 0.0)[0]
except SocketError:
return True
# This version is better on platforms that support it.
p = poll()
p.register(sock, POLLIN)
for (fno, ev) in p.poll(0.0):
if fno == sock.fileno():
# Either data is buffered (bad), or the connection is dropped.
return True
|
jiangzhuo/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/mailcap.py
|
100
|
"""Mailcap file handling. See RFC 1524."""
import os
__all__ = ["getcaps","findmatch"]
# Part 1: top-level interface.
def getcaps():
"""Return a dictionary containing the mailcap database.
The dictionary maps a MIME type (in all lowercase, e.g. 'text/plain')
to a list of dictionaries corresponding to mailcap entries. The list
collects all the entries for that MIME type from all available mailcap
files. Each dictionary contains key-value pairs for that MIME type,
where the viewing command is stored with the key "view".
"""
caps = {}
for mailcap in listmailcapfiles():
try:
fp = open(mailcap, 'r')
except OSError:
continue
with fp:
morecaps = readmailcapfile(fp)
for key, value in morecaps.items():
if not key in caps:
caps[key] = value
else:
caps[key] = caps[key] + value
return caps
def listmailcapfiles():
"""Return a list of all mailcap files found on the system."""
# This is mostly a Unix thing, but we use the OS path separator anyway
if 'MAILCAPS' in os.environ:
pathstr = os.environ['MAILCAPS']
mailcaps = pathstr.split(os.pathsep)
else:
if 'HOME' in os.environ:
home = os.environ['HOME']
else:
# Don't bother with getpwuid()
home = '.' # Last resort
mailcaps = [home + '/.mailcap', '/etc/mailcap',
'/usr/etc/mailcap', '/usr/local/etc/mailcap']
return mailcaps
# Part 2: the parser.
def readmailcapfile(fp):
"""Read a mailcap file and return a dictionary keyed by MIME type.
Each MIME type is mapped to an entry consisting of a list of
dictionaries; the list will contain more than one such dictionary
if a given MIME type appears more than once in the mailcap file.
Each dictionary contains key-value pairs for that MIME type, where
the viewing command is stored with the key "view".
"""
caps = {}
while 1:
line = fp.readline()
if not line: break
# Ignore comments and blank lines
if line[0] == '#' or line.strip() == '':
continue
nextline = line
# Join continuation lines
while nextline[-2:] == '\\\n':
nextline = fp.readline()
if not nextline: nextline = '\n'
line = line[:-2] + nextline
# Parse the line
key, fields = parseline(line)
if not (key and fields):
continue
# Normalize the key
types = key.split('/')
for j in range(len(types)):
types[j] = types[j].strip()
key = '/'.join(types).lower()
# Update the database
if key in caps:
caps[key].append(fields)
else:
caps[key] = [fields]
return caps
def parseline(line):
"""Parse one entry in a mailcap file and return a dictionary.
The viewing command is stored as the value with the key "view",
and the rest of the fields produce key-value pairs in the dict.
"""
fields = []
i, n = 0, len(line)
while i < n:
field, i = parsefield(line, i, n)
fields.append(field)
i = i+1 # Skip semicolon
if len(fields) < 2:
return None, None
key, view, rest = fields[0], fields[1], fields[2:]
fields = {'view': view}
for field in rest:
i = field.find('=')
if i < 0:
fkey = field
fvalue = ""
else:
fkey = field[:i].strip()
fvalue = field[i+1:].strip()
if fkey in fields:
# Ignore it
pass
else:
fields[fkey] = fvalue
return key, fields
def parsefield(line, i, n):
"""Separate one key-value pair in a mailcap entry."""
start = i
while i < n:
c = line[i]
if c == ';':
break
elif c == '\\':
i = i+2
else:
i = i+1
return line[start:i].strip(), i
# Part 3: using the database.
def findmatch(caps, MIMEtype, key='view', filename="/dev/null", plist=[]):
"""Find a match for a mailcap entry.
Return a tuple containing the command line, and the mailcap entry
used; (None, None) if no match is found. This may invoke the
'test' command of several matching entries before deciding which
entry to use.
"""
entries = lookup(caps, MIMEtype, key)
# XXX This code should somehow check for the needsterminal flag.
for e in entries:
if 'test' in e:
test = subst(e['test'], filename, plist)
if test and os.system(test) != 0:
continue
command = subst(e[key], MIMEtype, filename, plist)
return command, e
return None, None
def lookup(caps, MIMEtype, key=None):
entries = []
if MIMEtype in caps:
entries = entries + caps[MIMEtype]
MIMEtypes = MIMEtype.split('/')
MIMEtype = MIMEtypes[0] + '/*'
if MIMEtype in caps:
entries = entries + caps[MIMEtype]
if key is not None:
entries = [e for e in entries if key in e]
return entries
def subst(field, MIMEtype, filename, plist=[]):
# XXX Actually, this is Unix-specific
res = ''
i, n = 0, len(field)
while i < n:
c = field[i]; i = i+1
if c != '%':
if c == '\\':
c = field[i:i+1]; i = i+1
res = res + c
else:
c = field[i]; i = i+1
if c == '%':
res = res + c
elif c == 's':
res = res + filename
elif c == 't':
res = res + MIMEtype
elif c == '{':
start = i
while i < n and field[i] != '}':
i = i+1
name = field[start:i]
i = i+1
res = res + findparam(name, plist)
# XXX To do:
# %n == number of parts if type is multipart/*
# %F == list of alternating type and filename for parts
else:
res = res + '%' + c
return res
def findparam(name, plist):
name = name.lower() + '='
n = len(name)
for p in plist:
if p[:n].lower() == name:
return p[n:]
return ''
# Part 4: test program.
def test():
import sys
caps = getcaps()
if not sys.argv[1:]:
show(caps)
return
for i in range(1, len(sys.argv), 2):
args = sys.argv[i:i+2]
if len(args) < 2:
print("usage: mailcap [MIMEtype file] ...")
return
MIMEtype = args[0]
file = args[1]
command, e = findmatch(caps, MIMEtype, 'view', file)
if not command:
print("No viewer found for", type)
else:
print("Executing:", command)
sts = os.system(command)
if sts:
print("Exit status:", sts)
def show(caps):
print("Mailcap files:")
for fn in listmailcapfiles(): print("\t" + fn)
print()
if not caps: caps = getcaps()
print("Mailcap entries:")
print()
ckeys = sorted(caps)
for type in ckeys:
print(type)
entries = caps[type]
for e in entries:
keys = sorted(e)
for k in keys:
print(" %-15s" % k, e[k])
print()
if __name__ == '__main__':
test()
|
TeslaProject/external_chromium_org
|
refs/heads/lp5.1
|
third_party/tlslite/tlslite/utils/datefuncs.py
|
206
|
# Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
import os
#Functions for manipulating datetime objects
#CCYY-MM-DDThh:mm:ssZ
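# For example, parseDateClass("2018-01-02T03:04:05Z") yields the same value
# as createDateClass(2018, 1, 2, 3, 4, 5).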
def parseDateClass(s):
year, month, day = s.split("-")
day, tail = day[:2], day[2:]
hour, minute, second = tail[1:].split(":")
second = second[:2]
year, month, day = int(year), int(month), int(day)
hour, minute, second = int(hour), int(minute), int(second)
return createDateClass(year, month, day, hour, minute, second)
if os.name != "java":
from datetime import datetime, timedelta
#Helper functions for working with a date/time class
def createDateClass(year, month, day, hour, minute, second):
return datetime(year, month, day, hour, minute, second)
def printDateClass(d):
#Split off fractional seconds, append 'Z'
return d.isoformat().split(".")[0]+"Z"
def getNow():
return datetime.utcnow()
def getHoursFromNow(hours):
return datetime.utcnow() + timedelta(hours=hours)
def getMinutesFromNow(minutes):
return datetime.utcnow() + timedelta(minutes=minutes)
def isDateClassExpired(d):
return d < datetime.utcnow()
def isDateClassBefore(d1, d2):
return d1 < d2
else:
#Jython 2.1 is missing lots of python 2.3 stuff,
#which we have to emulate here:
import java
import jarray
def createDateClass(year, month, day, hour, minute, second):
c = java.util.Calendar.getInstance()
c.setTimeZone(java.util.TimeZone.getTimeZone("UTC"))
c.set(year, month-1, day, hour, minute, second)
return c
def printDateClass(d):
return "%04d-%02d-%02dT%02d:%02d:%02dZ" % \
(d.get(d.YEAR), d.get(d.MONTH)+1, d.get(d.DATE), \
d.get(d.HOUR_OF_DAY), d.get(d.MINUTE), d.get(d.SECOND))
def getNow():
c = java.util.Calendar.getInstance()
c.setTimeZone(java.util.TimeZone.getTimeZone("UTC"))
c.get(c.HOUR) #force refresh?
return c
def getHoursFromNow(hours):
d = getNow()
d.add(d.HOUR, hours)
return d
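    # The CPython branch above also defines getMinutesFromNow; a parallel
    # helper is sketched here for API parity (an assumption -- it is not
    # part of the original Jython branch).
    def getMinutesFromNow(minutes):
        d = getNow()
        d.add(d.MINUTE, minutes)
        return d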
def isDateClassExpired(d):
n = getNow()
return d.before(n)
def isDateClassBefore(d1, d2):
return d1.before(d2)
|
homework/nox
|
refs/heads/zaku
|
src/nox/netapps/user_event_log/networkeventsws.py
|
9
|
# -*- coding: utf8 -*-
# Copyright 2008 (C) Nicira, Inc.
#
# This file is part of NOX.
#
# NOX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NOX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NOX. If not, see <http://www.gnu.org/licenses/>.
from nox.lib.core import *
from twisted.python.failure import Failure
from nox.webapps.webserver.webauth import Capabilities
from nox.ext.apps.commonui.authui import UISection, UIResource
from nox.webapps.webserver import webauth
from nox.webapps.webserver.webserver import *
from nox.webapps.webservice.webservice import *
from nox.netapps.user_event_log.pyuser_event_log import pyuser_event_log, \
LogLevel, LogEntry
from nox.netapps.bindings_storage.pybindings_storage import pybindings_storage
from nox.webapps.webservice.webservice import json_parse_message_body
from nox.lib.netinet.netinet import *
from nox.webapps.webservice.web_arg_utils import *
from twisted.internet.defer import Deferred
from nox.netapps.data.datatypes_impl import Datatypes
from nox.netapps.data.datacache_impl import DataCache
import simplejson
import types
import copy
import re
# matches each instance of a format string, to be used with
# fmt_pattern.findall(log_message) to get a list of all format
# strings used in a log message
fmt_pattern = re.compile('{[a-z]*}')
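# e.g. fmt_pattern.findall("user {su} authenticated at {dl}") returns
# ['{su}', '{dl}'] (the format names here are illustrative).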
lg = logging.getLogger("networkeventsws")
# makes sure a path component is a currently valid logid value
class WSPathValidLogID(WSPathComponent):
def __init__(self, uel):
WSPathComponent.__init__(self)
self.uel = uel
def __str__(self):
return "<logid>"
def extract(self, pc, data):
if pc == None:
return WSPathExtractResult(error="End of requested URI")
try:
max_logid = self.uel.get_max_logid()
logid = long(pc)
if logid > 0 and logid <= max_logid:
return WSPathExtractResult(value=pc)
except:
pass
e = "Invalid LogID value '" + pc + "'. Must be number 0 < n <= " \
+ str(max_logid)
return WSPathExtractResult(error=e)
def string_for_name_type(datatypes, type, is_plural):
s = ""
if type == datatypes.USER: s = "user"
elif type == datatypes.HOST: s = "host"
elif type == datatypes.LOCATION: s = "location"
elif type == datatypes.SWITCH: s = "switch"
elif type == datatypes.USER_GROUP: s = "user group"
elif type == datatypes.HOST_GROUP: s = "host group"
elif type == datatypes.LOCATION_GROUP: s = "location group"
elif type == datatypes.SWITCH_GROUP: s = "switch group"
if is_plural:
if type == datatypes.SWITCH:
s += "es"
else:
s += "s"
return s
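# e.g. string_for_name_type(datatypes, datatypes.SWITCH, True) -> "switches",
# while string_for_name_type(datatypes, datatypes.USER, True) -> "users".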
def get_matching_ids(type, all_ids):
for_use = []
for n in all_ids:
if(n["type"] == type):
for_use.append((n["id"],n["type"]))
return for_use
def get_name_str(ids, datacache):
for_use = []
for n in ids:
for_use.append(datacache.get_name(n[1], n[0]))
if len(for_use) == 0:
for_use.append("<unknown>")
n = ""
for i in range(len(for_use)):
n += str(for_use[i])
if i < len(for_use) - 1:
n += ","
return "'%s'" % n
def fill_in_msg(uel, datatypes, datacache, msg, src_names, dst_names):
fmts_used = fmt_pattern.findall(msg)
fmts_used = map(lambda s: s[1:-1],fmts_used) # remove braces
ids = []
for fmt in fmts_used:
if fmt not in uel.principal_format_map:
lg.error("invalid format string '%s' in message '%s'" % (fmt,msg))
continue
name_type,dir = uel.principal_format_map[fmt]
if dir == LogEntry.SRC:
name_list = src_names
else:
name_list = dst_names
matching_ids = get_matching_ids(name_type, name_list)
name = get_name_str(matching_ids, datacache)
msg = msg.replace("{"+fmt+"}", name)
if len(matching_ids) == 1:
ids.append(matching_ids[0])
else:
ids.append((-1,0))
return (msg, ids)
def make_entry(uel, datatypes, datacache, logid, ts, app,
level, msg, src_names, dst_names):
msg,ids = fill_in_msg(uel, datatypes, datacache, msg,src_names,dst_names)
return { "logid" : logid,
"timestamp" : ts,
"app" : app,
"level" : level,
"msg" : msg,
"ids" : ids
}
def err(failure, request, fn_name, msg):
lg.error('%s: %s' % (fn_name, str(failure)))
return internalError(request, msg)
def dump_list_to_json(ret_list,request):
request.write(simplejson.dumps({
"identifier" : "logid",
"items" : ret_list
} ))
request.finish()
# get all log entries associated with a 'name' (ie a host or user)
# uses get_logids_for_name() and then uses process_list_op
class process_name_op:
def __init__(self, uel,datatypes,datacache):
self.uel = uel
self.datatypes = datatypes
self.datacache = datacache
def start(self, uid, principal_type, filter):
self.filter = filter
self.d = Deferred()
self.uel.get_logids_for_name(uid,principal_type,self.callback)
return self.d
def callback(self,logids):
p = process_list_op(self.uel,self.datatypes,self.datacache)
list_op_d = p.start(logids, self.filter)
def on_success(res):
self.d.callback(res)
def on_error(res):
            self.d.errback(res)
list_op_d.addCallback(on_success)
list_op_d.addErrback(on_error)
# class to get all log entries and writes them
# in JSON to a request object.
# the dict 'filter' describes how these results
# can be filtered before being returned (see below)
class process_list_op:
def __init__(self,uel,datatypes,datacache):
self.got = 0
self.items = []
self.all_spawned = False
self.uel = uel
self.datatypes = datatypes
self.datacache = datacache
self.name_to_dpid = {}
self.name_to_port = {}
self.unique_dpids = {}
def start(self, logids, filter):
self.d = Deferred()
self.filter = filter
max = self.uel.get_max_logid()
if max == 0:
self.done()
return self.d
# if nothing was provided, return ALL entries
if logids is None:
min = self.uel.get_min_logid()
logids = range(min,max+1)
self.needs = 0
for id in logids:
if id > 0 and id <= max and id > filter["after"]:
self.needs += 1
self.uel.get_log_entry(id,self.log_callback)
# needed for common case when we call self.done() from self.log_callback()
self.all_spawned = True
if self.needs == self.got :
self.done() # nothing actually spawned, or everything already done
return self.d
def done(self):
filtered_list = filter_item_list(self.items, ["app","msg"],
self.filter)
ret_list = sort_and_slice_results(self.filter, filtered_list)
self.d.callback(ret_list)
def log_callback(self, logid, ts, app, level, msg, src_names, dst_names):
self.got += 1
if level != LogLevel.INVALID and level <= self.filter["max_level"]:
e = make_entry(self.uel, self.datatypes, self.datacache,
logid,ts,app,level,msg,src_names,dst_names)
self.items.append(e)
if self.all_spawned and self.needs == self.got:
self.done()
class networkeventsws(Component):
""" Web service for network events (aka user_event_log)"""
def __init__(self,ctx):
Component.__init__(self,ctx)
def getInterface(self):
return str(networkeventsws)
# this is mainly for debugging, though in the future it could be
# a way for remote apps to integrate logging into our system.
def handle_add(self,request,data):
try:
if authorization_failed(request, [set(["add-network-events"])]):
return NOT_DONE_YET
content = json_parse_message_body(request)
if content == None:
content = {}
app = "via-netevent-webservice"
if "app" in content:
app = str(content["app"])
msg = "default webservice message"
if "msg" in content:
msg = str(content["msg"])
level = LogEntry.INFO
if "level" in content:
level = int(content["level"])
self.uel.log(app,level, msg)
except Exception, e:
err(Failure(), request, "handle_add",
"Could not add log message")
request.write(simplejson.dumps("success"))
request.finish()
return NOT_DONE_YET
# this is mainly for debugging.
def handle_remove(self,request,data):
if authorization_failed(request, [set(["remove-network-events"])]):
return NOT_DONE_YET
try:
msg = ""
def cb():
try:
request.write(simplejson.dumps("success:" + msg))
request.finish()
except Exception, e:
err(Failure(), request, "handle_remove",
"Could not remove log messages.")
if(request.args.has_key("max_logid")):
max_logid = int(request.args["max_logid"][0])
msg = "cleared entries with logid <= " + str(max_logid)
self.uel.remove(max_logid,cb)
else :
msg = "cleared all entries"
self.uel.clear(cb)
except Exception, e:
err(Failure(), request, "handle_remove",
"Could not remove log messages.")
return NOT_DONE_YET
# returns a deferred that is called with the list of all log entries
# for principal with name 'name' and type 'name_type', filtered by
# 'filter'. If filter is not specified, all matching entries are returned
def get_logs_for_name(self,uid,principal_type, filter=None):
if filter is None:
filter = self.get_default_filter()
p = process_name_op(self.uel, self.datatypes,self.datacache)
return p.start(uid,principal_type,filter)
# returns all logs if logid_list is None, or only the logs with logids
# specified in 'logid_list'. These results will be filtered if 'filter'
# is specified.
def get_logs(self, logid_list = None, filter=None):
if filter is None:
filter = self.get_default_filter()
p = process_list_op(self.uel,self.datatypes,self.datacache)
return p.start(logid_list, filter)
def get_default_filter(self):
return parse_mandatory_args({}, self.get_default_filter_arr())
def get_default_filter_arr(self):
filter_arr = get_default_filter_arr("logid")
filter_arr.extend([("after",0), ("max_level",LogLevel.INFO)])
return filter_arr
def handle_get_all(self,request,data):
try :
if authorization_failed(request, [set(["get-network-events"])]):
return NOT_DONE_YET
filter = parse_mandatory_args(request,
self.get_default_filter_arr())
for s in ["app","msg"]:
if s in request.args:
filter[s] = request.args[s][-1]
# handles all requests that are filtering based on a particular
# principal name (e.g., host=sepl_directory;bob )
type_map = { "host" : self.datatypes.HOST,
"user" : self.datatypes.USER,
"location" : self.datatypes.LOCATION,
"switch" : self.datatypes.SWITCH,
"group" : self.datatypes.GROUP
}
for name, type in type_map.iteritems():
if(request.args.has_key(name)):
uid = int(request.args[name][0])
d = self.get_logs_for_name(uid,type,filter)
d.addCallback(dump_list_to_json, request)
d.addErrback(err, request, "get_all",
"Could not retrieve log messages")
return NOT_DONE_YET
# otherwise, we just query directory for logids
# we query either for a single logid or for all
logid_list = None # default to query for all
if(request.args.has_key("logid")):
logid = int(request.args["logid"][0])
max = self.uel.get_max_logid()
if logid >= 1 and logid <= max:
                logid_list = (logid,)
else:
logid_list = ()
d = self.get_logs(logid_list, filter)
d.addCallback(dump_list_to_json, request)
d.addErrback(err, request, "get_all",
"Could not retrieve log messages")
except Exception, e:
err(Failure(), request, "get_all",
"Could not retrieve log messages.")
return NOT_DONE_YET
def install(self):
rwRoles = set(["Superuser", "Admin", "Demo"])
roRoles = rwRoles | set(["Readonly"])
webauth.Capabilities.register('get-network-events',
'Get network event log messages', roRoles)
webauth.Capabilities.register('add-network-events',
'Add network event log messages', rwRoles)
webauth.Capabilities.register('remove-network-events',
'Remove network event log messages', rwRoles)
self.uel = self.resolve(pyuser_event_log)
self.datatypes = self.resolve(Datatypes)
self.datacache = self.resolve(DataCache)
ws = self.resolve(webservice)
v1 = ws.get_version("1")
# returns a JSON object:
#
# { 'identifier' : 'logid' , items : [ .... ] }
#
# Query Params:
# * supports standard 'start' 'count' for pagination
# * supports 'sort_descending' and
get_all_path = ( WSPathStaticString("networkevents"),)
v1.register_request(self.handle_get_all, "GET", get_all_path,
"""Get a set of messages from the network events log""")
remove_path = ( WSPathStaticString("networkevents"),
WSPathStaticString("remove"))
v1.register_request(self.handle_remove, "PUT", remove_path,
"""Permanently remove all (or just some) network event log entries""")
add_path = ( WSPathStaticString("networkevents"),
WSPathStaticString("add"))
v1.register_request(self.handle_add, "PUT", add_path,
"""Add a simple network event log message""")
def getFactory():
class Factory:
def instance(self,ctx):
return networkeventsws(ctx)
return Factory()
|
ayoubg/gem5-graphics
|
refs/heads/master
|
gem5/src/arch/x86/isa/insts/simd128/floating_point/data_conversion/convert_floating_point_to_gpr_integer.py
|
91
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop CVTSS2SI_R_XMM {
cvtf2i ufp1, xmmlm, srcSize=4, destSize=dsz, ext = Scalar + "| 4"
mov2int reg, ufp1, size=dsz
};
def macroop CVTSS2SI_R_M {
ldfp ufp1, seg, sib, disp, dataSize=8
cvtf2i ufp1, ufp1, srcSize=4, destSize=dsz, ext = Scalar + "| 4"
mov2int reg, ufp1, size=dsz
};
def macroop CVTSS2SI_R_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
cvtf2i ufp1, ufp1, srcSize=4, destSize=dsz, ext = Scalar + "| 4"
mov2int reg, ufp1, size=dsz
};
def macroop CVTSD2SI_R_XMM {
cvtf2i ufp1, xmmlm, srcSize=8, destSize=dsz, ext = Scalar + "| 4"
mov2int reg, ufp1, size=dsz
};
def macroop CVTSD2SI_R_M {
ldfp ufp1, seg, sib, disp, dataSize=8
cvtf2i ufp1, ufp1, srcSize=8, destSize=dsz, ext = Scalar + "| 4"
mov2int reg, ufp1, size=dsz
};
def macroop CVTSD2SI_R_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
cvtf2i ufp1, ufp1, srcSize=8, destSize=dsz, ext = Scalar + "| 4"
mov2int reg, ufp1, size=dsz
};
def macroop CVTTSS2SI_R_XMM {
cvtf2i ufp1, xmmlm, srcSize=4, destSize=dsz, ext=Scalar
mov2int reg, ufp1, size=dsz
};
def macroop CVTTSS2SI_R_M {
ldfp ufp1, seg, sib, disp, dataSize=8
cvtf2i ufp1, ufp1, srcSize=4, destSize=dsz, ext=Scalar
mov2int reg, ufp1, size=dsz
};
def macroop CVTTSS2SI_R_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
cvtf2i ufp1, ufp1, srcSize=4, destSize=dsz, ext=Scalar
mov2int reg, ufp1, size=dsz
};
def macroop CVTTSD2SI_R_XMM {
cvtf2i ufp1, xmmlm, srcSize=8, destSize=dsz, ext=Scalar
mov2int reg, ufp1, size=dsz
};
def macroop CVTTSD2SI_R_M {
ldfp ufp1, seg, sib, disp, dataSize=8
cvtf2i ufp1, ufp1, srcSize=8, destSize=dsz, ext=Scalar
mov2int reg, ufp1, size=dsz
};
def macroop CVTTSD2SI_R_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
cvtf2i ufp1, ufp1, srcSize=8, destSize=dsz, ext=Scalar
mov2int reg, ufp1, size=dsz
};
'''
|
dpac-vlsi/SynchroTrace
|
refs/heads/master
|
src/arch/x86/isa/insts/general_purpose/rotate_and_shift/rotate.py
|
91
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop ROL_R_I
{
roli reg, reg, imm, flags=(OF,CF)
};
def macroop ROL_M_I
{
ldst t1, seg, sib, disp
roli t1, t1, imm, flags=(OF,CF)
st t1, seg, sib, disp
};
def macroop ROL_P_I
{
rdip t7
ldst t1, seg, riprel, disp
roli t1, t1, imm, flags=(OF,CF)
st t1, seg, riprel, disp
};
def macroop ROL_1_R
{
roli reg, reg, 1, flags=(OF,CF)
};
def macroop ROL_1_M
{
ldst t1, seg, sib, disp
roli t1, t1, 1, flags=(OF,CF)
st t1, seg, sib, disp
};
def macroop ROL_1_P
{
rdip t7
ldst t1, seg, riprel, disp
roli t1, t1, 1, flags=(OF,CF)
st t1, seg, riprel, disp
};
def macroop ROL_R_R
{
rol reg, reg, regm, flags=(OF,CF)
};
def macroop ROL_M_R
{
ldst t1, seg, sib, disp
rol t1, t1, reg, flags=(OF,CF)
st t1, seg, sib, disp
};
def macroop ROL_P_R
{
rdip t7
ldst t1, seg, riprel, disp
rol t1, t1, reg, flags=(OF,CF)
st t1, seg, riprel, disp
};
def macroop ROR_R_I
{
rori reg, reg, imm, flags=(OF,CF)
};
def macroop ROR_M_I
{
ldst t1, seg, sib, disp
rori t1, t1, imm, flags=(OF,CF)
st t1, seg, sib, disp
};
def macroop ROR_P_I
{
rdip t7
ldst t1, seg, riprel, disp
rori t1, t1, imm, flags=(OF,CF)
st t1, seg, riprel, disp
};
def macroop ROR_1_R
{
rori reg, reg, 1, flags=(OF,CF)
};
def macroop ROR_1_M
{
ldst t1, seg, sib, disp
rori t1, t1, 1, flags=(OF,CF)
st t1, seg, sib, disp
};
def macroop ROR_1_P
{
rdip t7
ldst t1, seg, riprel, disp
rori t1, t1, 1, flags=(OF,CF)
st t1, seg, riprel, disp
};
def macroop ROR_R_R
{
ror reg, reg, regm, flags=(OF,CF)
};
def macroop ROR_M_R
{
ldst t1, seg, sib, disp
ror t1, t1, reg, flags=(OF,CF)
st t1, seg, sib, disp
};
def macroop ROR_P_R
{
rdip t7
ldst t1, seg, riprel, disp
ror t1, t1, reg, flags=(OF,CF)
st t1, seg, riprel, disp
};
def macroop RCL_R_I
{
rcli reg, reg, imm, flags=(OF,CF)
};
def macroop RCL_M_I
{
ldst t1, seg, sib, disp
rcli t1, t1, imm, flags=(OF,CF)
st t1, seg, sib, disp
};
def macroop RCL_P_I
{
rdip t7
ldst t1, seg, riprel, disp
rcli t1, t1, imm, flags=(OF,CF)
st t1, seg, riprel, disp
};
def macroop RCL_1_R
{
rcli reg, reg, 1, flags=(OF,CF)
};
def macroop RCL_1_M
{
ldst t1, seg, sib, disp
rcli t1, t1, 1, flags=(OF,CF)
st t1, seg, sib, disp
};
def macroop RCL_1_P
{
rdip t7
ldst t1, seg, riprel, disp
rcli t1, t1, 1, flags=(OF,CF)
st t1, seg, riprel, disp
};
def macroop RCL_R_R
{
rcl reg, reg, regm, flags=(OF,CF)
};
def macroop RCL_M_R
{
ldst t1, seg, sib, disp
rcl t1, t1, reg, flags=(OF,CF)
st t1, seg, sib, disp
};
def macroop RCL_P_R
{
rdip t7
ldst t1, seg, riprel, disp
rcl t1, t1, reg, flags=(OF,CF)
st t1, seg, riprel, disp
};
def macroop RCR_R_I
{
rcri reg, reg, imm, flags=(OF,CF)
};
def macroop RCR_M_I
{
ldst t1, seg, sib, disp
rcri t1, t1, imm, flags=(OF,CF)
st t1, seg, sib, disp
};
def macroop RCR_P_I
{
rdip t7
ldst t1, seg, riprel, disp
rcri t1, t1, imm, flags=(OF,CF)
st t1, seg, riprel, disp
};
def macroop RCR_1_R
{
rcri reg, reg, 1, flags=(OF,CF)
};
def macroop RCR_1_M
{
ldst t1, seg, sib, disp
rcri t1, t1, 1, flags=(OF,CF)
st t1, seg, sib, disp
};
def macroop RCR_1_P
{
rdip t7
ldst t1, seg, riprel, disp
rcri t1, t1, 1, flags=(OF,CF)
st t1, seg, riprel, disp
};
def macroop RCR_R_R
{
rcr reg, reg, regm, flags=(OF,CF)
};
def macroop RCR_M_R
{
ldst t1, seg, sib, disp
rcr t1, t1, reg, flags=(OF,CF)
st t1, seg, sib, disp
};
def macroop RCR_P_R
{
rdip t7
ldst t1, seg, riprel, disp
rcr t1, t1, reg, flags=(OF,CF)
st t1, seg, riprel, disp
};
'''
|
vcarrera/ahmia
|
refs/heads/master
|
ahmia/solr_grouping_backend.py
|
5
|
# encoding: utf-8
"""Experimental Solr Grouping / Field Collapsing backend for Haystack 2.0"""
# NOTE: You must be running the latest Pysolr master - no PyPI release yet!
# See https://gist.github.com/3750774 for the current version of this code
# See http://wiki.apache.org/solr/FieldCollapsing for the Solr feature documentation
from __future__ import absolute_import
import logging
from django.db.models.loading import get_model
from haystack.backends import EmptyResults
from haystack.backends.solr_backend import (SolrEngine, SolrSearchBackend,
SolrSearchQuery)
from haystack.constants import DJANGO_CT, DJANGO_ID, ID
from haystack.models import SearchResult
from haystack.query import SearchQuerySet
# Since there's no chance of this being portable (yet!) we'll import explicitly
# rather than using the generic imports:
class GroupedSearchQuery(SolrSearchQuery):
def __init__(self, *args, **kwargs):
super(GroupedSearchQuery, self).__init__(*args, **kwargs)
self.grouping_field = None
self._total_document_count = None
def _clone(self, **kwargs):
clone = super(GroupedSearchQuery, self)._clone(**kwargs)
clone.grouping_field = self.grouping_field
return clone
def add_group_by(self, field_name):
self.grouping_field = field_name
def post_process_facets(self, results):
# FIXME: remove this hack once https://github.com/toastdriven/django-haystack/issues/750 lands
# See matches dance in _process_results below:
total = 0
if 'hits' in results:
total = int(results['hits'])
elif 'matches' in results:
total = int(results['matches'])
self._total_document_count = total
return super(GroupedSearchQuery, self).post_process_facets(results)
def get_total_document_count(self):
"""Return the total number of matching documents rather than document groups
If the query has not been run, this will execute the query and store the results.
"""
if self._total_document_count is None:
self.run()
return self._total_document_count
def build_params(self, *args, **kwargs):
res = super(GroupedSearchQuery, self).build_params(*args, **kwargs)
if self.grouping_field is not None:
res.update({'group': 'true',
'group.field': self.grouping_field,
'group.ngroups': 'true',
'group.limit': 2, # TODO: Don't hard-code this
'group.sort': 'django_ct desc, score desc',
'group.facet': 'true',
'result_class': GroupedSearchResult})
return res
class GroupedSearchResult(object):
def __init__(self, field_name, group_data, raw_results={}):
self.field_name = field_name
self.key = group_data['groupValue'] # TODO: convert _to_python
self.hits = group_data['doclist']['numFound']
self.documents = list(self.process_documents(group_data['doclist']['docs'],
                                             raw_results=raw_results or {}))
def __unicode__(self):
return 'GroupedSearchResult({0.field_name}={0.key}, hits={0.hits})'.format(self)
def process_documents(self, doclist, raw_results):
# TODO: tame import spaghetti
from haystack import connections
engine = connections["default"]
conn = engine.get_backend().conn
unified_index = engine.get_unified_index()
indexed_models = unified_index.get_indexed_models()
for raw_result in doclist:
app_label, model_name = raw_result[DJANGO_CT].split('.')
additional_fields = {}
model = get_model(app_label, model_name)
if model and model in indexed_models:
for key, value in raw_result.items():
index = unified_index.get_index(model)
string_key = str(key)
if string_key in index.fields and hasattr(index.fields[string_key], 'convert'):
additional_fields[string_key] = index.fields[string_key].convert(value)
else:
additional_fields[string_key] = conn._to_python(value)
del additional_fields[DJANGO_CT]
del additional_fields[DJANGO_ID]
del additional_fields['score']
if raw_result[ID] in getattr(raw_results, 'highlighting', {}):
additional_fields['highlighted'] = raw_results.highlighting[raw_result[ID]]
result = SearchResult(app_label, model_name, raw_result[DJANGO_ID],
raw_result['score'], **additional_fields)
yield result
class GroupedSearchQuerySet(SearchQuerySet):
def __init__(self, *args, **kwargs):
super(GroupedSearchQuerySet, self).__init__(*args, **kwargs)
if not isinstance(self.query, GroupedSearchQuery):
raise TypeError("GroupedSearchQuerySet must be used with a GroupedSearchQuery query")
def group_by(self, field_name):
"""Have Solr group results based on the provided field name"""
clone = self._clone()
clone.query.add_group_by(field_name)
return clone
def post_process_results(self, results):
# Override the default model-specific processing
return results
def total_document_count(self):
"""Returns the count for the total number of matching documents rather than groups
A GroupedSearchQuerySet normally returns the number of document groups; this allows
you to indicate the total number of matching documents - quite handy for making facet counts match the
displayed numbers
"""
if self.query.has_run():
return self.query.get_total_document_count()
else:
clone = self._clone()
return clone.query.get_total_document_count()
class GroupedSolrSearchBackend(SolrSearchBackend):
def build_search_kwargs(self, *args, **kwargs):
group_kwargs = [(i, kwargs.pop(i)) for i in list(kwargs.keys()) if i.startswith("group")]
res = super(GroupedSolrSearchBackend, self).build_search_kwargs(*args, **kwargs)
res.update(group_kwargs)
if group_kwargs and 'sort' not in kwargs:
res['sort'] = 'score desc'
return res
def _process_results(self, raw_results, result_class=None, **kwargs):
res = super(GroupedSolrSearchBackend, self)._process_results(raw_results,
result_class=result_class,
**kwargs)
if result_class and not issubclass(result_class, GroupedSearchResult):
return res
if len(raw_results.docs):
raise RuntimeError("Grouped Solr searches should return grouped elements, not docs!")
assert not res['results']
assert not res['hits']
if isinstance(raw_results, EmptyResults):
return res
assert len(raw_results.grouped) == 1, "Grouping on more than one field is not supported"
res['results'] = results = []
for field_name, field_group in raw_results.grouped.items():
res['hits'] = field_group['ngroups']
res['matches'] = field_group['matches']
for group in field_group['groups']:
if group['groupValue'] is None:
logging.warning("Unexpected NULL grouping", extra={'data': raw_results})
res['hits'] -= 1 # Avoid confusing Haystack with excluded bogon results
continue
results.append(result_class(field_name, group, raw_results=raw_results))
return res
class GroupedSolrEngine(SolrEngine):
backend = GroupedSolrSearchBackend
query = GroupedSearchQuery
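# Hedged usage sketch, not part of the original module. Assuming a project
# whose 'default' Haystack connection points at GroupedSolrEngine, e.g.
#
#   HAYSTACK_CONNECTIONS = {
#       'default': {
#           'ENGINE': 'ahmia.solr_grouping_backend.GroupedSolrEngine',
#           'URL': 'http://127.0.0.1:8983/solr',
#       },
#   }
#
# grouping by a hypothetical 'domain' field would look like:
#
#   results = GroupedSearchQuerySet().group_by('domain')
#   for group in results:
#       print group.key, group.hits, len(group.documents)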
|
sh-ft/mudwyrm_engine
|
refs/heads/master
|
mudwyrm_engine/console_message.py
|
1
|
import operator
from types import StringType, UnicodeType, TupleType, ListType, DictType
class ConsoleMessage(object):
def __init__(self, type_, attr, contents):
if type(type_) not in [StringType, UnicodeType]:
raise TypeError("type_ must be a string")
if type(attr) is not DictType:
raise TypeError("attr must be a dict")
if type(contents) is not ListType:
raise TypeError("contents must be a list")
self.type_ = type_
self.attr = attr
self.contents = contents
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.type_ == other.type_
and self.attr == other.attr
and (len(self.contents) == len(other.contents)
and all(map(operator.eq, self.contents, other.contents))))
def to_dict(self):
def make_dict(m):
if isinstance(m, ConsoleMessage):
return {'type': m.type_, 'attr': m.attr, 'contents': map(make_dict, m.contents)}
else:
return m
return make_dict(self)
@staticmethod
def construct(message):
"""A helper function to simplify the creation of the ``ConsoleMessage``
instances.
Some examples of the arguments that this function does accept:
('div', 'system', [
'This is system message with some ', ('span', 'bold', 'bold text'), ' in it.'
])
('ul', None, [
('li', 'First item'),
('li', 'Second item'),
('li', ('a', {'href': '#third_item'}, 'Third clickable item'))
])
If the argument is a string, ``construct`` does nothing but return
the argument unchanged (i.e. returns a string).
"""
if type(message) in [StringType, UnicodeType]:
return message
elif isinstance(message, ConsoleMessage):
return message
elif type(message) is TupleType:
type_ = None
attr = None
contents = None
try:
type_, attr, contents = message
except ValueError:
attr = None
try:
type_, contents = message
except ValueError:
type_, = message
if type_ is None:
raise ValueError("type must not be None")
if attr is None:
attr = {}
if type(attr) in [StringType, UnicodeType]:
attr = {'class': attr}
if contents is None:
contents = []
if type(contents) in [StringType, UnicodeType, TupleType]:
contents = [contents]
if type(contents) is not ListType:
raise TypeError("wrong contents type")
return ConsoleMessage(type_, attr, map(ConsoleMessage.construct, contents))
else:
raise TypeError("message type must be a tuple or a string")
def message(*args):
return ConsoleMessage.construct(args)
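# Hedged usage sketch, not part of the original module: building a nested
# message via the module-level helper and serialising it for the client.
#
#   msg = message('div', 'system', [
#       'System message with some ', ('span', 'bold', 'bold text'), ' in it.'
#   ])
#   msg.to_dict()
#   # => {'type': 'div', 'attr': {'class': 'system'},
#   #     'contents': ['System message with some ',
#   #                  {'type': 'span', 'attr': {'class': 'bold'},
#   #                   'contents': ['bold text']},
#   #                  ' in it.']}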
|
BorisJeremic/Real-ESSI-Examples
|
refs/heads/master
|
education_examples/_Chapter_Modeling_and_Simulation_Examples_Static_Examples/Contact_Normal_Interface_Behaviour_SoftContact_Nonlinear_Hardening_Softening_Shear_Model/plot.py
|
8
|
#!/usr/bin/python
import h5py
import matplotlib.pyplot as plt
import sys
import numpy as np
# Go over each feioutput and plot each one.
thefile = "Monotonic_Contact_Behaviour_Adding_Normal_Load.h5.feioutput"
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
shear_strain_x = finput["/Model/Elements/Element_Outputs"][4,:]
shear_strain_y = finput["/Model/Elements/Element_Outputs"][5,:]
normal_strain = finput["/Model/Elements/Element_Outputs"][6,:]
shear_stress_x = finput["/Model/Elements/Element_Outputs"][7,:]
shear_stress_y = finput["/Model/Elements/Element_Outputs"][8,:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:]
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.figure()
plt.plot(normal_strain, normal_stress, '-k', linewidth=4)
plt.xlabel(r"Normal Strain $\epsilon$")
plt.ylabel(r"Normal Stress $\sigma$")
plt.savefig("Contact_Normal_Interface_Behavour.pdf", bbox_inches='tight')
plt.show()
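# Hedged extension, not in the original script: the shear components read
# above are never plotted; a sketch of the corresponding shear response plot
# could look like this.
#
# plt.figure()
# plt.plot(shear_strain_x, shear_stress_x, '-k', linewidth=4)
# plt.xlabel(r"Shear Strain $\gamma$")
# plt.ylabel(r"Shear Stress $\tau$")
# plt.savefig("Contact_Shear_Interface_Behaviour.pdf", bbox_inches='tight')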
# #####################################################################
|
dropzonemathmo/SocialMediaLinksRecommend
|
refs/heads/master
|
TwitterScraper/SecretExample.py
|
1
|
"""
SecretExample.py contains an example the secret login information for TwitterScraper
"""
CONSUMER_KEY = 'CONSUMER_KEY'
CONSUMER_SECRET = 'CONSUMER_SECRET'
ACCESS_TOKEN_KEY = 'ACCESS_TOKEN_KEY'
ACCESS_TOKEN_SECRET = 'ACCESS_TOKEN_SECRET'
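# Hedged usage sketch, not part of the original example: the four values are
# typically handed to a Twitter API client. Assuming the python-twitter
# package (an assumption; substitute whichever client TwitterScraper uses):
#
# import twitter
# api = twitter.Api(consumer_key=CONSUMER_KEY,
#                   consumer_secret=CONSUMER_SECRET,
#                   access_token_key=ACCESS_TOKEN_KEY,
#                   access_token_secret=ACCESS_TOKEN_SECRET)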
|
coolstar/coreboot
|
refs/heads/master
|
util/exynos/fixed_cksum.py
|
12
|
#!/usr/bin/env python2
#
# Copyright (C) 2013 Google Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""
This utility computes and fills Exynos ROM checksum (for BL1 or BL2).
(Algorithm from U-Boot: tools/mkexynosspl.c)
Input: IN OUT DATA_SIZE
Output:
IN padded out to DATA_SIZE, checksum at the end, written to OUT.
"""
import struct
import sys
def main(argv):
if len(argv) != 4:
sys.exit('usage: %s IN OUT DATA_SIZE' % argv[0])
in_name, out_name = argv[1:3]
size = int(argv[3], 0)
checksum_format = "<I"
with open(in_name, "rb") as in_file, open(out_name, "wb") as out_file:
data = in_file.read()
checksum_size = struct.calcsize(checksum_format)
data_size = size - checksum_size
assert len(data) <= data_size
checksum = struct.pack(checksum_format, sum(map(ord, data)))
out_file.write(data + bytearray(data_size - len(data)) + checksum)
if __name__ == '__main__':
main(sys.argv)
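# Hedged verification sketch, not part of the original utility: recompute the
# checksum over a padded image produced by main() and compare it with the
# trailing four bytes ("<I" is 4 bytes wide).
#
# def verify(out_name, size):
#     with open(out_name, "rb") as f:
#         blob = f.read()
#     assert len(blob) == size
#     stored = struct.unpack("<I", blob[-4:])[0]
#     return stored == sum(map(ord, blob[:-4]))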
|
web30s/odoo-9.0c-20160402
|
refs/heads/master
|
hello/templates/openerp/addons/account/models/account_journal_dashboard.py
|
2
|
import json
from datetime import datetime, timedelta
from babel.dates import format_datetime, format_date
from openerp import models, api, _, fields
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as DF
from openerp.tools.misc import formatLang
class account_journal(models.Model):
_inherit = "account.journal"
@api.one
def _kanban_dashboard(self):
self.kanban_dashboard = json.dumps(self.get_journal_dashboard_datas())
@api.one
def _kanban_dashboard_graph(self):
if (self.type in ['sale', 'purchase']):
self.kanban_dashboard_graph = json.dumps(self.get_bar_graph_datas())
elif (self.type in ['cash', 'bank']):
self.kanban_dashboard_graph = json.dumps(self.get_line_graph_datas())
kanban_dashboard = fields.Text(compute='_kanban_dashboard')
kanban_dashboard_graph = fields.Text(compute='_kanban_dashboard_graph')
show_on_dashboard = fields.Boolean(string='Show journal on dashboard', help="Whether this journal should be displayed on the dashboard or not", default=True)
@api.multi
def toggle_favorite(self):
self.write({'show_on_dashboard': not self.show_on_dashboard})
return False
@api.multi
def get_line_graph_datas(self):
data = []
today = datetime.today()
last_month = today + timedelta(days=-30)
bank_stmt = []
# Query to optimize loading of data for bank statement graphs
# Return a list containing the latest bank statement balance per day for the
# last 30 days for current journal
query = """SELECT a.date, a.balance_end
FROM account_bank_statement AS a,
(SELECT c.date, max(c.id) AS stmt_id
FROM account_bank_statement AS c
WHERE c.journal_id = %s
AND c.date > %s
AND c.date <= %s
                    GROUP BY c.date
                    ORDER BY c.date) AS b
WHERE a.id = b.stmt_id;"""
self.env.cr.execute(query, (self.id, last_month, today))
bank_stmt = self.env.cr.dictfetchall()
last_bank_stmt = self.env['account.bank.statement'].search([('journal_id', 'in', self.ids),('date', '<=', last_month.strftime(DF))], order="date desc, id desc", limit=1)
start_balance = last_bank_stmt and last_bank_stmt[0].balance_end or 0
locale = self._context.get('lang', 'en_US')
show_date = last_month
#get date in locale format
name = format_date(show_date, 'd LLLL Y', locale=locale)
short_name = format_date(show_date, 'd MMM', locale=locale)
data.append({'x':short_name,'y':start_balance, 'name':name})
for stmt in bank_stmt:
#fill the gap between last data and the new one
number_day_to_add = (datetime.strptime(stmt.get('date'), DF) - show_date).days
last_balance = data[len(data) - 1]['y']
for day in range(0,number_day_to_add + 1):
show_date = show_date + timedelta(days=1)
#get date in locale format
name = format_date(show_date, 'd LLLL Y', locale=locale)
short_name = format_date(show_date, 'd MMM', locale=locale)
data.append({'x': short_name, 'y':last_balance, 'name': name})
#add new stmt value
data[len(data) - 1]['y'] = stmt.get('balance_end')
#continue the graph if the last statement isn't today
if show_date != today:
number_day_to_add = (today - show_date).days
last_balance = data[len(data) - 1]['y']
for day in range(0,number_day_to_add):
show_date = show_date + timedelta(days=1)
#get date in locale format
name = format_date(show_date, 'd LLLL Y', locale=locale)
short_name = format_date(show_date, 'd MMM', locale=locale)
data.append({'x': short_name, 'y':last_balance, 'name': name})
return [{'values': data, 'area': True}]
@api.multi
def get_bar_graph_datas(self):
data = []
today = datetime.strptime(fields.Date.context_today(self), DF)
data.append({'label': _('Past'), 'value':0.0, 'type': 'past'})
day_of_week = int(format_datetime(today, 'e', locale=self._context.get('lang', 'en_US')))
first_day_of_week = today + timedelta(days=-day_of_week+1)
for i in range(-1,4):
if i==0:
label = _('This Week')
elif i==3:
label = _('Future')
else:
start_week = first_day_of_week + timedelta(days=i*7)
end_week = start_week + timedelta(days=6)
if start_week.month == end_week.month:
label = str(start_week.day) + '-' +str(end_week.day)+ ' ' + format_date(end_week, 'MMM', locale=self._context.get('lang', 'en_US'))
else:
label = format_date(start_week, 'd MMM', locale=self._context.get('lang', 'en_US'))+'-'+format_date(end_week, 'd MMM', locale=self._context.get('lang', 'en_US'))
data.append({'label':label,'value':0.0, 'type': 'past' if i<0 else 'future'})
# Build SQL query to find amount aggregated by week
select_sql_clause = """SELECT sum(residual_company_signed) as total, min(date) as aggr_date from account_invoice where journal_id = %(journal_id)s and state = 'open'"""
query = ''
start_date = (first_day_of_week + timedelta(days=-7))
for i in range(0,6):
if i == 0:
query += "("+select_sql_clause+" and date < '"+start_date.strftime(DF)+"')"
elif i == 5:
query += " UNION ALL ("+select_sql_clause+" and date >= '"+start_date.strftime(DF)+"')"
else:
next_date = start_date + timedelta(days=7)
query += " UNION ALL ("+select_sql_clause+" and date >= '"+start_date.strftime(DF)+"' and date < '"+next_date.strftime(DF)+"')"
start_date = next_date
self.env.cr.execute(query, {'journal_id':self.id})
query_results = self.env.cr.dictfetchall()
for index in range(0, len(query_results)):
if query_results[index].get('aggr_date') != None:
data[index]['value'] = query_results[index].get('total')
return [{'values': data}]
@api.multi
def get_journal_dashboard_datas(self):
currency = self.currency_id or self.company_id.currency_id
number_to_reconcile = last_balance = account_sum = 0
ac_bnk_stmt = []
title = ''
number_draft = number_waiting = number_late = sum_draft = sum_waiting = sum_late = 0
if self.type in ['bank', 'cash']:
last_bank_stmt = self.env['account.bank.statement'].search([('journal_id', 'in', self.ids)], order="date desc, id desc", limit=1)
last_balance = last_bank_stmt and last_bank_stmt[0].balance_end or 0
ac_bnk_stmt = self.env['account.bank.statement'].search([('journal_id', 'in', self.ids),('state', '=', 'open')])
for ac_bnk in ac_bnk_stmt:
for line in ac_bnk.line_ids:
if not line.journal_entry_ids:
number_to_reconcile += 1
# optimization to read sum of balance from account_move_line
account_ids = tuple(filter(None, [self.default_debit_account_id.id, self.default_credit_account_id.id]))
if account_ids:
amount_field = 'balance' if not self.currency_id else 'amount_currency'
query = """SELECT sum(%s) FROM account_move_line WHERE account_id in %%s;""" % (amount_field,)
self.env.cr.execute(query, (account_ids,))
query_results = self.env.cr.dictfetchall()
if query_results and query_results[0].get('sum') != None:
account_sum = query_results[0].get('sum')
#TODO need to check if all invoices are in the same currency as the journal!
elif self.type in ['sale', 'purchase']:
title = _('Bills to pay') if self.type == 'purchase' else _('Invoices owed to you')
# optimization to find total and sum of invoice that are in draft, open state
query = """SELECT state, amount_total, currency_id AS currency FROM account_invoice WHERE journal_id = %s AND state NOT IN ('paid', 'cancel');"""
self.env.cr.execute(query, (self.id,))
query_results = self.env.cr.dictfetchall()
today = datetime.today()
query = """SELECT amount_total, currency_id AS currency FROM account_invoice WHERE journal_id = %s AND date < %s AND state = 'open';"""
self.env.cr.execute(query, (self.id, today))
late_query_results = self.env.cr.dictfetchall()
sum_draft = 0.0
number_draft = 0
number_waiting = 0
for result in query_results:
cur = self.env['res.currency'].browse(result.get('currency'))
if result.get('state') in ['draft', 'proforma', 'proforma2']:
number_draft += 1
sum_draft += cur.compute(result.get('amount_total'), currency)
elif result.get('state') == 'open':
number_waiting += 1
sum_waiting += cur.compute(result.get('amount_total'), currency)
sum_late = 0.0
number_late = 0
for result in late_query_results:
cur = self.env['res.currency'].browse(result.get('currency'))
number_late += 1
sum_late += cur.compute(result.get('amount_total'), currency)
return {
'number_to_reconcile': number_to_reconcile,
'account_balance': formatLang(self.env, account_sum, currency_obj=self.currency_id or self.company_id.currency_id),
'last_balance': formatLang(self.env, last_balance, currency_obj=self.currency_id or self.company_id.currency_id),
'number_draft': number_draft,
'number_waiting': number_waiting,
'number_late': number_late,
'sum_draft': formatLang(self.env, sum_draft or 0.0, currency_obj=self.currency_id or self.company_id.currency_id),
'sum_waiting': formatLang(self.env, sum_waiting or 0.0, currency_obj=self.currency_id or self.company_id.currency_id),
'sum_late': formatLang(self.env, sum_late or 0.0, currency_obj=self.currency_id or self.company_id.currency_id),
'currency_id': self.currency_id and self.currency_id.id or self.company_id.currency_id.id,
'bank_statements_source': self.bank_statements_source,
'title': title,
}
@api.multi
def action_create_new(self):
ctx = self._context.copy()
model = 'account.invoice'
if self.type == 'sale':
ctx.update({'journal_type': self.type, 'default_type': 'out_invoice', 'type': 'out_invoice', 'default_journal_id': self.id})
if ctx.get('refund'):
ctx.update({'default_type':'out_refund', 'type':'out_refund'})
view_id = self.env.ref('account.invoice_form').id
elif self.type == 'purchase':
ctx.update({'journal_type': self.type, 'default_type': 'in_invoice', 'type': 'in_invoice', 'default_journal_id': self.id})
if ctx.get('refund'):
ctx.update({'default_type': 'in_refund', 'type': 'in_refund'})
view_id = self.env.ref('account.invoice_supplier_form').id
else:
ctx.update({'default_journal_id': self.id})
view_id = self.env.ref('account.view_move_form').id
model = 'account.move'
return {
'name': _('Create invoice/bill'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': model,
'view_id': view_id,
'context': ctx,
}
@api.multi
def create_cash_statement(self):
ctx = self._context.copy()
ctx.update({'journal_id': self.id, 'default_journal_id': self.id, 'default_journal_type': 'cash'})
return {
'name': _('Create cash statement'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'account.bank.statement',
'context': ctx,
}
@api.multi
def action_open_reconcile(self):
if self.type in ['bank', 'cash']:
# Open reconciliation view for bank statements belonging to this journal
bank_stmt = self.env['account.bank.statement'].search([('journal_id', 'in', self.ids)])
return {
'type': 'ir.actions.client',
'tag': 'bank_statement_reconciliation_view',
'context': {'statement_ids': bank_stmt.ids},
}
else:
# Open reconciliation view for customers/suppliers
action_context = {'show_mode_selector': False}
if self.type == 'sale':
action_context.update({'mode': 'customers'})
elif self.type == 'purchase':
action_context.update({'mode': 'suppliers'})
return {
'type': 'ir.actions.client',
'tag': 'manual_reconciliation_view',
'context': action_context,
}
@api.multi
def open_action(self):
"""return action based on type for related journals"""
action_name = self._context.get('action_name', False)
if not action_name:
if self.type == 'bank':
action_name = 'action_bank_statement_tree'
elif self.type == 'cash':
action_name = 'action_view_bank_statement_tree'
elif self.type == 'sale':
action_name = 'action_invoice_tree1'
elif self.type == 'purchase':
action_name = 'action_invoice_tree2'
else:
action_name = 'action_move_journal_line'
_journal_invoice_type_map = {
'sale': 'out_invoice',
'purchase': 'in_invoice',
'bank': 'bank',
'cash': 'cash',
'general': 'general',
}
invoice_type = _journal_invoice_type_map[self.type]
ctx = self._context.copy()
ctx.update({
'journal_type': self.type,
'default_journal_id': self.id,
'search_default_journal_id': self.id,
'default_type': invoice_type,
'type': invoice_type
})
ir_model_obj = self.pool['ir.model.data']
model, action_id = ir_model_obj.get_object_reference(self._cr, self._uid, 'account', action_name)
action = self.pool[model].read(self._cr, self._uid, action_id, context=self._context)
action['context'] = ctx
action['domain'] = self._context.get('use_domain', [])
return action
@api.multi
def open_spend_money(self):
return self.open_payments_action('outbound')
@api.multi
def open_collect_money(self):
return self.open_payments_action('inbound')
@api.multi
def open_transfer_money(self):
return self.open_payments_action('transfer')
@api.multi
def open_payments_action(self, payment_type):
ctx = self._context.copy()
ctx.update({
'default_payment_type': payment_type,
'default_journal_id': self.id
})
action_rec = self.env['ir.model.data'].xmlid_to_object('account.action_account_payments')
if action_rec:
action = action_rec.read([])[0]
action['context'] = ctx
action['domain'] = [('journal_id','=',self.id),('payment_type','=',payment_type)]
return action
@api.multi
def open_action_with_context(self):
action_name = self.env.context.get('action_name', False)
if not action_name:
return False
ctx = dict(self.env.context, default_journal_id=self.id)
if ctx.get('search_default_journal', False):
ctx.update(search_default_journal_id=self.id)
ir_model_obj = self.pool['ir.model.data']
model, action_id = ir_model_obj.get_object_reference(self._cr, self._uid, 'account', action_name)
action = self.pool[model].read(self._cr, self._uid, action_id, context=self._context)
action['context'] = ctx
if ctx.get('use_domain', False):
action['domain'] = ['|', ('journal_id', '=', self.id), ('journal_id', '=', False)]
action['name'] += ' for journal '+self.name
return action
@api.multi
def create_bank_statement(self):
"""return action to create a bank statements. This button should be called only on journals with type =='bank'"""
self.bank_statements_source = 'manual'
action = self.env.ref('account.action_bank_statement_tree').read()[0]
action.update({
'views': [[False, 'form']],
'context': "{'default_journal_id': " + str(self.id) + "}",
})
return action
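# Hedged usage sketch, not part of the original module: reading the computed
# dashboard JSON back from server-side code, e.g. in a test. `env` is an
# assumed Odoo environment; the keys match get_journal_dashboard_datas() above.
#
# journal = env['account.journal'].search([('type', '=', 'bank')], limit=1)
# dashboard = json.loads(journal.kanban_dashboard)
# print(dashboard['last_balance'], dashboard['number_to_reconcile'])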
|