| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable ⌀) |
|---|---|---|---|---|
anisku11/sublimeku
|
refs/heads/master
|
Packages/CodeComplice/libs/chardet/mbcharsetprober.py
|
8
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .constants import eStart, eError, eItsMe
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mDistributionAnalyzer = None
self._mCodingSM = None
self._mLastChar = [0, 0]
def reset(self):
CharSetProber.reset(self)
if self._mCodingSM:
self._mCodingSM.reset()
if self._mDistributionAnalyzer:
self._mDistributionAnalyzer.reset()
self._mLastChar = [0, 0]
def get_charset_name(self):
pass
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == eError:
if constants._debug:
sys.stderr.write(self.get_charset_name() + ' prober hit error at byte ' + str(i) + '\n')
self._mState = constants.eNotMe
break
elif codingState == eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mDistributionAnalyzer.feed(aBuf[i-1:i+1], charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if self._mDistributionAnalyzer.got_enough_data() and \
(self.get_confidence() > constants.SHORTCUT_THRESHOLD):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
return self._mDistributionAnalyzer.get_confidence()
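# --- Illustrative usage sketch (not part of the original module) ---
# A concrete subclass (e.g. SJISProber from the sibling chardet modules) is
# expected to assign self._mCodingSM and self._mDistributionAnalyzer; the
# typical detection loop is then roughly (names assumed, for illustration):
#
#   prober = SJISProber()                # hypothetical concrete subclass
#   state = prober.feed(byte_buffer)     # eDetecting / eFoundIt / eNotMe
#   if state == constants.eFoundIt:
#       print(prober.get_charset_name(), prober.get_confidence())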
|
DougFirErickson/qgisSpaceSyntaxToolkit
|
refs/heads/master
|
esstoolkit/external/networkx/algorithms/isomorphism/matchhelpers.py
|
51
|
"""Functions which help end users define customize node_match and
edge_match functions to use during isomorphism checks.
"""
from itertools import permutations
import types
import networkx as nx
__all__ = ['categorical_node_match',
'categorical_edge_match',
'categorical_multiedge_match',
'numerical_node_match',
'numerical_edge_match',
'numerical_multiedge_match',
'generic_node_match',
'generic_edge_match',
'generic_multiedge_match',
]
def copyfunc(f, name=None):
"""Returns a deepcopy of a function."""
try:
# Python <3
return types.FunctionType(f.func_code, f.func_globals,
name or f.__name__, f.func_defaults,
f.func_closure)
except AttributeError:
# Python >=3
return types.FunctionType(f.__code__, f.__globals__,
name or f.__name__, f.__defaults__,
f.__closure__)
def allclose(x, y, rtol=1.0000000000000001e-05, atol=1e-08):
"""Returns True if x and y are sufficiently close, elementwise.
Parameters
----------
rtol : float
The relative error tolerance.
atol : float
The absolute error tolerance.
"""
# assume finite weights, see numpy.allclose() for reference
for xi, yi in zip(x,y):
if not ( abs(xi-yi) <= atol + rtol * abs(yi) ):
return False
return True
def close(x, y, rtol=1.0000000000000001e-05, atol=1e-08):
"""Returns True if x and y are sufficiently close.
Parameters
----------
rtol : float
The relative error tolerance.
atol : float
The absolute error tolerance.
"""
# assume finite weights, see numpy.allclose() for reference
return abs(x-y) <= atol + rtol * abs(y)
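# Worked example (sketch, not in the original source): with the default
# tolerances, close(1.0, 1.000005) is True because
# abs(1.0 - 1.000005) = 5e-06 <= 1e-08 + 1e-05 * 1.000005.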
categorical_doc = """
Returns a comparison function for a categorical node attribute.
The value(s) of the attr(s) must be hashable and comparable via the ==
operator since they are placed into a set([]) object. If the sets from
G1 and G2 are the same, then the constructed function returns True.
Parameters
----------
attr : string | list
The categorical node attribute to compare, or a list of categorical
node attributes to compare.
default : value | list
The default value for the categorical node attribute, or a list of
default values for the categorical node attributes.
Returns
-------
match : function
The customized, categorical `node_match` function.
Examples
--------
>>> import networkx.algorithms.isomorphism as iso
>>> nm = iso.categorical_node_match('size', 1)
>>> nm = iso.categorical_node_match(['color', 'size'], ['red', 2])
"""
def categorical_node_match(attr, default):
if nx.utils.is_string_like(attr):
def match(data1, data2):
return data1.get(attr, default) == data2.get(attr, default)
else:
attrs = list(zip(attr, default)) # Python 3
def match(data1, data2):
values1 = set([data1.get(attr, d) for attr, d in attrs])
values2 = set([data2.get(attr, d) for attr, d in attrs])
return values1 == values2
return match
try:
categorical_edge_match = copyfunc(categorical_node_match, 'categorical_edge_match')
except NotImplementedError:
# IronPython lacks support for types.FunctionType.
# https://github.com/networkx/networkx/issues/949
# https://github.com/networkx/networkx/issues/1127
def categorical_edge_match(*args, **kwargs):
return categorical_node_match(*args, **kwargs)
def categorical_multiedge_match(attr, default):
if nx.utils.is_string_like(attr):
def match(datasets1, datasets2):
values1 = set([data.get(attr, default) for data in datasets1.values()])
values2 = set([data.get(attr, default) for data in datasets2.values()])
return values1 == values2
else:
attrs = list(zip(attr, default)) # Python 3
def match(datasets1, datasets2):
values1 = set([])
for data1 in datasets1.values():
x = tuple( data1.get(attr, d) for attr, d in attrs )
values1.add(x)
values2 = set([])
for data2 in datasets2.values():
x = tuple( data2.get(attr, d) for attr, d in attrs )
values2.add(x)
return values1 == values2
return match
# Docstrings for categorical functions.
categorical_node_match.__doc__ = categorical_doc
categorical_edge_match.__doc__ = categorical_doc.replace('node', 'edge')
tmpdoc = categorical_doc.replace('node', 'edge')
tmpdoc = tmpdoc.replace('categorical_edge_match', 'categorical_multiedge_match')
categorical_multiedge_match.__doc__ = tmpdoc
numerical_doc = """
Returns a comparison function for a numerical node attribute.
The value(s) of the attr(s) must be numerical and sortable. If the
sorted list of values from G1 and G2 are the same within some
tolerance, then the constructed function returns True.
Parameters
----------
attr : string | list
The numerical node attribute to compare, or a list of numerical
node attributes to compare.
default : value | list
The default value for the numerical node attribute, or a list of
default values for the numerical node attributes.
rtol : float
The relative error tolerance.
atol : float
The absolute error tolerance.
Returns
-------
match : function
The customized, numerical `node_match` function.
Examples
--------
>>> import networkx.algorithms.isomorphism as iso
>>> nm = iso.numerical_node_match('weight', 1.0)
>>> nm = iso.numerical_node_match(['weight', 'linewidth'], [.25, .5])
"""
def numerical_node_match(attr, default, rtol=1.0000000000000001e-05, atol=1e-08):
if nx.utils.is_string_like(attr):
def match(data1, data2):
return close(data1.get(attr, default),
data2.get(attr, default),
rtol=rtol, atol=atol)
else:
attrs = list(zip(attr, default)) # Python 3
def match(data1, data2):
values1 = [data1.get(attr, d) for attr, d in attrs]
values2 = [data2.get(attr, d) for attr, d in attrs]
return allclose(values1, values2, rtol=rtol, atol=atol)
return match
try:
numerical_edge_match = copyfunc(numerical_node_match, 'numerical_edge_match')
except NotImplementedError:
# IronPython lacks support for types.FunctionType.
# https://github.com/networkx/networkx/issues/949
# https://github.com/networkx/networkx/issues/1127
def numerical_edge_match(*args, **kwargs):
return numerical_node_match(*args, **kwargs)
def numerical_multiedge_match(attr, default, rtol=1.0000000000000001e-05, atol=1e-08):
if nx.utils.is_string_like(attr):
def match(datasets1, datasets2):
values1 = sorted([data.get(attr, default) for data in datasets1.values()])
values2 = sorted([data.get(attr, default) for data in datasets2.values()])
return allclose(values1, values2, rtol=rtol, atol=atol)
else:
attrs = list(zip(attr, default)) # Python 3
def match(datasets1, datasets2):
values1 = []
for data1 in datasets1.values():
x = tuple( data1.get(attr, d) for attr, d in attrs )
values1.append(x)
values2 = []
for data2 in datasets2.values():
x = tuple( data2.get(attr, d) for attr, d in attrs )
values2.append(x)
values1.sort()
values2.sort()
for xi, yi in zip(values1, values2):
if not allclose(xi, yi, rtol=rtol, atol=atol):
return False
else:
return True
return match
# Docstrings for numerical functions.
numerical_node_match.__doc__ = numerical_doc
numerical_edge_match.__doc__ = numerical_doc.replace('node', 'edge')
tmpdoc = numerical_doc.replace('node', 'edge')
tmpdoc = tmpdoc.replace('numerical_edge_match', 'numerical_multiedge_match')
numerical_multiedge_match.__doc__ = tmpdoc
generic_doc = """
Returns a comparison function for a generic attribute.
The value(s) of the attr(s) are compared using the specified
operators. If all the attributes are equal, then the constructed
function returns True.
Parameters
----------
attr : string | list
The node attribute to compare, or a list of node attributes
to compare.
default : value | list
The default value for the node attribute, or a list of
default values for the node attributes.
op : callable | list
The operator to use when comparing attribute values, or a list
of operators to use when comparing values for each attribute.
Returns
-------
match : function
The customized, generic `node_match` function.
Examples
--------
>>> from operator import eq
>>> from networkx.algorithms.isomorphism.matchhelpers import close
>>> from networkx.algorithms.isomorphism import generic_node_match
>>> nm = generic_node_match('weight', 1.0, close)
>>> nm = generic_node_match('color', 'red', eq)
>>> nm = generic_node_match(['weight', 'color'], [1.0, 'red'], [close, eq])
"""
def generic_node_match(attr, default, op):
if nx.utils.is_string_like(attr):
def match(data1, data2):
return op(data1.get(attr, default), data2.get(attr, default))
else:
attrs = list(zip(attr, default, op)) # Python 3
def match(data1, data2):
for attr, d, operator in attrs:
if not operator(data1.get(attr, d), data2.get(attr, d)):
return False
else:
return True
return match
try:
generic_edge_match = copyfunc(generic_node_match, 'generic_edge_match')
except NotImplementedError:
# IronPython lacks support for types.FunctionType.
# https://github.com/networkx/networkx/issues/949
# https://github.com/networkx/networkx/issues/1127
def generic_edge_match(*args, **kwargs):
return generic_node_match(*args, **kwargs)
def generic_multiedge_match(attr, default, op):
"""Returns a comparison function for a generic attribute.
The value(s) of the attr(s) are compared using the specified
operators. If all the attributes are equal, then the constructed
function returns True. Potentially, the constructed edge_match
function can be slow since it must verify that no isomorphism
exists between the multiedges before it returns False.
Parameters
----------
attr : string | list
The edge attribute to compare, or a list of edge attributes
to compare.
default : value | list
The default value for the edge attribute, or a list of
default values for the edge attributes.
op : callable | list
The operator to use when comparing attribute values, or a list
of operators to use when comparing values for each attribute.
Returns
-------
match : function
The customized, generic `edge_match` function.
Examples
--------
>>> from operator import eq
>>> from networkx.algorithms.isomorphism.matchhelpers import close
>>> from networkx.algorithms.isomorphism import generic_node_match
>>> nm = generic_node_match('weight', 1.0, close)
>>> nm = generic_node_match('color', 'red', eq)
>>> nm = generic_node_match(['weight', 'color'],
... [1.0, 'red'],
... [close, eq])
...
"""
# This is slow, but generic.
# We must test every possible isomorphism between the edges.
if nx.utils.is_string_like(attr):
def match(datasets1, datasets2):
values1 = [data.get(attr, default) for data in datasets1.values()]
values2 = [data.get(attr, default) for data in datasets2.values()]
for vals2 in permutations(values2):
for xi, yi in zip(values1, vals2):
if not op(xi, yi):
# This is not an isomorphism, go to next permutation.
break
else:
# Then we found an isomorphism.
return True
else:
# Then there are no isomorphisms between the multiedges.
return False
else:
attrs = list(zip(attr, default)) # Python 3
def match(datasets1, datasets2):
values1 = []
for data1 in datasets1.values():
x = tuple( data1.get(attr, d) for attr, d in attrs )
values1.append(x)
values2 = []
for data2 in datasets2.values():
x = tuple( data2.get(attr, d) for attr, d in attrs )
values2.append(x)
for vals2 in permutations(values2):
for xi, yi, operator in zip(values1, vals2, op):
if not operator(xi, yi):
return False
else:
return True
return match
# Docstrings for numerical functions.
generic_node_match.__doc__ = generic_doc
generic_edge_match.__doc__ = generic_doc.replace('node', 'edge')
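# --- Illustrative usage sketch (not part of the original module) ---
# The constructed matchers are intended to be passed to the isomorphism
# checkers, e.g. for two graphs G1 and G2 carrying a 'color' node attribute:
#
#   import networkx as nx
#   nm = categorical_node_match('color', 'red')
#   nx.is_isomorphic(G1, G2, node_match=nm)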
|
rananda/cfme_tests
|
refs/heads/master
|
utils/ftp.py
|
15
|
# -*- coding: utf-8 -*-
""" FTP manipulation library
@author: Milan Falešník <mfalesni@redhat.com>
"""
import fauxfactory
import ftplib
import re
from datetime import datetime
from time import strptime, mktime
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class FTPException(Exception):
pass
class FTPDirectory(object):
""" FTP FS Directory encapsulation
This class represents one directory.
Contains pointers to all child directories (self.directories)
and also all files in current directory (self.files)
"""
def __init__(self, client, name, items, parent_dir=None, time=None):
""" Constructor
Args:
client: ftplib.FTP instance
name: Name of this directory
items: Content of this directory
parent_dir: Pointer to a parent directory to maintain hierarchy. None if root
time: Time of this object
"""
self.client = client
self.parent_dir = parent_dir
self.time = time
self.name = name
self.files = []
self.directories = []
for item in items:
if isinstance(item, dict): # Is a directory
self.directories.append(FTPDirectory(self.client,
item["dir"],
item["content"],
parent_dir=self,
time=item["time"]))
else:
self.files.append(FTPFile(self.client, item[0], self, item[1]))
@property
def path(self):
"""
Returns:
whole path for this directory
"""
if self.parent_dir:
return self.parent_dir.path + self.name + "/"
else:
return self.name
def __repr__(self):
return "<FTPDirectory {}>".format(self.path)
def cd(self, path):
""" Change to a directory
Changes directory to a path specified by parameter path. There are three special cases:
/ - climbs by self.parent_dir up in the hierarchy until it reaches root element.
. - does nothing
.. - climbs one level up in the hierarchy, if present, otherwise does the same as the preceding.
Args:
path: Path to change
"""
if path == ".":
return self
elif path == "..":
result = self
if result.parent_dir:
result = result.parent_dir
return result
elif path == "/":
result = self
while result.parent_dir:
result = result.parent_dir
return result
# Split off the first path component; the rest (if any) is resolved recursively
parts = path.strip("/").split("/", 1)
enter = parts[0]
remainder = parts[1] if len(parts) == 2 else None
for item in self.directories:
if item.name == enter:
if remainder:
return item.cd(remainder)
else:
return item
raise FTPException("Directory {}{} does not exist!".format(self.path, enter))
def search(self, by, files=True, directories=True):
""" Recursive search by string or regexp.
Searches through the whole filesystem structure from top to bottom until
it finds the required files or directories.
You can specify either plain string or regexp. String search does classic ``in``,
regexp matching is done by exact matching (by.match).
Args:
by: Search string or regexp
files: Whether look for files
directories: Whether look for directories
Returns:
List of all objects found in FS
"""
def _scan(what, in_what):
if isinstance(what, re._pattern_type):
return what.match(in_what) is not None
else:
return what in in_what
results = []
if files:
for f in self.files:
if _scan(by, f.name):
results.append(f)
for d in self.directories:
if directories:
if _scan(by, d.name):
results.append(d)
results.extend(d.search(by, files=files, directories=directories))
return results
class FTPFile(object):
""" FTP FS File encapsulation
This class represents one file in the FS hierarchy.
It encapsulates mainly its position in FS and adds the possibility
of downloading the file.
"""
def __init__(self, client, name, parent_dir, time):
""" Constructor
Args:
client: ftplib.FTP instance
name: File name (without path)
parent_dir: Directory in which this file is
"""
self.client = client
self.parent_dir = parent_dir
self.name = name
self.time = time
@property
def path(self):
"""
Returns:
whole path for this file
"""
if self.parent_dir:
return self.parent_dir.path + self.name
else:
return self.name
@property
def local_time(self):
"""
Returns:
time modified to match local computer's time zone
"""
return self.client.dt + self.time
def __repr__(self):
return "<FTPFile {}>".format(self.path)
def retr(self, callback):
""" Retrieve file
Wrapper around ftplib.FTP.retrbinary().
This function cd's to the directory where this file is present, then calls the
FTP's retrbinary() function with provided callable and then cd's back where it started
to keep it consistent.
Args:
callback: Any callable that accepts one parameter as the data
Raises:
AssertionError: When any of the CWD or CDUP commands fail.
ftplib.error_perm: When retrbinary call of ftplib fails
"""
dirs, f = self.path.rsplit("/", 1)
dirs = dirs.lstrip("/").split("/")
# Dive in
for d in dirs:
assert self.client.cwd(d), "Could not change into the directory {}!".format(d)
self.client.retrbinary(f, callback)
# Dive out
for d in dirs:
assert self.client.cdup(), "Could not get out of directory {}!".format(d)
def download(self, target=None):
""" Download file into this machine
Wrapper around self.retr function. It downloads the file from remote filesystem
into local filesystem. Name is either preserved original, or can be changed.
Args:
target: Target file name (None to preserve the original)
"""
if target is None:
target = self.name
with open(target, "wb") as output:
self.retr(output.write)
class FTPClient(object):
""" FTP Client encapsulation
This class provides basic encapsulation around ftplib's FTP class.
It wraps some methods and allows to easily delete whole directory or walk
through the directory tree.
Usage:
>>> from utils.ftp import FTPClient
>>> ftp = FTPClient("host", "user", "password")
>>> only_files_with_EVM_in_name = ftp.filesystem.search("EVM", directories=False)
>>> only_files_by_regexp = ftp.filesystem.search(re.compile("regexp"), directories=False)
>>> some_directory = ftp.filesystem.cd("a/b/c") # cd's to this directory
>>> root = some_directory.cd("/")
Always going through filesystem property is a bit slow as it parses the structure on each use.
If you are sure that the structure will remain intact between uses, you can do as follows
to save the time::
>>> fs = ftp.filesystem
Let's download some files::
>>> for f in ftp.filesystem.search("IMPORTANT_FILE", directories=False):
... f.download() # To pickup its original name
... f.download("custom_name")
We finished the testing, so we don't need the content of the directory::
>>> ftp.recursively_delete()
And it's gone.
"""
def __init__(self, host, login, password, upload_dir="/"):
""" Constructor
Args:
host: FTP server host
login: FTP login
password: FTP password
"""
self.host = host
self.login = login
self.password = password
self.ftp = None
self.dt = None
self.upload_dir = upload_dir
self.connect()
self.update_time_difference()
def connect(self):
self.ftp = ftplib.FTP(self.host)
self.ftp.login(self.login, self.password)
def update_time_difference(self):
""" Determine the time difference between the FTP server and this computer.
This is done by uploading a fake file, reading its time and deleting it.
Then the self.dt variable captures the time you need to ADD to the remote
time or SUBTRACT from local time.
The FTPFile object carries this automatically as it has .local_time property
which adds the client's .dt to its time.
"""
TIMECHECK_FILE_NAME = fauxfactory.gen_alphanumeric(length=16)
void_file = StringIO(fauxfactory.gen_alpha())
self.cwd(self.upload_dir)
assert "Transfer complete" in self.storbinary(TIMECHECK_FILE_NAME, void_file),\
"Could not upload a file for time checking with name {}!".format(TIMECHECK_FILE_NAME)
void_file.close()
now = datetime.now()
for d, name, time in self.ls():
if name == TIMECHECK_FILE_NAME:
self.dt = now - time
self.dele(TIMECHECK_FILE_NAME)
self.cwd("/")
return True
raise FTPException("The timecheck file was not found in the current FTP directory")
def ls(self):
""" Lists the content of a directory.
Returns:
List of all items in current directory
Return format is [(is_dir?, "name", remote_time), ...]
"""
result = []
def _callback(line):
is_dir = line.upper().startswith("D")
# Split into at most 9 fields; the last one is the file name, which may contain blanks
fields = re.split(r"\s+", line, maxsplit=8)
# This reflects how the information in a LIST line is presented:
# Nov 11 12:34 filename (from the end)
date = strptime(str(datetime.now().year) + " " + fields[-4] + " " + fields[-3] + " " +
fields[-2],
"%Y %b %d %H:%M")
# convert time.struct_time into datetime
date = datetime.fromtimestamp(mktime(date))
result.append((is_dir, fields[-1], date))
self.ftp.dir(_callback)
return result
def pwd(self):
""" Get current directory
Returns:
Current directory
Raises:
AssertionError: PWD command fails
"""
result = self.ftp.sendcmd("PWD")
assert "is the current directory" in result, "PWD command failed"
x, d, y = result.strip().split("\"")
return d.strip()
def cdup(self):
""" Goes one level up in directory hierarchy (cd ..)
"""
return self.ftp.sendcmd("CDUP")
def mkd(self, d):
""" Create a directory
Args:
d: Directory name
Returns:
Success of the action
"""
try:
return self.ftp.sendcmd("MKD {}".format(d)).startswith("250")
except ftplib.error_perm:
return False
def rmd(self, d):
""" Remove a directory
Args:
d: Directory name
Returns:
Success of the action
"""
try:
return self.ftp.sendcmd("RMD {}".format(d)).startswith("250")
except ftplib.error_perm:
return False
def dele(self, f):
""" Remove a file
Args:
f: File name
Returns:
Success of the action
"""
try:
return self.ftp.sendcmd("DELE {}".format(f)).startswith("250")
except ftplib.error_perm:
return False
def cwd(self, d):
""" Enter a directory
Args:
d: Directory name
Returns:
Success of the action
"""
try:
return self.ftp.sendcmd("CWD {}".format(d)).startswith("250")
except ftplib.error_perm:
return False
def close(self):
""" Finish work and close connection
"""
self.ftp.quit()
self.ftp.close()
self.ftp = None
def retrbinary(self, f, callback):
""" Download file
You need to specify the callback function, which accepts one parameter
(data), to be processed.
Args:
f: Requested file name
callback: Callable with one parameter accepting the data
"""
return self.ftp.retrbinary("RETR {}".format(f), callback)
def storbinary(self, f, file_obj):
""" Store file
You need to specify the file object.
Args:
f: Requested file name
file_obj: File object to be stored
"""
return self.ftp.storbinary("STOR {}".format(f), file_obj)
def recursively_delete(self, d=None):
""" Recursively deletes content of pwd
WARNING: Destructive!
Args:
d: Directory to enter (None for not entering - root directory)
d: str or None
Raises:
AssertionError: When some of the FTP commands fail.
"""
# Enter the directory
if d:
assert self.cwd(d), "Could not enter directory {}".format(d)
# Work in it
for isdir, name, time in self.ls():
if isdir:
self.recursively_delete(name)
else:
assert self.dele(name), "Could not delete {}!".format(name)
# Go out of it
if d:
# Go to parent directory
assert self.cdup(), "Could not go to parent directory of {}!".format(d)
# And delete it
assert self.rmd(d), "Could not remove directory {}!".format(d)
def tree(self, d=None):
""" Walks the tree recursively and creates a tree
Base structure is a list. List contains directory content and the type decides whether
it's a directory or a file:
- tuple: it's a file, therefore it represents file's name and time
- dict: it's a directory. Then the dict structure is as follows::
dir: directory name
content: list of directory content (recurse)
Args:
d: Directory to enter (None for not entering - root directory)
Returns:
Directory structure in lists and dicts.
Raises:
AssertionError: When some of the FTP commands fail.
"""
# Enter the directory
items = []
if d:
assert self.cwd(d), "Could not enter directory {}".format(d)
# Work in it
for isdir, name, time in self.ls():
if isdir:
items.append({"dir": name, "content": self.tree(name), "time": time})
else:
items.append((name, time))
# Go out of it
if d:
# Go to parent directory
assert self.cdup(), "Could not go to parent directory of {}!".format(d)
return items
@property
def filesystem(self):
""" Returns the object structure of the filesystem
Returns:
Root directory
"""
return FTPDirectory(self, "/", self.tree())
# Context management methods
def __enter__(self):
""" Entering the context does nothing, because the client is already connected
"""
return self
def __exit__(self, type, value, traceback):
""" Exiting the context means just calling .close() on the client.
"""
self.close()
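# --- Illustrative usage sketch (not part of the original module) ---
# Because __enter__/__exit__ are defined, the client can also be used as a
# context manager so the connection is always closed:
#
#   with FTPClient("host", "user", "password") as ftp:
#       for f in ftp.filesystem.search("EVM", directories=False):
#           f.download()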
|
MrLoick/python-for-android
|
refs/heads/master
|
python-build/python-libs/gdata/build/lib/gdata/blogger/data.py
|
136
|
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data model classes for parsing and generating XML for the Blogger API."""
__author__ = 'j.s@google.com (Jeff Scudder)'
import re
import atom.core
import gdata.data
LABEL_SCHEME = 'http://www.blogger.com/atom/ns#'
THR_TEMPLATE = '{http://purl.org/syndication/thread/1.0}%s'
BLOG_NAME_PATTERN = re.compile('(http://)(\w*)')
BLOG_ID_PATTERN = re.compile('(tag:blogger.com,1999:blog-)(\w*)')
BLOG_ID2_PATTERN = re.compile('tag:blogger.com,1999:user-(\d+)\.blog-(\d+)')
POST_ID_PATTERN = re.compile(
'(tag:blogger.com,1999:blog-)(\w*)(.post-)(\w*)')
COMMENT_ID_PATTERN = re.compile('.*-(\w*)$')
class BloggerEntry(gdata.data.GDEntry):
"""Adds convenience methods inherited by all Blogger entries."""
def get_blog_id(self):
"""Extracts the Blogger id of this blog.
This method is useful when constructing URLs by hand. The blog id is
often used in blogger operation URLs. This should not be confused with
the id member of a BloggerBlog. The id element is the Atom id XML element.
The blog id which this method returns is a part of the Atom id.
Returns:
The blog's unique id as a string.
"""
if self.id.text:
match = BLOG_ID_PATTERN.match(self.id.text)
if match:
return match.group(2)
else:
return BLOG_ID2_PATTERN.match(self.id.text).group(2)
return None
GetBlogId = get_blog_id
def get_blog_name(self):
"""Finds the name of this blog as used in the 'alternate' URL.
An alternate URL is in the form 'http://blogName.blogspot.com/'. For an
entry representing the above example, this method would return 'blogName'.
Returns:
The blog's URL name component as a string.
"""
for link in self.link:
if link.rel == 'alternate':
return BLOG_NAME_PATTERN.match(link.href).group(2)
return None
GetBlogName = get_blog_name
class Blog(BloggerEntry):
"""Represents a blog which belongs to the user."""
class BlogFeed(gdata.data.GDFeed):
entry = [Blog]
class BlogPost(BloggerEntry):
"""Represents a single post on a blog."""
def add_label(self, label):
"""Adds a label to the blog post.
The label is represented by an Atom category element, so this method
is shorthand for appending a new atom.Category object.
Args:
label: str
"""
self.category.append(atom.data.Category(scheme=LABEL_SCHEME, term=label))
AddLabel = add_label
def get_post_id(self):
"""Extracts the postID string from the entry's Atom id.
Returns: A string of digits which identify this post within the blog.
"""
if self.id.text:
return POST_ID_PATTERN.match(self.id.text).group(4)
return None
GetPostId = get_post_id
class BlogPostFeed(gdata.data.GDFeed):
entry = [BlogPost]
class InReplyTo(atom.core.XmlElement):
_qname = THR_TEMPLATE % 'in-reply-to'
href = 'href'
ref = 'ref'
source = 'source'
type = 'type'
class Comment(BloggerEntry):
"""Blog post comment entry in a feed listing comments on a post or blog."""
in_reply_to = InReplyTo
def get_comment_id(self):
"""Extracts the commentID string from the entry's Atom id.
Returns: A string of digits which identify this comment within the blog.
"""
if self.id.text:
return COMMENT_ID_PATTERN.match(self.id.text).group(1)
return None
GetCommentId = get_comment_id
class CommentFeed(gdata.data.GDFeed):
entry = [Comment]
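# --- Illustrative note (not part of the original module) ---
# For an Atom id such as 'tag:blogger.com,1999:blog-1234.post-5678'
# (id_text below is assumed), POST_ID_PATTERN.match(id_text).group(4)
# yields '5678' and BLOG_ID_PATTERN.match(id_text).group(2) yields '1234'.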
|
40223148/finaltest
|
refs/heads/master
|
static/Brython3.1.0-20150301-090019/Lib/unittest/result.py
|
727
|
"""Test result object"""
import io
import sys
import traceback
from . import util
from functools import wraps
__unittest = True
def failfast(method):
@wraps(method)
def inner(self, *args, **kw):
if getattr(self, 'failfast', False):
self.stop()
return method(self, *args, **kw)
return inner
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
class TestResult(object):
"""Holder for test result information.
Test results are automatically managed by the TestCase and TestSuite
classes, and do not need to be explicitly manipulated by writers of tests.
Each instance holds the total number of tests run, and collections of
failures and errors that occurred among those test runs. The collections
contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
formatted traceback of the error that occurred.
"""
_previousTestClass = None
_testRunEntered = False
_moduleSetUpFailed = False
def __init__(self, stream=None, descriptions=None, verbosity=None):
self.failfast = False
self.failures = []
self.errors = []
self.testsRun = 0
self.skipped = []
self.expectedFailures = []
self.unexpectedSuccesses = []
self.shouldStop = False
self.buffer = False
self._stdout_buffer = None
self._stderr_buffer = None
self._original_stdout = sys.stdout
self._original_stderr = sys.stderr
self._mirrorOutput = False
def printErrors(self):
"Called by TestRunner after test run"
#fixme brython
pass
def startTest(self, test):
"Called when the given test is about to be run"
self.testsRun += 1
self._mirrorOutput = False
self._setupStdout()
def _setupStdout(self):
if self.buffer:
if self._stderr_buffer is None:
self._stderr_buffer = io.StringIO()
self._stdout_buffer = io.StringIO()
sys.stdout = self._stdout_buffer
sys.stderr = self._stderr_buffer
def startTestRun(self):
"""Called once before any tests are executed.
See startTest for a method called before each test.
"""
def stopTest(self, test):
"""Called when the given test has been run"""
self._restoreStdout()
self._mirrorOutput = False
def _restoreStdout(self):
if self.buffer:
if self._mirrorOutput:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
self._original_stdout.write(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
self._original_stderr.write(STDERR_LINE % error)
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
self._stdout_buffer.seek(0)
self._stdout_buffer.truncate()
self._stderr_buffer.seek(0)
self._stderr_buffer.truncate()
def stopTestRun(self):
"""Called once after all tests are executed.
See stopTest for a method called after each test.
"""
@failfast
def addError(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
"""
self.errors.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
@failfast
def addFailure(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info()."""
self.failures.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
def addSuccess(self, test):
"Called when a test has completed successfully"
pass
def addSkip(self, test, reason):
"""Called when a test is skipped."""
self.skipped.append((test, reason))
def addExpectedFailure(self, test, err):
"""Called when an expected failure/error occured."""
self.expectedFailures.append(
(test, self._exc_info_to_string(err, test)))
@failfast
def addUnexpectedSuccess(self, test):
"""Called when a test was expected to fail, but succeed."""
self.unexpectedSuccesses.append(test)
def wasSuccessful(self):
"Tells whether or not this result was a success"
return len(self.failures) == len(self.errors) == 0
def stop(self):
"Indicates that the tests should be aborted"
self.shouldStop = True
def _exc_info_to_string(self, err, test):
"""Converts a sys.exc_info()-style tuple of values into a string."""
exctype, value, tb = err
# Skip test runner traceback levels
while tb and self._is_relevant_tb_level(tb):
tb = tb.tb_next
if exctype is test.failureException:
# Skip assert*() traceback levels
length = self._count_relevant_tb_levels(tb)
msgLines = traceback.format_exception(exctype, value, tb, length)
else:
msgLines = traceback.format_exception(exctype, value, tb)
if self.buffer:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
msgLines.append(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
msgLines.append(STDERR_LINE % error)
return ''.join(msgLines)
def _is_relevant_tb_level(self, tb):
#fix me brython
#return '__unittest' in tb.tb_frame.f_globals
return True  # for now, let's just return True
def _count_relevant_tb_levels(self, tb):
length = 0
while tb and not self._is_relevant_tb_level(tb):
length += 1
tb = tb.tb_next
return length
def __repr__(self):
return ("<%s run=%i errors=%i failures=%i>" %
(util.strclass(self.__class__), self.testsRun, len(self.errors),
len(self.failures)))
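# --- Illustrative note (not part of the original module) ---
# The @failfast decorator above wraps addError/addFailure/addUnexpectedSuccess
# so that self.stop() is called first whenever result.failfast is True; a
# runner that checks result.shouldStop then aborts after the first failure.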
|
marqueedev/django
|
refs/heads/master
|
tests/template_tests/filter_tests/test_yesno.py
|
430
|
from django.template.defaultfilters import yesno
from django.test import SimpleTestCase
class FunctionTests(SimpleTestCase):
def test_true(self):
self.assertEqual(yesno(True), 'yes')
def test_false(self):
self.assertEqual(yesno(False), 'no')
def test_none(self):
self.assertEqual(yesno(None), 'maybe')
def test_true_arguments(self):
self.assertEqual(yesno(True, 'certainly,get out of town,perhaps'), 'certainly')
def test_false_arguments(self):
self.assertEqual(yesno(False, 'certainly,get out of town,perhaps'), 'get out of town')
def test_none_two_arguments(self):
self.assertEqual(yesno(None, 'certainly,get out of town'), 'get out of town')
def test_none_three_arguments(self):
self.assertEqual(yesno(None, 'certainly,get out of town,perhaps'), 'perhaps')
|
mollstam/UnrealPy
|
refs/heads/master
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/django-1.8.2/django/contrib/postgres/operations.py
|
111
|
from django.contrib.postgres.signals import register_hstore_handler
from django.db.migrations.operations.base import Operation
class CreateExtension(Operation):
reversible = True
def __init__(self, name):
self.name = name
def state_forwards(self, app_label, state):
pass
def database_forwards(self, app_label, schema_editor, from_state, to_state):
schema_editor.execute("CREATE EXTENSION IF NOT EXISTS %s" % self.name)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
schema_editor.execute("DROP EXTENSION %s" % self.name)
def describe(self):
return "Creates extension %s" % self.name
class HStoreExtension(CreateExtension):
def __init__(self):
self.name = 'hstore'
def database_forwards(self, app_label, schema_editor, from_state, to_state):
super(HStoreExtension, self).database_forwards(app_label, schema_editor, from_state, to_state)
# Register hstore straight away as it cannot be done before the
# extension is installed, a subsequent data migration would use the
# same connection
register_hstore_handler(schema_editor.connection)
class UnaccentExtension(CreateExtension):
def __init__(self):
self.name = 'unaccent'
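# --- Illustrative usage sketch (not part of the original module) ---
# These operations are meant to be listed in a migration's `operations`, e.g.:
#
#   from django.db import migrations
#   from django.contrib.postgres.operations import HStoreExtension
#
#   class Migration(migrations.Migration):
#       dependencies = []
#       operations = [HStoreExtension()]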
|
binary-signal/mass-apk-installer
|
refs/heads/master
|
mass_apk/ziptools.py
|
1
|
"""Compression related functions."""
import os
from zipfile import ZipFile, ZIP_DEFLATED, is_zipfile
from typing import Optional, Union
from pathlib import Path
__all__ = ["unzipify", "zipify"]
def zipify(src_path: Union[str, os.PathLike], dest_path: Union[str, os.PathLike]):
"""Compress a folder into a zip archive."""
with ZipFile(dest_path, "w", ZIP_DEFLATED) as zip_file:
def dir_to_zip(path, out_file: ZipFile = zip_file):
abs_src = os.path.abspath(path)
if os.path.isdir(abs_src):
apks = [
item
for item in os.listdir(abs_src)
if os.path.isfile(os.path.join(abs_src, item)) and item.endswith(".apk")
]
for apk in apks:
# don't preserve folder structure inside the zip file
abs_path = Path(os.path.join(abs_src, apk))
apk_name = abs_path.parts[-1]
out_file.write(os.path.join(path, apk), apk_name)
dir_to_zip(src_path, zip_file)
def unzipify(
file: Union[str, os.PathLike],
dest_dir: Optional[Union[str, os.PathLike]] = None,
):
"""Decompress zip file into `dest_dir` path.
If `dest_dir` is None, use the current working directory as `dest_dir`
to extract data into.
raises ValueError if `file` arg is not a zip file.
"""
# create the output directory if it doesn't exist
if dest_dir is None:
dest_dir = os.getcwd()
if not is_zipfile(file):
raise ValueError(f"Not a zip file {file}")
os.makedirs(dest_dir, exist_ok=True)
with ZipFile(file, "r") as file:
file.extractall(dest_dir)
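# --- Illustrative usage sketch (not part of the original module) ---
#   zipify("backups/apks", "backup.zip")    # collect *.apk files into backup.zip
#   unzipify("backup.zip", "restored")      # extract the archive into ./restored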
|
fpsluozi/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/drtv.py
|
112
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
parse_iso8601,
)
class DRTVIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?dr\.dk/tv/se/(?:[^/]+/)*(?P<id>[\da-z-]+)(?:[/#?]|$)'
_TEST = {
'url': 'https://www.dr.dk/tv/se/boern/ultra/panisk-paske/panisk-paske-5',
'md5': 'dc515a9ab50577fa14cc4e4b0265168f',
'info_dict': {
'id': 'panisk-paske-5',
'ext': 'mp4',
'title': 'Panisk Påske (5)',
'description': 'md5:ca14173c5ab24cd26b0fcc074dff391c',
'timestamp': 1426984612,
'upload_date': '20150322',
'duration': 1455,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
if '>Programmet er ikke længere tilgængeligt' in webpage:
raise ExtractorError(
'Video %s is not available' % video_id, expected=True)
video_id = self._search_regex(
r'data-(?:material-identifier|episode-slug)="([^"]+)"',
webpage, 'video id')
programcard = self._download_json(
'http://www.dr.dk/mu/programcard/expanded/%s' % video_id,
video_id, 'Downloading video JSON')
data = programcard['Data'][0]
title = data['Title']
description = data['Description']
timestamp = parse_iso8601(data['CreatedTime'])
thumbnail = None
duration = None
restricted_to_denmark = False
formats = []
subtitles = {}
for asset in data['Assets']:
if asset['Kind'] == 'Image':
thumbnail = asset['Uri']
elif asset['Kind'] == 'VideoResource':
duration = asset['DurationInMilliseconds'] / 1000.0
restricted_to_denmark = asset['RestrictedToDenmark']
spoken_subtitles = asset['Target'] == 'SpokenSubtitles'
for link in asset['Links']:
uri = link['Uri']
target = link['Target']
format_id = target
preference = None
if spoken_subtitles:
preference = -1
format_id += '-spoken-subtitles'
if target == 'HDS':
formats.extend(self._extract_f4m_formats(
uri + '?hdcore=3.3.0&plugin=aasp-3.3.0.99.43',
video_id, preference, f4m_id=format_id))
elif target == 'HLS':
formats.extend(self._extract_m3u8_formats(
uri, video_id, 'mp4', preference=preference,
m3u8_id=format_id))
else:
bitrate = link.get('Bitrate')
if bitrate:
format_id += '-%s' % bitrate
formats.append({
'url': uri,
'format_id': format_id,
'tbr': bitrate,
'ext': link.get('FileFormat'),
})
subtitles_list = asset.get('SubtitlesList')
if isinstance(subtitles_list, list):
LANGS = {
'Danish': 'dk',
}
for subs in subtitles_list:
lang = subs['Language']
subtitles[LANGS.get(lang, lang)] = [{'url': subs['Uri'], 'ext': 'vtt'}]
if not formats and restricted_to_denmark:
raise ExtractorError(
'Unfortunately, DR is not allowed to show this program outside Denmark.', expected=True)
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
}
|
legionus/billing
|
refs/heads/master
|
tests/test_database.py
|
1
|
import unithelper
import uuid
import time
import random
import unittest2 as unittest
from bc import database
class Test(unithelper.DBTestCase):
def setUp(self):
with database.DBConnect() as db:
test_base_dropper = """
DROP TABLE IF EXISTS new_table;
"""
test_base_creator="""
CREATE TABLE new_table (
uuid varchar(36) NOT NULL PRIMARY KEY,
big bigint NOT NULL,
time int NOT NULL
);
"""
db.execute(test_base_dropper)
db.execute(test_base_creator)
@unittest.skipUnless(unithelper.haveDatabase(), True)
def test_insert(self):
"""insert test"""
with database.DBConnect() as db:
dictionary = {
'uuid': str(uuid.uuid4()),
'big': 2**32,
'time': int(time.time())
}
db.insert('new_table', dictionary)
c = db.query("SELECT * FROM new_table WHERE uuid='{0}';".format(dictionary['uuid']))
self.assertEqual(dictionary, c.one())
@unittest.skipUnless(unithelper.haveDatabase(), True)
def test_insert_many(self):
"""insert of many objects test"""
with database.DBConnect() as db:
l = [
{ 'uuid': str(uuid.uuid4()), 'big': 2, 'time': int(time.time()) },
{ 'uuid': str(uuid.uuid4()), 'big': 4, 'time': int(time.time()) },
{ 'uuid': str(uuid.uuid4()), 'big': 8, 'time': int(time.time()) },
]
c = db.insert('new_table', l, returning={})
self.assertEqual(l, c.all())
def test_select_nowait(self):
"""test nowait scipping lock"""
o = {
'uuid': str(uuid.uuid4()),
'big': 2**32,
'time': int(time.time())
}
with database.DBConnect() as db:
db.insert('new_table', o)
with database.DBConnect(autocommit=False) as db1:
db1.find('new_table', {'uuid':o['uuid']}, lock='update')
with database.DBConnect() as db2:
with self.assertRaises(database.OperationalError):
db2.find('new_table', {'uuid':o['uuid']}, lock='update', nowait=True)
def test_insert_return(self):
"""insert with return test"""
with database.DBConnect() as db:
o = {
'uuid': str(uuid.uuid4()),
'big': 2**32,
'time': int(time.time())
}
c = db.insert('new_table', o, returning={})
self.assertEqual(o, c.one())
@unittest.skipUnless(unithelper.haveDatabase(), True)
def test_insert_autocommit_false(self):
"""transaction insert test"""
with database.DBConnect(autocommit=False) as db:
data = []
for i in range(random.randint(5,10)):
dictionary = {
'uuid': str(uuid.uuid4()),
'big': 2**32,
'time': int(time.time())
}
db.insert('new_table', dictionary)
data.append(dictionary)
with database.DBConnect() as db1:
#Must return empty set, because not committed yet
self.assertEqual(
set(),
set(list(db1.query("SELECT * FROM new_table;").all()))
)
get_id = lambda x:x['uuid']
#Must return all inserted data, because in transaction
self.assertEqual(
set(map(get_id, data)),
set(map(get_id, db.query("SELECT * FROM new_table;").all()))
)
db.commit()
with database.DBConnect() as db2:
#Must return all inserted data, because transaction was committed
self.assertEqual(
set(map(get_id, data)),
set(map(get_id, db2.query("SELECT * FROM new_table;").all()))
)
@unittest.skipUnless(unithelper.haveDatabase(), True)
def test_update(self):
"""update test"""
with database.DBConnect() as db:
dictionary = {
'uuid': str(uuid.uuid4()),
'big': 2**32,
'time': int(time.time())
}
db.insert('new_table', dictionary)
dictionary['big'] = 2**30
dictionary['time'] = int(time.time())
dset = dictionary.copy()
dsearch = {'uuid':dset['uuid']}
del(dset['uuid'])
db.update('new_table', dsearch, dset)
c = db.query("SELECT * FROM new_table WHERE uuid='{0}';".format(dictionary['uuid']))
self.assertEqual(dictionary, c.one())
@unittest.skipUnless(unithelper.haveDatabase(), True)
def test_update_return(self):
"""update with return test"""
with database.DBConnect() as db:
uid = str(uuid.uuid4())
ts = int(time.time())
db.insert('new_table', { 'uuid':uid, 'big':2**32, 'time':ts })
c = db.update('new_table', { 'uuid': uid }, { 'big': 2**30 }, returning={})
self.assertEqual(c.all(), [{ 'uuid':uid, 'big':2**30, 'time':ts }])
|
jodal/comics
|
refs/heads/main
|
comics/comics/nemibt.py
|
1
|
from comics.aggregator.crawler import CrawlerBase, CrawlerImage
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = "Nemi (bt.no)"
language = "no"
url = "https://www.bt.no/kultur/tegneserier/"
start_date = "1997-01-01"
rights = "Lise Myhre"
class Crawler(CrawlerBase):
history_capable_date = "2008-07-01"
schedule = "Mo,Tu,We,Th,Fr,Sa"
time_zone = "Europe/Oslo"
def crawl(self, pub_date):
url = "https://cartoon-prod.schibsted.tech/nemi/{}.gif".format(
pub_date.strftime("%d%m%y"),
)
return CrawlerImage(url)
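# Illustrative note (not part of the original module): for pub_date 2015-03-22
# the crawler requests .../nemi/220315.gif, since strftime("%d%m%y") renders
# the date as two-digit day, month and year.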
|
Orav/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/test/test_userlist.py
|
2
|
# Check every path through every method of UserList
from collections import UserList
from test import support, list_tests
class UserListTest(list_tests.CommonTest):
type2test = UserList
def test_getslice(self):
super().test_getslice()
l = [0, 1, 2, 3, 4]
u = self.type2test(l)
for i in range(-3, 6):
self.assertEqual(u[:i], l[:i])
self.assertEqual(u[i:], l[i:])
for j in range(-3, 6):
self.assertEqual(u[i:j], l[i:j])
def test_add_specials(self):
u = UserList("spam")
u2 = u + "eggs"
self.assertEqual(u2, list("spameggs"))
def test_radd_specials(self):
u = UserList("eggs")
u2 = "spam" + u
self.assertEqual(u2, list("spameggs"))
u2 = u.__radd__(UserList("spam"))
self.assertEqual(u2, list("spameggs"))
def test_iadd(self):
super().test_iadd()
u = [0, 1]
u += UserList([0, 1])
self.assertEqual(u, [0, 1, 0, 1])
def test_mixedcmp(self):
u = self.type2test([0, 1])
self.assertEqual(u, [0, 1])
self.assertNotEqual(u, [0])
self.assertNotEqual(u, [0, 2])
def test_mixedadd(self):
u = self.type2test([0, 1])
self.assertEqual(u + [], u)
self.assertEqual(u + [2], [0, 1, 2])
def test_getitemoverwriteiter(self):
# Verify that __getitem__ overrides *are* recognized by __iter__
class T(self.type2test):
def __getitem__(self, key):
return str(key) + '!!!'
self.assertEqual(next(iter(T((1,2)))), "0!!!")
def test_userlist_copy(self):
u = self.type2test([6, 8, 1, 9, 1])
v = u.copy()
self.assertEqual(u, v)
self.assertEqual(type(u), type(v))
def test_main():
support.run_unittest(UserListTest)
if __name__ == "__main__":
test_main()
|
chrishas35/django-travis-ci
|
refs/heads/master
|
tests/regressiontests/i18n/commands/compilation.py
|
4
|
import os
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.core.management import CommandError
from django.core.management.commands.compilemessages import compile_messages
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import translation
test_dir = os.path.abspath(os.path.dirname(__file__))
class MessageCompilationTests(TestCase):
def setUp(self):
self._cwd = os.getcwd()
def tearDown(self):
os.chdir(self._cwd)
class PoFileTests(MessageCompilationTests):
LOCALE='es_AR'
MO_FILE='locale/%s/LC_MESSAGES/django.mo' % LOCALE
def test_bom_rejection(self):
os.chdir(test_dir)
# We don't use the django.core.management infrastructure (call_command()
# et al) because CommandErrors cause exit(1) there. We test the
# underlying compile_messages function instead
out = StringIO()
self.assertRaises(CommandError, compile_messages, out, locale=self.LOCALE)
self.assertFalse(os.path.exists(self.MO_FILE))
class PoFileContentsTests(MessageCompilationTests):
# Ticket #11240
LOCALE='fr'
MO_FILE='locale/%s/LC_MESSAGES/django.mo' % LOCALE
def setUp(self):
super(PoFileContentsTests, self).setUp()
self.addCleanup(os.unlink, os.path.join(test_dir, self.MO_FILE))
def test_percent_symbol_in_po_file(self):
os.chdir(test_dir)
# We don't use the django.core.management infrastructure (call_command()
# et al) because CommandErrors cause exit(1) there. We test the
# underlying compile_messages function instead
out = StringIO()
compile_messages(out, locale=self.LOCALE)
self.assertTrue(os.path.exists(self.MO_FILE))
class PercentRenderingTests(MessageCompilationTests):
# Ticket #11240 -- Testing rendering doesn't belong here but we are trying
# to keep tests for all the stack together
LOCALE='it'
MO_FILE='locale/%s/LC_MESSAGES/django.mo' % LOCALE
@override_settings(LOCALE_PATHS=(os.path.join(test_dir, 'locale'),))
def test_percent_symbol_escaping(self):
from django.template import Template, Context
os.chdir(test_dir)
# We don't use the django.core.management infrastructure (call_command()
# et al) because CommandErrors cause exit(1) there. We test the
# underlying compile_messages function instead
out = StringIO()
compile_messages(out, locale=self.LOCALE)
with translation.override(self.LOCALE):
t = Template('{% load i18n %}{% trans "Looks like a str fmt spec %% o but shouldn\'t be interpreted as such" %}')
rendered = t.render(Context({}))
self.assertEqual(rendered, 'IT translation contains %% for the above string')
t = Template('{% load i18n %}{% trans "Completed 50%% of all the tasks" %}')
rendered = t.render(Context({}))
self.assertEqual(rendered, 'IT translation of Completed 50%% of all the tasks')
|
karimbahgat/Pipy
|
refs/heads/master
|
pipy/pip/__main__.py
|
834
|
from __future__ import absolute_import
import os
import sys
# If we are running from a wheel, add the wheel to sys.path
# This allows the usage python pip-*.whl/pip install pip-*.whl
if __package__ == '':
# __file__ is pip-*.whl/pip/__main__.py
# first dirname call strips of '/__main__.py', second strips off '/pip'
# Resulting path is the name of the wheel itself
# Add that to sys.path so we can import pip
path = os.path.dirname(os.path.dirname(__file__))
sys.path.insert(0, path)
import pip # noqa
if __name__ == '__main__':
sys.exit(pip.main())
|
archyufa/CloudFerry
|
refs/heads/master
|
cloudferrylib/base/action/get_info_iter.py
|
2
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cloudferrylib.base.action import action
from cloudferrylib.utils import utils as utl
class GetInfoIter(action.Action):
def __init__(self, init, iter_info_name='info_iter', info_name='info',
resource_name=utl.INSTANCES_TYPE):
self.iter_info_name = iter_info_name
self.info_name = info_name
self.resource_name = resource_name
super(GetInfoIter, self).__init__(init)
def run(self, **kwargs):
info = kwargs[self.iter_info_name]
objs = info[self.resource_name]
obj_id = objs.keys()[0]
obj = objs.pop(obj_id)
new_info = {
self.resource_name: {obj_id: obj}
}
return {
self.iter_info_name: info,
self.info_name: new_info
}
|
Elucidation/schmeckle_bot
|
refs/heads/master
|
sb_helpers.py
|
1
|
# -*- coding: utf-8 -*-
import re
import time
import locale
from datetime import datetime
from math import isinf
# For converting from string numbers with english-based commas to floats
#locale.setlocale(locale.LC_ALL, 'eng_USA') # Windows
locale.setlocale(locale.LC_ALL, 'en_GB.utf8') # Linux (Raspberry Pi 2)
def getResponseFooter():
return "\n\n---\n\n[^(1 Schmeckle = $148 USD)](https://www.reddit.com/r/IAmA/comments/202owt/we_are_dan_harmon_and_justin_roiland_creators_of/cfzfv79)^( | price not guaranteed |) [^(`what is my purpose`)](https://github.com/Elucidation/schmeckle_bot 'convert Schmeckles to USD')"
def schmeckle2usd(schmeckle):
"""1 Schmeckle = $148 USD
https://www.reddit.com/r/IAmA/comments/202owt/we_are_dan_harmon_and_justin_roiland_creators_of/cfzfv79"""
return schmeckle * 148.0
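# Worked example (sketch, not in the original source): schmeckle2usd(50) -> 7400.0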
def getValue(value_str):
# Strings with more than 9000 characters are considered too big to handle so
# we don't run into char limits when generating a reply
if (len(value_str)) > 9000:
value = locale.atof('inf')
else:
value = locale.atof(value_str)
return value
def getCommentDepth(comment):
depth = 0
while not comment.is_root:
comment = comment.parent()
depth += 1
return depth
def generateResponseMessage(search_result):
match = search_result.groups()[1] # Middle group
value_str = match.split()[0]
if len(match) > 1000 or len(value_str) > 300:
# message or value was too big, generate a different message
msg = u"# **Nope.**\n"
else:
value = getValue(value_str) # pass found value string
usd = schmeckle2usd(value)
quote = u"> ... {}{}{} ...".format(search_result.groups()[0], match, search_result.groups()[2])
if value > 1e15:
msg = u"{}\n\n* {:,g} Schmeckles → **${:,g} USD**\n".format(
quote, value, usd)
elif value.is_integer():
msg = u"{}\n\n* {:,d} Schmeckles → **${:,d} USD**\n".format(
quote, int(value), int(usd))
else:
msg = u"{}\n\n* {:,.8g} Schmeckles → **${:,.2f} USD**\n".format(
quote, value, usd)
return u"{}{}".format(msg, getResponseFooter())
# Look for '<number> schmeckle' ignore case (schmeckles accepted implicitly)
# Also handles common misspellings of schmeckle
# Works for positive and negative floats, but fails softly on exponent notation
# Also catches neighboring region around it
# p = re.compile('(-?[\d|,]*\.{0,1}\d+ sc?hmeck?(?:le|el)[\w]{0,80})', re.IGNORECASE)
# Ignore numbers > 300 chars on either side of decimal
# Also require a question-mark in statement
p = re.compile('([^\n\.\,\r\d-]{0,30})(-?[\d|,]{0,300}\.{0,1}\d{1,300} schmeckle[\w]{0,80})([^\n\.\,\r\d-]{0,30})', re.IGNORECASE)
def searchForSchmeckles(body_text):
if any([x in body_text.lower() for x in ['?', 'how much', 'what is']]):
return p.search(body_text)
return None
# Check if comment has a comment by this bot already, or is a comment by bot
def previouslyRepliedTo(comment, me):
# Check if comment author is self, skip if so
if comment.author == me:
return True
# Check if author of parent of comment is self
if comment.parent().author == me:
# Check if comment contains github self-link, skip if so as it's probably
# a quote
if 'github.com/Elucidation/schmeckle_bot' in comment.body:
return True
comment.refresh() # So we can see replies
for reply in comment.replies.list():
if reply.author == me:
return True
return False
def waitWithComments(sleep_time, segment=60):
"""Sleep for sleep_time seconds, printing to stdout every segment of time"""
print("\t%s - %s seconds to go..." % (datetime.now(), sleep_time))
while sleep_time > segment:
time.sleep(segment) # sleep in increments of 1 minute
sleep_time -= segment
print("\t%s - %s seconds to go..." % (datetime.now(), sleep_time))
time.sleep(sleep_time)
def logMessage(comment, status=""):
print("{} | {} {}: {}".format(datetime.now(), comment.id, status, comment.body[:80].replace('\n','\\n').encode('utf-8')))
|
jymannob/CouchPotatoServer
|
refs/heads/develop
|
libs/suds/umx/basic.py
|
211
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides basic unmarshaller classes.
"""
from logging import getLogger
from suds import *
from suds.umx import *
from suds.umx.core import Core
class Basic(Core):
"""
A object builder (unmarshaller).
"""
def process(self, node):
"""
Process an object graph representation of the xml I{node}.
@param node: An XML tree.
@type node: L{sax.element.Element}
@return: A suds object.
@rtype: L{Object}
"""
content = Content(node)
return Core.process(self, content)
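# --- Illustrative sketch (my addition, not part of suds) ---
# Roughly, one parses XML into a sax Element and hands it to process(),
# getting back a suds Object whose attributes mirror the child elements
# (assuming suds.sax.parser.Parser is available):
#   root = Parser().parse(string='<person><name>ann</name></person>').root()
#   person = Basic().process(root)
#   person.name  # 'ann'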
|
zisko/swift
|
refs/heads/master
|
utils/pass-pipeline/src/passes.py
|
22
|
from pass_pipeline import Pass
# TODO: This should not be hard coded. Create a tool in the compiler that knows
# how to dump the passes and the pipelines themselves.
AADumper = Pass('AADumper')
ABCOpt = Pass('ABCOpt')
AddressLowering = Pass('AddressLowering')
AllocBoxToStack = Pass('AllocBoxToStack')
CFGPrinter = Pass('CFGPrinter')
COWArrayOpts = Pass('COWArrayOpts')
CSE = Pass('CSE')
CapturePromotion = Pass('CapturePromotion')
CapturePropagation = Pass('CapturePropagation')
ClosureSpecializer = Pass('ClosureSpecializer')
CodeMotion = Pass('CodeMotion')
CopyForwarding = Pass('CopyForwarding')
DCE = Pass('DCE')
DeadFunctionElimination = Pass('DeadFunctionElimination')
DeadObjectElimination = Pass('DeadObjectElimination')
DefiniteInitialization = Pass('DefiniteInitialization')
DiagnoseUnreachable = Pass('DiagnoseUnreachable')
DiagnosticConstantPropagation = Pass('DiagnosticConstantPropagation')
EarlyInliner = Pass('EarlyInliner')
EmitDFDiagnostics = Pass('EmitDFDiagnostics')
FunctionSignatureOpts = Pass('FunctionSignatureOpts')
GlobalARCOpts = Pass('GlobalARCOpts')
GlobalLoadStoreOpts = Pass('GlobalLoadStoreOpts')
GlobalOpt = Pass('GlobalOpt')
IVInfoPrinter = Pass('IVInfoPrinter')
InstCount = Pass('InstCount')
LICM = Pass('LICM')
LateInliner = Pass('LateInliner')
LoopInfoPrinter = Pass('LoopInfoPrinter')
LoopRotate = Pass('LoopRotate')
LowerAggregateInstrs = Pass('LowerAggregateInstrs')
MandatoryInlining = Pass('MandatoryInlining')
Mem2Reg = Pass('Mem2Reg')
NoReturnFolding = Pass('NoReturnFolding')
PerfInliner = Pass('PerfInliner')
PerformanceConstantPropagation = Pass('PerformanceConstantPropagation')
PredictableMemoryOptimizations = Pass('PredictableMemoryOptimizations')
SILCleanup = Pass('SILCleanup')
SILCombine = Pass('SILCombine')
SILLinker = Pass('SILLinker')
SROA = Pass('SROA')
SimplifyCFG = Pass('SimplifyCFG')
SpeculativeDevirtualizer = Pass('SpeculativeDevirtualizer')
SplitAllCriticalEdges = Pass('SplitAllCriticalEdges')
SplitNonCondBrCriticalEdges = Pass('SplitNonCondBrCriticalEdges')
StripDebugInfo = Pass('StripDebugInfo')
SwiftArrayOpts = Pass('SwiftArrayOpts')
PASSES = [
AADumper,
ABCOpt,
AddressLowering,
AllocBoxToStack,
CFGPrinter,
COWArrayOpts,
CSE,
CapturePromotion,
CapturePropagation,
ClosureSpecializer,
CodeMotion,
CopyForwarding,
DCE,
DeadFunctionElimination,
DeadObjectElimination,
DefiniteInitialization,
DiagnoseUnreachable,
DiagnosticConstantPropagation,
EarlyInliner,
EmitDFDiagnostics,
FunctionSignatureOpts,
GlobalARCOpts,
GlobalLoadStoreOpts,
GlobalOpt,
IVInfoPrinter,
InstCount,
LICM,
LateInliner,
LoopInfoPrinter,
LoopRotate,
LowerAggregateInstrs,
MandatoryInlining,
Mem2Reg,
NoReturnFolding,
PerfInliner,
PerformanceConstantPropagation,
PredictableMemoryOptimizations,
SILCleanup,
SILCombine,
SILLinker,
SROA,
SimplifyCFG,
SpeculativeDevirtualizer,
SplitAllCriticalEdges,
SplitNonCondBrCriticalEdges,
StripDebugInfo,
SwiftArrayOpts,
]
|
mmnelemane/neutron
|
refs/heads/master
|
neutron/tests/unit/db/test_securitygroups_db.py
|
30
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import testtools
from neutron.callbacks import exceptions
from neutron.callbacks import registry
from neutron import context
from neutron.db import common_db_mixin
from neutron.db import securitygroups_db
from neutron.extensions import securitygroup
from neutron.tests.unit import testlib_api
class SecurityGroupDbMixinImpl(securitygroups_db.SecurityGroupDbMixin,
common_db_mixin.CommonDbMixin):
pass
class SecurityGroupDbMixinTestCase(testlib_api.SqlTestCase):
def setUp(self):
super(SecurityGroupDbMixinTestCase, self).setUp()
self.ctx = context.get_admin_context()
self.mixin = SecurityGroupDbMixinImpl()
def test_create_security_group_conflict(self):
with mock.patch.object(registry, "notify") as mock_notify:
mock_notify.side_effect = exceptions.CallbackFailure(Exception())
secgroup = {'security_group': mock.ANY}
with testtools.ExpectedException(
securitygroup.SecurityGroupConflict):
self.mixin.create_security_group(self.ctx, secgroup)
def test_delete_security_group_in_use(self):
with mock.patch.object(self.mixin,
'_get_port_security_group_bindings'),\
mock.patch.object(self.mixin, '_get_security_group'),\
mock.patch.object(registry, "notify") as mock_notify:
mock_notify.side_effect = exceptions.CallbackFailure(Exception())
with testtools.ExpectedException(
securitygroup.SecurityGroupInUse):
self.mixin.delete_security_group(self.ctx, mock.ANY)
def test_update_security_group_conflict(self):
with mock.patch.object(registry, "notify") as mock_notify:
mock_notify.side_effect = exceptions.CallbackFailure(Exception())
secgroup = {'security_group': mock.ANY}
with testtools.ExpectedException(
securitygroup.SecurityGroupConflict):
self.mixin.update_security_group(self.ctx, 'foo_id', secgroup)
def test_create_security_group_rule_conflict(self):
with mock.patch.object(self.mixin, '_validate_security_group_rule'),\
mock.patch.object(self.mixin,
'_check_for_duplicate_rules_in_db'),\
mock.patch.object(registry, "notify") as mock_notify:
mock_notify.side_effect = exceptions.CallbackFailure(Exception())
with testtools.ExpectedException(
securitygroup.SecurityGroupConflict):
self.mixin.create_security_group_rule(
self.ctx, mock.MagicMock())
def test_delete_security_group_rule_in_use(self):
with mock.patch.object(registry, "notify") as mock_notify:
mock_notify.side_effect = exceptions.CallbackFailure(Exception())
with testtools.ExpectedException(
securitygroup.SecurityGroupRuleInUse):
self.mixin.delete_security_group_rule(self.ctx, mock.ANY)
def test_delete_security_group_rule_raise_error_on_not_found(self):
with testtools.ExpectedException(
securitygroup.SecurityGroupRuleNotFound):
self.mixin.delete_security_group_rule(self.ctx, 'foo_rule')
|
jackchi/interview-prep
|
refs/heads/master
|
trees/tree_trace.py
|
1
|
# Your previous Plain Text content is preserved below:
#
# ValueError ValueError ValueError
# | | | |
# foo foo -> foo zoo
# | | / \ |
# bar baz bar baz far
#
#
# ValueError
# |
# zoo
# |
# var
#
# Given the list of exception traces, merge them into a tree using the prefix
# merge criterion illustrated above.
#
# Input  -> k lists of exception traces
# Output -> Tree
#
# // ["ValueError", "foo", "bar"]
# // if tree's values doesn't contain exception or if node is null:
# // create node[String]
# // recurse pass children
# Tree merge (List<List<String>> ets)
#
#
# TODO:
# Change list_of_nodes into dictionary
class ExceptionNode(object):
    def __init__(self, trace_name, list_of_nodes=None):
        """
        :type trace_name: str
        :type list_of_nodes: list[ExceptionNode]
        """
        self.trace_name = trace_name
        self.list_of_nodes = list_of_nodes if list_of_nodes is not None else []
    def addTrace(self, node):
        self.list_of_nodes.append(node)
    def getTraceName(self):
        return self.trace_name
    def getNodes(self):
        return self.list_of_nodes
    def findNode(self, test_trace):
        for t in self.list_of_nodes:
            if t.getTraceName() == test_trace:
                return t
        return None
trace1 = []
trace2 = []
def merge(list_of_exceptions):
    # All traces share the same root (e.g. ValueError), so seed the tree with it
    root = ExceptionNode(list_of_exceptions[0][0])
    # Depth-first approach: walk/extend the tree one trace at a time
    for trace in list_of_exceptions:
        stack_tree = root
        for exp in trace[1:]:
            existing = stack_tree.findNode(exp)
            if existing is None:
                new_exp_tree = ExceptionNode(exp)
                stack_tree.addTrace(new_exp_tree)
                stack_tree = new_exp_tree
            else:
                stack_tree = existing
    return root
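# --- Illustrative usage (my addition, not from the original notes) ---
# Two traces sharing the ValueError root, matching the diagram above.
if __name__ == '__main__':
    traces = [
        ["ValueError", "foo", "bar"],
        ["ValueError", "foo", "baz"],
    ]
    tree = merge(traces)
    print(tree.getTraceName())                          # ValueError
    print([n.getTraceName() for n in tree.getNodes()])  # ['foo']
    foo = tree.findNode("foo")
    print([n.getTraceName() for n in foo.getNodes()])   # ['bar', 'baz']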
|
fjfnaranjo/fjfnaranjo-bot
|
refs/heads/dev
|
tests/test_auth.py
|
1
|
from contextlib import contextmanager
from unittest import TestCase
from unittest.mock import patch, sentinel
from telegram.ext.dispatcher import DispatcherHandlerStop
from fjfnaranjobot.auth import friends, logger, only_friends, only_owner, only_real
from fjfnaranjobot.common import SORRY_TEXT, User
from .base import (
BOT_USER,
FIRST_FRIEND_USER,
OWNER_USER,
SECOND_FRIEND_USER,
UNKNOWN_USER,
BotHandlerTestCase,
BotTestCase,
)
MODULE_PATH = "fjfnaranjobot.auth"
class AuthTests(BotHandlerTestCase):
def test_only_real_no_user_no_message(self):
noop = only_real(lambda _update, _context: True)
self.user_is_none(remove_message=True)
with self.assertLogs(logger) as logs:
with self.assertRaises(DispatcherHandlerStop):
noop(*self.update_and_context)
assert (
"Message received with no user "
"trying to access a only_real command. "
"Command text: '<unknown>' (cropped to 10 chars)."
) in logs.output[0]
def test_only_real_no_user_empty_command(self):
noop = only_real(lambda _update, _context: True)
self.user_is_none(remove_text=True)
with self.assertLogs(logger) as logs:
with self.assertRaises(DispatcherHandlerStop):
noop(*self.update_and_context)
assert (
"Message received with no user "
"trying to access a only_real command. "
"Command text: '<empty>' (cropped to 10 chars)."
) in logs.output[0]
self.assert_message_chat_text(sentinel.chat_id_from_update, SORRY_TEXT)
def test_only_real_no_user(self):
noop = only_real(lambda _update, _context: True)
self.user_is_none()
self.set_string_command("cmd")
with self.assertLogs(logger) as logs:
with self.assertRaises(DispatcherHandlerStop):
noop(*self.update_and_context)
assert (
"Message received with no user "
"trying to access a only_real command. "
"Command text: 'cmd' (cropped to 10 chars)."
) in logs.output[0]
self.assert_message_chat_text(sentinel.chat_id_from_update, SORRY_TEXT)
def test_only_real_bot_no_message(self):
noop = only_real(lambda _update, _context: True)
self.user_is_bot(remove_message=True)
with self.assertLogs(logger) as logs:
with self.assertRaises(DispatcherHandlerStop):
noop(*self.update_and_context)
assert (
f"Bot with username {BOT_USER.username} and id {BOT_USER.id} "
"tried to access a only_real command. "
"Command text: '<unknown>' (cropped to 10 chars)."
) in logs.output[0]
def test_only_real_bot_empty_command(self):
noop = only_real(lambda _update, _context: True)
self.user_is_bot(remove_text=True)
with self.assertLogs(logger) as logs:
with self.assertRaises(DispatcherHandlerStop):
noop(*self.update_and_context)
assert (
f"Bot with username {BOT_USER.username} and id {BOT_USER.id} "
"tried to access a only_real command. "
"Command text: '<empty>' (cropped to 10 chars)."
) in logs.output[0]
self.assert_message_chat_text(sentinel.chat_id_from_update, SORRY_TEXT)
def test_only_real_bot(self):
noop = only_real(lambda _update, _context: True)
self.user_is_bot()
self.set_string_command("cmd")
with self.assertLogs(logger) as logs:
with self.assertRaises(DispatcherHandlerStop):
noop(*self.update_and_context)
assert (
f"Bot with username {BOT_USER.username} and id {BOT_USER.id} "
"tried to access a only_real command. "
"Command text: 'cmd' (cropped to 10 chars)."
) in logs.output[0]
def test_only_real_user_ok(self):
noop = only_real(lambda _update, _context: True)
assert noop(*self.update_and_context) is True
self.assert_message_calls([])
def test_only_owner_no_message(self):
noop = only_owner(lambda _update, _context: True)
self.user_is_unknown(remove_message=True)
with self.assertLogs(logger) as logs:
with self.assertRaises(DispatcherHandlerStop):
noop(*self.update_and_context)
assert (
f"User u with id {UNKNOWN_USER.id} "
"tried to access a only_owner command. "
"Command text: '<unknown>' (cropped to 10 chars)."
) in logs.output[0]
def test_only_owner_empty_command(self):
noop = only_owner(lambda _update, _context: True)
self.user_is_unknown(remove_text=True)
with self.assertLogs(logger) as logs:
with self.assertRaises(DispatcherHandlerStop):
noop(*self.update_and_context)
assert (
f"User u with id {UNKNOWN_USER.id} "
"tried to access a only_owner command. "
"Command text: '<empty>' (cropped to 10 chars)."
) in logs.output[0]
self.assert_message_chat_text(sentinel.chat_id_from_update, SORRY_TEXT)
def test_only_owner_no_owner(self):
noop = only_owner(lambda _update, _context: True)
self.set_string_command("cmd")
with self.assertLogs(logger) as logs:
with self.assertRaises(DispatcherHandlerStop):
assert noop(*self.update_and_context) is None
assert (
f"User u with id {UNKNOWN_USER.id} "
"tried to access a only_owner command. "
"Command text: 'cmd' (cropped to 10 chars)."
) in logs.output[0]
self.assert_message_chat_text(sentinel.chat_id_from_update, SORRY_TEXT)
def test_only_owner_ok(self):
noop = only_owner(lambda _update, _context: True)
self.user_is_owner()
assert noop(*self.update_and_context) is True
self.assert_message_calls([])
def test_only_friends_no_message(self):
noop = only_friends(lambda _update, _context: True)
self.user_is_unknown(remove_message=True)
with self.assertLogs(logger) as logs:
with self.assertRaises(DispatcherHandlerStop):
noop(*self.update_and_context)
assert (
f"User u with id {UNKNOWN_USER.id} "
"tried to access a only_friends command. "
"Command text: '<unknown>' (cropped to 10 chars)."
) in logs.output[0]
def test_only_friends_empty_command(self):
noop = only_friends(lambda _update, _context: True)
self.user_is_unknown(remove_text=True)
with self.assertLogs(logger) as logs:
with self.assertRaises(DispatcherHandlerStop):
noop(*self.update_and_context)
assert (
f"User u with id {UNKNOWN_USER.id} "
"tried to access a only_friends command. "
"Command text: '<empty>' (cropped to 10 chars)."
) in logs.output[0]
self.assert_message_chat_text(sentinel.chat_id_from_update, SORRY_TEXT)
def test_only_friends_not_friend(self):
noop = only_friends(lambda _update, _context: True)
self.set_string_command("cmd")
with self.set_friends([FIRST_FRIEND_USER]):
with self.assertLogs(logger) as logs:
with self.assertRaises(DispatcherHandlerStop):
noop(*self.update_and_context)
assert (
f"User u with id {UNKNOWN_USER.id} "
"tried to access a only_friends command. "
"Command text: 'cmd' (cropped to 10 chars)."
) in logs.output[0]
self.assert_message_chat_text(sentinel.chat_id_from_update, SORRY_TEXT)
def test_only_friends_ok(self):
noop = only_friends(lambda _update, _context: True)
self.user_is_friend(FIRST_FRIEND_USER)
with self.set_friends([FIRST_FRIEND_USER]):
assert noop(*self.update_and_context) is True
self.assert_message_calls([])
def test_only_owner_not_defined_no_message(self):
with self.mocked_environ("fjfnaranjobot.auth.environ", None, ["BOT_OWNER_ID"]):
noop = only_owner(lambda _update, _context: True)
self.user_is_owner(remove_message=True)
with self.assertLogs(logger) as logs:
with self.assertRaises(DispatcherHandlerStop):
assert noop(*self.update_and_context) is None
assert (
f"User o with id {OWNER_USER.id} "
"tried to access a only_owner command. "
"Command text: '<unknown>' (cropped to 10 chars)."
) in logs.output[0]
def test_only_owner_not_defined_empty_command(self):
with self.mocked_environ("fjfnaranjobot.auth.environ", None, ["BOT_OWNER_ID"]):
noop = only_owner(lambda _update, _context: True)
self.user_is_owner(remove_text=True)
with self.assertLogs(logger) as logs:
with self.assertRaises(DispatcherHandlerStop):
assert noop(*self.update_and_context) is None
assert (
f"User o with id {OWNER_USER.id} "
"tried to access a only_owner command. "
"Command text: '<empty>' (cropped to 10 chars)."
) in logs.output[0]
self.assert_message_chat_text(sentinel.chat_id_from_update, SORRY_TEXT)
def test_only_owner_not_defined(self):
with self.mocked_environ("fjfnaranjobot.auth.environ", None, ["BOT_OWNER_ID"]):
noop = only_owner(lambda _update, _context: True)
self.user_is_owner()
self.set_string_command("cmd")
with self.assertLogs(logger) as logs:
with self.assertRaises(DispatcherHandlerStop):
assert noop(*self.update_and_context) is None
assert (
f"User o with id {OWNER_USER.id} "
"tried to access a only_owner command. "
"Command text: 'cmd' (cropped to 10 chars)."
) in logs.output[0]
self.assert_message_chat_text(sentinel.chat_id_from_update, SORRY_TEXT)
class FriendsTests(TestCase):
def setUp(self):
super().setUp()
friends.clear()
@contextmanager
def friends(self, friends_list):
friends.clear()
for friend in friends_list:
friends.add(friend)
yield
friends.clear()
def test_le_not_implemented(self):
with self.assertRaises(NotImplementedError):
bool(friends <= 0)
def test_no_friends(self):
assert 0 == len(friends)
def test_one_friend(self):
with self.friends([FIRST_FRIEND_USER]):
assert 1 == len(friends)
assert FIRST_FRIEND_USER in friends
def test_auth_get_friends_many_friends(self):
with self.friends([FIRST_FRIEND_USER, SECOND_FRIEND_USER]):
assert 2 == len(friends)
assert FIRST_FRIEND_USER in friends
assert SECOND_FRIEND_USER in friends
def test_auth_get_friends_many_friends_sorted(self):
with self.friends([SECOND_FRIEND_USER, FIRST_FRIEND_USER]):
first_friend, second_friend = friends.sorted()
assert FIRST_FRIEND_USER.id == first_friend.id
assert SECOND_FRIEND_USER.id == second_friend.id
def test_auth_add_friend(self):
friends.add(FIRST_FRIEND_USER)
assert 1 == len(friends)
assert FIRST_FRIEND_USER in friends
def test_auth_add_friend_already_friend(self):
with self.friends([FIRST_FRIEND_USER]):
friends.add(User(FIRST_FRIEND_USER.id, "x"))
assert 1 == len(friends)
assert FIRST_FRIEND_USER in friends
for friend in friends:
assert "x" == friend.username
def test_auth_add_friend_is_owner(self):
friends.add(OWNER_USER)
assert 1 == len(friends)
assert OWNER_USER in friends
def test_auth_del_friend_not_friends(self):
friends.discard(FIRST_FRIEND_USER)
assert 0 == len(friends)
def test_auth_del_friend_not_a_friend(self):
with self.friends([FIRST_FRIEND_USER]):
friends.discard(SECOND_FRIEND_USER)
assert 1 == len(friends)
assert FIRST_FRIEND_USER in friends
def test_auth_del_friend_one_friend(self):
with self.friends([FIRST_FRIEND_USER, SECOND_FRIEND_USER]):
friends.discard(FIRST_FRIEND_USER)
assert 1 == len(friends)
assert SECOND_FRIEND_USER in friends
def test_auth_del_friend_last_friend(self):
with self.friends([FIRST_FRIEND_USER]):
friends.discard(FIRST_FRIEND_USER)
assert 0 == len(friends)
|
fritsvanveen/QGIS
|
refs/heads/master
|
python/plugins/db_manager/db_plugins/__init__.py
|
3
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : brush.tyler@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import str
class NotSupportedDbType(Exception):
def __init__(self, dbtype):
self.msg = self.tr("%s is not supported yet") % dbtype
        Exception.__init__(self, self.msg)
def __str__(self):
return self.msg.encode('utf-8')
def initDbPluginList():
import os
current_dir = os.path.dirname(__file__)
for name in os.listdir(current_dir):
if name == '__pycache__':
continue
if not os.path.isdir(os.path.join(current_dir, name)):
continue
try:
exec(u"from .%s import plugin as mod" % name, globals())
except ImportError as e:
DBPLUGIN_ERRORS.append(u"%s: %s" % (name, str(e)))
continue
pluginclass = mod.classFactory() # NOQA
SUPPORTED_DBTYPES[pluginclass.typeName()] = pluginclass
return len(SUPPORTED_DBTYPES) > 0
def supportedDbTypes():
return sorted(SUPPORTED_DBTYPES.keys())
def getDbPluginErrors():
return DBPLUGIN_ERRORS
def createDbPlugin(dbtype, conn_name=None):
if dbtype not in SUPPORTED_DBTYPES:
raise NotSupportedDbType(dbtype)
dbplugin = SUPPORTED_DBTYPES[dbtype]
return dbplugin if conn_name is None else dbplugin(conn_name)
# initialize the plugin list
SUPPORTED_DBTYPES = {}
DBPLUGIN_ERRORS = []
initDbPluginList()
|
jn7163/django
|
refs/heads/master
|
tests/expressions/models.py
|
261
|
"""
Tests for F() query expression syntax.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Employee(models.Model):
firstname = models.CharField(max_length=50)
lastname = models.CharField(max_length=50)
salary = models.IntegerField(blank=True, null=True)
def __str__(self):
return '%s %s' % (self.firstname, self.lastname)
@python_2_unicode_compatible
class Company(models.Model):
name = models.CharField(max_length=100)
num_employees = models.PositiveIntegerField()
num_chairs = models.PositiveIntegerField()
ceo = models.ForeignKey(
Employee,
models.CASCADE,
related_name='company_ceo_set')
point_of_contact = models.ForeignKey(
Employee,
models.SET_NULL,
related_name='company_point_of_contact_set',
null=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Number(models.Model):
integer = models.BigIntegerField(db_column='the_integer')
float = models.FloatField(null=True, db_column='the_float')
def __str__(self):
return '%i, %.3f' % (self.integer, self.float)
class Experiment(models.Model):
name = models.CharField(max_length=24)
assigned = models.DateField()
completed = models.DateField()
estimated_time = models.DurationField()
start = models.DateTimeField()
end = models.DateTimeField()
class Meta:
ordering = ('name',)
def duration(self):
return self.end - self.start
@python_2_unicode_compatible
class Time(models.Model):
time = models.TimeField(null=True)
def __str__(self):
return "%s" % self.time
@python_2_unicode_compatible
class UUID(models.Model):
uuid = models.UUIDField(null=True)
def __str__(self):
return "%s" % self.uuid
|
rupran/ansible
|
refs/heads/devel
|
lib/ansible/utils/module_docs_fragments/asa.py
|
123
|
#
# (c) 2016, Peter Sprygada <psprygada@ansible.com>
# (c) 2016, Patrick Ogenstad <@ogenstad>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = """
options:
authorize:
description:
- Instructs the module to enter privileged mode on the remote device
before sending any commands. If not specified, the device will
attempt to execute all commands in non-privileged mode. If the value
is not specified in the task, the value of environment variable
C(ANSIBLE_NET_AUTHORIZE) will be used instead.
default: no
choices: ['yes', 'no']
context:
description:
- Specifies which context to target if you are running in the ASA in
multiple context mode. Defaults to the current context you login to.
default: null
provider:
description:
- A dict object containing connection details.
default: null
suboptions:
host:
description:
- Specifies the DNS host name or address for connecting to the remote
device over the specified transport. The value of host is used as
the destination address for the transport.
port:
description:
- Specifies the port to use when building the connection to the remote
device.
default: 22
username:
description:
- Configures the username to use to authenticate the connection to
the remote device. This value is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
password:
description:
- Specifies the password to use to authenticate the connection to
the remote device. This value is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
default: null
ssh_keyfile:
description:
- Specifies the SSH key to use to authenticate the connection to
the remote device. This value is the path to the
key used to authenticate the SSH session. If the value is not specified
in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
will be used instead.
authorize:
description:
- Instructs the module to enter privileged mode on the remote device
before sending any commands. If not specified, the device will
attempt to execute all commands in non-privileged mode. If the value
is not specified in the task, the value of environment variable
C(ANSIBLE_NET_AUTHORIZE) will be used instead.
default: no
choices: ['yes', 'no']
auth_pass:
description:
- Specifies the password to use if required to enter privileged mode
on the remote device. If I(authorize) is false, then this argument
does nothing. If the value is not specified in the task, the value of
environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead.
default: none
timeout:
description:
          - Specifies the idle timeout for the connection, in seconds. Useful
if the console freezes before continuing. For example when saving
configurations.
default: 10
"""
|
eayunstack/ceilometer
|
refs/heads/master
|
ceilometer/tests/unit/compute/pollsters/test_location_metadata.py
|
6
|
#
# Copyright 2012 eNovance <licensing@enovance.com>
# Copyright 2012 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the compute pollsters.
"""
import mock
from oslotest import base
import six
from ceilometer.agent import manager
from ceilometer.compute.pollsters import util
class FauxInstance(object):
def __init__(self, **kwds):
for name, value in kwds.items():
setattr(self, name, value)
def __getitem__(self, key):
return getattr(self, key)
def get(self, key, default):
try:
return getattr(self, key)
except AttributeError:
return default
class TestLocationMetadata(base.BaseTestCase):
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def setUp(self):
self.manager = manager.AgentManager()
super(TestLocationMetadata, self).setUp()
# Mimics an instance returned from nova api call
self.INSTANCE_PROPERTIES = {'name': 'display name',
'id': ('234cbe81-4e09-4f64-9b2a-'
'714f6b9046e3'),
'OS-EXT-SRV-ATTR:instance_name':
'instance-000001',
'OS-EXT-AZ:availability_zone':
'foo-zone',
'reservation_id': 'reservation id',
'architecture': 'x86_64',
'kernel_id': 'kernel id',
'os_type': 'linux',
'ramdisk_id': 'ramdisk id',
'status': 'active',
'ephemeral_gb': 0,
'root_gb': 20,
'disk_gb': 20,
'image': {'id': 1,
'links': [{"rel": "bookmark",
'href': 2}]},
'hostId': '1234-5678',
'flavor': {'name': 'm1.tiny',
'id': 1,
'disk': 20,
'ram': 512,
'vcpus': 2,
'ephemeral': 0},
'metadata': {'metering.autoscale.group':
'X' * 512,
'metering.ephemeral_gb': 42}}
self.instance = FauxInstance(**self.INSTANCE_PROPERTIES)
def test_metadata(self):
md = util._get_metadata_from_object(self.instance)
for prop, value in six.iteritems(self.INSTANCE_PROPERTIES):
if prop not in ("metadata"):
# Special cases
if prop == 'name':
prop = 'display_name'
elif prop == 'hostId':
prop = "host"
elif prop == 'OS-EXT-SRV-ATTR:instance_name':
prop = 'name'
elif prop == "id":
prop = "instance_id"
self.assertEqual(value, md[prop])
user_metadata = md['user_metadata']
expected = self.INSTANCE_PROPERTIES[
'metadata']['metering.autoscale.group'][:256]
self.assertEqual(expected, user_metadata['autoscale_group'])
self.assertEqual(1, len(user_metadata))
def test_metadata_empty_image(self):
self.INSTANCE_PROPERTIES['image'] = None
self.instance = FauxInstance(**self.INSTANCE_PROPERTIES)
md = util._get_metadata_from_object(self.instance)
self.assertIsNone(md['image'])
self.assertIsNone(md['image_ref'])
self.assertIsNone(md['image_ref_url'])
def test_metadata_image_through_conductor(self):
# There should be no links here, should default to None
self.INSTANCE_PROPERTIES['image'] = {'id': 1}
self.instance = FauxInstance(**self.INSTANCE_PROPERTIES)
md = util._get_metadata_from_object(self.instance)
self.assertEqual(1, md['image_ref'])
self.assertIsNone(md['image_ref_url'])
|
Adnn/django
|
refs/heads/master
|
django/core/mail/backends/dummy.py
|
835
|
"""
Dummy email backend that does nothing.
"""
from django.core.mail.backends.base import BaseEmailBackend
class EmailBackend(BaseEmailBackend):
def send_messages(self, email_messages):
return len(list(email_messages))
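# --- Usage note (my addition, not part of Django's source) ---
# A project typically selects this backend in its settings module with:
#   EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
# after which django.core.mail.send_mail() and friends report messages as
# sent without delivering anything, which is handy for tests and demos.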
|
sonaht/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/ovirt/ovirt_affinity_labels_facts.py
|
45
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_affinity_labels_facts
short_description: Retrieve facts about one or more oVirt/RHV affinity labels
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt/RHV affinity labels."
notes:
- "This module creates a new top-level C(ovirt_affinity_labels) fact, which
contains a list of affinity labels."
options:
name:
description:
- "Name of the affinity labels which should be listed."
vm:
description:
- "Name of the VM, which affinity labels should be listed."
host:
description:
- "Name of the host, which affinity labels should be listed."
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all affinity labels, which names start with C(label):
- ovirt_affinity_labels_facts:
name: label*
- debug:
var: affinity_labels
# Gather facts about all affinity labels, which are assigned to VMs
# which names start with C(postgres):
- ovirt_affinity_labels_facts:
vm: postgres*
- debug:
var: affinity_labels
# Gather facts about all affinity labels, which are assigned to hosts
# which names start with C(west):
- ovirt_affinity_labels_facts:
host: west*
- debug:
var: affinity_labels
# Gather facts about all affinity labels, which are assigned to hosts
# which names start with C(west) or VMs which names start with C(postgres):
- ovirt_affinity_labels_facts:
host: west*
vm: postgres*
- debug:
var: affinity_labels
'''
RETURN = '''
ovirt_affinity_labels:
description: "List of dictionaries describing the affinity labels. Affinity labels attribues are mapped to dictionary keys,
all affinity labels attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/affinity_label."
returned: On success.
type: list
'''
import fnmatch
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
)
def main():
argument_spec = ovirt_facts_full_argument_spec(
name=dict(default=None),
host=dict(default=None),
vm=dict(default=None),
)
module = AnsibleModule(argument_spec)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
affinity_labels_service = connection.system_service().affinity_labels_service()
labels = []
all_labels = affinity_labels_service.list()
if module.params['name']:
labels.extend([
l for l in all_labels
if fnmatch.fnmatch(l.name, module.params['name'])
])
if module.params['host']:
hosts_service = connection.system_service().hosts_service()
labels.extend([
label
for label in all_labels
for host in connection.follow_link(label.hosts)
if fnmatch.fnmatch(hosts_service.service(host.id).get().name, module.params['host'])
])
if module.params['vm']:
vms_service = connection.system_service().vms_service()
labels.extend([
label
for label in all_labels
for vm in connection.follow_link(label.vms)
if fnmatch.fnmatch(vms_service.service(vm.id).get().name, module.params['vm'])
])
if not (module.params['vm'] or module.params['host'] or module.params['name']):
labels = all_labels
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_affinity_labels=[
get_dict_of_struct(
struct=l,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for l in labels
],
),
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
main()
|
alexei-matveev/ccp1gui
|
refs/heads/master
|
interfaces/mndo.py
|
1
|
#
# This file is part of the CCP1 Graphical User Interface (ccp1gui)
#
# (C) 2002-2007 CCLRC Daresbury Laboratory
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import os,sys
if __name__ == "__main__":
# Need to add the gui directory to the python path so
# that all the modules can be imported
gui_path = os.path.split(os.path.dirname( os.path.realpath( __file__ ) ))[0]
sys.path.append(gui_path)
else:
from viewer.paths import gui_path
# Import python modules
import unittest
# Import our modules
import qm
import tools
import viewer
import objects
import jobmanager
from objects.periodic import z_to_el
# Import external modules
import Tkinter
import Pmw
homolumoa = 0
MENU_ENER = "Energy"
MENU_GRAD = "Gradient"
MENU_OPT = "Geometry Optimisation"
class MNDOCalc(qm.QMCalc):
"""MNDO specific calculation class"""
def __init__(self,**kw):
qm.QMCalc.__init__(self,**kw)
self.set_name('unnamed')
self.set_parameter("task",MENU_ENER)
self.set_parameter("theory","AM1")
self.set_parameter("symmetry",1)
self.set_parameter("scf_method","rhf")
self.set_parameter("hamiltonian","am1")
self.set_parameter("basis","STO")
self.set_parameter("scf_maxcyc","200")
self.set_parameter("restart","0")
self.set_parameter("accuracy","medium")
self.set_output("ana_frequencies",0)
# need to replace with MNDO's accuracy parameter
self.set_parameter("scf_threshold",6)
        # this stuff almost certainly shouldn't be here
# but it enables WriteInput to run
directory = self.get_parameter("directory")
job_name = self.get_name()
self.infile = directory+os.sep+job_name+'.in'
self.outfile = directory+os.sep+job_name+'.out'
def get_editor_class(self):
return MNDOCalcEd
def WriteInput(self,filename=None):
mol_name = self.get_input("mol_name")
mol_obj = self.get_input("mol_obj")
job_name = self.get_name()
directory = self.get_parameter("directory")
if filename:
self.infile=filename
else:
filename = self.infile
writeinput_err = self.__WriteInput(mol_obj,filename)
if writeinput_err:
return
# load contents of input for viewing
file = open(self.infile,"r")
input = file.readlines()
self.set_input("input_file",input)
file.close()
def __WriteInput(self,mol,filename):
"""MNDO input writer, based on parts of the ChemShell function written by
the group of Walter Thiel
"""
file = open(filename,'w')
task = self.get_parameter("task")
scf_method = self.get_parameter("scf_method")
link_atom_indices=[]
link_atom_option=None
# Set to 1 when old vectors are available on fort.11
self.chk = 0
# SCF type / multiplicity
# scftype mult imult iuhf
#-------------------------------
# rhf 1 undef undef
# rhf N N -1
# rohf 1 1 -1
# rohf N N -1
# uhf 1 1 1
# uhf N N 1
scftype = self.get_parameter("scf_method")
mult = self.get_parameter("spin")
if scftype == "rhf":
if mult == 1:
imult=None
iuhf=None
else:
imult=mult
iuhf=-1
elif scftype == "rohf":
if mult == 1:
imult=1
iuhf=-1
else:
imult=mult
iuhf=-1
elif scftype == "uhf":
if mult == 1:
imult=1
iuhf=1
else:
imult=mult
iuhf=1
# mndo string settings based on the keyword setting
charge = self.get_parameter("charge")
if charge != 0:
khargestr = "kharge"+str(charge)
else:
khargestr = ""
hamiltonian = self.get_parameter("hamiltonian")
# hamiltonian (iop=)
ham_to_iop = {
"mndo/d" : -10,
"pm3" : -7,
"am1" : -2,
"mndo" : 0,
"mindo/3" : 1,
"cndo/2" : 2,
"om1" : -5,
"om2" : -6 }
iop = None
if iop == None:
try:
iopstr = "iop="+str(ham_to_iop[hamiltonian])
except KeyError:
print 'unrecognised hamiltonian'
return None
else:
iopstr="iop="+str(iop)
if imult == None:
imultstr=""
else:
imultstr="imult="+str(imult)
if iuhf == None:
iuhfstr=""
else:
iuhfstr="iuhf="+str(iuhf)
nprint = None
if nprint == None:
nprintstr=""
else:
nprintstr="nprint="+str(nprint)
iscf = None
if iscf == None:
iscfstr=""
else:
iscfstr="iscf="+str(iscf)
idiis = None
if idiis == None:
idiisstr=""
else:
idiisstr="idiis="+str(idiis)
ipsana = None
if ipsana == None:
ipsanastr =""
else:
ipsanastr ="ipsana="+str(ipsana)
mprint = None
if mprint == None:
mprintstr =""
else:
mprintstr ="mprint="+str(mprint)
nstart = None
if nstart == None:
nstartstr =""
else:
nstartstr ="nstart="+str(nstart)
# Build up the input lines for the mndo input file
optstr1 = khargestr + " " + iopstr + " " + idiisstr + " " + ipsanastr + " " + \
nstartstr + " " + imultstr + " " + iuhfstr
optstr2 = nprintstr + " " + mprintstr + " " + iscfstr
optstr3="igeom=1 iform=1 nsav15=4 ipubo=1"
if self.chk:
trialstr="ktrial=11"
else:
trialstr=""
if task == MENU_ENER:
optstr3 = optstr3 + " jop=-1"
enerflag = 1
gradflag = 0
elif task == MENU_GRAD:
optstr3 = optstr3 + " jop=-2"
enerflag = 1
gradflag = 1
elif task == MENU_OPT:
optstr3 = optstr3 + " jop=0"
enerflag = 1
gradflag = 1
#
# ================= Generate MNDO97 input file =================
#
# Keyword cards
file.write(optstr1+" + \n")
file.write(optstr2+" + \n")
optstr = ""
if len(optstr) or len(trialstr) != 0:
file.write(optstr+" "+trialstr)
###set nbq [ get_number_of_bqs coords=$coords ]
nbq=0
file.write(optstr3 + "\n")
file.write("MNDO file from the CCP1 GUI\n\n")
#
# Output coordinate information
# (see helper functions in ../interface_gamess/interface.c)
#
if task == MENU_OPT:
tt = ' 1 '
else:
tt = ' 0 '
for a in mol.atom:
file.write(
str(a.get_number()) + ' ' +
str(a.coord[0]) + tt +
str(a.coord[1]) + tt +
str(a.coord[2]) + tt + '\n')
# Termination of coordinates
file.write("0 0.0 0 0.0 0 0.0 0\n")
# Output of point charges
#if { $binary == 1 } {
#format_mndo_bq_list_long file $jobname.in
#} else {
#format_mndo_bq_list file $jobname.in
#}
# Input writing finishes here
file.close()
return 0
def makejob(self,writeinput=1,graph=None):
"""Build the MNDO job"""
self.GetModel()
mol_obj = self.get_input("mol_obj")
job_name = self.get_name()
directory = self.get_parameter("directory")
self.infile = directory+os.sep+job_name+'.in'
self.outfile = directory+os.sep+job_name+'.out'
if writeinput:
self.WriteInput()
else:
input = self.get_input("input_file")
file = open(self.infile,'w')
for a in input:
file.write(a)
file.close()
job = self.get_job()
if not job:
job = self.create_job()
job.name = job_name
# Delete old vectors
if self.chk == 0:
job.add_step(jobmanager.job.DELETE_FILE,'remove old vectors',remote_filename="fort.11",kill_on_error=0)
job.add_step(jobmanager.job.DELETE_FILE,'remove old output',remote_filename=self.outfile,kill_on_error=0)
job.add_step(jobmanager.job.COPY_OUT_FILE,'transfer input',local_filename=self.infile)
# Local windows job, search for local executable
if sys.platform[:3] == 'win':
# Name of executable, assume install of exe into exe subdirectory
try:
install_dir = os.environ['MNDO_BIN']
mndo_exe=install_dir+'\mndo.exe'
except KeyError:
mndo_exe=gui_path+'/exe/mndo.exe'
print 'Using MNDO path ' + mndo_exe
job.add_step(jobmanager.job.RUN_APP,'run MNDO',local_command=mndo_exe,stdin_file=None,stdout_file=None)
else:
mndo_exe="mndo"
job.add_step(jobmanager.job.RUN_APP,'run MNDO',local_command=mndo_exe,stdin_file=self.infile,stdout_file=self.outfile)
job.add_step(jobmanager.job.COPY_BACK_FILE,'recover log',remote_filename=self.outfile)
job.add_step(jobmanager.job.PYTHON_CMD,'load results',proc=lambda s=self,g=graph: s.endjob(g))
job.add_tidy(self.endjob2)
return job
def endjob(self,graph):
"""This is executed when the job completes successfully
from within the job thread
it should not perform Tk operations
"""
return 0,""
def endjob2(self,code=0):
"""This function is executed in the main thread if the job completes
satisfactorily"""
print 'endjob2'
if self.debug:
print 'running endjob2 code=',code
# load contents of listing for viewing
if self.debug_slave:
print 'endjob....'
job_name = self.get_name()
directory = self.get_parameter("directory")
#file = open(directory+'/'+job_name+'.out','r')
file = open(self.outfile,'r')
self.ReadOutput(file)
file.close()
# load in fort.15 ... only in case of success
if code:
return
fp = open(directory + '/fort.15',"r")
line = fp.readline()
ttt = ["coordinates angstrom"]
if line[:31] == " CARTESIAN COORDINATES: NUMAT =":
nat_rd = int(line[31:])
for i in range(nat_rd):
line = fp.readline()
words = line.split()
txt = z_to_el[int(words[1])] + " " + words[2] + " " + words[3] + " " + words[4]
ttt.append(txt)
print ttt
o = self.get_input("mol_obj")
res = Zmatrix(list=ttt)
res.connect()
res.name = "unnamed"
res.title = "untitled"
print res
res.list()
#self.results = [ res ]
# problem here as that as we are running in a slave thread
# we cannot use Tk .. so this is silent
ed = self.get_editor()
if ed:
try:
ed.connect_model(res)
except AttributeError:
pass
if ed.graph:
#ed.graph.import_objects(self.results)
txt = "Objects loaded from punchfile:"
txt = txt + "Structure update" '\n'
#for r in self.results:
# txt = txt + r.title + '\n'
ed.Info(txt)
# Update
if ed.update_func:
o = self.get_input("mol_obj")
#name = self.get_input("mol_name")
print 'performing update using res'
ed.update_func(res)
def get_theory(self):
return self.get_parameter("theory")
def check_direct(self):
return 0
class MNDOCalcEd(qm.QMCalcEd):
def __init__(self,root,calc,graph,**kw):
qm.QMCalcEd.__init__(self,root,calc,graph,**kw)
self.tasks = [MENU_ENER, MENU_GRAD, MENU_OPT]
## self.tasks = ["energy",
## "optimise internal coord.",
## "optimise cartesian coord."]
self.theories["energy"] = ["AM1", "PM3" ]
self.basissets = ["STO"]
# Don't do this until there is something in there to show...
#self.AddPage("SCFPage","SCF")
self.scf_methods = {}
tmp = ["rhf","rohf","uhf"]
self.scf_methods[MENU_ENER] = tmp
self.scf_methods[MENU_GRAD] = tmp
self.scf_methods[MENU_OPT] = tmp
self.hamiltonians = [ "mndo/d", "pm3", "am1", "mndo", "mindo/3", "cndo/2", "om1" , "om2" ]
# self.AddPage("DirectivesPage","Directives")
self.homolumo = Tkinter.BooleanVar()
self.chargeden = Tkinter.BooleanVar()
self.frequencies = Tkinter.BooleanVar()
#Create the tools used in the Molecule tab - spin & charge created in QM.
self.task_tool = tools.SelectOptionTool(self,'task','Task',self.tasks,command=self.__taskupdate)
#Used to specify task
self.tasktoolvalue = self.task_tool.widget.getvalue()
self.checkspin_widget = Tkinter.Button(self.interior(),
text = 'Check Spin',
command = self.__CheckSpin)
self.symmetry_tool = tools.BooleanTool(self,'symmetry','Use Symmetry')
mol_obj = self.calc.get_input('mol_obj')
## # need to propagate the default basis back
## self.basis_manager = self.calc.basis_manager
## self.basis_tool = BasisTool(self,'basis','ECP','default_basis',
## molecule=mol_obj,basis_manager=self.basis_manager)
## #Create the tools used in the Theory tab
## #self.guess_tool = GamessGuessTool(self,self.__guesscommand)
## self.guessoption_tool = SelectOptionTool(self,'guess_method','Vectors',self.guess_options,
## self.__guesstype)
## self.guessatoms_tool = SelectOptionTool(self,'guess_comp',None,self.compute_options)
## self.guesssection1_tool = tools.IntegerTool(self,'guess_sect1','Section a',0)
## self.guesssection2_tool = tools.IntegerTool(self,'guess_sect2','Section b',0)
## self.guessgetqblock1_tool = tools.IntegerTool(self,'getq_block1','File Block a',0)
## self.guessgetqblock2_tool = tools.IntegerTool(self,'getq_block2','File Block b',0)
## self.guessgetqsection1_tool = tools.IntegerTool(self,'getq_sect1','File Section a',0)
## self.guessgetqsection2_tool = tools.IntegerTool(self,'getq_sect2','File Section b',0)
self.scfmethod_tool = tools.SelectOptionTool(self,'scf_method',
'SCF Method',
self.scf_methods[self.tasktoolvalue],
self.__scfmethod)
self.hamiltonian_tool = tools.SelectOptionTool(self,'hamiltonian',
'Hamiltonian',
self.hamiltonians)
self.scfmaxcycles_tool = tools.IntegerTool(self,'scf_maxcyc','Max. Cycles',1)
self.scfthreshold_tool = tools.IntegerTool(self,'scf_threshold','Threshold',3)
## self.scfbypass_tool = BooleanTool(self,'scf_bypass', 'Bypass SCF')
## self.scflevelinit_tool = FloatTool(self,'scf_level_init','Initial Levelshifter Value',0.0)
## self.scflevelit_tool = tools.IntegerTool(self,'scf_level_it','Cycle to change on',1)
## self.scflevelfinal_tool = FloatTool(self,'scf_level_final','Final Levelshifter Value',0.0)
## self.postscfmethod_tool = tools.SelectOptionTool(self,'postscf_method',
## 'Method',
## self.postscf_methods[self.tasktoolvalue])
## #Create the tools for the DFT tab
## self.dftfunctional_tool = tools.SelectOptionTool(self,'dft_functional','Functional',self.dft_functionals)
## self.dftaccuracy_tool = tools.SelectOptionTool(self,'dft_grid','Grid setting',self.dft_grids)
## self.dftweightscheme_tool = tools.SelectOptionTool(self,'dft_weights',
## 'DFT weighting scheme',
## self.dft_weights)
## self.dftradial_tool = MenuCounterTool(self,
## 'dft_radialgrid',
## 'Radial Grid',
## self.dft_radialgrids,
## 'dft_radialgridpoints',
## 'Number of points',
## command = self.__dftradialgridpoints
## )
## self.radialgrid = self.dftradial_tool.firstmenu.getvalue()
## self.dftangular_tool = MenuCounterMenuTool(self,
## 'dft_angulargrid',
## 'Angular Grid',
## self.dft_angulargrids,
## 'dft_angulargridpoints',
## 'Number of points',
## 'dft_angulargridpoints',
## 'Number of points',
## self.dft_lebedevpoints,
## command = self.__dftangulargridpoints
## )
## self.angulargrid = self.dftangular_tool.firstmenu.getvalue()
## self.dftjfit_tool = BooleanTool(self,'dft_jfit','Use Coulomb Fitting',self.__dftjbasselect)
## self.dftjbas_tool = tools.SelectOptionTool(self,'dft_jbas','Fitting Basis',self.dft_jbas)
## self.dftschwarz_tool = tools.IntegerTool(self,'dft_schwarz','Schwarz cutoff')
## #Create the tools used in the Properties tab
## self.homolumo_tool = BooleanTool(self, 'ana_homolumo', 'HOMO/LUMO')
## self.homolumo1_tool = BooleanTool(self, 'ana_homolumo1', 'HOMO1/LUMO1')
## self.homolumo2_tool = BooleanTool(self, 'ana_homolumo2', 'HOMO2/LUMO2')
## self.homolumo3_tool = BooleanTool(self, 'ana_homolumo2', 'HOMO3/LUMO3')
## self.homolumo4_tool = BooleanTool(self, 'ana_homolumo4', 'HOMO4/LUMO4')
## self.homolumo5_tool = BooleanTool(self, 'ana_homolumo5', 'HOMO5/LUMO5')
## self.chargeden_tool = BooleanTool(self, 'ana_chargeden', 'Charge Density')
## self.diffden_tool = BooleanTool(self, 'ana_diffden', 'Difference Density')
## self.potential_tool = BooleanTool(self, 'ana_potential', 'Potential')
## self.chargedengrad_tool = BooleanTool(self, 'ana_chargedengrad', 'Gradient Density')
## self.spinden_tool = BooleanTool(self, 'ana_spinden', 'Spin Density')
## self.frequencies_tool = BooleanTool(self, 'ana_frequencies', 'Finite Difference')
## self.hessian_tool = BooleanTool(self, 'ana_hessian', "Analytic")
## #Create the tools used in the Optimisation tab
## self.optcoords_tool = tools.SelectOptionTool(self,'optimiser', 'Opt. Coords',
## self.optcoord_opts, self.__selectcoords)
## self.find_ts_tool = BooleanTool(self,"find_ts","Locate Transition State",self.__findts)
## # self.optmethod_tool = tools.SelectOptionTool(self,'optimiser_method','Method',self.optmethodopts)
## self.optmaxcyc1_tool = tools.IntegerTool(self,'max_opt_step','Energy evaluations',0)
## self.optmaxcyc2_tool = tools.IntegerTool(self,'max_opt_line','Line searches',0)
## self.optxtol_tool = FloatTool(self,'opt_conv_thsld','Convergence Thresh.',0.0)
## self.optstepmax_tool = FloatTool(self,'max_opt_step_len','Max. Step size',0.0)
## self.optvalue_tool = FloatTool(self,'opt_value','Turning Point Accuracy',0.0)
## self.optjorg_tool = BooleanTool(self,'opt_jorgensen','Use Jorgensen-Simons Algorithm',
## self.__optjorgensen)
## self.optpowell_tool = BooleanTool(self,'opt_powell','Use Powell Hessian update')
## self.optbfgs_tool = tools.SelectOptionTool(self,'opt_hess_update', 'Hessian Update Procedure',
## self.optbfgs_opts)
## self.optminhess_tool = FloatTool(self,'opt_min_hess','Min. Hessian Eigenvalue')
## self.optmaxhess_tool = FloatTool(self,'opt_max_hess','Max. Hessian Eigenvalue')
## self.optrfo_tool = MenuAndBooleanTool(self,'opt_rfo','opt_rfomode',
## 'Use Rational Function Optimisation',
## 'RFO Mode',self.optrfo_opts)
## #Create the tools used for the Job tab
## self.jobname_tool = TextFieldTool(self,'job_name','Job Name')
## self.hostname_tool = tools.SelectOptionTool(self,'hostname', 'Host name',
## self.hostnames, command=self.__sethost)
## self.hostname = self.hostname_tool.widget.getvalue()# get the hostname for the below tool
## self.submission_tool = tools.SelectOptionTool(self,'submission','Job Submission',
## self.submissionpolicies[self.hostname])
## self.username_tool = TextFieldTool(self,'username','User Name')
## self.workingdirectory_tool = ChangeDirectoryTool(self,'directory','Working Directory')
## #Create the tools used in the Restart Group
## self.ed0keep_tool = BooleanTool(self, 'ed0_keep', 'specify',
## command=lambda s=self: s.__keepfile('ed0'))
## self.ed0path_tool = ChangeDirectoryTool(self,'ed0_path','')
## self.ed2keep_tool = BooleanTool(self, 'ed2_keep', 'keep',
## command=lambda s= self: s.__keepfile('ed2'))
## self.ed2name_tool = BooleanTool (self, 'ed2_specify','specify ',
## command=lambda s=self: s.__keepfile('ed2'))
## self.ed2path_tool = FileTool(self,'ed2_path','',
## filetypes=[('Mainfiles','*.ed2'), ('All files','*.*')])
## self.ed3keep_tool = BooleanTool(self, 'ed3_keep', 'keep',
## command=lambda s = self: s.__keepfile('ed3'))
## self.ed3name_tool = BooleanTool (self, 'ed3_specify','specify ',
## command=lambda s=self: s.__keepfile('ed3'))
## self.ed3path_tool = FileTool(self,'ed3_path','',
## filetypes=[('Dumpfiles','*.ed3'), ('All files','*.*')])
## self.ed7keep_tool = BooleanTool(self, 'ed7_keep', 'keep',
## command=lambda s = self: s.__keepfile('ed7'))
## self.ed7name_tool = BooleanTool (self, 'ed7_specify','specify ',
## command=lambda s=self: s.__keepfile('ed7'))
## self.ed7path_tool = FileTool(self,'ed7_path','',
## filetypes=[('Tempfiles','*.ed7'), ('All files','*.*')])
## self.ed14keep_tool = BooleanTool(self, 'ed14_keep', 'specify',
## command=lambda s=self: s.__keepfile('ed14'))
## self.ed14path_tool = FileTool(self,'ed14_path','',
## filetypes=[('Dumpfiles','*.ed3'), ('All files','*.*')],
## action="open")
self.LayoutToolsTk()
self.__initialisetools()
def __initialisetools(self):
pass
def __taskupdate(self,task):
"""Update the SCF and post-SCF methods for the task that has been selected
and hide the optimisation tab
"""
self.scfmethod_tool.SetItems(self.scf_methods[task])
#### self.postscfmethod_tool.SetItems(self.postscf_methods[task])
# if task != MENU_OPT:
# self.notebook.tab('Optimisation').configure(state="disabled")
# else:
# self.notebook.tab('Optimisation').configure(state="active")
def LayoutToolsTk(self):
"""Place the widgets belonging to the tools (ChargeTool etc)
This will generally be replaced by a more specific function
for a particular code interface.
"""
#Add Molecule tab
page = self.notebook.add('Molecule',tab_text='Molecule')
# Associate helpfile with notebook frame
tab = self.notebook.tab('Molecule')
viewer.help.sethelp(tab,'Molecule Tab')
page.optgroup = Pmw.Group(page,tag_text="Options")
page.optgroup.pack(expand='yes',fill='both')
## page.basisgroup = Pmw.Group(page,tag_text="Basis Selector")
## page.basisgroup.pack(expand='yes',fill='both')
self.title_tool.widget.pack(in_=page.optgroup.interior())
self.task_tool.widget.pack(in_=page.optgroup.interior())
self.scfmethod_tool.widget.pack(in_=page.optgroup.interior())
self.hamiltonian_tool.widget.pack(in_=page.optgroup.interior())
self.charge_tool.widget.pack(in_=page.optgroup.interior())
self.spin_tool.widget.pack(in_=page.optgroup.interior())
self.checkspin_widget.pack(in_=page.optgroup.interior())
## self.symmetry_tool.widget.pack(in_=page.optgroup.interior())
## Pmw.alignlabels([self.charge_tool.widget, self.spin_tool.widget])
## self.basis_tool.widget.pack(in_=page.basisgroup.interior())
## #Add Theory tab
## page = self.notebook.add('Theory',tab_text='Theory')
## # Associate helpfile with notebook frame
## tab = self.notebook.tab('Theory')
## tkmolview.help.sethelp(tab,'Theory Tab')
## page.guessgroup = Pmw.Group(page,tag_text="Guess")
## page.guessgroup.pack(expand='yes',fill='both')
## self.guessoption_tool.widget.pack(in_=page.guessgroup.interior(),side='left')
## page.guessframe = Tkinter.Frame(page.guessgroup.interior())
## page.guessframe.pack(in_=page.guessgroup.interior(),side='left')
## self.guessatoms_tool.SetParent(page.guessframe)
## self.guesssection1_tool.SetParent(page.guessframe)
## self.guesssection2_tool.SetParent(page.guessframe)
## self.guessgetqblock1_tool.SetParent(page.guessframe)
## self.guessgetqsection1_tool.SetParent(page.guessframe)
## self.guessgetqblock2_tool.SetParent(page.guessframe)
## self.guessgetqsection2_tool.SetParent(page.guessframe)
## page.scfgroup = Pmw.Group(page,tag_text="SCF")
## page.scfgroup.pack(expand='yes',fill='both')
## self.scfmethod_tool.widget.pack(in_=page.scfgroup.interior())
## self.scfmaxcycles_tool.widget.pack(in_=page.scfgroup.interior())
## self.scfthreshold_tool.widget.pack(in_=page.scfgroup.interior())
## self.scfbypass_tool.widget.pack(in_=page.scfgroup.interior())
## page.scflevelgroup = Pmw.Group(page,tag_text="SCF Level Shifters")
## page.scflevelgroup.pack(in_=page.scfgroup.interior(),
## expand='yes',
## fill='both',
## padx=10,
## pady=10)
## self.scflevelinit_tool.widget.pack(in_=page.scflevelgroup.interior())
## self.scflevelit_tool.widget.pack(in_=page.scflevelgroup.interior())
## self.scflevelfinal_tool.widget.pack(in_=page.scflevelgroup.interior())
## page.postscfgroup = Pmw.Group(page,tag_text="Post SCF")
## page.postscfgroup.pack(expand='yes',fill='both')
## self.postscfmethod_tool.widget.pack(in_=page.postscfgroup.interior())
## #Add DFT tab
## page = self.notebook.add('DFT',tab_text='DFT')
## # Associate helpfile with notebook frame
## tab = self.notebook.tab('DFT')
## tkmolview.help.sethelp(tab,'DFT Tab')
## page.dftgroup1 = Pmw.Group(page,tag_text="Functional")
## page.dftgroup1.pack(expand='yes',fill='both')
## page.dftgroup2 = Pmw.Group(page,tag_text="Accuracy")
## page.dftgroup2.pack(expand='yes',fill='both')
## page.dftgroup3 = Pmw.Group(page,tag_text="Quadrature Types")
## page.dftgroup3.pack(expand='yes',fill='both')
## #page.dftgroup4 = Pmw.Group(page,tag_text="DFT Options4")
## #page.dftgroup4.pack(expand='yes',fill='both')
## page.dftgroup5 = Pmw.Group(page,tag_text="Coulomb Fitting")
## page.dftgroup5.pack(expand='yes',fill='both')
## self.dftfunctional_tool.widget.pack(in_=page.dftgroup1.interior())
## self.dftaccuracy_tool.widget.pack(in_=page.dftgroup2.interior())
## self.dftweightscheme_tool.widget.pack(in_=page.dftgroup2.interior())
## self.dftradial_tool.widget.pack(in_=page.dftgroup3.interior(),side='top')
## self.dftangular_tool.widget.pack(in_=page.dftgroup3.interior(),side='top')
## self.dftjfit_tool.SetParent(page.dftgroup5.interior())
## self.dftjfit_tool.Pack()
## self.dftjbas_tool.SetParent(page.dftgroup5.interior())
## self.dftschwarz_tool.SetParent(page.dftgroup5.interior())
## # Add Properties tab
## page = self.notebook.add('Properties',tab_text='Properties')
## # Associate helpfile with notebook frame
## tab = self.notebook.tab('Properties')
## tkmolview.help.sethelp(tab,'Properties Tab')
## page.grgroup = Pmw.Group(page,tag_text="Graphical options")
## page.grgroup.pack(expand='yes',fill='x')
## page.mogroup = Pmw.Group(page.grgroup.interior(),tag_text="Orbital Plots")
## page.mogroup.pack(expand='yes',fill='x',side='right')
## self.homolumo_tool.widget.pack(in_=page.mogroup.interior())
## self.homolumo1_tool.widget.pack(in_=page.mogroup.interior())
## self.homolumo2_tool.widget.pack(in_=page.mogroup.interior())
## self.homolumo3_tool.widget.pack(in_=page.mogroup.interior())
## self.homolumo4_tool.widget.pack(in_=page.mogroup.interior())
## self.homolumo5_tool.widget.pack(in_=page.mogroup.interior())
## f = Frame(page.grgroup.interior())
## f.pack(expand='yes',fill='x',side='left')
## page.group2 = Pmw.Group(f,tag_text="Density and Potential")
## page.group2.pack(expand='yes',fill='x',side='top')
## self.chargeden_tool.widget.pack(in_=page.group2.interior())
## self.diffden_tool.widget.pack(in_=page.group2.interior())
## self.potential_tool.widget.pack(in_=page.group2.interior())
## self.chargedengrad_tool.widget.pack(in_=page.group2.interior())
## self.spinden_tool.widget.pack(in_=page.group2.interior())
## page.editgrid_button = Tkinter.Button(f,command=self.edit_grid)
## page.editgrid_button.config(text="Edit Grid")
## page.editgrid_button.pack(side='bottom',padx=10,pady=20)
## page.vgroup = Pmw.Group(page,tag_text="Frequencies")
## page.vgroup.pack(expand='yes',fill='x')
## self.frequencies_tool.widget.pack(in_=page.vgroup.interior())
## self.hessian_tool.widget.pack(in_=page.vgroup.interior())
## Pmw.alignlabels([self.homolumo_tool.widget,
## self.homolumo1_tool.widget,
## self.homolumo2_tool.widget,
## self.homolumo3_tool.widget,
## self.homolumo4_tool.widget,
## self.homolumo5_tool.widget])
## Pmw.alignlabels([self.potential_tool.widget,
## self.chargeden_tool.widget,
## self.diffden_tool.widget,
## self.chargedengrad_tool.widget,
## self.spinden_tool.widget,
## page.editgrid_button])
## #Add Optimisation tab
## page = self.notebook.add('Optimisation',tab_text='Optimisation')
## # Associate helpfile with notebook frame
## tab = self.notebook.tab('Optimisation')
## viewer.help.sethelp(tab,'Optimisation Tab')
## page.rungroup = Pmw.Group(page,tag_text="Runtype")
## page.rungroup.pack(expand='yes',fill='both')
## self.optcoords_tool.widget.pack(in_=page.rungroup.interior())
## self.find_ts_tool.widget.pack(in_=page.rungroup.interior())
## page.searchgroup = Pmw.Group(page,tag_text="Search Procedure")
## page.searchgroup.pack(expand='yes',fill='both')
## self.optmaxcyc1_tool.SetParent(page.searchgroup.interior())
## self.optmaxcyc1_tool.Pack()
## self.optmaxcyc2_tool.SetParent(page.searchgroup.interior())
## self.optmaxcyc2_tool.Pack()
## self.optxtol_tool.SetParent(page.searchgroup.interior())
## self.optxtol_tool.Pack()
## self.optstepmax_tool.SetParent(page.searchgroup.interior())
## self.optstepmax_tool.Pack()
## self.optvalue_tool.SetParent(page.searchgroup.interior())
## self.optvalue_tool.Pack()
## Pmw.alignlabels([self.optmaxcyc1_tool.widget, self.optmaxcyc2_tool.widget,
## self.optxtol_tool.widget, self.optstepmax_tool.widget,
## self.optvalue_tool.widget])
## page.jorggroup = Pmw.Group(page,tag_text="Jorgensen-Simons Algorithm")
## page.jorggroup.pack(expand='yes',fill='both')
## self.optjorg_tool.SetParent(page.jorggroup.interior())
## self.optjorg_tool.Pack()
## self.optpowell_tool.SetParent(page.jorggroup.interior())
## self.optbfgs_tool.SetParent(page.jorggroup.interior())
## self.optminhess_tool.SetParent(page.jorggroup.interior())
## self.optmaxhess_tool.SetParent(page.jorggroup.interior())
## self.optrfo_tool.SetParent(page.jorggroup.interior())
## Pmw.alignlabels([self.optjorg_tool.widget, self.optpowell_tool.widget,
## self.optbfgs_tool.widget, self.optminhess_tool.widget,
## self.optmaxhess_tool.widget, self.optrfo_tool.widget])
## #Add Job tab
## page = self.notebook.add('Job',tab_text='Job')
## # Associate helpfile with notebook frame
## tab = self.notebook.tab('Job')
## tkmolview.help.sethelp(tab,'Job Tab')
## page.jobgroup = Pmw.Group(page,tag_text="Job Group")
## page.jobgroup.pack(side='top',expand='yes',fill='both')
## self.jobname_tool.widget.pack(in_=page.jobgroup.interior())
## self.hostname_tool.widget.pack(in_=page.jobgroup.interior())
## self.submission_tool.widget.pack(in_=page.jobgroup.interior())
## self.username_tool.widget.pack(in_=page.jobgroup.interior())
## self.workingdirectory_tool.widget.pack(in_=page.jobgroup.interior())
## #Add Restart group
## page.fpathgroup = Pmw.Group(page,tag_text="File Path Group")
## page.fpathgroup.pack(expand='yes',fill='both')
## #Need to create multiple frames so things can be packed and forgotten
## # without the order getting all jumbled.
## page.ed0frame = Tkinter.Frame(page.fpathgroup.interior())
## page.ed0frame.pack(in_=page.fpathgroup.interior(),side='top',
## expand='yes', fill='both')
## ed0label = Tkinter.Label(page.ed0frame, text='ECP Libraries (ed0) ')
## ed0label.pack(side='left')
## self.ed0keep_tool.SetParent(page.ed0frame)
## self.ed0path_tool.SetParent(page.ed0frame)
## self.ed0keep_tool.widget.pack(in_=page.ed0frame, side='left')
## page.ed2frame = Tkinter.Frame(page.fpathgroup.interior())
## page.ed2frame.pack(in_=page.fpathgroup.interior(),side='top',
## expand='yes', fill='both')
## ed2label = Tkinter.Label(page.ed2frame, text='Mainfile (ed2) ')
## ed2label.pack(side='left')
## self.ed2keep_tool.SetParent(page.ed2frame)
## self.ed2name_tool.SetParent(page.ed2frame)
## self.ed2path_tool.SetParent(page.ed2frame)
## self.ed2keep_tool.widget.pack(in_=page.ed2frame, side='left')
## page.ed3frame = Tkinter.Frame(page.fpathgroup.interior())
## page.ed3frame.pack(in_=page.fpathgroup.interior(),side='top',
## expand='yes', fill='x')
## ed3label = Tkinter.Label(page.ed3frame, text='Dumpfile (ed3) ')
## ed3label.pack(side='left')
## self.ed3keep_tool.SetParent(page.ed3frame)
## self.ed3path_tool.SetParent(page.ed3frame)
## self.ed3name_tool.SetParent(page.ed3frame)
## self.ed3keep_tool.widget.pack(in_=page.ed3frame, side='left')
## page.ed7frame = Tkinter.Frame(page.fpathgroup.interior())
## page.ed7frame.pack(in_=page.fpathgroup.interior(),side='top',
## expand='yes', fill='both')
## ed7label = Tkinter.Label(page.ed7frame, text='Tempfile (ed7) ')
## ed7label.pack(side='left')
## self.ed7keep_tool.SetParent(page.ed7frame)
## self.ed7name_tool.SetParent(page.ed7frame)
## self.ed7path_tool.SetParent(page.ed7frame)
## self.ed7keep_tool.widget.pack(in_=page.ed7frame, side='left')
## page.ed14frame = Tkinter.Frame(page.fpathgroup.interior())
## page.ed14frame.pack(in_=page.fpathgroup.interior(),side='top',
## expand='yes', fill='both')
## ed14label = Tkinter.Label(page.ed14frame, text='Foreign Dumpfile (ed14) ')
## ed14label.pack(side='left')
## self.ed14keep_tool.SetParent(page.ed14frame)
## self.ed14path_tool.SetParent(page.ed14frame)
## self.ed14keep_tool.widget.pack(in_=page.ed14frame, side='left')
## Pmw.alignlabels([self.ed0keep_tool.widget,
## self.ed0path_tool.widget,
## self.ed2keep_tool.widget,
## self.ed2path_tool.widget,
## self.ed2name_tool.widget,
## self.ed2keep_tool.widget,
## self.ed3path_tool.widget,
## self.ed3name_tool.widget,
## self.ed3keep_tool.widget,
## self.ed7path_tool.widget,
## self.ed7name_tool.widget,
## self.ed7keep_tool.widget,
## self.ed14path_tool.widget,
## self.ed14keep_tool.widget])
def TaskPage(self,page,action):
QMCalcEd.TaskPage(self,page,action)
# Create a group for the checkboxes
if action == Create:
page.group = Pmw.Group(page,tag_text="Analysis options")
page.group.pack(expand='yes',fill='x')
def SCFPage(self,page,action):
"""Maintain the SCF page."""
labels = []
def DirectivesPage(self,page,action):
"""Entry for various directives not covered by GUI yet"""
pass
def __CheckSpin(self):
for tool in self.tools:
tool.ReadWidget()
self.calc.CheckSpin()
def __scfmethod(self,scf):
"""Configure all widgets and variables that depend on the SCF type.
"""
self.scfmethod_tool.ReadWidget()
#if (scf == 'DFT') or (scf == 'UDFT') or (scf == 'Direct DFT') or (scf == 'Direct UDFT'):
# self.notebook.tab('DFT').configure(state="active")
#else:
# self.notebook.tab('DFT').configure(state="disabled")
#
# REM the default 'enter' and 'vectors' sections are configured in the __guesstype
# method as this is always run after the __scfmethod is invoked
#guess = self.calc.get_parameter("guess_method")
#self.__guesstype("")
##########################################
#
# Unittesting stuff
#
##########################################
class MNDOCalcEdTestCases(unittest.TestCase):
exdir=gui_path+os.sep+'examples'+os.sep
def testOpt(self):
        # tkroot is either created in this module if we run standalone, or passed in
        # by the testall script if run as part of the full test suite
global tkroot
calc = MNDOCalc()
m = objects.zmatrix.Zmatrix(file=self.exdir+'water.zmt')
calc.set_input('mol_obj',m)
calc.set_parameter('task',MENU_OPT)
jm = jobmanager.JobManager()
je = jobmanager.jobeditor.JobEditor(tkroot,jm)
vt = MNDOCalcEd(tkroot,calc,None,job_editor=je)
vt.Run()
def testMe():
"""Return a unittest test suite with all the testcases that should be run by the main
gui testing framework."""
return unittest.TestLoader().loadTestsFromTestCase(MNDOCalcEdTestCases)
if __name__ == "__main__":
tkroot=Tkinter.Tk()
if 0:
unittest.main()
else:
#
# Test editor in standalone mode
#
calc = MNDOCalc()
m = objects.zmatrix.Zmatrix(file=gui_path+os.sep+'examples'+os.sep+'water.zmt')
calc.set_input('mol_obj',m)
calc.set_parameter('task',MENU_OPT)
jm = jobmanager.JobManager()
je = jobmanager.jobeditor.JobEditor(tkroot,jm)
vt = MNDOCalcEd(tkroot,calc,None,job_editor=je)
#vt.Run()
#calc.WriteInput()
tkroot.mainloop()
|
code-google-com/shaderman
|
refs/heads/master
|
modes/Unix/__init__.py
|
4
|
import os
import subprocess
def name():
return "Unix"
def generator(self, filename, code):
print code
#code = "bash -c '%s'" % code
#os.system(code)
#pipe = subprocess.Popen(code, shell=True, stdout=subprocess.PIPE).stdout
#buffer = str("".join(pipe.readlines()))
#print buffer
#print `pwd`
p = subprocess.Popen([code], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
sts = os.waitpid(p.pid, 0)
buffer = str("".join(p.stdout.readlines()+p.stderr.readlines()))
print buffer
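# Usage sketch (hedged): the host application is assumed to call
# generator(self, filename, code) with a shell command string in `code`; the
# command is run through the shell and its combined stdout/stderr is printed.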
|
pfnet/chainer
|
refs/heads/master
|
tests/chainerx_tests/unit_tests/routines_tests/test_indexing.py
|
3
|
import unittest
import numpy
import pytest
import chainer.testing
import chainerx
import chainerx.testing
from chainerx_tests import array_utils
from chainerx_tests import dtype_utils
from chainerx_tests import math_utils
from chainerx_tests import op_utils
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape,indices', [
# empty indexing
((), ()),
((3,), ()),
((2, 2, 2), ()),
# integer indexing - non-tuple indexing
((3,), 0),
((3,), 1),
((3,), 2),
((3,), -1),
((2, 3), 0),
((2, 3), 1),
((2, 3), numpy.int8(-1)),
((2, 3), numpy.int32(0)),
((2, 3), numpy.uint64(1)),
    # integer indexing - tuple indexing
((3,), (0,)),
((3,), (1,)),
((3,), (2,)),
((3,), (-1,)),
((2, 3), (0,)),
((2, 3), (1,)),
((2, 3), (0, 0)),
((2, 3), (1, 1)),
((2, 3, 4), (0, -2, 3)),
((2, 3, 4), (1, 0)),
# slice indexing - non-tuple indexing
((3,), slice(None)),
((3,), slice(2)),
((3,), slice(0, 3)),
((3,), slice(0, 2)),
((3,), slice(1, 3)),
((3,), slice(0, 0)),
((3,), slice(0, 1)),
((3,), slice(2, 0, -1)),
((3,), slice(-2, -1)),
((3,), slice(2, None, -1)),
((3,), slice(None, 0, 1)),
((3,), slice(None, -1, -1)),
((3,), slice(None, -2, -1)),
((6,), slice(0, 6, 2)),
((6,), slice(1, 6, 2)),
((6,), slice(5, None, -2)),
((6,), slice(4, 10)),
((6,), slice(10, 5, -1)),
((6,), slice(5, -1)),
((6,), slice(5, -1, -1)),
((6,), slice(-1, 5)),
((6,), slice(-1, 5, -1)),
# slice indexing - tuple indexing
((3,), (slice(None),)),
((3,), (slice(2),)),
((3,), (slice(0, 3),)),
((3,), (slice(0, 2),)),
((3,), (slice(1, 3),)),
((3,), (slice(0, 0),)),
((3,), (slice(0, 1),)),
((3,), (slice(2, 0, -1),)),
((3,), (slice(-2, -1),)),
((3,), (slice(2, None, -1),)),
((3,), (slice(None, 0, 1),)),
((3,), (slice(None, -1, -1),)),
((3,), (slice(None, -2, -1),)),
((6,), (slice(0, 6, 2),)),
((6,), (slice(1, 6, 2),)),
((6,), (slice(5, None, -2),)),
((6,), (slice(50, 1, -1),)),
((6,), (slice(3, 3, 1),)),
((6,), (slice(3, 3, -2),)),
((6,), (slice(50, 50, 1),)),
((6,), (slice(50, 50, -2),)),
((6,), (slice(-50, -50, 1),)),
((6,), (slice(-50, -50, -2),)),
((2, 3), (slice(None), slice(None))),
((2, 3), (slice(1), slice(2))),
((2, 3), (slice(0, 2), slice(0, 3))),
((2, 3), (slice(0, 2), slice(0, -1))),
((2, 3), (slice(0, None, -1), slice(2, 3))),
((2, 3), (slice(0, None, None), slice(-2, 0, -1))),
((2, 3), (slice(1, 2), slice(0, 2))),
((2, 3), (slice(-2, None, -1), slice(0, 3))),
((2, 3), (slice(-2, None, -1), slice(-3, None, -1))),
((2, 3), (slice(-2, None, -1), slice(None, None, -2))),
((2, 3), (slice(1, 2), slice(None, None, 1))),
((2, 3), (slice(1, 2), slice(None, None, 2))),
((2, 3, 4), (slice(1), slice(-2, 3), slice(1, None, -1))),
# newaxis indexing - non-tuple indexing
((), chainerx.newaxis),
((3,), chainerx.newaxis),
# newaxis indexing - tuple indexing
((), (chainerx.newaxis,)),
((3,), (chainerx.newaxis,)),
((2, 3), (chainerx.newaxis, chainerx.newaxis)),
# ellipsis indexing - non-tuple indexing
((), Ellipsis),
((3,), Ellipsis),
# ellipsis indexing - tuple indexing
((), (Ellipsis,)),
((2, 3), (Ellipsis,)),
# mixed indexing - tuple indexing
((2, 3), (0, slice(1, 3))),
((4, 3), (slice(1, 3), 1)),
((2, 3, 4), (1, slice(2,), slice(1, 3))),
((2, 3), (1, chainerx.newaxis, slice(1, 3))),
((2, 3, 4), (slice(0, 1), slice(1, 2), slice(1, 3), chainerx.newaxis)),
((2, 3, 4), (slice(0, 1), slice(1, 2), chainerx.newaxis, slice(1, 3))),
((2, 3, 4), (slice(0, 1), chainerx.newaxis, slice(1, 2), slice(1, 3))),
((2, 3, 4), (chainerx.newaxis, slice(0, 1), slice(1, 2), slice(1, 3))),
((2, 3, 4),
(1, slice(2,), chainerx.newaxis, slice(1, 3), chainerx.newaxis)),
((2, 3, 4), (0, Ellipsis)),
((2, 3, 4), (Ellipsis, 2)),
((2, 3, 4), (1, Ellipsis, 2)),
((2, 3, 4), (1, Ellipsis, 2, 3)),
((2, 3, 4), (chainerx.newaxis, Ellipsis, chainerx.newaxis)),
((2, 3, 4), (1, Ellipsis, chainerx.newaxis, 3)),
((2, 3, 4), (1, Ellipsis, 2, chainerx.newaxis, 3)),
((2, 3, 4), (slice(0, 1), Ellipsis, slice(1, 3))),
])
class TestGetitem(op_utils.NumpyOpTest):
# TODO(niboshi): Remove this
check_numpy_strides_compliance = False
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype('float32')
return x,
def forward_xp(self, inputs, xp):
x, = inputs
y = x[self.indices]
return y,
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('shape,indices', [
((), 0),
((), (1,)),
((), (1, 0)),
((3,), 3),
((3,), (0, 1)),
((2, 3,), (2, 0)),
((2,), (2, chainerx.newaxis, 3)),
((2,), (2, Ellipsis, chainerx.newaxis, 3)),
((2,), (Ellipsis, Ellipsis)),
])
def test_getitem_index_error(device, shape, indices):
a = array_utils.create_dummy_ndarray(chainerx, shape, 'float32')
with pytest.raises(IndexError):
a[indices]
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_getitem_zero_sized_offsets(device):
a = chainerx.arange(6)
b = a[3:3]
# Test pre-conditions.
assert b.size == 0
assert b.offset == 12
# The offset of `c` should be the same as `b` since `b` is empty.
c = b[2:]
assert c.size == 0
assert c.offset == b.offset
@op_utils.op_test(['native:0', 'cuda:0'])
# TODO(hvy): Add cases where axis=None, when supported.
@chainer.testing.parameterize_pytest('shape,indices,axis', [
# Valid parameters
((3,), [0], 0),
((3,), [1], 0),
((2, 3), [0], 0),
((2, 3), [0], 1),
((2, 3), [0], -1),
((2, 3), [1], 0),
((2, 3), [0, -1], 0),
((2, 3), [1, 0], 0),
((2, 3), [1, 2], 1),
((2, 3), [2, 1], 1),
((2, 3), [[0], [1]], 0),
# Take from a duplicate index
((3, 2), [1, 1], 0),
# Invalid: Axis out of bounds
((2, 3), [0], 2),
((2, 3), [0], -3),
])
@chainer.testing.parameterize_pytest('is_module', [True, False])
@chainer.testing.parameterize_pytest(
'indices_type', ['list', 'numpy', 'xp'])
# TODO(niboshi): indices_dtype is ignored if indices_type == 'list', which is
# wasteful.
@chainer.testing.parameterize_pytest(
'indices_dtype', chainerx.testing.integral_dtypes)
@chainer.testing.parameterize_pytest(
'mode', ['raise', 'wrap', 'clip'])
@chainer.testing.parameterize_pytest(
'a_dtype', chainerx.testing.all_dtypes)
class TestTake(op_utils.NumpyOpTest):
check_numpy_strides_compliance = False
forward_accept_errors = (chainerx.DimensionError, numpy.AxisError)
def setup(self):
if (self.mode == 'raise'
and numpy.dtype(self.indices_dtype).kind == 'u'
and (numpy.array(self.indices, 'int64') < 0).any()):
raise unittest.SkipTest(
                'Index underflow and out-of-bounds indices cannot be tested.')
if self.a_dtype == 'float16':
self.check_backward_options.update(
{'rtol': 1e-3, 'atol': 1e-3})
self.check_double_backward_options.update(
{'rtol': 1e-3, 'atol': 1e-3})
def generate_inputs(self):
a = numpy.random.uniform(-1, 1, self.shape).astype(self.a_dtype)
return a,
def forward_xp(self, inputs, xp):
indices = self.indices
axis = self.axis
indices_type = self.indices_type
a, = inputs
if (xp is chainerx and self.mode == 'raise'
and 'cuda' in xp.get_default_device().name):
            pytest.skip('CUDA is not supported with mode="raise"')
assert isinstance(indices, list)
if indices_type == 'list':
pass
elif indices_type == 'numpy':
indices = numpy.array(indices).astype(self.indices_dtype)
elif indices_type == 'xp':
indices = xp.array(indices).astype(self.indices_dtype)
else:
assert False, indices_type
if self.is_module:
b = xp.take(a, indices, axis, mode=self.mode)
else:
b = a.take(indices, axis, mode=self.mode)
return b,
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('shape,indices,axis', [
# Invalid: Index out of bounds
((2, 3), [2], 0),
((2, 3), [-3], 0),
])
def test_take_index_error(device, shape, indices, axis):
a = array_utils.create_dummy_ndarray(chainerx, shape, 'float32')
indices = numpy.array(indices).astype(numpy.int32)
error = IndexError
if device.backend.name == 'cuda':
error = chainerx.BackendError # Not supported in CUDA
with pytest.raises(error):
chainerx.take(a, indices, axis, mode='raise')
def _random_condition(shape, dtype, *, random_state=None):
if random_state is None:
random_state = numpy.random.RandomState()
neg_mask = random_state.randint(0, 2, size=shape).astype('bool')
cond = array_utils.uniform(shape, dtype, random_state=random_state)
# Replace zeros with nonzero, making the average number of zero elements
# in cond independent of the dtype.
cond[cond == 0] = 1
cond[neg_mask] = 0
return cond
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('shape,indices,axis', [
    # Valid indices; exercises take() with non-contiguous input arrays
((2, 3), [1, 1], 0),
((2, 3, 4), [0, 1, 1], 1),
])
def test_take_non_contiguous(device, shape, indices, axis):
a = numpy.random.uniform(-1, 1, shape).astype('float32')
indices = numpy.array(indices).astype(numpy.int32)
chx_a = chainerx.array(a).astype('float32')
a = numpy.transpose(a, axes=range(chx_a.ndim)[::-1])
chx_a = chainerx.transpose(chx_a, axes=range(chx_a.ndim)[::-1])
    assert not chx_a.is_contiguous
chx_indices = chainerx.array(indices).astype(numpy.int32)
chx_out = chainerx.take(chx_a, chx_indices, axis)
np_out = numpy.take(a, indices, axis)
numpy.testing.assert_array_equal(chx_out, np_out)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'cond_shape,in_shapes': [
# Same Shapes
((2, 3), ((2, 3), (2, 3))),
# Broadcast Shapes
((2, 3), ((1, 3), (1, 3))),
((2, 3), ((2, 1), (1, 3))),
((2, 3), ((2, 3), (1, 3))),
((4, 5), ((3, 4, 1), (1, 5))),
((1, 4, 5), ((3, 4, 1), (3, 1, 5))),
],
'cond_dtype': ['bool_'],
'in_dtypes,out_dtype': dtype_utils.result_dtypes_two_arrays,
})
# Dtype combinations
+ chainer.testing.product({
'cond_shape,in_shapes': [((2, 3), ((2, 3), (2, 3)))],
'cond_dtype': chainerx.testing.all_dtypes,
'in_dtypes,out_dtype': dtype_utils.result_dtypes_two_arrays,
})
))
class TestWhere(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
check_numpy_strides_compliance = False
dodge_nondifferentiable = True
input_lhs = 'random'
input_rhs = 'random'
def setup(self):
super().setup()
self.condition = _random_condition(self.cond_shape, self.cond_dtype)
def func(self, xp, x, y):
condition = xp.array(self.condition)
return xp.where(condition, x, y)
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(
chainerx.DimensionError, ValueError))
@pytest.mark.parametrize('cond_shape,x_shape,y_shape', [
((2, 3), (3, 4), (2, 3)),
((2, 3), (2, 3), (3, 4)),
((2, 3), (1, 3), (2, 4))
])
def test_where_invalid_shapes(xp, cond_shape, x_shape, y_shape):
x = array_utils.create_dummy_ndarray(xp, x_shape, 'float32')
y = array_utils.create_dummy_ndarray(xp, y_shape, 'float32')
c = array_utils.create_dummy_ndarray(xp, cond_shape, 'float32')
return xp.where(c, x, y)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'cond_shape,shape': math_utils.shapes_combination_inplace_binary,
'cond_dtype': ['bool_'],
'in_dtypes,scalar_type,out_dtype': (
dtype_utils.result_dtypes_array_scalar),
'is_scalar_rhs': [True, False],
})
# Dtype combinations
+ chainer.testing.product({
'cond_shape,shape': [((2, 3), (2, 3))],
'cond_dtype': chainerx.testing.all_dtypes,
'in_dtypes,scalar_type,out_dtype': (
dtype_utils.result_dtypes_array_scalar),
'is_scalar_rhs': [True, False],
})
))
class TestWhereScalar(math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
check_numpy_strides_compliance = False
input = 'random'
scalar_value = 3
def setup(self):
super().setup()
self.condition = _random_condition(self.cond_shape, self.cond_dtype)
def func_scalar(self, xp, a, scalar):
condition = xp.array(self.condition)
if self.is_scalar_rhs:
return xp.where(condition, a, scalar)
else:
return xp.where(condition, scalar, a)
_in_out_dtypes_where_scalar = [
((bool, bool), 'bool_'),
((bool, int), 'int32'),
((bool, float), 'float32'),
((int, bool), 'int32'),
((int, int), 'int32'),
((int, float), 'float32'),
((float, bool), 'float32'),
((float, int), 'float32'),
((float, float), 'float32'),
]
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize('cond_shape', [(2, 3)])
@pytest.mark.parametrize('cond_dtype', chainerx.testing.all_dtypes)
@pytest.mark.parametrize('in_types,out_dtype', _in_out_dtypes_where_scalar)
def test_where_scalar_scalar(xp, cond_shape, cond_dtype, in_types, out_dtype):
cond = _random_condition(
cond_shape, cond_dtype, random_state=numpy.random.RandomState(seed=0))
cond = xp.array(cond)
x_type, y_type = in_types
x = x_type(0)
y = y_type(2)
out = xp.where(cond, x, y)
return dtype_utils.cast_if_numpy_array(xp, out, out_dtype)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'dtype': chainerx.testing.all_dtypes,
'input': [
[],
[[]],
[0],
[1],
[2, 0, 5],
[4, 0, 0, 0],
[0, 0, 0, 4],
[0, 0, 0, 0],
[[4, 0, 0, 1], [0, 0, 4, 1]],
[[4, 4, 1, 1], [4, 1, 4, 1]],
[[0, 0, 0, 0], [0, 0, 0, 0]],
]
})
))
class TestNonzero(op_utils.NumpyOpTest):
check_numpy_strides_compliance = False
skip_backward_test = True
skip_double_backward_test = True
def generate_inputs(self):
x = numpy.asarray(self.input).astype(self.dtype)
return x,
def forward_xp(self, inputs, xp):
x, = inputs
return xp.nonzero(x)
|
wjzhang/pyOCD
|
refs/heads/master
|
pyOCD/gdbserver/signals.py
|
14
|
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
SIGINT = 2
SIGSEGV = 11
SIGILL = 4
SIGSTOP = 17
SIGTRAP = 5
SIGBUS = 10
|
akosyakov/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyCallingNonCallableInspection/tupleNonCallable.py
|
83
|
<warning descr="'(int, int)' object is not callable">(1,2)()</warning>
|
rgommers/statsmodels
|
refs/heads/master
|
statsmodels/nonparametric/smoothers_lowess.py
|
37
|
# -*- coding: utf-8 -*-
"""Lowess - wrapper for cythonized extension
Author : Chris Jordan-Squire
Author : Carl Vogel
Author : Josef Perktold
"""
import numpy as np
from ._smoothers_lowess import lowess as _lowess
def lowess(endog, exog, frac=2.0/3.0, it=3, delta=0.0, is_sorted=False,
missing='drop', return_sorted=True):
'''LOWESS (Locally Weighted Scatterplot Smoothing)
    A lowess function that returns smoothed estimates of endog
    at the given exog values, computed from the points (exog, endog)
Parameters
----------
endog: 1-D numpy array
The y-values of the observed points
exog: 1-D numpy array
The x-values of the observed points
frac: float
Between 0 and 1. The fraction of the data used
when estimating each y-value.
it: int
The number of residual-based reweightings
to perform.
delta: float
Distance within which to use linear-interpolation
instead of weighted regression.
is_sorted : bool
If False (default), then the data will be sorted by exog before
calculating lowess. If True, then it is assumed that the data is
already sorted by exog.
missing : str
Available options are 'none', 'drop', and 'raise'. If 'none', no nan
checking is done. If 'drop', any observations with nans are dropped.
If 'raise', an error is raised. Default is 'drop'.
return_sorted : bool
If True (default), then the returned array is sorted by exog and has
missing (nan or infinite) observations removed.
        If False, then the returned array has the same length and the same
        order of observations as the input array.
Returns
-------
out: ndarray, float
The returned array is two-dimensional if return_sorted is True, and
one dimensional if return_sorted is False.
If return_sorted is True, then a numpy array with two columns. The
first column contains the sorted x (exog) values and the second column
the associated estimated y (endog) values.
If return_sorted is False, then only the fitted values are returned,
and the observations will be in the same order as the input arrays.
Notes
-----
This lowess function implements the algorithm given in the
reference below using local linear estimates.
Suppose the input data has N points. The algorithm works by
estimating the `smooth` y_i by taking the frac*N closest points
to (x_i,y_i) based on their x values and estimating y_i
using a weighted linear regression. The weight for (x_j,y_j)
    is the tricube function applied to abs(x_i-x_j).
If it > 1, then further weighted local linear regressions
are performed, where the weights are the same as above
times the _lowess_bisquare function of the residuals. Each iteration
takes approximately the same amount of time as the original fit,
so these iterations are expensive. They are most useful when
the noise has extremely heavy tails, such as Cauchy noise.
    Noise with less heavy tails, such as t-distributions with df > 2,
    is less problematic. The weights downgrade the influence of
points with large residuals. In the extreme case, points whose
residuals are larger than 6 times the median absolute residual
are given weight 0.
`delta` can be used to save computations. For each `x_i`, regressions
are skipped for points closer than `delta`. The next regression is
fit for the farthest point within delta of `x_i` and all points in
between are estimated by linearly interpolating between the two
regression fits.
Judicious choice of delta can cut computation time considerably
for large data (N > 5000). A good choice is ``delta = 0.01 * range(exog)``.
Some experimentation is likely required to find a good
    choice of `frac` and `it` for a particular dataset.
References
----------
Cleveland, W.S. (1979) "Robust Locally Weighted Regression
and Smoothing Scatterplots". Journal of the American Statistical
Association 74 (368): 829-836.
Examples
--------
The below allows a comparison between how different the fits from
lowess for different values of frac can be.
>>> import numpy as np
>>> import statsmodels.api as sm
>>> lowess = sm.nonparametric.lowess
>>> x = np.random.uniform(low = -2*np.pi, high = 2*np.pi, size=500)
>>> y = np.sin(x) + np.random.normal(size=len(x))
>>> z = lowess(y, x)
>>> w = lowess(y, x, frac=1./3)
    This gives a similar comparison for when `it` is 0 versus when it is not.
>>> import numpy as np
>>> import scipy.stats as stats
>>> import statsmodels.api as sm
>>> lowess = sm.nonparametric.lowess
>>> x = np.random.uniform(low = -2*np.pi, high = 2*np.pi, size=500)
>>> y = np.sin(x) + stats.cauchy.rvs(size=len(x))
>>> z = lowess(y, x, frac= 1./3, it=0)
>>> w = lowess(y, x, frac=1./3)
'''
endog = np.asarray(endog, float)
exog = np.asarray(exog, float)
# Inputs should be vectors (1-D arrays) of the
# same length.
if exog.ndim != 1:
raise ValueError('exog must be a vector')
if endog.ndim != 1:
raise ValueError('endog must be a vector')
if endog.shape[0] != exog.shape[0] :
raise ValueError('exog and endog must have same length')
if missing in ['drop', 'raise']:
# Cut out missing values
mask_valid = (np.isfinite(exog) & np.isfinite(endog))
all_valid = np.all(mask_valid)
if all_valid:
y = endog
x = exog
else:
if missing == 'drop':
x = exog[mask_valid]
y = endog[mask_valid]
else:
raise ValueError('nan or inf found in data')
elif missing == 'none':
y = endog
x = exog
all_valid = True # we assume it's true if missing='none'
else:
raise ValueError("missing can only be 'none', 'drop' or 'raise'")
if not is_sorted:
# Sort both inputs according to the ascending order of x values
sort_index = np.argsort(x)
x = np.array(x[sort_index])
y = np.array(y[sort_index])
res = _lowess(y, x, frac=frac, it=it, delta=delta)
_, yfitted = res.T
if return_sorted:
return res
else:
# rebuild yfitted with original indices
# a bit messy: y might have been selected twice
if not is_sorted:
yfitted_ = np.empty_like(y)
yfitted_.fill(np.nan)
yfitted_[sort_index] = yfitted
yfitted = yfitted_
else:
yfitted = yfitted
if not all_valid:
yfitted_ = np.empty_like(endog)
yfitted_.fill(np.nan)
yfitted_[mask_valid] = yfitted
yfitted = yfitted_
# we don't need to return exog anymore
return yfitted
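# A minimal, hedged usage sketch illustrating the `delta` heuristic from the
# Notes above (roughly 1% of the range of exog); the variable names are
# illustrative only.
if __name__ == "__main__":
    x = np.random.uniform(low=-2 * np.pi, high=2 * np.pi, size=5000)
    y = np.sin(x) + np.random.normal(size=len(x))
    # Skip regressions for points closer than ~1% of the x-range and
    # interpolate between neighbouring fits instead; faster for large N.
    z = lowess(y, x, frac=1. / 3, delta=0.01 * (x.max() - x.min()))
    print(z[:5])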
|
jeasoft/odoo
|
refs/heads/marcos-8.0
|
comunity_modules/menu_collapsible/__init__.py
|
3
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ONESTEiN BV (<http://www.onestein.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
|
SKA-ScienceDataProcessor/integration-prototype
|
refs/heads/master
|
sip/science_pipeline_workflows/ical_dask/pipelines/imaging_processing.py
|
1
|
#!/usr/bin/python3 -u
# coding: utf-8
"""Pipeline processing using Dask (processing stage only).
This script demonstrates the continuum imaging and ICAL pipelines.
"""
import logging
import pprint
import pickle
import numpy
import sys
import json
from data_models.polarisation import PolarisationFrame
from data_models.data_model_helpers import import_blockvisibility_from_hdf5
from processing_components.calibration.calibration_control import \
create_calibration_controls
from processing_components.image.operations import export_image_to_fits, \
qa_image
from processing_components.imaging.base import create_image_from_visibility
from workflows.arlexecute.pipelines.pipeline_arlexecute import continuum_imaging_arlexecute, \
ical_arlexecute
from processing_components.imaging.base import advise_wide_field
from workflows.arlexecute.execution_support.arlexecute import arlexecute
from workflows.arlexecute.execution_support.dask_init import get_dask_Client
PP = pprint.PrettyPrinter()
RESULTS_DIR = 'results'
LOG = logging.getLogger('sip.ical.generate_data')
def init_logging():
"""Initialise Python logging."""
fmt = '%(asctime)s.%(msecs)03d | %(name)-60s | %(levelname)-7s ' \
'| %(message)s'
logging.basicConfig(format=fmt, datefmt='%H:%M:%S', level=logging.DEBUG)
def main():
"""Run the workflow."""
init_logging()
LOG.info("Starting imaging-pipeline")
# Read parameters
PARFILE = 'parameters.json'
if len(sys.argv) > 1:
PARFILE = sys.argv[1]
LOG.info("JSON parameter file = %s", PARFILE)
try:
with open(PARFILE, "r") as par_file:
jspar = json.load(par_file)
    except (OSError, ValueError) as error:
LOG.critical('ERROR %s', error)
return
# We will use dask
arlexecute.set_client(get_dask_Client())
arlexecute.run(init_logging)
# Import visibility list from HDF5 file
vis_list = import_blockvisibility_from_hdf5(
'%s/%s' % (RESULTS_DIR, jspar["files"]["vis_list"]))
# Now read the BlockVisibilities constructed using a model drawn from GLEAM
predicted_vislist = import_blockvisibility_from_hdf5(
'%s/%s' % (RESULTS_DIR, jspar["files"]["predicted_vis_list"]))
corrupted_vislist = import_blockvisibility_from_hdf5(
'%s/%s' % (RESULTS_DIR, jspar["files"]["corrupted_vis_list"]))
# Reproduce parameters from the visibility data
ntimes = vis_list[0].nvis
phasecentre = vis_list[0].phasecentre
print(phasecentre)
polframe = vis_list[0].polarisation_frame.type
LOG.info("Polarisation Frame of vis_list: %s", polframe)
wprojection_planes = jspar["advice"]["wprojection_planes"]
guard_band_image = jspar["advice"]["guard_band_image"]
delA = jspar["advice"]["delA"]
advice_low = advise_wide_field(vis_list[0], guard_band_image=guard_band_image,
delA=delA,
wprojection_planes=wprojection_planes)
advice_high = advise_wide_field(vis_list[-1], guard_band_image=guard_band_image,
delA=delA,
wprojection_planes=wprojection_planes)
vis_slices = advice_low['vis_slices']
npixel = advice_high['npixels2']
cellsize = min(advice_low['cellsize'], advice_high['cellsize'])
# Recovering frequencies
fstart = vis_list[0].frequency
fend = vis_list[-1].frequency
num_freq_win = len(vis_list)
frequency = numpy.linspace(fstart, fend, num_freq_win)
# Recovering bandwidths
channel_bandwidth = numpy.array(num_freq_win * [vis_list[1].frequency - vis_list[0].frequency])
# Get the LSM. This is currently blank.
model_list = [
arlexecute.execute(create_image_from_visibility)(
vis_list[f],
npixel=npixel,
frequency=[frequency[f]],
channel_bandwidth=[channel_bandwidth[f]],
cellsize=cellsize,
phasecentre=phasecentre,
polarisation_frame=PolarisationFrame(polframe))
for f, freq in enumerate(frequency)
]
# future_predicted_vislist = arlexecute.scatter(predicted_vislist)
# Create and execute graphs to make the dirty image and PSF
# LOG.info('About to run invert to get dirty image')
# dirty_list = invert_component(future_predicted_vislist, model_list,
# context='wstack',
# vis_slices=vis_slices, dopsf=False)
# dirty_list = arlexecute.compute(dirty_list, sync=True)
# LOG.info('About to run invert to get PSF')
# psf_list = invert_component(future_predicted_vislist, model_list,
# context='wstack',
# vis_slices=vis_slices, dopsf=True)
# psf_list = arlexecute.compute(psf_list, sync=True)
# Now deconvolve using msclean
# LOG.info('About to run deconvolve')
# deconvolve_list, _ = deconvolve_component(
# dirty_list, psf_list,
# model_imagelist=model_list,
# deconvolve_facets=8,
# deconvolve_overlap=16,
# deconvolve_taper='tukey',
# scales=[0, 3, 10],
# algorithm='msclean',
# niter=1000,
# fractional_threshold=0.1,
# threshold=0.1,
# gain=0.1,
# psf_support=64)
# deconvolved = arlexecute.compute(deconvolve_list, sync=True)
LOG.info('About to run continuum imaging')
continuum_imaging_list = continuum_imaging_arlexecute(
predicted_vislist,
model_imagelist = model_list,
context = jspar["processing"]["continuum_imaging"]["context"] , #'wstack',
vis_slices = vis_slices,
scales = jspar["processing"]["continuum_imaging"]["scales"], #[0, 3, 10],
algorithm = jspar["processing"]["continuum_imaging"]["algorithm"], #'mmclean',
nmoment = jspar["processing"]["continuum_imaging"]["nmoment"], #3,
niter = jspar["processing"]["continuum_imaging"]["niter"], #1000,
fractional_threshold = jspar["processing"]["continuum_imaging"]["fractional_threshold"], #0.1,
threshold = jspar["processing"]["continuum_imaging"]["threshold"], #0.1,
nmajor = jspar["processing"]["continuum_imaging"]["nmajor"], #5,
gain = jspar["processing"]["continuum_imaging"]["gain"], #0.25,
deconvolve_facets = jspar["processing"]["continuum_imaging"]["deconvolve_facets"], #8,
deconvolve_overlap = jspar["processing"]["continuum_imaging"]["deconvolve_overlap"], #16,
deconvolve_taper = jspar["processing"]["continuum_imaging"]["deconvolve_taper"], #'tukey',
psf_support = jspar["processing"]["continuum_imaging"]["psf_support"] ) #64)
result = arlexecute.compute(continuum_imaging_list, sync=True)
deconvolved = result[0][0]
residual = result[1][0]
restored = result[2][0]
print(qa_image(deconvolved, context='Clean image - no selfcal'))
print(qa_image(restored, context='Restored clean image - no selfcal'))
export_image_to_fits(restored,
'%s/%s' % (RESULTS_DIR, jspar["files"]["continuum_imaging_restored"]))
print(qa_image(residual[0], context='Residual clean image - no selfcal'))
export_image_to_fits(residual[0],
'%s/%s' % (RESULTS_DIR, jspar["files"]["continuum_imaging_residual"]))
controls = create_calibration_controls()
controls['T']['first_selfcal'] = jspar["processing"]["controls"]["T"]["first_selfcal"]
controls['G']['first_selfcal'] = jspar["processing"]["controls"]["G"]["first_selfcal"]
controls['B']['first_selfcal'] = jspar["processing"]["controls"]["B"]["first_selfcal"]
controls['T']['timescale'] = jspar["processing"]["controls"]["T"]["timescale"]
controls['G']['timescale'] = jspar["processing"]["controls"]["G"]["timescale"]
controls['B']['timescale'] = jspar["processing"]["controls"]["B"]["timescale"]
PP.pprint(controls)
future_corrupted_vislist = arlexecute.scatter(corrupted_vislist)
ical_list = ical_arlexecute(future_corrupted_vislist,
model_imagelist = model_list,
context = jspar["processing"]["ical"]["context"] , #'wstack',
calibration_context = jspar["processing"]["ical"]["calibration_context"] , #'TG',
controls = controls,
vis_slices = ntimes,
scales = jspar["processing"]["ical"]["scales"], #[0, 3, 10],
timeslice = jspar["processing"]["ical"]["timeslice"], #'auto',
algorithm = jspar["processing"]["ical"]["algorithm"], #'mmclean',
nmoment = jspar["processing"]["ical"]["nmoment"], #3,
niter = jspar["processing"]["ical"]["niter"], #1000,
fractional_threshold = jspar["processing"]["ical"]["fractional_threshold"], #0.1,
threshold = jspar["processing"]["ical"]["threshold"], #0.1,
nmajor = jspar["processing"]["ical"]["nmajor"], #5,
gain = jspar["processing"]["ical"]["gain"], #0.25,
deconvolve_facets = jspar["processing"]["ical"]["deconvolve_facets"], #8,
deconvolve_overlap = jspar["processing"]["ical"]["deconvolve_overlap"], #16,
deconvolve_taper = jspar["processing"]["ical"]["deconvolve_taper"], #'tukey',
global_solution = jspar["processing"]["ical"]["global_solution"], #False,
do_selfcal = jspar["processing"]["ical"]["do_selfcal"], #True,
psf_support = jspar["processing"]["ical"]["psf_support"]) #64
LOG.info('About to run ical')
result = arlexecute.compute(ical_list, sync=True)
deconvolved = result[0][0]
residual = result[1][0]
restored = result[2][0]
print(qa_image(deconvolved, context='Clean image'))
print(qa_image(restored, context='Restored clean image'))
export_image_to_fits(restored, '%s/%s' % (RESULTS_DIR, jspar["files"]["ical_restored"]))
print(qa_image(residual[0], context='Residual clean image'))
export_image_to_fits(residual[0], '%s/%s' % (RESULTS_DIR, jspar["files"]["ical_residual"]))
arlexecute.close()
if __name__ == '__main__':
main()
|
sephii/django
|
refs/heads/master
|
django/utils/autoreload.py
|
40
|
# Autoreloading launcher.
# Borrowed from Peter Hunt and the CherryPy project (http://www.cherrypy.org).
# Some taken from Ian Bicking's Paste (http://pythonpaste.org/).
#
# Portions copyright (c) 2004, CherryPy Team (team@cherrypy.org)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the CherryPy Team nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import # Avoid importing `importlib` from this package.
import os
import signal
import sys
import time
import traceback
from django.apps import apps
from django.conf import settings
from django.core.signals import request_finished
try:
from django.utils.six.moves import _thread as thread
except ImportError:
from django.utils.six.moves import _dummy_thread as thread
# This import does nothing, but it's necessary to avoid some race conditions
# in the threading module. See http://code.djangoproject.com/ticket/2330 .
try:
import threading # NOQA
except ImportError:
pass
try:
import termios
except ImportError:
termios = None
USE_INOTIFY = False
try:
# Test whether inotify is enabled and likely to work
import pyinotify
fd = pyinotify.INotifyWrapper.create().inotify_init()
if fd >= 0:
USE_INOTIFY = True
os.close(fd)
except ImportError:
pass
RUN_RELOADER = True
FILE_MODIFIED = 1
I18N_MODIFIED = 2
_mtimes = {}
_win = (sys.platform == "win32")
_error_files = []
_cached_modules = set()
_cached_filenames = []
def gen_filenames(only_new=False):
"""
Returns a list of filenames referenced in sys.modules and translation
files.
"""
# N.B. ``list(...)`` is needed, because this runs in parallel with
# application code which might be mutating ``sys.modules``, and this will
# fail with RuntimeError: cannot mutate dictionary while iterating
global _cached_modules, _cached_filenames
module_values = set(sys.modules.values())
_cached_filenames = clean_files(_cached_filenames)
if _cached_modules == module_values:
# No changes in module list, short-circuit the function
if only_new:
return []
else:
return _cached_filenames
new_modules = module_values - _cached_modules
new_filenames = clean_files(
[filename.__file__ for filename in new_modules
if hasattr(filename, '__file__')])
if not _cached_filenames and settings.USE_I18N:
# Add the names of the .mo files that can be generated
# by compilemessages management command to the list of files watched.
basedirs = [os.path.join(os.path.dirname(os.path.dirname(__file__)),
'conf', 'locale'),
'locale']
for app_config in reversed(list(apps.get_app_configs())):
basedirs.append(os.path.join(app_config.path, 'locale'))
basedirs.extend(settings.LOCALE_PATHS)
basedirs = [os.path.abspath(basedir) for basedir in basedirs
if os.path.isdir(basedir)]
for basedir in basedirs:
for dirpath, dirnames, locale_filenames in os.walk(basedir):
for filename in locale_filenames:
if filename.endswith('.mo'):
new_filenames.append(os.path.join(dirpath, filename))
_cached_modules = _cached_modules.union(new_modules)
_cached_filenames += new_filenames
if only_new:
return new_filenames
else:
return _cached_filenames + clean_files(_error_files)
def clean_files(filelist):
filenames = []
for filename in filelist:
if not filename:
continue
if filename.endswith(".pyc") or filename.endswith(".pyo"):
filename = filename[:-1]
if filename.endswith("$py.class"):
filename = filename[:-9] + ".py"
if os.path.exists(filename):
filenames.append(filename)
return filenames
def reset_translations():
import gettext
from django.utils.translation import trans_real
gettext._translations = {}
trans_real._translations = {}
trans_real._default = None
trans_real._active = threading.local()
def inotify_code_changed():
"""
Checks for changed code using inotify. After being called
it blocks until a change event has been fired.
"""
class EventHandler(pyinotify.ProcessEvent):
modified_code = None
def process_default(self, event):
if event.path.endswith('.mo'):
EventHandler.modified_code = I18N_MODIFIED
else:
EventHandler.modified_code = FILE_MODIFIED
wm = pyinotify.WatchManager()
notifier = pyinotify.Notifier(wm, EventHandler())
def update_watch(sender=None, **kwargs):
if sender and getattr(sender, 'handles_files', False):
# No need to update watches when request serves files.
# (sender is supposed to be a django.core.handlers.BaseHandler subclass)
return
mask = (
pyinotify.IN_MODIFY |
pyinotify.IN_DELETE |
pyinotify.IN_ATTRIB |
pyinotify.IN_MOVED_FROM |
pyinotify.IN_MOVED_TO |
pyinotify.IN_CREATE
)
for path in gen_filenames(only_new=True):
wm.add_watch(path, mask)
# New modules may get imported when a request is processed.
request_finished.connect(update_watch)
# Block until an event happens.
update_watch()
notifier.check_events(timeout=None)
notifier.read_events()
notifier.process_events()
notifier.stop()
# If we are here the code must have changed.
return EventHandler.modified_code
def code_changed():
global _mtimes, _win
for filename in gen_filenames():
stat = os.stat(filename)
mtime = stat.st_mtime
if _win:
mtime -= stat.st_ctime
if filename not in _mtimes:
_mtimes[filename] = mtime
continue
if mtime != _mtimes[filename]:
_mtimes = {}
try:
del _error_files[_error_files.index(filename)]
except ValueError:
pass
return I18N_MODIFIED if filename.endswith('.mo') else FILE_MODIFIED
return False
def check_errors(fn):
def wrapper(*args, **kwargs):
try:
fn(*args, **kwargs)
except (ImportError, IndentationError, NameError, SyntaxError,
TypeError, AttributeError):
et, ev, tb = sys.exc_info()
if getattr(ev, 'filename', None) is None:
# get the filename from the last item in the stack
filename = traceback.extract_tb(tb)[-1][0]
else:
filename = ev.filename
if filename not in _error_files:
_error_files.append(filename)
raise
return wrapper
def ensure_echo_on():
if termios:
fd = sys.stdin
if fd.isatty():
attr_list = termios.tcgetattr(fd)
if not attr_list[3] & termios.ECHO:
attr_list[3] |= termios.ECHO
if hasattr(signal, 'SIGTTOU'):
old_handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN)
else:
old_handler = None
termios.tcsetattr(fd, termios.TCSANOW, attr_list)
if old_handler is not None:
signal.signal(signal.SIGTTOU, old_handler)
def reloader_thread():
ensure_echo_on()
if USE_INOTIFY:
fn = inotify_code_changed
else:
fn = code_changed
while RUN_RELOADER:
change = fn()
if change == FILE_MODIFIED:
sys.exit(3) # force reload
elif change == I18N_MODIFIED:
reset_translations()
time.sleep(1)
def restart_with_reloader():
while True:
args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions] + sys.argv
if sys.platform == "win32":
args = ['"%s"' % arg for arg in args]
new_environ = os.environ.copy()
new_environ["RUN_MAIN"] = 'true'
exit_code = os.spawnve(os.P_WAIT, sys.executable, args, new_environ)
if exit_code != 3:
return exit_code
def python_reloader(main_func, args, kwargs):
if os.environ.get("RUN_MAIN") == "true":
thread.start_new_thread(main_func, args, kwargs)
try:
reloader_thread()
except KeyboardInterrupt:
pass
else:
try:
exit_code = restart_with_reloader()
if exit_code < 0:
os.kill(os.getpid(), -exit_code)
else:
sys.exit(exit_code)
except KeyboardInterrupt:
pass
def jython_reloader(main_func, args, kwargs):
from _systemrestart import SystemRestart
thread.start_new_thread(main_func, args)
while True:
if code_changed():
raise SystemRestart
time.sleep(1)
def main(main_func, args=None, kwargs=None):
if args is None:
args = ()
if kwargs is None:
kwargs = {}
if sys.platform.startswith('java'):
reloader = jython_reloader
else:
reloader = python_reloader
wrapped_main_func = check_errors(main_func)
reloader(wrapped_main_func, args, kwargs)
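# A rough usage sketch (how runserver-style commands typically drive this
# module): wrap the long-running entry point with main() so the process is
# restarted whenever a watched source file changes.
#
#     from django.utils import autoreload
#
#     def inner_run():
#         ...  # start the development server or another long-running loop
#
#     autoreload.main(inner_run)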
|
regras/btc_offline_pos
|
refs/heads/master
|
terminal_de_baixo_custo/python-cgi/bitcoin/ripemd.py
|
31
|
## ripemd.py - pure Python implementation of the RIPEMD-160 algorithm.
## Bjorn Edstrom <be@bjrn.se> 16 december 2007.
##
## Copyrights
## ==========
##
## This code is a derived from an implementation by Markus Friedl which is
## subject to the following license. This Python implementation is not
## subject to any other license.
##
##/*
## * Copyright (c) 2001 Markus Friedl. All rights reserved.
## *
## * Redistribution and use in source and binary forms, with or without
## * modification, are permitted provided that the following conditions
## * are met:
## * 1. Redistributions of source code must retain the above copyright
## * notice, this list of conditions and the following disclaimer.
## * 2. Redistributions in binary form must reproduce the above copyright
## * notice, this list of conditions and the following disclaimer in the
## * documentation and/or other materials provided with the distribution.
## *
## * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
## * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
## * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
## * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
## * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
## * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
## * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
## */
##/*
## * Preneel, Bosselaers, Dobbertin, "The Cryptographic Hash Function RIPEMD-160",
## * RSA Laboratories, CryptoBytes, Volume 3, Number 2, Autumn 1997,
## * ftp://ftp.rsasecurity.com/pub/cryptobytes/crypto3n2.pdf
## */
try:
import psyco
psyco.full()
except ImportError:
pass
import sys
is_python2 = sys.version_info.major == 2
#block_size = 1
digest_size = 20
digestsize = 20
try:
range = xrange
except NameError:
pass
class RIPEMD160:
"""Return a new RIPEMD160 object. An optional string argument
may be provided; if present, this string will be automatically
hashed."""
def __init__(self, arg=None):
self.ctx = RMDContext()
if arg:
self.update(arg)
self.dig = None
def update(self, arg):
"""update(arg)"""
RMD160Update(self.ctx, arg, len(arg))
self.dig = None
def digest(self):
"""digest()"""
if self.dig:
return self.dig
ctx = self.ctx.copy()
self.dig = RMD160Final(self.ctx)
self.ctx = ctx
return self.dig
def hexdigest(self):
"""hexdigest()"""
dig = self.digest()
hex_digest = ''
for d in dig:
if (is_python2):
hex_digest += '%02x' % ord(d)
else:
hex_digest += '%02x' % d
return hex_digest
def copy(self):
"""copy()"""
import copy
return copy.deepcopy(self)
def new(arg=None):
"""Return a new RIPEMD160 object. An optional string argument
may be provided; if present, this string will be automatically
hashed."""
return RIPEMD160(arg)
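# Minimal usage sketch; the expected argument type (bytes vs. str) depends on
# the Python version and is an assumption here, not a documented API:
#
#     h = new('abc' if is_python2 else b'abc')
#     h.hexdigest()   # -> '8eb208f7e05d987a9b044a8e98c6b087f15a0bfc'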
#
# Private.
#
class RMDContext:
def __init__(self):
self.state = [0x67452301, 0xEFCDAB89, 0x98BADCFE,
0x10325476, 0xC3D2E1F0] # uint32
self.count = 0 # uint64
self.buffer = [0]*64 # uchar
def copy(self):
ctx = RMDContext()
ctx.state = self.state[:]
ctx.count = self.count
ctx.buffer = self.buffer[:]
return ctx
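# Additive round constants: K0-K4 are used by the left line of the RIPEMD-160
# compression function, KK0-KK4 by the parallel right line.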
K0 = 0x00000000
K1 = 0x5A827999
K2 = 0x6ED9EBA1
K3 = 0x8F1BBCDC
K4 = 0xA953FD4E
KK0 = 0x50A28BE6
KK1 = 0x5C4DD124
KK2 = 0x6D703EF3
KK3 = 0x7A6D76E9
KK4 = 0x00000000
def ROL(n, x):
return ((x << n) & 0xffffffff) | (x >> (32 - n))
def F0(x, y, z):
return x ^ y ^ z
def F1(x, y, z):
return (x & y) | (((~x) % 0x100000000) & z)
def F2(x, y, z):
return (x | ((~y) % 0x100000000)) ^ z
def F3(x, y, z):
return (x & z) | (((~z) % 0x100000000) & y)
def F4(x, y, z):
return x ^ (y | ((~z) % 0x100000000))
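# One round step of the compression function: mix message word X[rj] into `a`,
# rotate left by sj, add e, then rotate c left by 10; returns the new (a, c).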
def R(a, b, c, d, e, Fj, Kj, sj, rj, X):
a = ROL(sj, (a + Fj(b, c, d) + X[rj] + Kj) % 0x100000000) + e
c = ROL(10, c)
return a % 0x100000000, c
PADDING = [0x80] + [0]*63
import sys
import struct
def RMD160Transform(state, block): #uint32 state[5], uchar block[64]
x = [0]*16
if sys.byteorder == 'little':
if is_python2:
x = struct.unpack('<16L', ''.join([chr(x) for x in block[0:64]]))
else:
x = struct.unpack('<16L', bytes(block[0:64]))
else:
raise "Error!!"
a = state[0]
b = state[1]
c = state[2]
d = state[3]
e = state[4]
#/* Round 1 */
a, c = R(a, b, c, d, e, F0, K0, 11, 0, x);
e, b = R(e, a, b, c, d, F0, K0, 14, 1, x);
d, a = R(d, e, a, b, c, F0, K0, 15, 2, x);
c, e = R(c, d, e, a, b, F0, K0, 12, 3, x);
b, d = R(b, c, d, e, a, F0, K0, 5, 4, x);
a, c = R(a, b, c, d, e, F0, K0, 8, 5, x);
e, b = R(e, a, b, c, d, F0, K0, 7, 6, x);
d, a = R(d, e, a, b, c, F0, K0, 9, 7, x);
c, e = R(c, d, e, a, b, F0, K0, 11, 8, x);
b, d = R(b, c, d, e, a, F0, K0, 13, 9, x);
a, c = R(a, b, c, d, e, F0, K0, 14, 10, x);
e, b = R(e, a, b, c, d, F0, K0, 15, 11, x);
d, a = R(d, e, a, b, c, F0, K0, 6, 12, x);
c, e = R(c, d, e, a, b, F0, K0, 7, 13, x);
b, d = R(b, c, d, e, a, F0, K0, 9, 14, x);
a, c = R(a, b, c, d, e, F0, K0, 8, 15, x); #/* #15 */
#/* Round 2 */
e, b = R(e, a, b, c, d, F1, K1, 7, 7, x);
d, a = R(d, e, a, b, c, F1, K1, 6, 4, x);
c, e = R(c, d, e, a, b, F1, K1, 8, 13, x);
b, d = R(b, c, d, e, a, F1, K1, 13, 1, x);
a, c = R(a, b, c, d, e, F1, K1, 11, 10, x);
e, b = R(e, a, b, c, d, F1, K1, 9, 6, x);
d, a = R(d, e, a, b, c, F1, K1, 7, 15, x);
c, e = R(c, d, e, a, b, F1, K1, 15, 3, x);
b, d = R(b, c, d, e, a, F1, K1, 7, 12, x);
a, c = R(a, b, c, d, e, F1, K1, 12, 0, x);
e, b = R(e, a, b, c, d, F1, K1, 15, 9, x);
d, a = R(d, e, a, b, c, F1, K1, 9, 5, x);
c, e = R(c, d, e, a, b, F1, K1, 11, 2, x);
b, d = R(b, c, d, e, a, F1, K1, 7, 14, x);
a, c = R(a, b, c, d, e, F1, K1, 13, 11, x);
e, b = R(e, a, b, c, d, F1, K1, 12, 8, x); #/* #31 */
#/* Round 3 */
d, a = R(d, e, a, b, c, F2, K2, 11, 3, x);
c, e = R(c, d, e, a, b, F2, K2, 13, 10, x);
b, d = R(b, c, d, e, a, F2, K2, 6, 14, x);
a, c = R(a, b, c, d, e, F2, K2, 7, 4, x);
e, b = R(e, a, b, c, d, F2, K2, 14, 9, x);
d, a = R(d, e, a, b, c, F2, K2, 9, 15, x);
c, e = R(c, d, e, a, b, F2, K2, 13, 8, x);
b, d = R(b, c, d, e, a, F2, K2, 15, 1, x);
a, c = R(a, b, c, d, e, F2, K2, 14, 2, x);
e, b = R(e, a, b, c, d, F2, K2, 8, 7, x);
d, a = R(d, e, a, b, c, F2, K2, 13, 0, x);
c, e = R(c, d, e, a, b, F2, K2, 6, 6, x);
b, d = R(b, c, d, e, a, F2, K2, 5, 13, x);
a, c = R(a, b, c, d, e, F2, K2, 12, 11, x);
e, b = R(e, a, b, c, d, F2, K2, 7, 5, x);
d, a = R(d, e, a, b, c, F2, K2, 5, 12, x); #/* #47 */
#/* Round 4 */
c, e = R(c, d, e, a, b, F3, K3, 11, 1, x);
b, d = R(b, c, d, e, a, F3, K3, 12, 9, x);
a, c = R(a, b, c, d, e, F3, K3, 14, 11, x);
e, b = R(e, a, b, c, d, F3, K3, 15, 10, x);
d, a = R(d, e, a, b, c, F3, K3, 14, 0, x);
c, e = R(c, d, e, a, b, F3, K3, 15, 8, x);
b, d = R(b, c, d, e, a, F3, K3, 9, 12, x);
a, c = R(a, b, c, d, e, F3, K3, 8, 4, x);
e, b = R(e, a, b, c, d, F3, K3, 9, 13, x);
d, a = R(d, e, a, b, c, F3, K3, 14, 3, x);
c, e = R(c, d, e, a, b, F3, K3, 5, 7, x);
b, d = R(b, c, d, e, a, F3, K3, 6, 15, x);
a, c = R(a, b, c, d, e, F3, K3, 8, 14, x);
e, b = R(e, a, b, c, d, F3, K3, 6, 5, x);
d, a = R(d, e, a, b, c, F3, K3, 5, 6, x);
c, e = R(c, d, e, a, b, F3, K3, 12, 2, x); #/* #63 */
#/* Round 5 */
b, d = R(b, c, d, e, a, F4, K4, 9, 4, x);
a, c = R(a, b, c, d, e, F4, K4, 15, 0, x);
e, b = R(e, a, b, c, d, F4, K4, 5, 5, x);
d, a = R(d, e, a, b, c, F4, K4, 11, 9, x);
c, e = R(c, d, e, a, b, F4, K4, 6, 7, x);
b, d = R(b, c, d, e, a, F4, K4, 8, 12, x);
a, c = R(a, b, c, d, e, F4, K4, 13, 2, x);
e, b = R(e, a, b, c, d, F4, K4, 12, 10, x);
d, a = R(d, e, a, b, c, F4, K4, 5, 14, x);
c, e = R(c, d, e, a, b, F4, K4, 12, 1, x);
b, d = R(b, c, d, e, a, F4, K4, 13, 3, x);
a, c = R(a, b, c, d, e, F4, K4, 14, 8, x);
e, b = R(e, a, b, c, d, F4, K4, 11, 11, x);
d, a = R(d, e, a, b, c, F4, K4, 8, 6, x);
c, e = R(c, d, e, a, b, F4, K4, 5, 15, x);
b, d = R(b, c, d, e, a, F4, K4, 6, 13, x); #/* #79 */
aa = a;
bb = b;
cc = c;
dd = d;
ee = e;
a = state[0]
b = state[1]
c = state[2]
d = state[3]
e = state[4]
#/* Parallel round 1 */
a, c = R(a, b, c, d, e, F4, KK0, 8, 5, x)
e, b = R(e, a, b, c, d, F4, KK0, 9, 14, x)
d, a = R(d, e, a, b, c, F4, KK0, 9, 7, x)
c, e = R(c, d, e, a, b, F4, KK0, 11, 0, x)
b, d = R(b, c, d, e, a, F4, KK0, 13, 9, x)
a, c = R(a, b, c, d, e, F4, KK0, 15, 2, x)
e, b = R(e, a, b, c, d, F4, KK0, 15, 11, x)
d, a = R(d, e, a, b, c, F4, KK0, 5, 4, x)
c, e = R(c, d, e, a, b, F4, KK0, 7, 13, x)
b, d = R(b, c, d, e, a, F4, KK0, 7, 6, x)
a, c = R(a, b, c, d, e, F4, KK0, 8, 15, x)
e, b = R(e, a, b, c, d, F4, KK0, 11, 8, x)
d, a = R(d, e, a, b, c, F4, KK0, 14, 1, x)
c, e = R(c, d, e, a, b, F4, KK0, 14, 10, x)
b, d = R(b, c, d, e, a, F4, KK0, 12, 3, x)
a, c = R(a, b, c, d, e, F4, KK0, 6, 12, x) #/* #15 */
#/* Parallel round 2 */
e, b = R(e, a, b, c, d, F3, KK1, 9, 6, x)
d, a = R(d, e, a, b, c, F3, KK1, 13, 11, x)
c, e = R(c, d, e, a, b, F3, KK1, 15, 3, x)
b, d = R(b, c, d, e, a, F3, KK1, 7, 7, x)
a, c = R(a, b, c, d, e, F3, KK1, 12, 0, x)
e, b = R(e, a, b, c, d, F3, KK1, 8, 13, x)
d, a = R(d, e, a, b, c, F3, KK1, 9, 5, x)
c, e = R(c, d, e, a, b, F3, KK1, 11, 10, x)
b, d = R(b, c, d, e, a, F3, KK1, 7, 14, x)
a, c = R(a, b, c, d, e, F3, KK1, 7, 15, x)
e, b = R(e, a, b, c, d, F3, KK1, 12, 8, x)
d, a = R(d, e, a, b, c, F3, KK1, 7, 12, x)
c, e = R(c, d, e, a, b, F3, KK1, 6, 4, x)
b, d = R(b, c, d, e, a, F3, KK1, 15, 9, x)
a, c = R(a, b, c, d, e, F3, KK1, 13, 1, x)
e, b = R(e, a, b, c, d, F3, KK1, 11, 2, x) #/* #31 */
#/* Parallel round 3 */
d, a = R(d, e, a, b, c, F2, KK2, 9, 15, x)
c, e = R(c, d, e, a, b, F2, KK2, 7, 5, x)
b, d = R(b, c, d, e, a, F2, KK2, 15, 1, x)
a, c = R(a, b, c, d, e, F2, KK2, 11, 3, x)
e, b = R(e, a, b, c, d, F2, KK2, 8, 7, x)
d, a = R(d, e, a, b, c, F2, KK2, 6, 14, x)
c, e = R(c, d, e, a, b, F2, KK2, 6, 6, x)
b, d = R(b, c, d, e, a, F2, KK2, 14, 9, x)
a, c = R(a, b, c, d, e, F2, KK2, 12, 11, x)
e, b = R(e, a, b, c, d, F2, KK2, 13, 8, x)
d, a = R(d, e, a, b, c, F2, KK2, 5, 12, x)
c, e = R(c, d, e, a, b, F2, KK2, 14, 2, x)
b, d = R(b, c, d, e, a, F2, KK2, 13, 10, x)
a, c = R(a, b, c, d, e, F2, KK2, 13, 0, x)
e, b = R(e, a, b, c, d, F2, KK2, 7, 4, x)
d, a = R(d, e, a, b, c, F2, KK2, 5, 13, x) #/* #47 */
#/* Parallel round 4 */
c, e = R(c, d, e, a, b, F1, KK3, 15, 8, x)
b, d = R(b, c, d, e, a, F1, KK3, 5, 6, x)
a, c = R(a, b, c, d, e, F1, KK3, 8, 4, x)
e, b = R(e, a, b, c, d, F1, KK3, 11, 1, x)
d, a = R(d, e, a, b, c, F1, KK3, 14, 3, x)
c, e = R(c, d, e, a, b, F1, KK3, 14, 11, x)
b, d = R(b, c, d, e, a, F1, KK3, 6, 15, x)
a, c = R(a, b, c, d, e, F1, KK3, 14, 0, x)
e, b = R(e, a, b, c, d, F1, KK3, 6, 5, x)
d, a = R(d, e, a, b, c, F1, KK3, 9, 12, x)
c, e = R(c, d, e, a, b, F1, KK3, 12, 2, x)
b, d = R(b, c, d, e, a, F1, KK3, 9, 13, x)
a, c = R(a, b, c, d, e, F1, KK3, 12, 9, x)
e, b = R(e, a, b, c, d, F1, KK3, 5, 7, x)
d, a = R(d, e, a, b, c, F1, KK3, 15, 10, x)
c, e = R(c, d, e, a, b, F1, KK3, 8, 14, x) #/* #63 */
#/* Parallel round 5 */
b, d = R(b, c, d, e, a, F0, KK4, 8, 12, x)
a, c = R(a, b, c, d, e, F0, KK4, 5, 15, x)
e, b = R(e, a, b, c, d, F0, KK4, 12, 10, x)
d, a = R(d, e, a, b, c, F0, KK4, 9, 4, x)
c, e = R(c, d, e, a, b, F0, KK4, 12, 1, x)
b, d = R(b, c, d, e, a, F0, KK4, 5, 5, x)
a, c = R(a, b, c, d, e, F0, KK4, 14, 8, x)
e, b = R(e, a, b, c, d, F0, KK4, 6, 7, x)
d, a = R(d, e, a, b, c, F0, KK4, 8, 6, x)
c, e = R(c, d, e, a, b, F0, KK4, 13, 2, x)
b, d = R(b, c, d, e, a, F0, KK4, 6, 13, x)
a, c = R(a, b, c, d, e, F0, KK4, 5, 14, x)
e, b = R(e, a, b, c, d, F0, KK4, 15, 0, x)
d, a = R(d, e, a, b, c, F0, KK4, 13, 3, x)
c, e = R(c, d, e, a, b, F0, KK4, 11, 9, x)
b, d = R(b, c, d, e, a, F0, KK4, 11, 11, x) #/* #79 */
t = (state[1] + cc + d) % 0x100000000;
state[1] = (state[2] + dd + e) % 0x100000000;
state[2] = (state[3] + ee + a) % 0x100000000;
state[3] = (state[4] + aa + b) % 0x100000000;
state[4] = (state[0] + bb + c) % 0x100000000;
state[0] = t % 0x100000000;
pass
def RMD160Update(ctx, inp, inplen):
if type(inp) == str:
inp = [ord(i)&0xff for i in inp]
have = int((ctx.count // 8) % 64)
inplen = int(inplen)
need = 64 - have
ctx.count += 8 * inplen
off = 0
if inplen >= need:
if have:
for i in range(need):
ctx.buffer[have+i] = inp[i]
RMD160Transform(ctx.state, ctx.buffer)
off = need
have = 0
        while off + 64 <= inplen:
            # the transform reads only the first 64 bytes of the slice it is given
            RMD160Transform(ctx.state, inp[off:])
            off += 64
if off < inplen:
# memcpy(ctx->buffer + have, input+off, len-off);
for i in range(inplen - off):
ctx.buffer[have+i] = inp[off+i]
def RMD160Final(ctx):
size = struct.pack("<Q", ctx.count)
padlen = 64 - ((ctx.count // 8) % 64)
if padlen < 1+8:
padlen += 64
RMD160Update(ctx, PADDING, padlen-8)
RMD160Update(ctx, size, 8)
return struct.pack("<5L", *ctx.state)
assert '37f332f68db77bd9d7edd4969571ad671cf9dd3b' == \
new('The quick brown fox jumps over the lazy dog').hexdigest()
assert '132072df690933835eb8b6ad0b77e7b6f14acad7' == \
new('The quick brown fox jumps over the lazy cog').hexdigest()
assert '9c1185a5c5e9fc54612808977ee8f548b2258d31' == \
new('').hexdigest()
|
hiaselhans/OpenGlider
|
refs/heads/develop
|
openglider/glider/parametric/export_ods.py
|
2
|
import copy
import math
import ezodf
import openglider.glider
import openglider.glider.parametric.glider
from openglider.glider.ballooning import BallooningBezierNeu
from openglider.glider.cell import DiagonalRib
from openglider.glider.parametric.arc import ArcCurve
from openglider.utils.table import Table
file_version = "V3"
def export_ods_2d(glider, filename):
doc = ezodf.newdoc(doctype="ods", filename=filename)
assert isinstance(glider, openglider.glider.parametric.glider.ParametricGlider)
doc.sheets.append(get_geom_sheet(glider))
cell_sheet = get_cell_sheet(glider)
cell_sheet.name = "Cell Elements"
rib_sheet = get_rib_sheet(glider)
rib_sheet.name = "Rib Elements"
attachment_points = glider.lineset.get_attachment_point_table()
rib_sheet.append_right(attachment_points[0])
cell_sheet.append_right(attachment_points[1])
doc.sheets.append(cell_sheet.get_ods_sheet())
doc.sheets.append(rib_sheet.get_ods_sheet())
doc.sheets.append(get_airfoil_sheet(glider))
doc.sheets.append(get_ballooning_sheet(glider))
doc.sheets.append(get_parametric_sheet(glider))
doc.sheets.append(get_lines_sheet(glider))
doc.sheets.append(get_data_sheet(glider))
# airfoil sheet
doc.saveas(filename)
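# Usage sketch (illustrative, not part of the original module): given a
# ParametricGlider instance `glider2d` obtained elsewhere, the whole project can
# be written out to an .ods file with
#     export_ods_2d(glider2d, "/tmp/glider_export.ods")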
def get_airfoil_sheet(glider_2d):
profiles = glider_2d.profiles
max_length = max(len(p) for p in profiles)
sheet = ezodf.Sheet(name="Airfoils", size=(max_length+1, len(profiles)*2))
for i, profile in enumerate(profiles):
sheet[0, 2*i].set_value(profile.name or "unnamed")
for j, p in enumerate(profile):
sheet[j+1, 2*i].set_value(p[0])
sheet[j+1, 2*i+1].set_value(p[1])
return sheet
def get_geom_sheet(glider_2d):
geom_page = ezodf.Sheet(name="geometry", size=(glider_2d.shape.half_cell_num + 2, 10))
# rib_nos
geom_page[0, 0].set_value("Ribs")
shape = glider_2d.shape.get_half_shape()
geom_page[0, 1].set_value("Chord")
for i, chord in enumerate(shape.chords):
geom_page[i+1, 1].set_value(chord)
geom_page[0, 2].set_value("Le x (m)")
geom_page[0, 3].set_value("Le y (m)")
for i, p in enumerate(shape.front):
geom_page[i+1, 2].set_value(p[0])
geom_page[i+1, 3].set_value(-p[1])
# set arc values
geom_page[0, 4].set_value("Arc")
last_angle = 0
cell_angles = glider_2d.arc.get_cell_angles(glider_2d.shape.rib_x_values)
if glider_2d.shape.has_center_cell:
cell_angles = cell_angles[1:]
for i, angle in enumerate(cell_angles + [cell_angles[-1]]):
this_angle = angle * 180/math.pi
geom_page[i+1, 4].set_value(this_angle-last_angle)
last_angle = this_angle
geom_page[0, 5].set_value("AOA")
geom_page[0, 6].set_value("Z-rotation")
geom_page[0, 7].set_value("Y-rotation")
geom_page[0, 8].set_value("profile-merge")
geom_page[0, 9].set_value("ballooning-merge")
aoa_int = glider_2d.aoa.interpolation(num=100)
profile_int = glider_2d.profile_merge_curve.interpolation(num=100)
ballooning_int = glider_2d.ballooning_merge_curve.interpolation(num=100)
for rib_no, x in enumerate(glider_2d.shape.rib_x_values):
geom_page[rib_no+1, 0].set_value(rib_no+1)
geom_page[rib_no+1, 5].set_value(aoa_int(x)*180/math.pi)
geom_page[rib_no+1, 6].set_value(0)
geom_page[rib_no+1, 7].set_value(0)
geom_page[rib_no+1, 8].set_value(profile_int(x))
geom_page[rib_no+1, 9].set_value(ballooning_int(x))
return geom_page
def get_cell_sheet(glider):
cell_num = glider.shape.half_cell_num
row_num = glider.shape.half_cell_num
table = Table()
table["A1"] = file_version
for i in range(1, row_num+1):
table[i, 0] = str(i)
elems = glider.elements
# cuts
cuts_table = Table()
cuts_per_cell = []
for cell_no in range(cell_num):
cuts_this = []
for cut in elems["cuts"]:
if cell_no in cut["cells"]:
cuts_this.append((cut["left"], cut["right"], cut["type"]))
cuts_this.sort(key=lambda x: sum(x[:2]))
cuts_per_cell.append(cuts_this)
    def find_next(cut, cell_no):
        # look in the given cell for an unused cut of the same type whose left mark
        # equals this cut's right mark; remove and return it, otherwise return None
        cuts_this = cuts_per_cell[cell_no]
        for new_cut in cuts_this:
            if cut[1] == new_cut[0] and new_cut[2] == cut[2]:
                cuts_this.remove(new_cut)
                return new_cut
        return None
def add_column(cell_no):
cuts_this = cuts_per_cell[cell_no]
if not cuts_this:
return False
cut = cuts_this[0]
column = Table()
column[0, 0] = cut[2]
column.insert_row(cut[:2], cell_no+1)
cuts_this.remove(cut)
for cell_no_temp in range(cell_no+1, cell_num):
cut_next = find_next(cut, cell_no_temp)
if not cut_next:
break
column.insert_row(cut_next[:2], cell_no_temp+1)
cut = cut_next
cuts_table.append_right(column)
return column
for cell_no in range(cell_num):
while add_column(cell_no):
pass
table.append_right(cuts_table)
# Diagonals
for diagonal in elems["diagonals"]:
diagonal_table = Table()
diagonal = copy.copy(diagonal)
diagonal_table[0, 0] = "QR"
cells = diagonal.pop("cells")
_diagonal = DiagonalRib(**diagonal)
for cell_no in cells:
# center_left, center_right, width_left, width_right, height_left, height_right
diagonal_table[cell_no+1, 0] = _diagonal.center_left
diagonal_table[cell_no+1, 1] = _diagonal.center_right
diagonal_table[cell_no+1, 2] = _diagonal.width_left
diagonal_table[cell_no+1, 3] = _diagonal.width_right
diagonal_table[cell_no+1, 4] = _diagonal.left_front[1]
diagonal_table[cell_no+1, 5] = _diagonal.right_front[1]
table.append_right(diagonal_table)
# Straps
for strap in elems["straps"]:
strap_table = Table()
strap_table[0, 0] = "STRAP"
for cell_no in strap["cells"]:
#
strap_table[cell_no+1, 0] = strap["left"]
strap_table[cell_no+1, 1] = strap["right"]
strap_table[cell_no+1, 2] = strap["width"]
table.append_right(strap_table)
# Material
material_table = Table()
for cell_no, cell in enumerate(elems["materials"]):
for part_no, part in enumerate(cell):
material_table[cell_no+1, part_no] = part
for part_no in range(material_table.num_columns):
material_table[0, part_no] = "MATERIAL"
table.append_right(material_table)
return table
def get_rib_sheet(glider_2d):
table = Table()
table[0, 0] = file_version
for i in range(1, glider_2d.shape.half_cell_num+1):
table[i, 0] = f"rib{i}"
# holes
    for hole in glider_2d.elements["holes"]:
        hole_table = Table()
        hole_table[0, 0] = "HOLE"
        for rib_no in hole["ribs"]:
            # fill the hole column block (the main table already carries the rib names)
            hole_table[rib_no+1, 0] = hole["pos"]
            hole_table[rib_no+1, 1] = hole["size"]
        table.append_right(hole_table)
# rigidfoils
rigidfoils = glider_2d.elements.get("rigidfoils", [])
rigidfoils.sort(key=lambda r: r["start"])
for rigidfoil in rigidfoils:
rigidfoil_table = Table()
rigidfoil_table[0, 0] = "RIGIDFOIL"
for rib_no in rigidfoil["ribs"]:
rigidfoil_table[rib_no+1, 0] = rigidfoil["start"]
rigidfoil_table[rib_no+1, 1] = rigidfoil["end"]
rigidfoil_table[rib_no+1, 2] = rigidfoil["distance"]
table.append_right(rigidfoil_table)
return table
def get_ballooning_sheet(glider_2d):
balloonings = glider_2d.balloonings
table = Table()
#row_num = max([len(b.upper_spline.controlpoints)+len(b.lower_spline.controlpoints) for b in balloonings])+1
#sheet = ezodf.Sheet(name="Balloonings", size=(row_num, 2*len(balloonings)))
for ballooning_no, ballooning in enumerate(balloonings):
#sheet.append_columns(2)
table[0, 2*ballooning_no] = "ballooning_{}".format(ballooning_no)
if type(ballooning) is BallooningBezierNeu:
table[0, 2*ballooning_no+1] = "V3"
pts = ballooning.controlpoints
else:
table[0, 2*ballooning_no+1] = "V2"
pts = list(ballooning.upper_spline.controlpoints) + list(ballooning.lower_spline.controlpoints)
for i, point in enumerate(pts):
table[i+1, 2*ballooning_no] = point[0]
table[i+1, 2*ballooning_no+1] = point[1]
ods_sheet = table.get_ods_sheet()
ods_sheet.name = "Balloonings"
return ods_sheet
def get_parametric_sheet(glider : "openglider.glider.parametric.glider.ParametricGlider"):
line_no = 1 + max([
glider.shape.front_curve.numpoints,
glider.shape.back_curve.numpoints,
glider.shape.rib_distribution.numpoints,
glider.arc.curve.numpoints,
glider.zrot.numpoints,
glider.aoa.numpoints,
glider.ballooning_merge_curve.numpoints,
glider.profile_merge_curve.numpoints
])
sheet = ezodf.Sheet(name="Parametric", size=(line_no, 16))
def add_curve(name, curve, column_no):
#sheet.append_columns(2)
sheet[0, column_no].set_value(name)
for i, p in enumerate(curve):
sheet[i+1, column_no].set_value(p[0])
sheet[i+1, column_no+1].set_value(p[1])
add_curve("front", glider.shape.front_curve.controlpoints, 0)
add_curve("back", glider.shape.back_curve.controlpoints, 2)
add_curve("rib_distribution", glider.shape.rib_distribution.controlpoints, 4)
add_curve("arc", glider.arc.curve.controlpoints, 6)
add_curve("aoa", glider.aoa.controlpoints, 8)
add_curve("zrot", glider.zrot.controlpoints, 10)
add_curve("ballooning_merge_curve", glider.ballooning_merge_curve.controlpoints, 12)
add_curve("profile_merge_curve", glider.profile_merge_curve.controlpoints, 14)
return sheet
def get_lines_sheet(glider, places=3):
table = glider.lineset.get_input_table()
ods_sheet = table.get_ods_sheet("Lines")
return ods_sheet
def get_data_sheet(glider):
ods_sheet = ezodf.Sheet(name="Data", size=(3, 10))
ods_sheet[0,0].set_value("Data")
current_row = 1
# lower attachment_points
for pt_no, att_pt in enumerate(glider.lineset.get_lower_attachment_points()):
ods_sheet.append_rows(3)
for i, axis in enumerate(['X', 'Y', 'Z']):
ods_sheet[current_row + i, 0].set_value("AHP{}{}".format(axis, att_pt.name))
ods_sheet[current_row + i, 1].set_value(att_pt.pos_3D[i])
current_row += 3
ods_sheet[current_row, 0].set_value("SPEED")
ods_sheet[current_row, 1].set_value(glider.speed)
ods_sheet[current_row+1, 0].set_value("GLIDE")
ods_sheet[current_row+1, 1].set_value(glider.glide)
return ods_sheet
# for i, value in enumerate(("Ribs", "Chord", "x: (m)", "y LE (m)", "kruemmung", "aoa", "Z-rotation",
# "Y-Rotation-Offset", "merge", "balooning")):
# geom_page.get_cell((0, i)).value = value
#
# ribs = glider.ribs()
# x = [rib[0][0] for rib in ribs]
# y = [rib[0][1] for rib in ribs]
# chord = [rib[0][1] - rib[1][1] for rib in ribs]
|
tph-thuering/vnetsource
|
refs/heads/master
|
ts_om/models.py
|
2
|
from StringIO import StringIO
import datetime
from xml.etree.ElementTree import ParseError
from django.core.exceptions import MultipleObjectsReturned
from django.utils import timezone
from django.db import models
from django.contrib.auth.models import User
from lxml import etree
from lxml.etree import XMLSyntaxError
import vecnet.openmalaria.scenario
from data_services.models import Simulation, SimulationGroup, DimUser
class ExperimentFile(models.Model):
name = models.CharField(max_length=200)
file = models.FileField(upload_to='ts_om/experiments/%Y/%m/%d')
user = models.ForeignKey(User)
test_sim_group = models.ForeignKey(SimulationGroup, null=True, related_name="test_submit_group")
sim_group = models.ForeignKey(SimulationGroup, null=True, related_name="submit_group")
def __unicode__(self):
return self.name
@property
def state(self):
pass
class BaselineScenario(models.Model):
name = models.CharField(max_length=200)
xml = models.TextField()
def __unicode__(self):
return self.name
class DemographicsSnippet(models.Model):
name = models.CharField(max_length=200)
maximum_age_yrs = models.CharField(max_length=200)
xml = models.TextField()
title = models.CharField(max_length=200)
url = models.CharField(max_length=200)
def __unicode__(self):
return self.title
class ModelSnippet(models.Model):
"""
These snippets provide parameters calibrated by model fitting as well as some
model options chosen to differentiate the model. Downloaded from
https://code.google.com/p/openmalaria/source/browse/v32/models+5-day/?repo=snippets
See the publication "Ensemble Modeling of the Likely Public Health Impact of a
Pre-Erythrocytic Malaria Vaccine", Smith et al, for a description of these
models:
http://www.plosmedicine.org/article/info%3Adoi%2F10.1371%2Fjournal.pmed.1001157
Summary of the description is available on
http://www.plosmedicine.org/article/fetchObject.action?uri=info:doi/10.1371/journal.pmed.1001157.t002&representation=PNG_L
"""
name = models.CharField(max_length=200)
xml = models.TextField()
def __unicode__(self):
return self.name
class Scenario(models.Model):
xml = models.TextField()
    # note: the default year is evaluated once at class-definition (import) time, not per save
    start_date = models.IntegerField(default=datetime.datetime.now().year)
user = models.ForeignKey(User)
simulation = models.ForeignKey(Simulation, null=True, blank=True)
last_modified = models.DateTimeField(auto_now=True)
deleted = models.BooleanField(default=False)
description = models.TextField(null=True, blank=True)
is_public = models.BooleanField(default=False)
# -------------------------------------------------
# name property setter and getter
@property
def name(self):
try:
tree = etree.parse(StringIO(str(self.xml)))
except XMLSyntaxError:
name = "Invalid xml document"
else:
try:
name = tree.getroot().xpath('@name')[0]
except IndexError:
name = "Unnamed scenario"
return name
@name.setter
def name(self, value):
tree = etree.parse(StringIO(str(self.xml)))
scenario = tree.getroot()
scenario.attrib['name'] = value
self.xml = etree.tostring(tree.getroot(), pretty_print=True)
#
# ----------------------------------------------------
@property
def status(self):
try:
status = self.simulation.status
except Exception:
status = None
return status
class AnophelesSnippet(models.Model):
# Vector description in /om:scenario/entomology/vector/anopheles section of xml
anopheles = models.TextField(null=False, blank=False)
# anophelesParams in /om:scenario/interventions/human/component/GVI section.
# Only required if GVI interventions are applied.
gvi_anophelesParams = models.TextField(null=True, blank=True)
# anophelesParams in /om:scenario/interventions/human/component/ITN section.
# Only required if ITN interventions are applied.
itn_anophelesParams = models.TextField(null=True, blank=True)
# anophelesParams in /om:scenario/interventions/human/component/IRS section.
# Only required if IRS interventions are applied.
irs_anophelesParams = models.TextField(null=True, blank=True)
@property
def name(self):
try:
tree = etree.parse(StringIO(str(self.anopheles)))
except XMLSyntaxError:
name = "Invalid xml snippet"
else:
try:
name = tree.getroot().xpath('@mosquito')[0]
except IndexError:
name = "Unnamed anopheles snippet"
return name
def __unicode__(self):
return self.name
class InterventionComponent(models.Model):
name = models.CharField(max_length=200)
tag = models.CharField(max_length=200)
def __unicode__(self):
return self.name
class InterventionSnippet(models.Model):
name = models.CharField(max_length=200)
component = models.ForeignKey(InterventionComponent, null=False)
xml = models.TextField(null=False, blank=False)
def __unicode__(self):
return self.name
class Experiment(models.Model):
"""
OpenMalaria Experiment is just a collection of Scenarios.
We also need SimulationGroup - to track status of this Experiment,
and optional base - to show differences between individual scenarios and base scenario of this Experiment
"""
name = models.TextField()
description = models.TextField(blank=True, default="")
sim_group = models.ForeignKey(SimulationGroup, null=True, blank=True)
# base = models.TextField(null=True, blank=True)
#
# Experiment Specification is JSON document that includes experiment template, Sweep Memorandum
# and describes which combinations of sweeps and arms will be generated.
# More details: https://docs.google.com/document/d/1SSBqc-0fDhsGtMuBWsGfM2m2GxTAT8Io7oikoEF4sRQ/edit
#
# If Experiment was generated expernally, Experiment Specification may not be available
experiment_specification = models.TextField(null=True, blank=True)
def get_sweeps(self):
""" Return list of all sweeps in this experiment
This function implies that this is full factorial experiment
:return dictionary - sweep name:list of arm names for that sweep
"""
# Get list of sweep that belongs to this Experiment
sweeps_names = SweepArmMappingToScenario.objects.filter(experiment=self).distinct(["sweep_name"])
sweeps = dict()
for sweep_name in sweeps_names:
# Create list of arm names
arms = [arm.name for arm in
SweepArmMappingToScenario.objects.filter(experiment=self, sweep_name=sweep_name)]
sweeps[sweep_name] = arms
return sweeps
def get_scenario(self, keys):
sweep = SweepArmMappingToScenario.objects.filter(experiment=self, **keys)
if sweep.count() == 1:
return sweep[0].scenario
if sweep.count() == 0:
return None
raise MultipleObjectsReturned("Too many scenarios found, key list maybe incomplete")
class SweepArmMappingToScenario(models.Model):
"""
Each Scenario in Experiment will be associated with a list values for each sweep (one value for one parameter)
"""
experiment = models.ForeignKey(Experiment)
scenario = models.ForeignKey(Scenario)
sweep_name = models.CharField(max_length=127)
arm_name = models.CharField(max_length=127)
# class ScenarioTemplate(models.Model):
# pass
class RunMetaData():
# results_base_url = 'https://ci.vecnet.org/ts_emod/output/results_viewer/'
def __init__(self, scenario):
self.scenario = scenario
self.getMetaData()
def getMetaData(self):
# Stuff retrieved from derived
scenario = self.scenario
simulation = scenario.simulation
django_user = scenario.user
        # get_or_create returns an (object, created) tuple; keep only the user object
        user, _created = DimUser.objects.get_or_create(username=django_user.username)
try:
scenario_wrapper = vecnet.openmalaria.scenario.Scenario(scenario.xml)
except ParseError:
scenario_wrapper = None
try:
duration_in_years = scenario_wrapper.monitoring.surveys[-1] * 5 / 365
except AttributeError:
duration_in_years = 0
meta_data = scenario.metadata
self.coordinates = []
self.species = []
self.creator = user.username # user.first_name + user.last_name
self.results_url = ""
# self.results_url = "https://ci-qa.vecnet.org" + reverse("ts_emod_run_details", args=[dimRun.id, experiment.id])
# self.coordinates = self.getCoordinates()
self.coordinates = ["", ""]
self.time_period = [scenario.start_date, scenario.start_date + duration_in_years]
# for specie in configJson['parameters']['Vector_Species_Params']:
# self.species.append("An. " + specie)
self.species.append("")
# for intervention in campaignJson['Events']:
# interventions.append(self.Intervention(intervention))
# self.interventions = list(set([intervention.name for intervention in interventions]))
self.interventions = [""]
self.interventions_set = list(set(self.interventions))
self.model_version = simulation.version
self.simulation_type = "" # configJson['parameters']['Simulation_Type']
self.run_date = scenario.time_launched
# Stuff retrieved from database
if meta_data is None:
meta_data = {}
self.is_public = meta_data.get('isPublic', 'True')
self.title = self.scenario.name # metaData.get('title', '')
self.citation = meta_data.get('citation', '')
self.location = meta_data.get('location', '')
self.tags = meta_data.get('tags', '')
self.description = meta_data.get('description', '')
self.parameters_of_interest = meta_data.get('parametersOfInterest', '')
self.metadata_last_update_date = meta_data.get('metaDataLastUpdateDate', self.run_date)
# Generate autoname/autoid
# interventions_label = ""
# unique_labels = set()
# for intervention in interventions:
# unique_labels.add(intervention.type)
# for label in unique_labels:
# interventions_label += label
#
# days = (scenario.end_date_key.timestamp - scenario.start_date_key.timestamp).days
# if days < 365:
# duration = "%sm" % (days / 30)
# else:
# duration = "%sy" % (days / 365)
# self.autoname = str(self.creator) + str(interventions_label) + \
# str(self.location.replace(" ", "").replace(",", "")) + str(duration) + str(self.scenario.id)
self.autoname = "AUTO"
def getCoordinates(self):
# location = GisBaseTable.objects.all().filter(id=self.scenario.location_key.geom_key)
# Ignore PyCharm warning below
# GisBaseTable.objects is models.GeoManager() and it does produce GeoQuerySet which has centroid function
# Somehow PyCharm thinks it is normal QuerySet
# Please refer to https://docs.djangoproject.com/en/dev/ref/contrib/gis/geoquerysets/#centroid for more details
# centroid = location.centroid(model_att='centroid')[0].centroid
# return {'latitude': centroid.y, 'longitude': centroid.x}
pass
def saveMetaData(self):
if self.scenario.metadata is None:
self.scenario.metadata = {}
self.scenario.metadata['isPublic'] = self.is_public
# self.dimRun.metadata['title'] = self.title
self.scenario.name = self.title
self.scenario.metadata['citation'] = self.citation
self.scenario.metadata['location'] = self.location
self.scenario.metadata['tags'] = self.tags
self.scenario.metadata['description'] = self.description
self.scenario.metadata['parametersOfInterest'] = self.parameters_of_interest
self.scenario.metadata['metaDataLastUpdateDate'] = timezone.now()
self.scenario.save()
|
heukelum/linux
|
refs/heads/devel
|
tools/perf/scripts/python/sctop.py
|
1996
|
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
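# Example invocation (illustrative): "perf script -s sctop.py firefox 5" shows
# only syscalls made by "firefox" and refreshes the totals every 5 seconds.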
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
dnozay/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.3/django/contrib/gis/geometry/backend/__init__.py
|
388
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
geom_backend = getattr(settings, 'GEOMETRY_BACKEND', 'geos')
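# For example (illustrative): setting GEOMETRY_BACKEND = 'geos' in settings.py selects
# django.contrib.gis.geometry.backend.geos; a dotted path to a custom module is also
# accepted, provided it exposes Geometry and GeometryException (see below).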
try:
module = import_module('.%s' % geom_backend, 'django.contrib.gis.geometry.backend')
except ImportError, e:
try:
module = import_module(geom_backend)
except ImportError, e_user:
raise ImproperlyConfigured('Could not import user-defined GEOMETRY_BACKEND '
'"%s".' % geom_backend)
try:
Geometry = module.Geometry
GeometryException = module.GeometryException
except AttributeError:
raise ImproperlyConfigured('Cannot import Geometry from the "%s" '
'geometry backend.' % geom_backend)
|
sparkslabs/kamaelia
|
refs/heads/master
|
Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/UI/OpenGL/Vector.py
|
12
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
===============
3D Vector class
===============
A class for 3 dimensional vectors providing several methods for common
vector operations.
"""
from math import *
# =====================
# Vector: used for handling 3D Vectors
# =====================
class Vector:
"""\
Vector([x][,y][,z]) -> A new Vector object.
Keyword arguments:
- x,y,z -- Initial values.
"""
def __init__(self, x=0.0, y=0.0, z=0.0):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
self.x = float(x)
self.y = float(y)
self.z = float(z)
def zero(self):
""" Set all values to zero. """
self.x = 0.0
self.y = 0.0
self.z = 0.0
return self
def invert(self):
""" Changes the sign of each vector component. """
self.x = -self.x
self.y = -self.y
self.z = -self.z
return self
def copy(self):
""" Returns a copy of the Vector object. """
return Vector(self.x,self.y,self.z)
def toTuple(self):
""" Returns a tuple (x,y,z). """
return (self.x,self.y,self.z)
def length(self):
""" Returns the length of the vector. """
return sqrt(self.x*self.x + self.y*self.y + self.z*self.z)
def dot(self, other):
""" Returns the dot product between self and other. """
return self.x*other.x + self.y*other.y + self.z*other.z
def cross(self, other):
""" Returns the cross product between self and other. """
return Vector(self.y*other.z - self.z*other.y, self.z*other.x - self.x*other.z, self.x*other.y - self.y*other.x)
def norm(self):
""" Returns a normalised version of the vector. """
l = sqrt(self.x*self.x + self.y*self.y + self.z*self.z)
return Vector(self.x / l, self.y / l, self.z / l)
def __str__(self):
return str([self.x,self.y,self.z])
    def __eq__(self, other):
        return self.x == other.x and self.y == other.y and self.z == other.z
    def __ne__(self, other):
        return not self.__eq__(other)
def __mul__(self, factor):
return Vector(self.x * factor, self.y * factor, self.z * factor)
def __div__(self, factor):
return Vector(self.x / factor, self.y / factor, self.z / factor)
def __mod__(self, factor):
return Vector(self.x % factor, self.y % factor, self.z % factor)
def __add__(self, other):
return Vector(self.x +other.x, self.y +other.y, self.z +other.z)
def __sub__(self, other):
return Vector(self.x -other.x, self.y -other.y, self.z-other.z)
def __imul__(self, factor):
return Vector(self.x * factor, self.y * factor, self.z * factor)
def __idiv__(self, factor):
return Vector(self.x / factor, self.y / factor, self.z / factor)
def __imod__(self, factor):
return Vector(self.x % factor, self.y % factor, self.z % factor)
def __iadd__(self, other):
return Vector(self.x +other.x, self.y +other.y, self.z +other.z)
def __isub__(self, other):
return Vector(self.x -other.x, self.y -other.y, self.z-other.z)
def __neg__(self):
return Vector(-self.x, -self.y, -self.z)
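# Usage sketch (illustrative, not part of the original module):
#     v = Vector(1.0, 0.0, 0.0)
#     w = Vector(0.0, 1.0, 0.0)
#     v.cross(w)          # -> [0.0, 0.0, 1.0]
#     (v + w).length()    # -> sqrt(2)
#     v.dot(w)            # -> 0.0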
# Licensed to the BBC under a Contributor Agreement: THF
|
Zyell/home-assistant
|
refs/heads/master
|
tests/components/binary_sensor/test_command_line.py
|
8
|
"""The tests for the Command line Binary sensor platform."""
import unittest
from homeassistant.const import (STATE_ON, STATE_OFF)
from homeassistant.components.binary_sensor import command_line
from tests.common import get_test_home_assistant
class TestCommandSensorBinarySensor(unittest.TestCase):
"""Test the Command line Binary sensor."""
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_setup(self):
"""Test sensor setup."""
config = {'name': 'Test',
'command': 'echo 1',
'payload_on': '1',
'payload_off': '0'}
devices = []
def add_dev_callback(devs):
"""Add callback to add devices."""
for dev in devs:
devices.append(dev)
command_line.setup_platform(
self.hass, config, add_dev_callback)
self.assertEqual(1, len(devices))
entity = devices[0]
self.assertEqual('Test', entity.name)
self.assertEqual(STATE_ON, entity.state)
def test_setup_bad_config(self):
"""Test the setup with a bad configuration."""
config = {}
devices = []
def add_dev_callback(devs):
"""Add callback to add devices."""
for dev in devs:
devices.append(dev)
self.assertFalse(command_line.setup_platform(
self.hass, config, add_dev_callback))
self.assertEqual(0, len(devices))
def test_template(self):
"""Test setting the state with a template."""
data = command_line.CommandSensorData('echo 10')
entity = command_line.CommandBinarySensor(
self.hass, data, 'test', None, '1.0', '0',
'{{ value | multiply(0.1) }}')
self.assertEqual(STATE_ON, entity.state)
def test_sensor_off(self):
"""Test setting the state with a template."""
data = command_line.CommandSensorData('echo 0')
entity = command_line.CommandBinarySensor(
self.hass, data, 'test', None, '1', '0', None)
self.assertEqual(STATE_OFF, entity.state)
|
leeseulstack/openstack
|
refs/heads/master
|
neutron/tests/unit/ml2/test_security_group.py
|
12
|
# Copyright (c) 2013 OpenStack Foundation
# Copyright 2013, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import math
import mock
from neutron.api.v2 import attributes
from neutron.common import constants as const
from neutron.extensions import securitygroup as ext_sg
from neutron import manager
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_extension_security_group as test_sg
from neutron.tests.unit import test_security_groups_rpc as test_sg_rpc
PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin'
NOTIFIER = 'neutron.plugins.ml2.rpc.AgentNotifierApi'
class Ml2SecurityGroupsTestCase(test_sg.SecurityGroupDBTestCase):
_plugin_name = PLUGIN_NAME
def setUp(self, plugin=None):
test_sg_rpc.set_firewall_driver(test_sg_rpc.FIREWALL_HYBRID_DRIVER)
notifier_p = mock.patch(NOTIFIER)
notifier_cls = notifier_p.start()
self.notifier = mock.Mock()
notifier_cls.return_value = self.notifier
self._attribute_map_bk_ = {}
for item in attributes.RESOURCE_ATTRIBUTE_MAP:
self._attribute_map_bk_[item] = (attributes.
RESOURCE_ATTRIBUTE_MAP[item].
copy())
super(Ml2SecurityGroupsTestCase, self).setUp(PLUGIN_NAME)
def tearDown(self):
super(Ml2SecurityGroupsTestCase, self).tearDown()
attributes.RESOURCE_ATTRIBUTE_MAP = self._attribute_map_bk_
class TestMl2SecurityGroups(Ml2SecurityGroupsTestCase,
test_sg.TestSecurityGroups,
test_sg_rpc.SGNotificationTestMixin):
def setUp(self):
super(TestMl2SecurityGroups, self).setUp()
plugin = manager.NeutronManager.get_plugin()
plugin.start_rpc_listeners()
def _make_port_with_new_sec_group(self, net_id):
sg = self._make_security_group(self.fmt, 'name', 'desc')
port = self._make_port(
self.fmt, net_id, security_groups=[sg['security_group']['id']])
return port['port']
def _make_port_without_sec_group(self, net_id):
port = self._make_port(
self.fmt, net_id, security_groups=[])
return port['port']
def test_security_group_get_ports_from_devices(self):
with self.network() as n:
with self.subnet(n):
orig_ports = [
self._make_port_with_new_sec_group(n['network']['id']),
self._make_port_with_new_sec_group(n['network']['id']),
self._make_port_without_sec_group(n['network']['id'])
]
plugin = manager.NeutronManager.get_plugin()
# should match full ID and starting chars
ports = plugin.get_ports_from_devices(
[orig_ports[0]['id'], orig_ports[1]['id'][0:8],
orig_ports[2]['id']])
self.assertEqual(len(orig_ports), len(ports))
for port_dict in ports:
p = next(p for p in orig_ports
if p['id'] == port_dict['id'])
self.assertEqual(p['id'], port_dict['id'])
self.assertEqual(p['security_groups'],
port_dict[ext_sg.SECURITYGROUPS])
self.assertEqual([], port_dict['security_group_rules'])
self.assertEqual([p['fixed_ips'][0]['ip_address']],
port_dict['fixed_ips'])
self._delete('ports', p['id'])
def test_security_group_get_ports_from_devices_with_bad_id(self):
plugin = manager.NeutronManager.get_plugin()
ports = plugin.get_ports_from_devices(['bad_device_id'])
self.assertFalse(ports)
def test_security_group_no_db_calls_with_no_ports(self):
plugin = manager.NeutronManager.get_plugin()
with mock.patch(
'neutron.plugins.ml2.db.get_sg_ids_grouped_by_port'
) as get_mock:
self.assertFalse(plugin.get_ports_from_devices([]))
self.assertFalse(get_mock.called)
def test_large_port_count_broken_into_parts(self):
plugin = manager.NeutronManager.get_plugin()
max_ports_per_query = 5
ports_to_query = 73
for max_ports_per_query in (1, 2, 5, 7, 9, 31):
with contextlib.nested(
mock.patch('neutron.plugins.ml2.db.MAX_PORTS_PER_QUERY',
new=max_ports_per_query),
mock.patch('neutron.plugins.ml2.db.get_sg_ids_grouped_by_port',
return_value={}),
) as (max_mock, get_mock):
plugin.get_ports_from_devices(
['%s%s' % (const.TAP_DEVICE_PREFIX, i)
for i in range(ports_to_query)])
all_call_args = map(lambda x: x[1][0], get_mock.mock_calls)
last_call_args = all_call_args.pop()
# all but last should be getting MAX_PORTS_PER_QUERY ports
self.assertTrue(
all(map(lambda x: len(x) == max_ports_per_query,
all_call_args))
)
remaining = ports_to_query % max_ports_per_query
if remaining:
self.assertEqual(remaining, len(last_call_args))
# should be broken into ceil(total/MAX_PORTS_PER_QUERY) calls
self.assertEqual(
math.ceil(ports_to_query / float(max_ports_per_query)),
get_mock.call_count
)
def test_full_uuids_skip_port_id_lookup(self):
plugin = manager.NeutronManager.get_plugin()
# when full UUIDs are provided, the _or statement should only
# have one matching 'IN' critiera for all of the IDs
with contextlib.nested(
mock.patch('neutron.plugins.ml2.db.or_'),
mock.patch('neutron.plugins.ml2.db.db_api.get_session')
) as (or_mock, sess_mock):
fmock = sess_mock.query.return_value.outerjoin.return_value.filter
# return no ports to exit the method early since we are mocking
# the query
fmock.return_value.all.return_value = []
plugin.get_ports_from_devices([test_api_v2._uuid(),
test_api_v2._uuid()])
# the or_ function should only have one argument
or_mock.assert_called_once_with(mock.ANY)
class TestMl2SGServerRpcCallBack(
Ml2SecurityGroupsTestCase,
test_sg_rpc.SGServerRpcCallBackTestCase):
pass
|
KitKatXperience/platform_external_chromium_org
|
refs/heads/kk
|
chrome/test/pyautolib/pyauto_errors.py
|
69
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""PyAuto Errors."""
class JavascriptRuntimeError(RuntimeError):
"""Represent an error raised by injected Javascript."""
pass
class JSONInterfaceError(RuntimeError):
"""Represent an error in the JSON IPC interface."""
pass
class AutomationCommandFail(JSONInterfaceError):
"""Represent an automation command failure.
These failures are passed back from the Chrome side of the IPC.
"""
pass
class AutomationCommandTimeout(JSONInterfaceError):
"""Represent an automation command failure due to timeout."""
pass
class NTPThumbnailNotShownError(RuntimeError):
"""Represent an error while attempting to manipulate a NTP thumbnail.
This is due to it not being visible to a real user.
"""
pass
|
xinchoubiology/cuda-convnet2
|
refs/heads/master
|
layer.py
|
162
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from math import exp
import sys
import ConfigParser as cfg
import os
import numpy as n
import numpy.random as nr
from math import ceil, floor
from collections import OrderedDict
from os import linesep as NL
from python_util.options import OptionsParser
import re
class LayerParsingError(Exception):
pass
# A neuron that doesn't take parameters
class NeuronParser:
def __init__(self, type, func_str, uses_acts=True, uses_inputs=True):
self.type = type
self.func_str = func_str
self.uses_acts = uses_acts
self.uses_inputs = uses_inputs
def parse(self, type):
if type == self.type:
return {'type': self.type,
'params': {},
'usesActs': self.uses_acts,
'usesInputs': self.uses_inputs}
return None
# A neuron that takes parameters
class ParamNeuronParser(NeuronParser):
neuron_regex = re.compile(r'^\s*(\w+)\s*\[\s*(\w+(\s*,\w+)*)\s*\]\s*$')
def __init__(self, type, func_str, uses_acts=True, uses_inputs=True):
NeuronParser.__init__(self, type, func_str, uses_acts, uses_inputs)
m = self.neuron_regex.match(type)
self.base_type = m.group(1)
self.param_names = m.group(2).split(',')
assert len(set(self.param_names)) == len(self.param_names)
def parse(self, type):
m = re.match(r'^%s\s*\[([\d,\.\s\-]*)\]\s*$' % self.base_type, type)
if m:
try:
param_vals = [float(v.strip()) for v in m.group(1).split(',')]
if len(param_vals) == len(self.param_names):
return {'type': self.base_type,
'params': dict(zip(self.param_names, param_vals)),
'usesActs': self.uses_acts,
'usesInputs': self.uses_inputs}
except TypeError:
pass
return None
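# For instance (illustrative), ParamNeuronParser('abstanh[a,b]', 'f(x) = a * |tanh(b * x)|')
# parses the layer-config string "abstanh[1,0.5]" into
# {'type': 'abstanh', 'params': {'a': 1.0, 'b': 0.5}, 'usesActs': True, 'usesInputs': True}.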
class AbsTanhNeuronParser(ParamNeuronParser):
def __init__(self):
ParamNeuronParser.__init__(self, 'abstanh[a,b]', 'f(x) = a * |tanh(b * x)|')
def parse(self, type):
dic = ParamNeuronParser.parse(self, type)
# Make b positive, since abs(tanh(bx)) = abs(tanh(-bx)) and the C++ code
# assumes b is positive.
if dic:
dic['params']['b'] = abs(dic['params']['b'])
return dic
class ParamParser:
lrs_regex = re.compile(r'^\s*(\w+)\s*(?:\[\s*(\w+(\s*;\w+)*)\s*\])?\s*$')
param_converters = {'i': int,
'f': float}
def __init__(self, type):
m = self.lrs_regex.match(type)
self.base_type = m.group(1)
param_names_with_type = m.group(2).split(';') if m.group(2) is not None else []
self.param_names = [p[1:] for p in param_names_with_type]
self.param_types = [self.param_converters[p[0]] for p in param_names_with_type]
self.param_regex_inner = ";".join([('\s*%s\s*=\s*[^;,\s=]+\s*' % p) for p in self.param_names])
self.regex_str = ('^%s\s*(?:\[(%s)\])?\s*$') % (self.base_type, self.param_regex_inner)
assert len(set(self.param_names)) == len(self.param_names)
def parse(self, type):
m = re.match(self.regex_str, type, flags=re.IGNORECASE)
if m:
try:
param_vals = [ptype(v.split('=')[1].strip()) for ptype,v in zip(self.param_types, m.group(1).split(';'))] if m.group(1) is not None else []
if len(param_vals) == len(self.param_names):
return {'type': self.base_type,
'params': dict(zip(self.param_names, param_vals))}
except TypeError:
pass
return None
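# For instance (illustrative, with a hypothetical declaration), ParamParser('const[fa]')
# parses the config string "const[a=0.5]" into {'type': 'const', 'params': {'a': 0.5}};
# the leading 'f' or 'i' of each declared parameter name selects float or int conversion.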
# Subclass that throws more convnet-specific exceptions than the default
class MyConfigParser(cfg.SafeConfigParser):
def safe_get(self, section, option, f=cfg.SafeConfigParser.get, typestr=None, default=None):
try:
return f(self, section, option)
except cfg.NoOptionError, e:
if default is not None:
return default
raise LayerParsingError("Layer '%s': required parameter '%s' missing" % (section, option))
except ValueError, e:
if typestr is None:
raise e
raise LayerParsingError("Layer '%s': parameter '%s' must be %s" % (section, option, typestr))
def safe_get_list(self, section, option, f=str, typestr='strings', default=None):
v = self.safe_get(section, option, default=default)
if type(v) == list:
return v
try:
return [f(x.strip()) for x in v.split(',')]
except:
raise LayerParsingError("Layer '%s': parameter '%s' must be ','-delimited list of %s" % (section, option, typestr))
def safe_get_int(self, section, option, default=None):
return self.safe_get(section, option, f=cfg.SafeConfigParser.getint, typestr='int', default=default)
def safe_get_float(self, section, option, default=None):
return self.safe_get(section, option, f=cfg.SafeConfigParser.getfloat, typestr='float', default=default)
def safe_get_bool(self, section, option, default=None):
return self.safe_get(section, option, f=cfg.SafeConfigParser.getboolean, typestr='bool', default=default)
def safe_get_float_list(self, section, option, default=None):
return self.safe_get_list(section, option, float, typestr='floats', default=default)
def safe_get_int_list(self, section, option, default=None):
return self.safe_get_list(section, option, int, typestr='ints', default=default)
def safe_get_bool_list(self, section, option, default=None):
return self.safe_get_list(section, option, lambda x: x.lower() in ('true', '1'), typestr='bools', default=default)
# A class that implements part of the interface of MyConfigParser
class FakeConfigParser(object):
def __init__(self, dic):
self.dic = dic
def safe_get(self, section, option, default=None):
if option in self.dic:
return self.dic[option]
return default
def safe_get_int(self, section, option, default=None):
return int(self.safe_get(section, option, default))
def safe_get_int_list(self, section, option, default=None):
return list(self.safe_get(section, option, default))
class LayerParser:
def __init__(self):
self.dic = {}
self.set_defaults()
# Post-processing step -- this is called after all layers have been initialized
def optimize(self, layers):
self.dic['actsTarget'] = -1
self.dic['actsGradTarget'] = -1
if len(set(len(l['gpu']) for l in layers.values() if 'inputs' in l and self.dic['name'] in l['inputs'])) > 1:
# print set(len(l['gpu']) for l in layers.values())
raise LayerParsingError("Layer '%s': all next layers must have equal number of replicas." % (self.dic['name']))
def parse_params(self, vals, parsers, param_name, human_name, num_params=1):
dic, name = self.dic, self.dic['name']
# print vals
if len(vals) != num_params and len(vals) != 1:
raise LayerParsingError("Layer '%s': expected list of length %d for %s but got list of length %d."% (name, num_params, param_name, len(vals)))
parsed = []
# print vals
for v in vals:
for p in parsers:
parsedv = p.parse(v)
if parsedv:
parsed += [parsedv]
break
if len(parsed) == 1 and num_params > 1:
parsed = parsed * num_params
if len(parsed) == num_params:
return parsed
# print parsed, vals
raise LayerParsingError("Layer '%s': unable to parse %s %s=%s." % (name, human_name, param_name, ",".join(vals)))
# Add parameters from layer parameter file
def add_params(self, mcp):
pass
# self.dic['conserveMem'] = mcp.convnet.op.get_value('conserve_mem') if mcp.convnet is not None else 0
def init(self, dic):
self.dic = dic
return self
def set_defaults(self):
self.dic['outputs'] = 0
self.dic['parser'] = self
self.dic['requiresParams'] = False
# Does this layer use its own activity matrix
# for some purpose other than computing its output?
# Usually, this will only be true for layers that require their
# own activity matrix for gradient computations. For example, layers
# with logistic units must compute the gradient y * (1 - y), where y is
# the activity matrix.
#
# Layers that do not not use their own activity matrix should advertise
# this, since this will enable memory-saving matrix re-use optimizations.
#
# The default value of this property is True, for safety purposes.
# If a layer advertises that it does not use its own activity matrix when
# in fact it does, bad things will happen.
self.dic['usesActs'] = True
# Does this layer use the activity matrices of its input layers
# for some purpose other than computing its output?
#
# Again true by default for safety
self.dic['usesInputs'] = True
# Force this layer to use its own activity gradient matrix,
# instead of borrowing one from one of its inputs.
#
# This should be true for layers where the mapping from output
# gradient to input gradient is non-elementwise.
self.dic['forceOwnActs'] = True
# Does this layer need the gradient at all?
# Should only be true for layers with parameters (weights).
self.dic['gradConsumer'] = False
# The gpu indices on which this layer runs
self.dic['gpu'] = [-1]
def parse(self, name, mcp, prev_layers, model=None):
self.prev_layers = prev_layers
self.dic['name'] = name
self.dic['type'] = mcp.safe_get(name, 'type')
self.dic['id'] = len(prev_layers)
return self.dic
def verify_float_range(self, v, param_name, _min, _max):
self.verify_num_range(v, param_name, _min, _max, strconv=lambda x: '%.3f' % x)
def verify_num_range(self, v, param_name, _min, _max, strconv=lambda x:'%d' % x):
if type(v) == list:
for i,vv in enumerate(v):
self._verify_num_range(vv, param_name, _min, _max, i, strconv=strconv)
else:
self._verify_num_range(v, param_name, _min, _max, strconv=strconv)
def _verify_num_range(self, v, param_name, _min, _max, input=-1, strconv=lambda x:'%d' % x):
layer_name = self.dic['name'] if input < 0 else '%s[%d]' % (self.dic['name'], input)
if _min is not None and _max is not None and (v < _min or v > _max):
raise LayerParsingError("Layer '%s': parameter '%s' must be in the range %s-%s" % (layer_name, param_name, strconv(_min), strconv(_max)))
elif _min is not None and v < _min:
raise LayerParsingError("Layer '%s': parameter '%s' must be greater than or equal to %s" % (layer_name, param_name, strconv(_min)))
elif _max is not None and v > _max:
raise LayerParsingError("Layer '%s': parameter '%s' must be smaller than or equal to %s" % (layer_name, param_name, strconv(_max)))
def verify_divisible(self, value, div, value_name, div_name=None, input_idx=0):
layer_name = self.dic['name'] if len(self.dic['inputs']) == 0 else '%s[%d]' % (self.dic['name'], input_idx)
if value % div != 0:
raise LayerParsingError("Layer '%s': parameter '%s' must be divisible by %s" % (layer_name, value_name, str(div) if div_name is None else "'%s'" % div_name))
def verify_str_in(self, value, param_name, lst, input_idx=-1):
lname = self.dic['name'] if input_idx == -1 else ('%s[%d]' % (self.dic['name'], input_idx))
if value not in lst:
raise LayerParsingError("Layer '%s': parameter '%s' must be one of %s" % (lname, param_name, ", ".join("'%s'" % s for s in lst)))
def verify_int_in(self, value, param_name, lst):
if value not in lst:
raise LayerParsingError("Layer '%s': parameter '%s' must be one of %s" % (self.dic['name'], param_name, ", ".join("'%d'" % s for s in lst)))
def verify_all_ints_in(self, values, param_name, lst):
if len([v for v in values if v not in lst]) > 0:
raise LayerParsingError("Layer '%s': all parameters to '%s' must be among %s" % (self.dic['name'], param_name, ", ".join("'%d'" % s for s in lst)))
def verify_input_dims(self, dims):
for i,d in enumerate(dims):
if d is not None and self.dic['numInputs'][i] != d: # first input must be labels
raise LayerParsingError("Layer '%s': dimensionality of input %d must be %d" % (self.dic['name'], i, d))
# This looks for neuron=x arguments in various layers, and creates
# separate layer definitions for them.
@staticmethod
def detach_neuron_layers(layers):
for name,l in layers.items():
if l['type'] != 'neuron' and 'neuron' in l and l['neuron']:
NeuronLayerParser().detach_neuron_layer(name, layers)
@staticmethod
def parse_layers(layer_cfg_path, param_cfg_path, model, layers={}):
try:
if not os.path.exists(layer_cfg_path):
raise LayerParsingError("Layer definition file '%s' does not exist" % layer_cfg_path)
if not os.path.exists(param_cfg_path):
raise LayerParsingError("Layer parameter file '%s' does not exist" % param_cfg_path)
if len(layers) == 0:
mcp = MyConfigParser(dict_type=OrderedDict)
mcp.readfp(open(layer_cfg_path))
for name in mcp.sections():
if not mcp.has_option(name, 'type'):
raise LayerParsingError("Layer '%s': no type given" % name)
ltype = mcp.safe_get(name, 'type')
if ltype not in layer_parsers:
raise LayerParsingError("Layer '%s': Unknown layer type: '%s'" % (name, ltype))
layers[name] = layer_parsers[ltype]().parse(name, mcp, layers, model)
LayerParser.detach_neuron_layers(layers)
for l in layers.values():
l['parser'].optimize(layers)
del l['parser']
for name,l in layers.items():
if not l['type'].startswith('cost.'):
found = max(name in l2['inputs'] for l2 in layers.values() if 'inputs' in l2)
if not found:
raise LayerParsingError("Layer '%s' of type '%s' is unused" % (name, l['type']))
mcp = MyConfigParser(dict_type=OrderedDict)
mcp.readfp(open(param_cfg_path))
# mcp.convnet = model
for name,l in layers.items():
if not mcp.has_section(name) and l['requiresParams']:
raise LayerParsingError("Layer '%s' of type '%s' requires extra parameters, but none given in file '%s'." % (name, l['type'], param_cfg_path))
lp = layer_parsers[l['type']]().init(l)
lp.add_params(mcp)
except LayerParsingError, e:
print e
sys.exit(1)
return layers
@staticmethod
def register_layer_parser(ltype, cls):
if ltype in layer_parsers:
raise LayerParsingError("Layer type '%s' already registered" % ltype)
layer_parsers[ltype] = cls
# Any layer that takes an input (i.e. non-data layer)
class LayerWithInputParser(LayerParser):
def __init__(self, num_inputs=-1):
LayerParser.__init__(self)
self.num_inputs = num_inputs
def verify_num_params(self, params, auto_expand=True):
for param in params:
if len(self.dic[param]) != len(self.dic['inputs']):
if auto_expand and len(self.dic[param]) == 1:
self.dic[param] *= len(self.dic['inputs'])
else:
raise LayerParsingError("Layer '%s': %s list length does not match number of inputs" % (self.dic['name'], param))
# layers: dictionary: name -> layer
def optimize(self, layers):
LayerParser.optimize(self, layers)
dic = self.dic
# Check if I have an input that no one else uses.
#print "Layer %s optimizing" % dic['name']
if not dic['forceOwnActs']:
for i, inp in enumerate(dic['inputLayers']):
if inp['outputs'] == dic['outputs'] and sum(('inputs' in ll) and (inp['name'] in ll['inputs']) for ll in layers.itervalues()) == 1:
# I can share my activity matrix with this layer
# if it does not use its activity matrix, and I
# do not need to remember my inputs.
# TODO: a dropout layer should always be able to overwrite
# its input. Make it so.
# print "Layer %s(uses inputs=%d), input %s(uses acts = %d)" % (dic['name'], dic['usesInputs'], inp['name'], inp['usesActs'])
if not inp['usesActs'] and not dic['usesInputs']:
dic['actsTarget'] = i
print "Layer %s using acts from layer %s" % (dic['name'], inp['name'])
# print "Layer '%s' sharing activity matrix with layer '%s'" % (dic['name'], l['name'])
# I can share my gradient matrix with this layer if we're on the same GPU.
# This is different from the logic for actsTarget because this guy doesn't
# have an actsGrad matrix on my GPU if our GPUs are different, so there's
# nothing to share.
if dic['gpu'] == inp['gpu']:
dic['actsGradTarget'] = i
# print "Layer '%s' sharing activity gradient matrix with layer '%s'" % (dic['name'], l['name'])
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerParser.parse(self, name, mcp, prev_layers, model)
dic['inputs'] = [inp.strip() for inp in mcp.safe_get(name, 'inputs').split(',')]
for inp in dic['inputs']:
if inp not in prev_layers:
raise LayerParsingError("Layer '%s': input layer '%s' not defined" % (name, inp))
dic['inputLayers'] = [prev_layers[inp] for inp in dic['inputs']]
dic['gpu'] = mcp.safe_get_int_list(name, 'gpu', default=dic['inputLayers'][0]['gpu'])
dic['gpus'] = ", ".join('%s' % d for d in dic['gpu'])
dic['numReplicas'] = len(dic['gpu'])
if len(set(dic['gpu'])) != len(dic['gpu']):
raise LayerParsingError("Layer '%s': all replicas must run on different GPUs." % (name))
for inp in dic['inputs']:
# Data layers do not explicitly define how many replicas they have.
# The number of replicas for a data layer is given by the number of replicas
# in the next layer(s). So we set that here.
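# Illustrative example (made-up settings): if this layer is defined with gpu=0,1 (two
# replicas) and reads directly from a data layer, that data layer's numReplicas is set
# to 2 here as well.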
inpl = prev_layers[inp]
if inpl['type'] == 'data':
inpl['numReplicas'] = dic['numReplicas']
if inpl['numReplicas'] % dic['numReplicas'] != 0:
raise LayerParsingError("Layer '%s': number of replicas (%d) must divide number of replicas in all input layers (input %s has %d replicas)." % (name, dic['numReplicas'], inpl['name'], inpl['numReplicas']))
if len(set(inp['numReplicas'] for inp in dic['inputLayers'])) != 1:
raise LayerParsingError("Layer '%s': all input layers must have equal numbers of replicas." % (name))
# Need to also assert that all *next* layers have equal number of replicas but this is hard so it's done in Layer.optimize
for inp in dic['inputLayers']:
if inp['outputs'] == 0:
raise LayerParsingError("Layer '%s': input layer '%s' does not produce any output" % (name, inp['name']))
dic['numInputs'] = [inp['outputs'] for inp in dic['inputLayers']]
# Layers can declare a neuron activation function to apply to their output, as a shortcut
# to avoid declaring a separate neuron layer above themselves.
dic['neuron'] = mcp.safe_get(name, 'neuron', default="")
if self.num_inputs > 0 and len(dic['numInputs']) != self.num_inputs:
raise LayerParsingError("Layer '%s': number of inputs must be %d" % (name, self.num_inputs))
if model:
self.verify_all_ints_in(dic['gpu'], 'gpu', range(len(model.op.get_value('gpu'))))
return dic
def verify_img_size(self):
dic = self.dic
if dic['numInputs'][0] % dic['imgPixels'] != 0 or dic['imgSize'] * dic['imgSize'] != dic['imgPixels']:
raise LayerParsingError("Layer '%s': has %-d dimensional input, not interpretable as %d-channel images" % (dic['name'], dic['numInputs'][0], dic['channels']))
@staticmethod
def grad_consumers_below(dic):
if dic['gradConsumer']:
return True
if 'inputLayers' in dic:
return any(LayerWithInputParser.grad_consumers_below(l) for l in dic['inputLayers'])
def verify_no_grads(self):
if LayerWithInputParser.grad_consumers_below(self.dic):
raise LayerParsingError("Layer '%s': layers of type '%s' cannot propagate gradient and must not be placed over layers with parameters." % (self.dic['name'], self.dic['type']))
class NailbedLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['stride'] = mcp.safe_get_int(name, 'stride')
self.verify_num_range(dic['channels'], 'channels', 1, None)
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['outputsX'] = (dic['imgSize'] + dic['stride'] - 1) / dic['stride']
dic['start'] = (dic['imgSize'] - dic['stride'] * (dic['outputsX'] - 1)) / 2
dic['outputs'] = dic['channels'] * dic['outputsX']**2
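# Worked example (illustrative numbers): with imgSize=24 and stride=4, the integer
# arithmetic above gives outputsX = (24 + 4 - 1) / 4 = 6 and start = (24 - 4*(6-1)) / 2 = 2,
# i.e. a 6x6 grid of sampled pixels roughly centered in the image.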
self.verify_num_range(dic['outputsX'], 'outputsX', 0, None)
self.verify_img_size()
print "Initialized bed-of-nails layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (name, dic['gpus'], dic['outputsX'], dic['outputsX'], dic['channels'])
return dic
class GaussianBlurLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['outputs'] = dic['numInputs'][0]
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['filterSize'] = mcp.safe_get_int(name, 'filterSize')
dic['stdev'] = mcp.safe_get_float(name, 'stdev')
self.verify_num_range(dic['channels'], 'channels', 1, None)
self.verify_int_in(dic['filterSize'], 'filterSize', [3, 5, 7, 9])
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['filter'] = n.array([exp(-(dic['filterSize']/2 - i)**2 / float(2 * dic['stdev']**2))
for i in xrange(dic['filterSize'])], dtype=n.float32).reshape(1, dic['filterSize'])
dic['filter'] /= dic['filter'].sum()
self.verify_img_size()
if dic['filterSize'] > dic['imgSize']:
raise LayerParsingError("Later '%s': filter size (%d) must be smaller than image size (%d)." % (dic['name'], dic['filterSize'], dic['imgSize']))
print "Initialized Gaussian blur layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class HorizontalReflectionLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = dic['numInputs'][0]
dic['channels'] = mcp.safe_get_int(name, 'channels')
self.verify_num_range(dic['channels'], 'channels', 1, 3)
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
self.verify_img_size()
print "Initialized horizontal reflection layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class ResizeLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['scale'] = mcp.safe_get_float(name, 'scale')
dic['tgtSize'] = int(floor(dic['imgSize'] / dic['scale']))
dic['tgtPixels'] = dic['tgtSize']**2
self.verify_num_range(dic['channels'], 'channels', 1, None)
# Really not recommended to use this for such severe scalings
self.verify_float_range(dic['scale'], 'scale', 0.5, 2)
dic['outputs'] = dic['channels'] * dic['tgtPixels']
self.verify_img_size()
self.verify_no_grads()
print "Initialized resize layer '%s', producing %dx%d %d-channel output" % (name, dic['tgtSize'], dic['tgtSize'], dic['channels'])
return dic
class RandomScaleLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['channels'] = mcp.safe_get_int(name, 'channels')
self.verify_num_range(dic['channels'], 'channels', 1, None)
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['maxScale'] = mcp.safe_get_float(name, 'maxScale')
dic['tgtSize'] = mcp.safe_get_int(name, 'tgtSize')
min_size = int(floor(dic['imgSize'] / dic['maxScale']))
max_size = dic['imgSize'] #int(floor(dic['imgSize'] * dic['maxScale']))
if dic['tgtSize'] < min_size:
raise LayerParsingError("Layer '%s': target size must be at least the minimum image size after rescaling (%d)" % (name, min_size))
if dic['tgtSize'] > max_size:
raise LayerParsingError("Layer '%s': target size must not exceed the maximum image size after rescaling (%d)" % (name, max_size))
dic['tgtPixels'] = dic['tgtSize']**2
self.verify_float_range(dic['maxScale'], 'maxScale', 1, 2)
dic['outputs'] = dic['channels'] * dic['tgtPixels']
self.verify_img_size()
self.verify_no_grads()
print "Initialized random scale layer '%s', producing %dx%d %d-channel output" % (name, dic['tgtSize'], dic['tgtSize'], dic['channels'])
return dic
class CropLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
dic['channels'] = mcp.safe_get_int(name, 'channels')
self.verify_num_range(dic['channels'], 'channels', 1, None)
dic['startX'] = mcp.safe_get_int(name, 'startX')
dic['startY'] = mcp.safe_get_int(name, 'startY', default=dic['startX'])
dic['sizeX'] = mcp.safe_get_int(name, 'sizeX')
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['outputs'] = dic['channels'] * (dic['sizeX']**2)
self.verify_num_range(dic['startX'], 'startX', 0, dic['imgSize']-1)
self.verify_num_range(dic['sizeX'], 'sizeX', 1, dic['imgSize'])
self.verify_num_range(dic['startY'], 'startY', 0, dic['imgSize']-1)
self.verify_img_size()
self.verify_no_grads()
if dic['startX'] + dic['sizeX'] > dic['imgSize']:
raise LayerParsingError("Layer '%s': startX (%d) + sizeX (%d) > imgSize (%d)" % (name, dic['startX'], dic['sizeX'], dic['imgSize']))
print "Initialized cropping layer '%s', producing %dx%d %d-channel output" % (name, dic['sizeX'], dic['sizeX'], dic['channels'])
return dic
class ColorTransformLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['forceOwnActs'] = False
dic['usesActs'] = False
dic['usesInputs'] = False
# Computed values
dic['imgPixels'] = dic['numInputs'][0] / 3
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['channels'] = 3
dic['outputs'] = dic['numInputs'][0]
self.verify_img_size()
self.verify_no_grads()
return dic
class RGBToYUVLayerParser(ColorTransformLayerParser):
def __init__(self):
ColorTransformLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model=None):
dic = ColorTransformLayerParser.parse(self, name, mcp, prev_layers, model)
print "Initialized RGB --> YUV layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class RGBToLABLayerParser(ColorTransformLayerParser):
def __init__(self):
ColorTransformLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model=None):
dic = ColorTransformLayerParser.parse(self, name, mcp, prev_layers, model)
dic['center'] = mcp.safe_get_bool(name, 'center', default=False)
print "Initialized RGB --> LAB layer '%s', producing %dx%d %d-channel output" % (name, dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class NeuronLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
@staticmethod
def get_unused_layer_name(layers, wish):
if wish not in layers:
return wish
for i in xrange(1, 100):
name = '%s.%d' % (wish, i)
if name not in layers:
return name
raise LayerParsingError("This is insane.")
def parse_neuron(self, neuron_str):
for n in neuron_parsers:
p = n.parse(neuron_str)
if p: # Successfully parsed neuron, return it
self.dic['neuron'] = p
self.dic['usesActs'] = self.dic['neuron']['usesActs']
self.dic['usesInputs'] = self.dic['neuron']['usesInputs']
return
# Could not parse neuron
# Print available neuron types
colnames = ['Neuron type', 'Function']
m = max(len(colnames[0]), OptionsParser._longest_value(neuron_parsers, key=lambda x:x.type)) + 2
ntypes = [OptionsParser._bold(colnames[0].ljust(m))] + [n.type.ljust(m) for n in neuron_parsers]
fnames = [OptionsParser._bold(colnames[1])] + [n.func_str for n in neuron_parsers]
usage_lines = NL.join(ntype + fname for ntype,fname in zip(ntypes, fnames))
raise LayerParsingError("Layer '%s': unable to parse neuron type '%s'. Valid neuron types: %sWhere neurons have parameters, they must be floats." % (self.dic['name'], neuron_str, NL + usage_lines + NL))
def detach_neuron_layer(self, src_name, layers):
dic = self.dic
# self.set_defaults()
dic['name'] = NeuronLayerParser.get_unused_layer_name(layers, '%s_neuron' % src_name)
dic['type'] = 'neuron'
dic['inputs'] = src_name
dic['neuron'] = layers[src_name]['neuron']
dic['gpu'] = layers[src_name]['gpu']
# Yes it's not entirely correct to pass all of layers as prev_layers, but it's harmless
dic = self.parse(dic['name'], FakeConfigParser(dic), layers)
dic['src_layer'] = src_name
# Link upper layers to this new one
for l in layers.values():
if 'inputs' in l:
l['inputs'] = [inp if inp != src_name else dic['name'] for inp in l['inputs']]
l['inputLayers'] = [inp if inp['name'] != src_name else dic for inp in l['inputLayers']]
layers[dic['name']] = dic
def parse(self, name, mcp, prev_layers, model=None):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = dic['numInputs'][0]
self.parse_neuron(dic['neuron'])
dic['forceOwnActs'] = False
print "Initialized neuron layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class EltwiseSumLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self)
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['coeffs'] = mcp.safe_get_float_list(name, 'coeffs', default=[1.0] * len(dic['inputs']))
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
if len(set(dic['numInputs'])) != 1:
raise LayerParsingError("Layer '%s': all inputs must have the same dimensionality. Got dimensionalities: %s" % (name, ", ".join(str(s) for s in dic['numInputs'])))
dic['outputs'] = dic['numInputs'][0]
dic['usesInputs'] = False
dic['usesActs'] = False
dic['forceOwnActs'] = False
dic['requiresParams'] = True
print "Initialized elementwise sum layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class EltwiseMaxLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
if len(dic['inputs']) < 2:
raise LayerParsingError("Layer '%s': elementwise max layer must have at least 2 inputs, got %d." % (name, len(dic['inputs'])))
if len(set(dic['numInputs'])) != 1:
raise LayerParsingError("Layer '%s': all inputs must have the same dimensionality. Got dimensionalities: %s" % (name, ", ".join(str(s) for s in dic['numInputs'])))
dic['outputs'] = dic['numInputs'][0]
print "Initialized elementwise max layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class SumLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['stride'] = mcp.safe_get_int(name, 'stride', default=1)
self.verify_divisible(dic['numInputs'][0], dic['stride'], 'input dimensionality', 'stride')
dic['outputs'] = dic['numInputs'][0] / dic['stride']
print "Initialized sum layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class DropoutLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['enable'] = mcp.safe_get_bool(name, 'enable', default=True)
dic['keep'] = mcp.safe_get_float(name, 'keep', default=0.5)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
dic['usesInputs'] = False
dic['usesActs'] = False
dic['forceOwnActs'] = False
dic['outputs'] = dic['numInputs'][0]
print "Initialized %s layer '%s' on GPUs %s, producing %d outputs" % (dic['type'], name, dic['gpus'], dic['outputs'])
return dic
class Dropout2LayerParser(DropoutLayerParser):
def __init__(self):
DropoutLayerParser.__init__(self)
class WeightLayerParser(LayerWithInputParser):
LAYER_PAT = re.compile(r'^\s*([^\s\[]+)(?:\[(\d+)\])?\s*$') # matches things like layername[5], etc
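# e.g. (illustrative): "fc10[1]" matches as ("fc10", "1") and plain "fc10" as ("fc10", None);
# get_layer_name() below returns None for strings that do not match at all.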
def __init__(self, num_inputs=-1):
LayerWithInputParser.__init__(self, num_inputs=num_inputs)
@staticmethod
def get_layer_name(name_str):
m = WeightLayerParser.LAYER_PAT.match(name_str)
if not m:
return None
return m.group(1), m.group(2)
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['momW'] = mcp.safe_get_float_list(name, 'momW')
dic['momB'] = mcp.safe_get_float(name, 'momB')
dic['superEps'] = mcp.safe_get_float(name, 'superEps', default=0.0)
dic['superMom'] = mcp.safe_get_float(name, 'superMom', default=0.0)
dic['wc'] = mcp.safe_get_float_list(name, 'wc', default=[0.0] * len(dic['inputs']))
dic['wball'] = mcp.safe_get_float_list(name, 'wball', default=[0.0] * len(dic['inputs']))
self.verify_num_params(['momW', 'wc', 'wball'])
# dic['wballNormed'] = [wball * nweights for wball,nweights in zip(dic['wball'], dic['weightsPerFilter'])]
dic['wballNormed'] = dic['wball']
# Convert from old-style 0.001,0.02 hyperparam specification to new-stye
# const[base=0.001],const[base=0.02] and so forth
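# e.g. (illustrative): "0.001,0.02" becomes ["const[base=0.001]", "const[base=0.02]"];
# entries that are not bare numbers (i.e. already schedules) are passed through unchanged.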
def convert_scalars_to_schedules(scalars):
parts = scalars.split(',')
for i,p in enumerate(parts):
p = p.strip()
if re.match('(?:\d*\.)?\d+$', p):
parts[i] = 'const[base=%s]' % p
return parts
dic['epsW'] = self.parse_params(convert_scalars_to_schedules(mcp.safe_get(name, 'epsW')), lrs_parsers, 'epsW', 'learning rate schedule', num_params=len(dic['inputs']))
dic['epsB'] = self.parse_params(convert_scalars_to_schedules(mcp.safe_get(name, 'epsB')), lrs_parsers, 'epsB', 'learning rate schedule', num_params=1)[0]
dic['updatePeriod'] = mcp.safe_get_int(name, 'updatePeriod', default=0) # 0 means update as often as possible
# TODO: assert that updatePeriod is a multiple of active pass period, which is unknown here.
# the assert has to go in some post-processing step..
dic['gradConsumer'] = dic['epsB']['params']['base'] > 0 or any(w['params']['base'] > 0 for w in dic['epsW'])
@staticmethod
def unshare_weights(layer, layers, matrix_idx=None):
def unshare(layer, layers, indices):
for i in indices:
if layer['weightSourceLayers'][i] != '': # source layer names are strings; '' means not shared
src_name = layer['weightSourceLayers'][i]
src_matrix_idx = layer['weightSourceMatrixIndices'][i]
layer['weightSourceLayers'][i] = ""
layer['weightSourceMatrixIndices'][i] = -1
layer['weights'][i] = layer['weights'][i].copy()
layer['weightsInc'][i] = n.zeros_like(layer['weights'][i])
print "Unshared weight matrix %s[%d] from %s[%d]." % (layer['name'], i, src_name, src_matrix_idx)
else:
print "Weight matrix %s[%d] already unshared." % (layer['name'], i)
if 'weightSourceLayers' in layer:
unshare(layer, layers, range(len(layer['inputs'])) if matrix_idx is None else [matrix_idx])
# Load weight/biases initialization module
def call_init_func(self, param_name, shapes, input_idx=-1):
dic = self.dic
func_pat = re.compile('^([^\.]+)\.([^\(\)]+)\s*(?:\(([^,]+(?:,[^,]+)*)\))?$')
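# e.g. (illustrative; 'myinit' is a hypothetical user module): initWFunc=myinit.make_w(0.01,uniform)
# parses into module='myinit', func='make_w', params=['0.01', 'uniform']; the function is then
# imported and called with the layer name, the input index (for weights only), the expected
# shape, and params=... as a keyword argument.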
m = func_pat.match(dic[param_name])
if not m:
raise LayerParsingError("Layer '%s': '%s' parameter must have format 'moduleName.functionName(param1,param2,...)'; got: %s." % (dic['name'], param_name, dic['initWFunc']))
module, func = m.group(1), m.group(2)
params = m.group(3).split(',') if m.group(3) is not None else []
try:
mod = __import__(module)
return getattr(mod, func)(dic['name'], input_idx, shapes, params=params) if input_idx >= 0 else getattr(mod, func)(dic['name'], shapes, params=params)
except (ImportError, AttributeError, TypeError), e:
raise LayerParsingError("Layer '%s': %s." % (dic['name'], e))
def make_weights(self, initW, rows, cols, order='C'):
dic = self.dic
dic['weights'], dic['weightsInc'] = [], []
if dic['initWFunc']: # Initialize weights from user-supplied python function
# Initialization function is supplied in the format
# module.func
for i in xrange(len(dic['inputs'])):
dic['weights'] += [self.call_init_func('initWFunc', (rows[i], cols[i]), input_idx=i)]
if type(dic['weights'][i]) != n.ndarray:
raise LayerParsingError("Layer '%s[%d]': weight initialization function %s must return numpy.ndarray object. Got: %s." % (dic['name'], i, dic['initWFunc'], type(dic['weights'][i])))
if dic['weights'][i].dtype != n.float32:
raise LayerParsingError("Layer '%s[%d]': weight initialization function %s must weight matrices consisting of single-precision floats. Got: %s." % (dic['name'], i, dic['initWFunc'], dic['weights'][i].dtype))
if dic['weights'][i].shape != (rows[i], cols[i]):
raise LayerParsingError("Layer '%s[%d]': weight matrix returned by weight initialization function %s has wrong shape. Should be: %s; got: %s." % (dic['name'], i, dic['initWFunc'], (rows[i], cols[i]), dic['weights'][i].shape))
# Convert to desired order
dic['weights'][i] = n.require(dic['weights'][i], requirements=order)
dic['weightsInc'] += [n.zeros_like(dic['weights'][i])]
print "Layer '%s[%d]' initialized weight matrices from function %s" % (dic['name'], i, dic['initWFunc'])
else:
for i in xrange(len(dic['inputs'])):
if dic['weightSourceLayers'][i] != '': # Shared weight matrix
src_layer = self.prev_layers[dic['weightSourceLayers'][i]] if dic['weightSourceLayers'][i] != dic['name'] else dic
dic['weights'] += [src_layer['weights'][dic['weightSourceMatrixIndices'][i]]]
dic['weightsInc'] += [src_layer['weightsInc'][dic['weightSourceMatrixIndices'][i]]]
if dic['weights'][i].shape != (rows[i], cols[i]):
raise LayerParsingError("Layer '%s': weight sharing source matrix '%s' has shape %dx%d; should be %dx%d."
% (dic['name'], dic['weightSource'][i], dic['weights'][i].shape[0], dic['weights'][i].shape[1], rows[i], cols[i]))
print "Layer '%s' initialized weight matrix %d from %s" % (dic['name'], i, dic['weightSource'][i])
else:
dic['weights'] += [n.array(initW[i] * nr.randn(rows[i], cols[i]), dtype=n.single, order=order)]
dic['weightsInc'] += [n.zeros_like(dic['weights'][i])]
def make_biases(self, rows, cols, order='C'):
dic = self.dic
if dic['initBFunc']:
dic['biases'] = self.call_init_func('initBFunc', (rows, cols))
if type(dic['biases']) != n.ndarray:
raise LayerParsingError("Layer '%s': bias initialization function %s must return numpy.ndarray object. Got: %s." % (dic['name'], dic['initBFunc'], type(dic['biases'])))
if dic['biases'].dtype != n.float32:
raise LayerParsingError("Layer '%s': bias initialization function %s must return numpy.ndarray object consisting of single-precision floats. Got: %s." % (dic['name'], dic['initBFunc'], dic['biases'].dtype))
if dic['biases'].shape != (rows, cols):
raise LayerParsingError("Layer '%s': bias vector returned by bias initialization function %s has wrong shape. Should be: %s; got: %s." % (dic['name'], dic['initBFunc'], (rows, cols), dic['biases'].shape))
dic['biases'] = n.require(dic['biases'], requirements=order)
print "Layer '%s' initialized bias vector from function %s" % (dic['name'], dic['initBFunc'])
else:
dic['biases'] = dic['initB'] * n.ones((rows, cols), order=order, dtype=n.single)
dic['biasesInc'] = n.zeros_like(dic['biases'])
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
dic['gradConsumer'] = True
dic['usesActs'] = False
dic['initW'] = mcp.safe_get_float_list(name, 'initW', default=0.01)
dic['initB'] = mcp.safe_get_float(name, 'initB', default=0)
dic['initWFunc'] = mcp.safe_get(name, 'initWFunc', default="")
dic['initBFunc'] = mcp.safe_get(name, 'initBFunc', default="")
# Find shared weight matrices
dic['weightSource'] = mcp.safe_get_list(name, 'weightSource', default=[''] * len(dic['inputs']))
self.verify_num_params(['initW'])
self.verify_num_params(['weightSource'], auto_expand=False)
dic['weightSourceLayers'] = []
dic['weightSourceMatrixIndices'] = []
for i, src_name in enumerate(dic['weightSource']):
src_layer_matrix_idx = -1
src_layer_name = ''
if src_name != '':
src_layer_match = WeightLayerParser.get_layer_name(src_name)
if src_layer_match is None:
raise LayerParsingError("Layer '%s': unable to parse weight sharing source '%s'. Format is layer[idx] or just layer, in which case idx=0 is used." % (name, src_name))
src_layer_name = src_layer_match[0]
src_layer_matrix_idx = int(src_layer_match[1]) if src_layer_match[1] is not None else 0
if src_layer_name not in prev_layers and src_layer_name != name:
raise LayerParsingError("Layer '%s': weight sharing source layer '%s' does not exist." % (name, src_layer_name))
# src_layer_idx = prev_names.index(src_layer_name) if src_layer_name != name else len(prev_names)
src_layer = prev_layers[src_layer_name] if src_layer_name != name else dic
if src_layer['gpu'] != dic['gpu']:
raise LayerParsingError("Layer '%s': weight sharing source layer '%s' runs on GPUs %s, while '%s' runs on GPUs %s." % (name, src_layer_name, src_layer['gpu'], name, dic['gpu']))
if src_layer['type'] != dic['type']:
raise LayerParsingError("Layer '%s': weight sharing source layer '%s' is of type '%s'; should be '%s'." % (name, src_layer_name, src_layer['type'], dic['type']))
if src_layer_name != name and len(src_layer['weights']) <= src_layer_matrix_idx:
raise LayerParsingError("Layer '%s': weight sharing source layer '%s' has %d weight matrices, but '%s[%d]' requested." % (name, src_layer_name, len(src_layer['weights']), src_name, src_layer_matrix_idx))
if src_layer_name == name and src_layer_matrix_idx >= i:
raise LayerParsingError("Layer '%s': weight sharing source '%s[%d]' not defined yet." % (name, name, src_layer_matrix_idx))
dic['weightSourceLayers'] += [src_layer_name]
dic['weightSourceMatrixIndices'] += [src_layer_matrix_idx]
return dic
class FCLayerParser(WeightLayerParser):
def __init__(self):
WeightLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = WeightLayerParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = mcp.safe_get_int(name, 'outputs')
dic['weightsPerFilter'] = dic['numInputs']
self.verify_num_range(dic['outputs'], 'outputs', 1, None)
self.make_weights(dic['initW'], dic['numInputs'], [dic['outputs']] * len(dic['numInputs']), order='F')
self.make_biases(1, dic['outputs'], order='F')
print "Initialized fully-connected layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class SplitFCLayerParser(WeightLayerParser):
def __init__(self):
WeightLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = WeightLayerParser.parse(self, name, mcp, prev_layers, model)
dic['parts'] = mcp.safe_get_int(name, 'parts')
dic['outputs'] = mcp.safe_get_int(name, 'outputs') * dic['parts']
dic['weightsPerFilter'] = dic['numInputs']
self.verify_num_range(dic['parts'], 'parts', 1, None)
self.make_weights(dic['initW'], dic['numInputs'], [dic['outputs']/dic['parts']] * len(dic['numInputs']), order='F')
self.make_biases(1, dic['outputs'], order='F')
for i in xrange(len(dic['numInputs'])):
self.verify_divisible(dic['numInputs'][i], dic['parts'], 'numInputs', 'parts', input_idx=i)
print "Initialized split fully-connected layer '%s' on GPUs %s, producing %d outputs in %d parts" % (name, dic['gpus'], dic['outputs'], dic['parts'])
return dic
class LocalLayerParser(WeightLayerParser):
def __init__(self):
WeightLayerParser.__init__(self)
# Convert convolutional layer to unshared, locally-connected layer
@staticmethod
def conv_to_local(layers, lname):
layer = layers[lname]
if layer['type'] == 'conv':
layer['type'] = 'local'
for inp,inpname in enumerate(layer['inputs']):
src_layer_name = layer['weightSourceLayers'][inp]
if src_layer_name != '':
src_layer = layers[src_layer_name]
src_matrix_idx = layer['weightSourceMatrixIndices'][inp]
LocalLayerParser.conv_to_local(layers, src_layer_name)
for w in ('weights', 'weightsInc'):
layer[w][inp] = src_layer[w][src_matrix_idx]
else:
layer['weights'][inp] = n.require(n.reshape(n.tile(n.reshape(layer['weights'][inp], (1, n.prod(layer['weights'][inp].shape))), (layer['modules'], 1)),
(layer['modules'] * layer['filterChannels'][inp] * layer['filterPixels'][inp], layer['filters'])),
requirements='C')
layer['weightsInc'][inp] = n.zeros_like(layer['weights'][inp])
if layer['sharedBiases']:
layer['biases'] = n.require(n.repeat(layer['biases'], layer['modules'], axis=0), requirements='C')
layer['biasesInc'] = n.zeros_like(layer['biases'])
print "Converted layer '%s' from convolutional to unshared, locally-connected" % layer['name']
# Also call this function on any layers sharing my weights
for lother in layers.values():
if 'weightSourceLayers' in lother and lname in lother['weightSourceLayers']:
LocalLayerParser.conv_to_local(layers, lother['name'])
return layer
def parse(self, name, mcp, prev_layers, model):
dic = WeightLayerParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
dic['usesActs'] = False
# Supplied values
dic['channels'] = mcp.safe_get_int_list(name, 'channels')
dic['padding'] = mcp.safe_get_int_list(name, 'padding', default=[0]*len(dic['inputs']))
dic['stride'] = mcp.safe_get_int_list(name, 'stride', default=[1]*len(dic['inputs']))
dic['filterSize'] = mcp.safe_get_int_list(name, 'filterSize')
dic['filters'] = mcp.safe_get_int_list(name, 'filters')
dic['groups'] = mcp.safe_get_int_list(name, 'groups', default=[1]*len(dic['inputs']))
dic['initW'] = mcp.safe_get_float_list(name, 'initW')
dic['initCFunc'] = mcp.safe_get(name, 'initCFunc', default='')
dic['modulesX'] = mcp.safe_get_int(name, 'modulesX', default=0)
self.verify_num_params(['channels', 'padding', 'stride', 'filterSize', \
'filters', 'groups', 'initW'])
self.verify_num_range(dic['stride'], 'stride', 1, None)
self.verify_num_range(dic['filterSize'],'filterSize', 1, None)
self.verify_num_range(dic['padding'], 'padding', 0, None)
self.verify_num_range(dic['channels'], 'channels', 1, None)
self.verify_num_range(dic['groups'], 'groups', 1, None)
self.verify_num_range(dic['modulesX'], 'modulesX', 0, None)
for i in xrange(len(dic['filters'])):
self.verify_divisible(dic['filters'][i], 16, 'filters', input_idx=i)
# Computed values
dic['imgPixels'] = [numInputs/channels for numInputs,channels in zip(dic['numInputs'], dic['channels'])]
dic['imgSize'] = [int(n.sqrt(imgPixels)) for imgPixels in dic['imgPixels']]
self.verify_num_range(dic['imgSize'], 'imgSize', 1, None)
dic['filters'] = [filters*groups for filters,groups in zip(dic['filters'], dic['groups'])]
dic['filterPixels'] = [filterSize**2 for filterSize in dic['filterSize']]
if dic['modulesX'] <= 0:
dic['modulesX'] = [1 + int(ceil((2*padding + imgSize - filterSize) / float(stride))) for padding,imgSize,filterSize,stride in zip(dic['padding'], dic['imgSize'], dic['filterSize'], dic['stride'])]
else:
dic['modulesX'] = [dic['modulesX']] * len(dic['inputs'])
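# Worked example (illustrative numbers): imgSize=32, filterSize=5, padding=2, stride=1
# gives modulesX = 1 + ceil((2*2 + 32 - 5) / 1.0) = 32 (a "same"-sized output map);
# the same numbers with stride=2 give 1 + ceil(31 / 2.0) = 17.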
dic['filterChannels'] = [channels/groups for channels,groups in zip(dic['channels'], dic['groups'])]
if len(set(dic['modulesX'])) != 1 or len(set(dic['filters'])) != 1:
raise LayerParsingError("Layer '%s': all inputs must produce equally-dimensioned output. Dimensions are: %s." % (name, ", ".join("%dx%dx%d" % (filters, modulesX, modulesX) for filters,modulesX in zip(dic['filters'], dic['modulesX']))))
dic['modulesX'] = dic['modulesX'][0]
dic['modules'] = dic['modulesX']**2
dic['filters'] = dic['filters'][0]
dic['outputs'] = dic['modules'] * dic['filters']
# dic['filterConns'] = [[]] * len(dic['inputs'])
for i in xrange(len(dic['inputs'])):
if dic['numInputs'][i] % dic['imgPixels'][i] != 0 or dic['imgSize'][i] * dic['imgSize'][i] != dic['imgPixels'][i]:
raise LayerParsingError("Layer '%s[%d]': has %-d dimensional input, not interpretable as square %d-channel images" % (name, i, dic['numInputs'][i], dic['channels'][i]))
if dic['channels'][i] > 3 and dic['channels'][i] % 4 != 0:
raise LayerParsingError("Layer '%s[%d]': number of channels must be smaller than 4 or divisible by 4" % (name, i))
# if dic['filterSize'][i] > totalPadding[i] + dic['imgSize'][i]:
# raise LayerParsingError("Layer '%s[%d]': filter size (%d) greater than image size + padding (%d)" % (name, i, dic['filterSize'][i], dic['padding'][i] + dic['imgSize'][i]))
if -dic['padding'][i] + dic['stride'][i] * (dic['modulesX'] - 1) + dic['filterSize'][i] < dic['imgSize'][i]:
raise LayerParsingError("Layer '%s[%d]': %dx%d output map with padding=%d, stride=%d does not cover entire input image." % (name, i, dic['modulesX'], dic['outputsX'], dic['padding'][i], dic['stride'][i]))
if dic['groups'][i] > 1:
self.verify_divisible(dic['channels'][i], 4*dic['groups'][i], 'channels', '4 * groups', input_idx=i)
self.verify_divisible(dic['channels'][i], dic['groups'][i], 'channels', 'groups', input_idx=i)
self.verify_divisible(dic['filters'], 16*dic['groups'][i], 'filters * groups', input_idx=i)
dic['padding'][i] = -dic['padding'][i]
# dic['overSample'] = [groups*filterChannels/channels for groups,filterChannels,channels in zip(dic['groups'], dic['filterChannels'], dic['channels'])]
dic['weightsPerFilter'] = [fc * (fz**2) for fc, fz in zip(dic['filterChannels'], dic['filterSize'])]
return dic
class ConvLayerParser(LocalLayerParser):
def __init__(self):
LocalLayerParser.__init__(self)
def add_params(self, mcp):
LocalLayerParser.add_params(self, mcp)
self.dic['wcNormMax'] = mcp.safe_get_float_list(self.dic['name'], 'wcNormMax', default=[0.0] * len(self.dic['inputs']))
self.dic['wcNormMin'] = mcp.safe_get_float_list(self.dic['name'], 'wcNormMin', default=[0.0] * len(self.dic['inputs']))
self.verify_num_params(['wcNormMax', 'wcNormMin'])
for min,max in zip(self.dic['wcNormMin'], self.dic['wcNormMax']):
if min > max:
raise LayerParsingError("Layer '%s': wcNormMin must be <= wcNormMax." % (self.dic['name']))
def parse(self, name, mcp, prev_layers, model):
dic = LocalLayerParser.parse(self, name, mcp, prev_layers, model)
dic['sumWidth'] = mcp.safe_get_int(name, 'sumWidth')
dic['sharedBiases'] = mcp.safe_get_bool(name, 'sharedBiases', default=True)
num_biases = dic['filters'] if dic['sharedBiases'] else dic['modules']*dic['filters']
eltmult = lambda list1, list2: [l1 * l2 for l1,l2 in zip(list1, list2)]
self.make_weights(dic['initW'], eltmult(dic['filterPixels'], dic['filterChannels']), [dic['filters']] * len(dic['inputs']), order='C')
self.make_biases(num_biases, 1, order='C')
print "Initialized convolutional layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (name, dic['gpus'], dic['modulesX'], dic['modulesX'], dic['filters'])
return dic
class LocalUnsharedLayerParser(LocalLayerParser):
def __init__(self):
LocalLayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LocalLayerParser.parse(self, name, mcp, prev_layers, model)
eltmult = lambda list1, list2: [l1 * l2 for l1,l2 in zip(list1, list2)]
scmult = lambda x, lst: [x * l for l in lst]
self.make_weights(dic['initW'], scmult(dic['modules'], eltmult(dic['filterPixels'], dic['filterChannels'])), [dic['filters']] * len(dic['inputs']), order='C')
self.make_biases(dic['modules'] * dic['filters'], 1, order='C')
print "Initialized locally-connected layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (name, dic['gpus'], dic['modulesX'], dic['modulesX'], dic['filters'])
return dic
class DataLayerParser(LayerParser):
def __init__(self):
LayerParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LayerParser.parse(self, name, mcp, prev_layers, model)
dic['dataIdx'] = mcp.safe_get_int(name, 'dataIdx')
dic['start'] = mcp.safe_get_int(name, 'start', default=0)
dic['end'] = mcp.safe_get_int(name, 'end', default=model.train_data_provider.get_data_dims(idx=dic['dataIdx']))
dic['outputs'] = dic['end'] - dic['start']
# dic['usesActs'] = False
print "Initialized data layer '%s', producing %d outputs" % (name, dic['outputs'])
return dic
class SoftmaxLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = dic['inputLayers'][0]['outputs']
print "Initialized softmax layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class ConcatentionLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['outputs'] = sum(l['outputs'] for l in dic['inputLayers'])
dic['copyOffsets'] = [sum(dic['inputLayers'][j]['outputs'] for j in xrange(i)) for i in xrange(len(dic['inputLayers']))]
print "Initialized concatenation layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class PassThroughLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self)
# Note: this doesn't verify all the necessary constraints. Layer construction may still fail in C++ code.
# For example, it does not verify that every layer only has one pass-through parent. Obviously having
# two such parents is incoherent.
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
# if len(dic['inputLayers']) == 1:
# raise LayerParsingError("Layer %s: pass-through layer must have more than one input." % dic['name'])
if len(dic['gpu']) != len(dic['inputLayers'][0]['gpu']):
raise LayerParsingError("Layer '%s': number of replicas in pass-through layer must be equivalent to number of replicas in input layers." % dic['name'])
for inp in dic['inputLayers']:
conflicting_layers = [l for l in prev_layers.values() if l['type'] == 'pass' and inp['name'] in l['inputs'] and len(set(dic['gpu']).intersection(set(l['gpu']))) > 0]
if len(conflicting_layers) > 0:
raise LayerParsingError("Layer '%s' conflicts with layer '%s'. Both pass-through layers take layer '%s' as input and operate on an overlapping set of GPUs." % (dic['name'], conflicting_layers[0]['name'], inp['name']))
dic['outputs'] = sum(l['outputs'] for l in dic['inputLayers'])
# dic['copyOffsets'] = [sum(dic['inputLayers'][j]['outputs'] for j in xrange(i)) for i in xrange(len(dic['inputLayers']))]
print "Initialized pass-through layer '%s' on GPUs %s, producing %d outputs" % (name, dic['gpus'], dic['outputs'])
return dic
class PoolLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['sizeX'] = mcp.safe_get_int(name, 'sizeX')
dic['start'] = mcp.safe_get_int(name, 'start', default=0)
dic['stride'] = mcp.safe_get_int(name, 'stride')
dic['outputsX'] = mcp.safe_get_int(name, 'outputsX', default=0)
dic['pool'] = mcp.safe_get(name, 'pool')
# Avg pooler does not use its acts or inputs
dic['usesActs'] = dic['pool'] != 'avg'
dic['usesInputs'] = dic['pool'] != 'avg'
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
if dic['pool'] == 'avg':
dic['sum'] = mcp.safe_get_bool(name, 'sum', default=False)
self.verify_num_range(dic['sizeX'], 'sizeX', 1, dic['imgSize'])
self.verify_num_range(dic['stride'], 'stride', 1, dic['sizeX'])
self.verify_num_range(dic['outputsX'], 'outputsX', 0, None)
self.verify_num_range(dic['channels'], 'channels', 1, None)
if LayerWithInputParser.grad_consumers_below(dic):
self.verify_divisible(dic['channels'], 16, 'channels')
self.verify_str_in(dic['pool'], 'pool', ['max', 'maxabs', 'avg'])
self.verify_img_size()
if dic['outputsX'] <= 0:
dic['outputsX'] = int(ceil((dic['imgSize'] - dic['start'] - dic['sizeX']) / float(dic['stride']))) + 1
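# Worked example (illustrative numbers): imgSize=24, start=0, sizeX=3, stride=2
# gives outputsX = ceil((24 - 0 - 3) / 2.0) + 1 = 12.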
dic['outputs'] = dic['outputsX']**2 * dic['channels']
print "Initialized %s-pooling layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (dic['pool'], name, dic['gpus'], dic['outputsX'], dic['outputsX'], dic['channels'])
return dic
class CrossMapPoolLayerParser(LayerWithInputParser):
def __init__(self):
LayerWithInputParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['size'] = mcp.safe_get_int(name, 'size')
dic['start'] = mcp.safe_get_int(name, 'start', default=0)
dic['stride'] = mcp.safe_get_int(name, 'stride')
dic['outputChannels'] = mcp.safe_get_int(name, 'outputs', default=0)
dic['pool'] = mcp.safe_get(name, 'pool')
dic['requiresParams'] = False
# Avg pooler does not use its acts or inputs
dic['usesActs'] = dic['pool'] != 'avg'
dic['usesInputs'] = dic['pool'] != 'avg'
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
dic['outputs'] = dic['outputChannels'] * dic['imgPixels']
self.verify_num_range(dic['size'], 'size', 1, dic['channels'])
self.verify_num_range(dic['stride'], 'stride', 1, dic['size'])
self.verify_num_range(dic['outputChannels'], 'outputChannels', 0, None)
self.verify_num_range(dic['channels'], 'channels', 1, None)
self.verify_num_range(dic['start'], 'start', None, 0)
self.verify_str_in(dic['pool'], 'pool', ['max'])
self.verify_img_size()
covered_chans = dic['start'] + (dic['outputChannels'] - 1) * dic['stride'] + dic['size']
if covered_chans < dic['channels']:
raise LayerParsingError("Layer '%s': cross-map pooling with start=%d, stride=%d, size=%d, outputs=%d covers only %d of %d input channels." % \
(name, dic['start'], dic['stride'], dic['size'], dic['outputChannels'], covered_chans, dic['channels']))
print "Initialized cross-map %s-pooling layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (dic['pool'], name, dic['gpus'], dic['imgSize'], dic['imgSize'], dic['outputChannels'])
return dic
class NormLayerParser(LayerWithInputParser):
RESPONSE_NORM = 'response'
CONTRAST_NORM = 'contrast'
CROSSMAP_RESPONSE_NORM = 'cross-map response'
def __init__(self, norm_type):
LayerWithInputParser.__init__(self, num_inputs=1)
self.norm_type = norm_type
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['scale'] = mcp.safe_get_float(name, 'scale')
dic['scale'] /= dic['size'] if self.norm_type == self.CROSSMAP_RESPONSE_NORM else dic['size']**2
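# e.g. (illustrative): with size=5, a scale of 0.0001 in the cfg becomes 0.0001/5 for
# cross-map response normalization and 0.0001/25 for the within-map (spatial) variants.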
dic['pow'] = mcp.safe_get_float(name, 'pow')
dic['minDiv'] = mcp.safe_get_float(name, 'minDiv', default=1.0)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
dic['channels'] = mcp.safe_get_int(name, 'channels')
dic['size'] = mcp.safe_get_int(name, 'size')
dic['blocked'] = mcp.safe_get_bool(name, 'blocked', default=False)
dic['imgPixels'] = dic['numInputs'][0] / dic['channels']
dic['imgSize'] = int(n.sqrt(dic['imgPixels']))
# Contrast normalization layer does not use its inputs
dic['usesInputs'] = self.norm_type != self.CONTRAST_NORM
self.verify_num_range(dic['channels'], 'channels', 1, None)
if self.norm_type == self.CROSSMAP_RESPONSE_NORM:
self.verify_num_range(dic['size'], 'size', 2, dic['channels'])
if dic['channels'] % 16 != 0:
raise LayerParsingError("Layer '%s': number of channels must be divisible by 16 when using crossMap" % name)
else:
self.verify_num_range(dic['size'], 'size', 1, dic['imgSize'])
if self.norm_type != self.CROSSMAP_RESPONSE_NORM and dic['channels'] > 3 and dic['channels'] % 4 != 0:
raise LayerParsingError("Layer '%s': number of channels must be smaller than 4 or divisible by 4" % name)
self.verify_img_size()
dic['outputs'] = dic['imgPixels'] * dic['channels']
print "Initialized %s-normalization layer '%s' on GPUs %s, producing %dx%d %d-channel output" % (self.norm_type, name, dic['gpus'], dic['imgSize'], dic['imgSize'], dic['channels'])
return dic
class CostParser(LayerWithInputParser):
def __init__(self, num_inputs=-1):
LayerWithInputParser.__init__(self, num_inputs=num_inputs)
def parse(self, name, mcp, prev_layers, model):
dic = LayerWithInputParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
# Stored as string because python can't pickle lambda functions
dic['outputFilter'] = 'lambda costs,num_cases: [c/num_cases for c in costs]'
dic['children'] = mcp.safe_get_list(name, 'children', default=[])
# Aggregated costs only produce outputs which are additive.
for c in dic['children']:
if c not in prev_layers:
raise LayerParsingError("Layer '%s': child cost layer '%s' not defined" % (name, c))
if prev_layers[c]['type'] != dic['type']:
raise LayerParsingError("Layer '%s': child cost layer '%s' must have same type as parent" % (name, c))
prev_layers[c]['aggregated'] = 1
dic['aggregated'] = dic['children'] != []
del dic['neuron']
return dic
def add_params(self, mcp):
LayerWithInputParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['coeff'] = mcp.safe_get_float(name, 'coeff')
dic['gradConsumer'] = dic['coeff'] > 0
class CrossEntCostParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=2)
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
if dic['numInputs'][0] != model.train_data_provider.get_num_classes(): # first input must be labels
raise LayerParsingError("Layer '%s': Dimensionality of first input must be equal to number of labels" % name)
if dic['inputLayers'][1]['type'] != 'softmax':
raise LayerParsingError("Layer '%s': Second input must be softmax layer" % name)
if dic['numInputs'][1] != model.train_data_provider.get_num_classes():
raise LayerParsingError("Layer '%s': Softmax input '%s' must produce %d outputs, because that is the number of classes in the dataset" \
% (name, dic['inputs'][1], model.train_data_provider.get_num_classes()))
print "Initialized cross-entropy cost '%s' on GPUs %s" % (name, dic['gpus'])
return dic
class LogregCostParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=2)
def add_params(self, mcp):
CostParser.add_params(self, mcp)
dic, name = self.dic, self.dic['name']
dic['topk'] = mcp.safe_get_int(name, 'topk', default=1)
if dic['topk'] > dic['numInputs'][1]:
raise LayerParsingError("Layer '%s': parameter 'topk'must not have value greater than the number of classess." % (name))
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
dic['requiresParams'] = True
if dic['numInputs'][0] != 1: # first input must be labels
raise LayerParsingError("Layer '%s': dimensionality of first input must be 1" % name)
if dic['inputLayers'][1]['type'] != 'softmax':
raise LayerParsingError("Layer '%s': second input must be softmax layer" % name)
if dic['numInputs'][1] != model.train_data_provider.get_num_classes():
raise LayerParsingError("Layer '%s': softmax input '%s' must produce %d outputs, because that is the number of classes in the dataset" \
% (name, dic['inputs'][1], model.train_data_provider.get_num_classes()))
print "Initialized logistic regression cost '%s' on GPUs %s" % (name, dic['gpus'])
return dic
class BinomialCrossEntCostParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=2)
def add_params(self, mcp):
CostParser.add_params(self, mcp)
self.dic['posWeight'] = mcp.safe_get_float(self.dic['name'], 'posWeight', default=1.0)
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
if dic['numInputs'][0] != dic['numInputs'][1]:
raise LayerParsingError("Layer '%s': both inputs must produce the same number of outputs" % (name))
if 'neuron' not in dic['inputLayers'][1] or dic['inputLayers'][1]['neuron'] != 'logistic':
print "WARNING: Layer '%s': input '%s' is not logistic, results may not be what you intend." % (dic['name'], dic['inputs'][1])
if dic['type'] == 'cost.bce':
print "Initialized binomial cross-entropy cost '%s' on GPUs %s" % (name, dic['gpus'])
dic['computeSoftmaxErrorRate'] = True
return dic
class DetectionCrossEntCostParser(BinomialCrossEntCostParser):
def __init__(self):
BinomialCrossEntCostParser.__init__(self)
def parse(self, name, mcp, prev_layers, model):
dic = BinomialCrossEntCostParser.parse(self, name, mcp, prev_layers, model)
if dic['numInputs'][0] != model.train_data_provider.get_num_classes(): # first input must be labels
raise LayerParsingError("Layer '%s': Dimensionality of first input must be equal to number of labels" % name)
dic['computeSoftmaxErrorRate'] = False
dic['outputFilter'] = 'lambda costs,num_cases: [c/num_cases for c in costs[:2]] + [(class_cost[2] / class_cost[j] if class_cost[j] > 0 else n.inf) for class_cost in [costs[2:][i*3:(i+1)*3] for i in range(len(costs[2:])/3)] for j in range(2)]'
dic['outputFilterFormatter'] = 'lambda self,costs: "(crossent) %.6f, (err) %.6f, " % (costs[0], costs[1]) + ", ".join("(%s) %.6f, %.6f" % (self.train_data_provider.batch_meta["label_names"][i/2-1],costs[i],costs[i+1]) for i in xrange(2, len(costs), 2))'
print "Initialized detection cross-entropy cost '%s' on GPUs %s" % (name, dic['gpus'])
return dic
class SumOfSquaresCostParser(CostParser):
def __init__(self):
CostParser.__init__(self, num_inputs=1)
def parse(self, name, mcp, prev_layers, model):
dic = CostParser.parse(self, name, mcp, prev_layers, model)
print "Initialized sum-of-squares cost '%s' on GPUs %s" % (name, dic['gpus'])
return dic
# All the layer parsers
layer_parsers = {'data' : lambda : DataLayerParser(),
'fc': lambda : FCLayerParser(),
'sfc': lambda : SplitFCLayerParser(),
'conv': lambda : ConvLayerParser(),
'local': lambda : LocalUnsharedLayerParser(),
'softmax': lambda : SoftmaxLayerParser(),
'eltsum': lambda : EltwiseSumLayerParser(),
'eltmax': lambda : EltwiseMaxLayerParser(),
'sum': lambda : SumLayerParser(),
'neuron': lambda : NeuronLayerParser(),
'pool': lambda : PoolLayerParser(),
'cmpool': lambda : CrossMapPoolLayerParser(),
'rnorm': lambda : NormLayerParser(NormLayerParser.RESPONSE_NORM),
'cnorm': lambda : NormLayerParser(NormLayerParser.CONTRAST_NORM),
'cmrnorm': lambda : NormLayerParser(NormLayerParser.CROSSMAP_RESPONSE_NORM),
'nailbed': lambda : NailbedLayerParser(),
'blur': lambda : GaussianBlurLayerParser(),
'href': lambda : HorizontalReflectionLayerParser(),
'resize': lambda : ResizeLayerParser(),
'rgb2yuv': lambda : RGBToYUVLayerParser(),
'rgb2lab': lambda : RGBToLABLayerParser(),
'rscale': lambda : RandomScaleLayerParser(),
'crop': lambda : CropLayerParser(),
'concat': lambda : ConcatentionLayerParser(),
'pass': lambda : PassThroughLayerParser(),
'dropout': lambda : DropoutLayerParser(),
'dropout2': lambda : Dropout2LayerParser(),
'cost.logreg': lambda : LogregCostParser(),
'cost.crossent': lambda : CrossEntCostParser(),
'cost.bce': lambda : BinomialCrossEntCostParser(),
'cost.dce': lambda : DetectionCrossEntCostParser(),
'cost.sum2': lambda : SumOfSquaresCostParser()}
# All the neuron parsers
# This isn't a name --> parser mapping like the layer parsers above, because neurons don't have fixed names.
# A user may write tanh[0.5,0.25], etc.
neuron_parsers = sorted([NeuronParser('ident', 'f(x) = x', uses_acts=False, uses_inputs=False),
NeuronParser('logistic', 'f(x) = 1 / (1 + e^-x)', uses_acts=True, uses_inputs=False),
NeuronParser('abs', 'f(x) = |x|', uses_acts=False, uses_inputs=True),
NeuronParser('relu', 'f(x) = max(0, x)', uses_acts=True, uses_inputs=False),
NeuronParser('nrelu', 'f(x) = max(0, x) + noise', uses_acts=True, uses_inputs=False),
NeuronParser('softrelu', 'f(x) = log(1 + e^x)', uses_acts=True, uses_inputs=False),
NeuronParser('square', 'f(x) = x^2', uses_acts=False, uses_inputs=True),
NeuronParser('sqrt', 'f(x) = sqrt(x)', uses_acts=True, uses_inputs=False),
ParamNeuronParser('log[a]', 'f(x) = log(a + x)', uses_acts=False, uses_inputs=True),
ParamNeuronParser('tanh[a,b]', 'f(x) = a * tanh(b * x)', uses_acts=True, uses_inputs=False),
ParamNeuronParser('brelu[a]', 'f(x) = min(a, max(0, x))', uses_acts=True, uses_inputs=False),
ParamNeuronParser('linear[a,b]', 'f(x) = a * x + b', uses_acts=True, uses_inputs=False),
ParamNeuronParser('drelu[a]', 'f(x) = x - a * tanh(x / a)', uses_acts=False, uses_inputs=True)],
key=lambda x:x.type)
# Learning rate schedules
lrs_parsers = sorted([ParamParser('const[fbase]'),
ParamParser('linear[fbase;ftgtFactor]'),
ParamParser('exp[fbase;ftgtFactor]'),
ParamParser('dexp[fbase;ftgtFactor;inumSteps]')])
|
jonparrott/gcloud-python
|
refs/heads/master
|
websecurityscanner/google/cloud/websecurityscanner_v1alpha/proto/crawled_url_pb2_grpc.py
|
591
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
|
codito/pomito
|
refs/heads/master
|
tests/plugins/ui/test_timer_window.py
|
1
|
# -*- coding: utf-8 -*-
"""Tests for the timer window."""
import pytest
from pomito.plugins.ui.qt.timer_window import TimerWindow
from pomito.test import PomitoTestFactory, FakeKeyBinder
@pytest.fixture(scope="function")
def timer_window(qtbot):
factory = PomitoTestFactory()
pomodoro_service = factory.create_fake_service()
timer = TimerWindow(pomodoro_service, FakeKeyBinder())
qtbot.addWidget(timer)
return timer
@pytest.mark.integration
def test_timer_window_pomodoro_session(qtbot, timer_window):
# with qtbot.waitSignal(task_window.task_selected):
# task_window.get_task()
# assert task_window.list_task is not None
pass
|
nagyistoce/edx-platform
|
refs/heads/master
|
openedx/core/lib/tests/test_course_tabs.py
|
116
|
""" Tests of specific tabs. """
from mock import patch, Mock
from unittest import TestCase
import xmodule.tabs as xmodule_tabs
from openedx.core.lib.course_tabs import CourseTabPluginManager
class CourseTabPluginManagerTestCase(TestCase):
"""Test cases for CourseTabPluginManager class"""
@patch('openedx.core.lib.course_tabs.CourseTabPluginManager.get_available_plugins')
def test_get_tab_types(self, get_available_plugins):
"""
Verify that get_course_view_types sorts appropriately
"""
def create_mock_plugin(tab_type, priority):
""" Create a mock plugin with the specified name and priority. """
mock_plugin = Mock()
mock_plugin.type = tab_type
mock_plugin.priority = priority
return mock_plugin
mock_plugins = {
"Last": create_mock_plugin(tab_type="Last", priority=None),
"Duplicate1": create_mock_plugin(tab_type="Duplicate", priority=None),
"Duplicate2": create_mock_plugin(tab_type="Duplicate", priority=None),
"First": create_mock_plugin(tab_type="First", priority=1),
"Second": create_mock_plugin(tab_type="Second", priority=1),
"Third": create_mock_plugin(tab_type="Third", priority=3),
}
get_available_plugins.return_value = mock_plugins
self.assertEqual(
[plugin.type for plugin in CourseTabPluginManager.get_tab_types()],
["First", "Second", "Third", "Duplicate", "Duplicate", "Last"]
)
class KeyCheckerTestCase(TestCase):
"""Test cases for KeyChecker class"""
def setUp(self):
super(KeyCheckerTestCase, self).setUp()
self.valid_keys = ['a', 'b']
self.invalid_keys = ['a', 'v', 'g']
self.dict_value = {'a': 1, 'b': 2, 'c': 3}
def test_key_checker(self):
self.assertTrue(xmodule_tabs.key_checker(self.valid_keys)(self.dict_value, raise_error=False))
self.assertFalse(xmodule_tabs.key_checker(self.invalid_keys)(self.dict_value, raise_error=False))
with self.assertRaises(xmodule_tabs.InvalidTabsException):
xmodule_tabs.key_checker(self.invalid_keys)(self.dict_value)
class NeedNameTestCase(TestCase):
"""Test cases for NeedName validator"""
def setUp(self):
super(NeedNameTestCase, self).setUp()
self.valid_dict1 = {'a': 1, 'name': 2}
self.valid_dict2 = {'name': 1}
self.valid_dict3 = {'a': 1, 'name': 2, 'b': 3}
self.invalid_dict = {'a': 1, 'b': 2}
def test_need_name(self):
self.assertTrue(xmodule_tabs.need_name(self.valid_dict1))
self.assertTrue(xmodule_tabs.need_name(self.valid_dict2))
self.assertTrue(xmodule_tabs.need_name(self.valid_dict3))
with self.assertRaises(xmodule_tabs.InvalidTabsException):
xmodule_tabs.need_name(self.invalid_dict)
|
rahatm1/kinect-drone
|
refs/heads/master
|
libardrone/demo.py
|
3
|
#!/usr/bin/env python
# Copyright (c) 2011 Bastian Venthur
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Demo app for the AR.Drone.
This simple application allows to control the drone and see the drone's video
stream.
"""
import pygame
import pygame.surfarray
import pygame.transform
import libardrone
def main():
pygame.init()
W, H = 320, 240
screen = pygame.display.set_mode((W, H))
drone = libardrone.ARDrone(True)
drone.reset()
clock = pygame.time.Clock()
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.KEYUP:
drone.hover()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
drone.reset()
running = False
# takeoff / land
elif event.key == pygame.K_RETURN:
print("return")
drone.takeoff()
elif event.key == pygame.K_SPACE:
print("space")
drone.land()
# emergency
elif event.key == pygame.K_BACKSPACE:
drone.reset()
# forward / backward
elif event.key == pygame.K_w:
drone.move_forward()
elif event.key == pygame.K_s:
drone.move_backward()
# left / right
elif event.key == pygame.K_a:
drone.move_left()
elif event.key == pygame.K_d:
drone.move_right()
# up / down
elif event.key == pygame.K_UP:
drone.move_up()
elif event.key == pygame.K_DOWN:
drone.move_down()
# turn left / turn right
elif event.key == pygame.K_LEFT:
drone.turn_left()
elif event.key == pygame.K_RIGHT:
drone.turn_right()
# speed
elif event.key == pygame.K_1:
drone.speed = 0.1
elif event.key == pygame.K_2:
drone.speed = 0.2
elif event.key == pygame.K_3:
drone.speed = 0.3
elif event.key == pygame.K_4:
drone.speed = 0.4
elif event.key == pygame.K_5:
drone.speed = 0.5
elif event.key == pygame.K_6:
drone.speed = 0.6
elif event.key == pygame.K_7:
drone.speed = 0.7
elif event.key == pygame.K_8:
drone.speed = 0.8
elif event.key == pygame.K_9:
drone.speed = 0.9
elif event.key == pygame.K_0:
drone.speed = 1.0
try:
# print pygame.image
pixelarray = drone.get_image()
            if pixelarray is not None:
surface = pygame.surfarray.make_surface(pixelarray)
rotsurface = pygame.transform.rotate(surface, 270)
screen.blit(rotsurface, (0, 0))
# battery status
hud_color = (255, 0, 0) if drone.navdata.get('drone_state', dict()).get('emergency_mask', 1) else (10, 10, 255)
bat = drone.navdata.get(0, dict()).get('battery', 0)
f = pygame.font.Font(None, 20)
hud = f.render('Battery: %i%%' % bat, True, hud_color)
screen.blit(hud, (10, 10))
        except Exception:
            # ignore transient video/navdata errors so the control loop keeps running
            pass
pygame.display.flip()
clock.tick(50)
pygame.display.set_caption("FPS: %.2f" % clock.get_fps())
print("Shutting down...")
drone.halt()
print("Ok.")
if __name__ == '__main__':
main()
|
muravjov/ansible
|
refs/heads/stable-1.9
|
v2/ansible/executor/play_iterator.py
|
5
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import *
from ansible.playbook.task import Task
from ansible.utils.boolean import boolean
__all__ = ['PlayIterator']
class HostState:
def __init__(self, blocks):
self._blocks = blocks[:]
self.cur_block = 0
self.cur_regular_task = 0
self.cur_rescue_task = 0
self.cur_always_task = 0
self.cur_role = None
self.run_state = PlayIterator.ITERATING_SETUP
self.fail_state = PlayIterator.FAILED_NONE
self.pending_setup = False
def __repr__(self):
return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%d, fail_state=%d, pending_setup=%s" % (
self.cur_block,
self.cur_regular_task,
self.cur_rescue_task,
self.cur_always_task,
self.cur_role,
self.run_state,
self.fail_state,
self.pending_setup,
)
def get_current_block(self):
return self._blocks[self.cur_block]
def copy(self):
new_state = HostState(self._blocks)
new_state.cur_block = self.cur_block
new_state.cur_regular_task = self.cur_regular_task
new_state.cur_rescue_task = self.cur_rescue_task
new_state.cur_always_task = self.cur_always_task
new_state.cur_role = self.cur_role
new_state.run_state = self.run_state
new_state.fail_state = self.fail_state
new_state.pending_setup = self.pending_setup
return new_state
class PlayIterator:
# the primary running states for the play iteration
ITERATING_SETUP = 0
ITERATING_TASKS = 1
ITERATING_RESCUE = 2
ITERATING_ALWAYS = 3
ITERATING_COMPLETE = 4
# the failure states for the play iteration, which are powers
# of 2 as they may be or'ed together in certain circumstances
FAILED_NONE = 0
FAILED_SETUP = 1
FAILED_TASKS = 2
FAILED_RESCUE = 4
FAILED_ALWAYS = 8
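    # e.g. a host that fails during its tasks and then fails again in the
    # rescue section ends up with fail_state == FAILED_TASKS | FAILED_RESCUE == 6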
def __init__(self, inventory, play):
# FIXME: should we save the post_validated play from below here instead?
self._play = play
# post validate the play, as we need some fields to be finalized now
# so that we can use them to setup the iterator properly
all_vars = inventory._variable_manager.get_vars(loader=inventory._loader, play=play)
new_play = play.copy()
new_play.post_validate(all_vars, fail_on_undefined=False)
self._blocks = new_play.compile()
self._host_states = {}
for host in inventory.get_hosts(new_play.hosts):
self._host_states[host.name] = HostState(blocks=self._blocks)
def get_host_state(self, host):
try:
return self._host_states[host.name].copy()
except KeyError:
raise AnsibleError("invalid host (%s) specified for playbook iteration" % host)
def get_next_task_for_host(self, host, peek=False, lock_step=True):
s = self.get_host_state(host)
task = None
if s.run_state == self.ITERATING_COMPLETE:
return None
else:
while True:
try:
cur_block = s._blocks[s.cur_block]
except IndexError:
s.run_state = self.ITERATING_COMPLETE
break
if s.run_state == self.ITERATING_SETUP:
s.run_state = self.ITERATING_TASKS
                    if (self._play._gather_facts == 'smart' and not host.gathered_facts) or boolean(self._play._gather_facts):
# mark the host as having gathered facts
host.set_gathered_facts(True)
task = Task()
task.action = 'setup'
task.set_loader(self._play._loader)
elif s.run_state == self.ITERATING_TASKS:
# clear the pending setup flag, since we're past that and it didn't fail
if s.pending_setup:
s.pending_setup = False
if s.fail_state & self.FAILED_TASKS == self.FAILED_TASKS:
s.run_state = self.ITERATING_RESCUE
elif s.cur_regular_task >= len(cur_block.block):
s.run_state = self.ITERATING_ALWAYS
else:
task = cur_block.block[s.cur_regular_task]
s.cur_regular_task += 1
break
elif s.run_state == self.ITERATING_RESCUE:
if s.fail_state & self.FAILED_RESCUE == self.FAILED_RESCUE:
s.run_state = self.ITERATING_ALWAYS
elif s.cur_rescue_task >= len(cur_block.rescue):
if len(cur_block.rescue) > 0:
s.fail_state = self.FAILED_NONE
s.run_state = self.ITERATING_ALWAYS
else:
task = cur_block.rescue[s.cur_rescue_task]
s.cur_rescue_task += 1
break
elif s.run_state == self.ITERATING_ALWAYS:
if s.cur_always_task >= len(cur_block.always):
if s.fail_state != self.FAILED_NONE:
s.run_state = self.ITERATING_COMPLETE
break
else:
s.cur_block += 1
s.cur_regular_task = 0
s.cur_rescue_task = 0
s.cur_always_task = 0
s.run_state = self.ITERATING_TASKS
else:
                        task = cur_block.always[s.cur_always_task]
s.cur_always_task += 1
break
if task and task._role:
# if we had a current role, mark that role as completed
if s.cur_role and task._role != s.cur_role and s.cur_role._had_task_run and not peek:
s.cur_role._completed = True
s.cur_role = task._role
if not peek:
self._host_states[host.name] = s
return (s, task)
def mark_host_failed(self, host):
s = self.get_host_state(host)
if s.pending_setup:
s.fail_state |= self.FAILED_SETUP
s.run_state = self.ITERATING_COMPLETE
elif s.run_state == self.ITERATING_TASKS:
s.fail_state |= self.FAILED_TASKS
s.run_state = self.ITERATING_RESCUE
elif s.run_state == self.ITERATING_RESCUE:
s.fail_state |= self.FAILED_RESCUE
s.run_state = self.ITERATING_ALWAYS
elif s.run_state == self.ITERATING_ALWAYS:
s.fail_state |= self.FAILED_ALWAYS
s.run_state = self.ITERATING_COMPLETE
self._host_states[host.name] = s
def get_failed_hosts(self):
        return dict((host, True) for (host, state) in self._host_states.items()
                    if state.run_state == self.ITERATING_COMPLETE and state.fail_state != self.FAILED_NONE)
def get_original_task(self, host, task):
'''
Finds the task in the task list which matches the UUID of the given task.
The executor engine serializes/deserializes objects as they are passed through
the different processes, and not all data structures are preserved. This method
allows us to find the original task passed into the executor engine.
'''
s = self.get_host_state(host)
for block in s._blocks:
if block.block:
for t in block.block:
if t._uuid == task._uuid:
return t
if block.rescue:
for t in block.rescue:
if t._uuid == task._uuid:
return t
if block.always:
for t in block.always:
if t._uuid == task._uuid:
return t
return None
def add_tasks(self, host, task_list):
s = self.get_host_state(host)
target_block = s._blocks[s.cur_block].copy()
if s.run_state == self.ITERATING_TASKS:
before = target_block.block[:s.cur_regular_task]
after = target_block.block[s.cur_regular_task:]
target_block.block = before + task_list + after
elif s.run_state == self.ITERATING_RESCUE:
before = target_block.rescue[:s.cur_rescue_task]
after = target_block.rescue[s.cur_rescue_task:]
target_block.rescue = before + task_list + after
elif s.run_state == self.ITERATING_ALWAYS:
before = target_block.always[:s.cur_always_task]
after = target_block.always[s.cur_always_task:]
target_block.always = before + task_list + after
s._blocks[s.cur_block] = target_block
self._host_states[host.name] = s
|
TheTypoMaster/asuswrt
|
refs/heads/master
|
release/src/router/libxml2/python/tests/xpathext.py
|
87
|
#!/usr/bin/python -u
import sys
import libxml2
# Memory debug specific
libxml2.debugMemory(1)
def foo(ctx, x):
return x + 1
def bar(ctx, x):
return "%d" % (x + 2)
doc = libxml2.parseFile("tst.xml")
ctxt = doc.xpathNewContext()
res = ctxt.xpathEval("//*")
if len(res) != 2:
print "xpath query: wrong node set size"
sys.exit(1)
if res[0].name != "doc" or res[1].name != "foo":
print "xpath query: wrong node set value"
sys.exit(1)
libxml2.registerXPathFunction(ctxt._o, "foo", None, foo)
libxml2.registerXPathFunction(ctxt._o, "bar", None, bar)
i = 10000
while i > 0:
res = ctxt.xpathEval("foo(1)")
if res != 2:
print "xpath extension failure"
sys.exit(1)
i = i - 1
i = 10000
while i > 0:
res = ctxt.xpathEval("bar(1)")
if res != "3":
        print "xpath extension failure got %s expecting '3'" % res
sys.exit(1)
i = i - 1
doc.freeDoc()
ctxt.xpathFreeContext()
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print "OK"
else:
print "Memory leak %d bytes" % (libxml2.debugMemory(1))
libxml2.dumpMemory()
|
duhzecca/cinder
|
refs/heads/master
|
cinder/volume/drivers/infortrend/eonstor_ds_cli/common_cli.py
|
20
|
# Copyright (c) 2015 Infortrend Technology, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Infortrend Common CLI.
"""
import math
import time
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import timeutils
from oslo_utils import units
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.volume.drivers.infortrend.eonstor_ds_cli import cli_factory as cli
from cinder.volume.drivers.san import san
from cinder.volume import volume_types
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
infortrend_esds_opts = [
cfg.StrOpt('infortrend_pools_name',
default='',
help='Infortrend raid pool name list. '
'It is separated with comma.'),
cfg.StrOpt('infortrend_cli_path',
default='/opt/bin/Infortrend/raidcmd_ESDS10.jar',
help='The Infortrend CLI absolute path. '
'By default, it is at '
'/opt/bin/Infortrend/raidcmd_ESDS10.jar'),
cfg.IntOpt('infortrend_cli_max_retries',
default=5,
help='Maximum retry time for cli. Default is 5.'),
cfg.IntOpt('infortrend_cli_timeout',
default=30,
help='Default timeout for CLI copy operations in minutes. '
'Support: migrate volume, create cloned volume and '
'create volume from snapshot. '
                    'By default, it is 30 minutes.'),
cfg.StrOpt('infortrend_slots_a_channels_id',
default='0,1,2,3,4,5,6,7',
help='Infortrend raid channel ID list on Slot A '
'for OpenStack usage. It is separated with comma. '
'By default, it is the channel 0~7.'),
cfg.StrOpt('infortrend_slots_b_channels_id',
default='0,1,2,3,4,5,6,7',
help='Infortrend raid channel ID list on Slot B '
'for OpenStack usage. It is separated with comma. '
'By default, it is the channel 0~7.'),
]
infortrend_esds_extra_opts = [
cfg.StrOpt('infortrend_provisioning',
default='full',
help='Let the volume use specific provisioning. '
'By default, it is the full provisioning. '
'The supported options are full or thin.'),
cfg.StrOpt('infortrend_tiering',
default='0',
help='Let the volume use specific tiering level. '
'By default, it is the level 0. '
'The supported levels are 0,2,3,4.'),
]
CONF = cfg.CONF
CONF.register_opts(infortrend_esds_opts)
CONF.register_opts(infortrend_esds_extra_opts)
CLI_RC_FILTER = {
'CreatePartition': {'error': _('Failed to create partition.')},
'DeletePartition': {'error': _('Failed to delete partition.')},
'SetPartition': {'error': _('Failed to set partition.')},
'CreateMap': {
'warning': {20: _LW('The MCS Channel is grouped.')},
'error': _('Failed to create map.'),
},
'DeleteMap': {
'warning': {11: _LW('No mapping.')},
'error': _('Failed to delete map.'),
},
'CreateSnapshot': {'error': _('Failed to create snapshot.')},
'DeleteSnapshot': {'error': _('Failed to delete snapshot.')},
'CreateReplica': {'error': _('Failed to create replica.')},
'DeleteReplica': {'error': _('Failed to delete replica.')},
'CreateIQN': {
'warning': {20: _LW('IQN already existed.')},
'error': _('Failed to create iqn.'),
},
'DeleteIQN': {
'warning': {
20: _LW('IQN has been used to create map.'),
11: _LW('No such host alias name.'),
},
'error': _('Failed to delete iqn.'),
},
'ShowLV': {'error': _('Failed to get lv info.')},
'ShowPartition': {'error': _('Failed to get partition info.')},
'ShowSnapshot': {'error': _('Failed to get snapshot info.')},
'ShowDevice': {'error': _('Failed to get device info.')},
'ShowChannel': {'error': _('Failed to get channel info.')},
'ShowMap': {'error': _('Failed to get map info.')},
'ShowNet': {'error': _('Failed to get network info.')},
'ShowLicense': {'error': _('Failed to get license info.')},
'ShowReplica': {'error': _('Failed to get replica info.')},
'ShowWWN': {'error': _('Failed to get wwn info.')},
'ShowIQN': {'error': _('Failed to get iqn info.')},
'ExecuteCommand': {'error': _('Failed to execute common command.')},
}
def log_func(func):
def inner(self, *args, **kwargs):
LOG.debug('Entering: %(method)s', {'method': func.__name__})
start = timeutils.utcnow()
ret = func(self, *args, **kwargs)
end = timeutils.utcnow()
LOG.debug(
'Leaving: %(method)s, '
'Spent: %(time)s sec, '
'Return: %(ret)s.', {
'method': func.__name__,
'time': timeutils.delta_seconds(start, end),
'ret': ret})
return ret
return inner
def mi_to_gi(mi_size):
return mi_size * units.Mi / units.Gi
def gi_to_mi(gi_size):
return gi_size * units.Gi / units.Mi
class InfortrendCommon(object):
"""The Infortrend's Common Command using CLI.
Version history:
1.0.0 - Initial driver
1.0.1 - Support DS4000
"""
VERSION = '1.0.1'
constants = {
'ISCSI_PORT': 3260,
'MAX_LUN_MAP_PER_CHL': 128
}
provisioning_values = ['thin', 'full']
tiering_values = ['0', '2', '3', '4']
def __init__(self, protocol, configuration=None):
self.protocol = protocol
self.configuration = configuration
self.configuration.append_config_values(san.san_opts)
self.configuration.append_config_values(infortrend_esds_opts)
self.configuration.append_config_values(infortrend_esds_extra_opts)
self.iscsi_multipath = self.configuration.use_multipath_for_image_xfer
self.path = self.configuration.infortrend_cli_path
self.password = self.configuration.san_password
self.ip = self.configuration.san_ip
self.cli_retry_time = self.configuration.infortrend_cli_max_retries
self.cli_timeout = self.configuration.infortrend_cli_timeout * 60
self.iqn = 'iqn.2002-10.com.infortrend:raid.uid%s.%s%s%s'
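        # The four placeholders are filled in by _generate_iqn() with the raid
        # system id, MCS id, target id and slot id of the mapped channel.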
self.unmanaged_prefix = 'cinder-unmanaged-%s'
if self.ip == '':
msg = _('san_ip is not set.')
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
self.fc_lookup_service = fczm_utils.create_lookup_service()
self._volume_stats = None
self._model_type = 'R'
self._replica_timeout = self.cli_timeout
self.map_dict = {
'slot_a': {},
'slot_b': {},
}
self.map_dict_init = False
self.target_dict = {
'slot_a': {},
'slot_b': {},
}
if self.protocol == 'iSCSI':
self.mcs_dict = {
'slot_a': {},
'slot_b': {},
}
self._init_pool_list()
self._init_channel_list()
self.cli_conf = {
'path': self.path,
'password': self.password,
'ip': self.ip,
'cli_retry_time': int(self.cli_retry_time),
}
def _init_pool_list(self):
pools_name = self.configuration.infortrend_pools_name
if pools_name == '':
msg = _('Pools name is not set.')
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
tmp_pool_list = pools_name.split(',')
self.pool_list = [pool.strip() for pool in tmp_pool_list]
def _init_channel_list(self):
self.channel_list = {
'slot_a': [],
'slot_b': [],
}
tmp_channel_list = (
self.configuration.infortrend_slots_a_channels_id.split(',')
)
self.channel_list['slot_a'] = (
[channel.strip() for channel in tmp_channel_list]
)
tmp_channel_list = (
self.configuration.infortrend_slots_b_channels_id.split(',')
)
self.channel_list['slot_b'] = (
[channel.strip() for channel in tmp_channel_list]
)
def _execute_command(self, cli_type, *args, **kwargs):
command = getattr(cli, cli_type)
return command(self.cli_conf).execute(*args, **kwargs)
def _execute(self, cli_type, *args, **kwargs):
LOG.debug('Executing command type: %(type)s.', {'type': cli_type})
rc, out = self._execute_command(cli_type, *args, **kwargs)
if rc != 0:
if ('warning' in CLI_RC_FILTER[cli_type] and
rc in CLI_RC_FILTER[cli_type]['warning']):
LOG.warning(CLI_RC_FILTER[cli_type]['warning'][rc])
else:
msg = CLI_RC_FILTER[cli_type]['error']
LOG.error(msg)
raise exception.InfortrendCliException(
err=msg, param=args, rc=rc, out=out)
return rc, out
@log_func
def _init_map_info(self, multipath=False):
if not self.map_dict_init:
rc, channel_info = self._execute('ShowChannel')
if 'BID' in channel_info[0]:
self._model_type = 'R'
else:
self._model_type = 'G'
self._set_channel_id(channel_info, 'slot_a', multipath)
if multipath and self._model_type == 'R':
self._set_channel_id(channel_info, 'slot_b', multipath)
self.map_dict_init = True
@log_func
def _update_map_info(self, multipath=False):
"""Record the driver mapping information.
map_dict = {
'slot_a': {
'0': [1, 2, 3, 4] # Slot A Channel 0 map lun 1, 2, 3, 4
},
'slot_b' : {
'1': [0, 1, 3] # Slot B Channel 1 map lun 0, 1, 3
}
}
"""
rc, map_info = self._execute('ShowMap')
self._update_map_info_by_slot(map_info, 'slot_a')
if multipath and self._model_type == 'R':
self._update_map_info_by_slot(map_info, 'slot_b')
return map_info
@log_func
def _update_map_info_by_slot(self, map_info, slot_key):
for key, value in self.map_dict[slot_key].items():
self.map_dict[slot_key][key] = list(
range(self.constants['MAX_LUN_MAP_PER_CHL']))
if len(map_info) > 0 and isinstance(map_info, list):
for entry in map_info:
ch = entry['Ch']
lun = entry['LUN']
if ch not in self.map_dict[slot_key].keys():
continue
target_id = self.target_dict[slot_key][ch]
if (entry['Target'] == target_id and
int(lun) in self.map_dict[slot_key][ch]):
self.map_dict[slot_key][ch].remove(int(lun))
def _check_initiator_has_lun_map(self, initiator_wwns, map_info):
for initiator in initiator_wwns:
for entry in map_info:
if initiator.lower() == entry['Host-ID'].lower():
return True
return False
@log_func
def _set_channel_id(
self, channel_info, controller='slot_a', multipath=False):
if self.protocol == 'iSCSI':
check_channel_type = 'NETWORK'
else:
check_channel_type = 'FIBRE'
for entry in channel_info:
if entry['Type'] == check_channel_type:
if entry['Ch'] in self.channel_list[controller]:
self.map_dict[controller][entry['Ch']] = []
if self.protocol == 'iSCSI':
self._update_mcs_dict(
entry['Ch'], entry['MCS'], controller)
self._update_target_dict(entry, controller)
@log_func
def _update_target_dict(self, channel, controller):
"""Record the target id for mapping.
# R model
target_dict = {
'slot_a': {
'0': '0',
'1': '0',
},
'slot_b': {
'0': '1',
'1': '1',
},
}
# G model
target_dict = {
'slot_a': {
'2': '32',
'3': '112',
}
}
"""
if self._model_type == 'G':
self.target_dict[controller][channel['Ch']] = channel['ID']
else:
if controller == 'slot_a':
self.target_dict[controller][channel['Ch']] = channel['AID']
else:
self.target_dict[controller][channel['Ch']] = channel['BID']
def _update_mcs_dict(self, channel_id, mcs_id, controller):
"""Record the iSCSI MCS topology.
        # R model with MCS, but it does not work with iSCSI multipath
mcs_dict = {
'slot_a': {
'0': ['0', '1'],
'1': ['2']
},
'slot_b': {
'0': ['0', '1'],
'1': ['2']
}
}
# G model with mcs
mcs_dict = {
'slot_a': {
'0': ['0', '1'],
'1': ['2']
},
'slot_b': {}
}
"""
if mcs_id not in self.mcs_dict[controller]:
self.mcs_dict[controller][mcs_id] = []
self.mcs_dict[controller][mcs_id].append(channel_id)
def _check_tiers_setup(self):
tiering = self.configuration.infortrend_tiering
if tiering != '0':
self._check_extraspec_value(
tiering, self.tiering_values)
tier_levels_list = list(range(int(tiering)))
tier_levels_list = list(map(str, tier_levels_list))
rc, lv_info = self._execute('ShowLV', 'tier')
for pool in self.pool_list:
support_tier_levels = tier_levels_list[:]
for entry in lv_info:
if (entry['LV-Name'] == pool and
entry['Tier'] in support_tier_levels):
support_tier_levels.remove(entry['Tier'])
if len(support_tier_levels) == 0:
break
if len(support_tier_levels) != 0:
msg = _('Please create %(tier_levels)s '
'tier in pool %(pool)s in advance!') % {
'tier_levels': support_tier_levels,
'pool': pool}
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
def _check_pools_setup(self):
pool_list = self.pool_list[:]
rc, lv_info = self._execute('ShowLV')
for lv in lv_info:
if lv['Name'] in pool_list:
pool_list.remove(lv['Name'])
if len(pool_list) == 0:
break
if len(pool_list) != 0:
msg = _('Please create %(pool_list)s pool in advance!') % {
'pool_list': pool_list}
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
def check_for_setup_error(self):
self._check_pools_setup()
self._check_tiers_setup()
def create_volume(self, volume):
"""Create a Infortrend partition."""
volume_id = volume['id'].replace('-', '')
self._create_partition_by_default(volume)
part_id = self._get_part_id(volume_id)
system_id = self._get_system_id(self.ip)
model_dict = {
'system_id': system_id,
'partition_id': part_id,
}
model_update = {
"provider_location": self._concat_provider_location(model_dict),
}
LOG.info(_LI('Create Volume %(volume_id)s completed.'), {
'volume_id': volume_id})
return model_update
def _create_partition_by_default(self, volume):
pool_id = self._get_target_pool_id(volume)
self._create_partition_with_pool(volume, pool_id)
def _create_partition_with_pool(
self, volume, pool_id, extraspecs=None):
volume_id = volume['id'].replace('-', '')
volume_size = gi_to_mi(volume['size'])
if extraspecs is None:
extraspecs = self._get_extraspecs_dict(volume['volume_type_id'])
provisioning = self._get_extraspecs_value(extraspecs, 'provisioning')
tiering = self._get_extraspecs_value(extraspecs, 'tiering')
extraspecs_dict = {}
cmd = ''
if provisioning == 'thin':
provisioning = int(volume_size * 0.2)
extraspecs_dict['provisioning'] = provisioning
extraspecs_dict['init'] = 'disable'
else:
self._check_extraspec_value(
provisioning, self.provisioning_values)
if tiering != '0':
self._check_extraspec_value(
tiering, self.tiering_values)
tier_levels_list = list(range(int(tiering)))
tier_levels_list = list(map(str, tier_levels_list))
self._check_tiering_existing(tier_levels_list, pool_id)
extraspecs_dict['provisioning'] = 0
extraspecs_dict['init'] = 'disable'
if extraspecs_dict:
cmd = self._create_part_parameters_str(extraspecs_dict)
commands = (pool_id, volume_id, 'size=%s' % volume_size, cmd)
self._execute('CreatePartition', *commands)
def _create_part_parameters_str(self, extraspecs_dict):
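        # A sketch with illustrative values: an extraspecs_dict of
        # {'provisioning': 2048, 'init': 'disable'} yields 'min=2048MB init=disable'.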
parameters_list = []
parameters = {
'provisioning': 'min=%sMB',
'tiering': 'tier=%s',
'init': 'init=%s',
}
for extraspec in extraspecs_dict.keys():
value = parameters[extraspec] % (extraspecs_dict[extraspec])
parameters_list.append(value)
cmd = ' '.join(parameters_list)
return cmd
def _check_tiering_existing(self, tier_levels, pool_id):
rc, lv_info = self._execute('ShowLV', 'tier')
for entry in lv_info:
if entry['LV-ID'] == pool_id and entry['Tier'] in tier_levels:
tier_levels.remove(entry['Tier'])
if len(tier_levels) == 0:
break
if len(tier_levels) != 0:
msg = _('Have not created %(tier_levels)s tier(s).') % {
'tier_levels': tier_levels}
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
@log_func
def _create_map_with_lun_filter(
self, part_id, channel_id, lun_id, host, controller='slot_a'):
host_filter = self._create_target_id_and_host_filter(
controller, host)
target_id = self.target_dict[controller][channel_id]
commands = (
'part', part_id, channel_id, target_id, lun_id, host_filter
)
self._execute('CreateMap', *commands)
@log_func
def _create_map_with_mcs(
self, part_id, channel_list, lun_id, host, controller='slot_a'):
map_channel_id = None
for channel_id in channel_list:
host_filter = self._create_target_id_and_host_filter(
controller, host)
target_id = self.target_dict[controller][channel_id]
commands = (
'part', part_id, channel_id, target_id, lun_id,
host_filter
)
rc, out = self._execute('CreateMap', *commands)
if rc == 0:
map_channel_id = channel_id
break
if map_channel_id is None:
msg = _('Failed to create map on mcs, no channel can map.')
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
return map_channel_id
def _create_target_id_and_host_filter(self, controller, host):
if self.protocol == 'iSCSI':
host_filter = 'iqn=%s' % host
else:
host_filter = 'wwn=%s' % host
return host_filter
def _get_extraspecs_dict(self, volume_type_id):
extraspecs = {}
if volume_type_id:
extraspecs = volume_types.get_volume_type_extra_specs(
volume_type_id)
return extraspecs
def _get_extraspecs_value(self, extraspecs, key):
value = None
if key == 'provisioning':
if (extraspecs and
'infortrend_provisioning' in extraspecs.keys()):
value = extraspecs['infortrend_provisioning'].lower()
else:
value = self.configuration.infortrend_provisioning.lower()
elif key == 'tiering':
value = self.configuration.infortrend_tiering
return value
def _select_most_free_capacity_pool_id(self, lv_info):
largest_free_capacity_gb = 0.0
dest_pool_id = None
for lv in lv_info:
if lv['Name'] in self.pool_list:
available_space = float(lv['Available'].split(' ', 1)[0])
free_capacity_gb = round(mi_to_gi(available_space))
if free_capacity_gb > largest_free_capacity_gb:
largest_free_capacity_gb = free_capacity_gb
dest_pool_id = lv['ID']
return dest_pool_id
def _get_target_pool_id(self, volume):
extraspecs = self._get_extraspecs_dict(volume['volume_type_id'])
pool_id = None
rc, lv_info = self._execute('ShowLV')
if 'pool_name' in extraspecs.keys():
poolname = extraspecs['pool_name']
for entry in lv_info:
if entry['Name'] == poolname:
pool_id = entry['ID']
else:
pool_id = self._select_most_free_capacity_pool_id(lv_info)
if pool_id is None:
msg = _('Failed to get pool id with volume %(volume_id)s.') % {
'volume_id': volume['id']}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
return pool_id
def _get_system_id(self, system_ip):
rc, device_info = self._execute('ShowDevice')
for entry in device_info:
if system_ip == entry['Connected-IP']:
return str(int(entry['ID'], 16))
return
@log_func
def _get_lun_id(self, ch_id, controller='slot_a'):
lun_id = -1
if len(self.map_dict[controller][ch_id]) > 0:
lun_id = self.map_dict[controller][ch_id][0]
self.map_dict[controller][ch_id].remove(lun_id)
if lun_id == -1:
msg = _('LUN number is out of bound '
'on channel id: %(ch_id)s.') % {'ch_id': ch_id}
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
return lun_id
@log_func
def _get_mapping_info(self, multipath):
if self.iscsi_multipath or multipath:
return self._get_mapping_info_with_mcs()
else:
return self._get_mapping_info_with_normal()
    def _get_mapping_info_with_mcs(self):
        """Get the minimum mapping channel id and multi lun id mapping info.
# R model with mcs
map_chl = {
'slot_a': ['0', '1']
}
map_lun = ['0']
# G model with mcs
map_chl = {
'slot_a': ['1', '2']
}
map_lun = ['0']
        :returns: minimum mapping channel id per slot and multi lun id
"""
map_chl = {
'slot_a': []
}
min_lun_num = 0
map_mcs_group = None
for mcs in self.mcs_dict['slot_a']:
if len(self.mcs_dict['slot_a'][mcs]) > 1:
if min_lun_num < self._get_mcs_channel_lun_map_num(mcs):
min_lun_num = self._get_mcs_channel_lun_map_num(mcs)
map_mcs_group = mcs
if map_mcs_group is None:
msg = _('Raid did not have MCS Channel.')
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
map_chl['slot_a'] = self.mcs_dict['slot_a'][map_mcs_group]
map_lun = self._get_mcs_channel_lun_map(map_chl['slot_a'])
return map_chl, map_lun, map_mcs_group
def _get_mcs_channel_lun_map_num(self, mcs_id):
lun_num = 0
for channel in self.mcs_dict['slot_a'][mcs_id]:
lun_num += len(self.map_dict['slot_a'][channel])
return lun_num
def _get_mcs_channel_lun_map(self, channel_list):
"""Find the common lun id in mcs channel."""
map_lun = []
for lun_id in range(self.constants['MAX_LUN_MAP_PER_CHL']):
check_map = True
for channel_id in channel_list:
if lun_id not in self.map_dict['slot_a'][channel_id]:
check_map = False
if check_map:
map_lun.append(str(lun_id))
break
return map_lun
@log_func
    def _get_mapping_info_with_normal(self):
        """Get the minimum mapping channel id and lun id mapping info.
# G model and R model
map_chl = {
'slot_a': ['1']
}
map_lun = ['0']
        :returns: minimum mapping channel id per slot and lun id
"""
map_chl = {
'slot_a': []
}
map_lun = []
ret_chl = self._get_minimun_mapping_channel_id('slot_a')
lun_id = self._get_lun_id(ret_chl, 'slot_a')
mcs_id = self._get_mcs_id_by_channel_id(ret_chl)
map_chl['slot_a'].append(ret_chl)
map_lun.append(str(lun_id))
return map_chl, map_lun, mcs_id
@log_func
def _get_minimun_mapping_channel_id(self, controller):
empty_lun_num = 0
min_map_chl = -1
for key, value in self.map_dict[controller].items():
if empty_lun_num < len(value):
min_map_chl = key
empty_lun_num = len(value)
if int(min_map_chl) < 0:
msg = _('LUN map overflow on every channel.')
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
return min_map_chl
def _get_common_lun_map_id(self, wwpn_channel_info):
map_lun = None
for lun_id in range(self.constants['MAX_LUN_MAP_PER_CHL']):
lun_id_exist = False
for slot_name in ['slot_a', 'slot_b']:
for wwpn in wwpn_channel_info:
channel_id = wwpn_channel_info[wwpn]['channel']
if channel_id not in self.map_dict[slot_name]:
continue
elif lun_id not in self.map_dict[slot_name][channel_id]:
lun_id_exist = True
if not lun_id_exist:
map_lun = str(lun_id)
break
return map_lun
def _get_mcs_id_by_channel_id(self, channel_id):
mcs_id = None
for mcs in self.mcs_dict['slot_a']:
if channel_id in self.mcs_dict['slot_a'][mcs]:
mcs_id = mcs
break
if mcs_id is None:
msg = _('Cannot get mcs_id by channel id: %(channel_id)s.') % {
'channel_id': channel_id}
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
return mcs_id
def _concat_provider_location(self, model_dict):
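        # Builds a provider_location string such as
        # 'system_id^20229@partition_id^6A41315B' (illustrative values);
        # _extract_all_provider_location() parses it back into a dict.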
return '@'.join([i + '^' + str(model_dict[i]) for i in model_dict])
def delete_volume(self, volume):
"""Delete the specific volume."""
volume_id = volume['id'].replace('-', '')
has_pair = False
have_map = False
part_id = self._extract_specific_provider_location(
volume['provider_location'], 'partition_id')
(check_exist, have_map, part_id) = (
self._check_volume_exist(volume_id, part_id)
)
if not check_exist:
LOG.warning(_LW('Volume %(volume_id)s already deleted.'), {
'volume_id': volume_id})
return
rc, replica_list = self._execute('ShowReplica', '-l')
for entry in replica_list:
if (volume_id == entry['Source-Name'] and
part_id == entry['Source']):
if not self._check_replica_completed(entry):
has_pair = True
                    LOG.warning(_LW('Volume still %(status)s. '
'Cannot delete volume.'), {
'status': entry['Status']})
else:
have_map = entry['Source-Mapped'] == 'Yes'
self._execute('DeleteReplica', entry['Pair-ID'], '-y')
elif (volume_id == entry['Target-Name'] and
part_id == entry['Target']):
have_map = entry['Target-Mapped'] == 'Yes'
self._execute('DeleteReplica', entry['Pair-ID'], '-y')
if not has_pair:
rc, snapshot_list = self._execute(
'ShowSnapshot', 'part=%s' % part_id)
for snapshot in snapshot_list:
si_has_pair = self._delete_pair_with_snapshot(
snapshot['SI-ID'], replica_list)
if si_has_pair:
msg = _('Failed to delete SI '
'for volume_id: %(volume_id)s '
'because it has pair.') % {
'volume_id': volume_id}
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
self._execute('DeleteSnapshot', snapshot['SI-ID'], '-y')
rc, map_info = self._execute('ShowMap', 'part=%s' % part_id)
if have_map or len(map_info) > 0:
self._execute('DeleteMap', 'part', part_id, '-y')
self._execute('DeletePartition', part_id, '-y')
LOG.info(_LI('Delete Volume %(volume_id)s completed.'), {
'volume_id': volume_id})
else:
msg = _('Failed to delete volume '
'for volume_id: %(volume_id)s '
'because it has pair.') % {
'volume_id': volume_id}
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
def _check_replica_completed(self, replica):
if ((replica['Type'] == 'Copy' and replica['Status'] == 'Completed') or
(replica['Type'] == 'Mirror' and
replica['Status'] == 'Mirror')):
return True
return False
def _check_volume_exist(self, volume_id, part_id):
check_exist = False
have_map = False
result_part_id = part_id
rc, part_list = self._execute('ShowPartition', '-l')
for entry in part_list:
if entry['Name'] == volume_id:
check_exist = True
if part_id is None:
result_part_id = entry['ID']
if entry['Mapped'] == 'true':
have_map = True
if check_exist:
return (check_exist, have_map, result_part_id)
else:
return (False, False, None)
def create_cloned_volume(self, volume, src_vref):
"""Create a clone of the volume by volume copy."""
volume_id = volume['id'].replace('-', '')
# Step1 create a snapshot of the volume
src_part_id = self._extract_specific_provider_location(
src_vref['provider_location'], 'partition_id')
if src_part_id is None:
src_part_id = self._get_part_id(volume_id)
model_update = self._create_volume_from_volume(volume, src_part_id)
LOG.info(_LI('Create Cloned Volume %(volume_id)s completed.'), {
'volume_id': volume['id']})
return model_update
def _create_volume_from_volume(self, dst_volume, src_part_id):
# create the target volume for volume copy
dst_volume_id = dst_volume['id'].replace('-', '')
self._create_partition_by_default(dst_volume)
dst_part_id = self._get_part_id(dst_volume_id)
# prepare return value
system_id = self._get_system_id(self.ip)
model_dict = {
'system_id': system_id,
'partition_id': dst_part_id,
}
model_info = self._concat_provider_location(model_dict)
model_update = {"provider_location": model_info}
# clone the volume from the origin partition
commands = (
'Cinder-Cloned', 'part', src_part_id, 'part', dst_part_id
)
self._execute('CreateReplica', *commands)
self._wait_replica_complete(dst_part_id)
return model_update
def _extract_specific_provider_location(self, provider_location, key):
provider_location_dict = self._extract_all_provider_location(
provider_location)
result = provider_location_dict.get(key, None)
return result
@log_func
def _extract_all_provider_location(self, provider_location):
provider_location_dict = {}
dict_entry = provider_location.split("@")
for entry in dict_entry:
key, value = entry.split('^', 1)
if value == 'None':
value = None
provider_location_dict[key] = value
return provider_location_dict
def create_export(self, context, volume):
model_update = volume['provider_location']
LOG.info(_LI('Create export done from Volume %(volume_id)s.'), {
'volume_id': volume['id']})
return {'provider_location': model_update}
def get_volume_stats(self, refresh=False):
"""Get volume status.
If refresh is True, update the status first.
"""
if self._volume_stats is None or refresh:
self._update_volume_stats()
LOG.info(_LI(
'Successfully update volume stats. '
'backend: %(volume_backend_name)s, '
'vendor: %(vendor_name)s, '
'driver version: %(driver_version)s, '
'storage protocol: %(storage_protocol)s.'), self._volume_stats)
return self._volume_stats
def _update_volume_stats(self):
backend_name = self.configuration.safe_get('volume_backend_name')
data = {
'volume_backend_name': backend_name,
'vendor_name': 'Infortrend',
'driver_version': self.VERSION,
'storage_protocol': self.protocol,
'pools': self._update_pools_stats(),
}
self._volume_stats = data
def _update_pools_stats(self):
enable_specs_dict = self._get_enable_specs_on_array()
if 'Thin Provisioning' in enable_specs_dict.keys():
provisioning = 'thin'
provisioning_support = True
else:
provisioning = 'full'
provisioning_support = False
rc, part_list = self._execute('ShowPartition', '-l')
rc, pools_info = self._execute('ShowLV')
pools = []
for pool in pools_info:
if pool['Name'] in self.pool_list:
total_space = float(pool['Size'].split(' ', 1)[0])
available_space = float(pool['Available'].split(' ', 1)[0])
total_capacity_gb = round(mi_to_gi(total_space), 2)
free_capacity_gb = round(mi_to_gi(available_space), 2)
provisioning_factor = self.configuration.safe_get(
'max_over_subscription_ratio')
provisioned_space = self._get_provisioned_space(
pool['ID'], part_list)
provisioned_capacity_gb = round(mi_to_gi(provisioned_space), 2)
new_pool = {
'pool_name': pool['Name'],
'pool_id': pool['ID'],
'total_capacity_gb': total_capacity_gb,
'free_capacity_gb': free_capacity_gb,
'reserved_percentage': 0,
'QoS_support': False,
'provisioned_capacity_gb': provisioned_capacity_gb,
'max_over_subscription_ratio': provisioning_factor,
'thin_provisioning_support': provisioning_support,
'thick_provisioning_support': True,
'infortrend_provisioning': provisioning,
}
pools.append(new_pool)
return pools
def _get_provisioned_space(self, pool_id, part_list):
provisioning_space = 0
for entry in part_list:
if entry['LV-ID'] == pool_id:
provisioning_space += int(entry['Size'])
return provisioning_space
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
snapshot_id = snapshot['id'].replace('-', '')
volume_id = snapshot['volume_id'].replace('-', '')
LOG.debug('Create Snapshot %(snapshot)s volume %(volume)s.',
{'snapshot': snapshot_id, 'volume': volume_id})
model_update = {}
part_id = self._get_part_id(volume_id)
if part_id is None:
msg = _('Failed to get Partition ID for volume %(volume_id)s.') % {
'volume_id': volume_id}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
@lockutils.synchronized(
'snapshot-' + part_id, 'infortrend-', True)
def do_create_snapshot():
self._execute('CreateSnapshot', 'part', part_id)
rc, tmp_snapshot_list = self._execute(
'ShowSnapshot', 'part=%s' % part_id)
return tmp_snapshot_list
snapshot_list = do_create_snapshot()
LOG.info(_LI(
'Create success. '
'Snapshot: %(snapshot)s, '
'Snapshot ID in raid: %(raid_snapshot_id)s, '
'volume: %(volume)s.'), {
'snapshot': snapshot_id,
'raid_snapshot_id': snapshot_list[-1]['SI-ID'],
'volume': volume_id})
model_update['provider_location'] = snapshot_list[-1]['SI-ID']
return model_update
def delete_snapshot(self, snapshot):
"""Delete the snapshot."""
snapshot_id = snapshot['id'].replace('-', '')
volume_id = snapshot['volume_id'].replace('-', '')
LOG.debug('Delete Snapshot %(snapshot)s volume %(volume)s.',
{'snapshot': snapshot_id, 'volume': volume_id})
raid_snapshot_id = self._get_raid_snapshot_id(snapshot)
if raid_snapshot_id:
rc, replica_list = self._execute('ShowReplica', '-l')
has_pair = self._delete_pair_with_snapshot(
raid_snapshot_id, replica_list)
if not has_pair:
self._execute('DeleteSnapshot', raid_snapshot_id, '-y')
LOG.info(_LI('Delete Snapshot %(snapshot_id)s completed.'), {
'snapshot_id': snapshot_id})
else:
msg = _('Failed to delete snapshot '
'for snapshot_id: %s '
'because it has pair.') % snapshot_id
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
msg = _(
'Failed to get Raid Snapshot ID '
'from Snapshot %(snapshot_id)s.') % {
'snapshot_id': snapshot_id}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def _get_raid_snapshot_id(self, snapshot):
if 'provider_location' not in snapshot:
LOG.warning(_LW(
'Failed to get Raid Snapshot ID and '
'did not store in snapshot.'))
return
return snapshot['provider_location']
def _delete_pair_with_snapshot(self, snapshot_id, replica_list):
has_pair = False
for entry in replica_list:
if entry['Source'] == snapshot_id:
if not self._check_replica_completed(entry):
has_pair = True
LOG.warning(_LW(
                        'Snapshot still %(status)s. Cannot delete snapshot.'), {
'status': entry['Status']})
else:
self._execute('DeleteReplica', entry['Pair-ID'], '-y')
return has_pair
def _get_part_id(self, volume_id, pool_id=None, part_list=None):
if part_list is None:
rc, part_list = self._execute('ShowPartition')
for entry in part_list:
if pool_id is None:
if entry['Name'] == volume_id:
return entry['ID']
else:
if entry['Name'] == volume_id and entry['LV-ID'] == pool_id:
return entry['ID']
return
def create_volume_from_snapshot(self, volume, snapshot):
raid_snapshot_id = self._get_raid_snapshot_id(snapshot)
if raid_snapshot_id is None:
msg = _('Failed to get Raid Snapshot ID '
'from snapshot: %(snapshot_id)s.') % {
'snapshot_id': snapshot['id']}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
src_part_id = self._check_snapshot_filled_block(raid_snapshot_id)
model_update = self._create_volume_from_snapshot_id(
volume, raid_snapshot_id, src_part_id)
LOG.info(_LI(
'Create Volume %(volume_id)s from '
'snapshot %(snapshot_id)s completed.'), {
'volume_id': volume['id'],
'snapshot_id': snapshot['id']})
return model_update
def _check_snapshot_filled_block(self, raid_snapshot_id):
rc, snapshot_list = self._execute(
'ShowSnapshot', 'si=%s' % raid_snapshot_id, '-l')
if snapshot_list and snapshot_list[0]['Total-filled-block'] == '0':
return snapshot_list[0]['Partition-ID']
return
def _create_volume_from_snapshot_id(
self, dst_volume, raid_snapshot_id, src_part_id):
# create the target volume for volume copy
dst_volume_id = dst_volume['id'].replace('-', '')
self._create_partition_by_default(dst_volume)
dst_part_id = self._get_part_id(dst_volume_id)
# prepare return value
system_id = self._get_system_id(self.ip)
model_dict = {
'system_id': system_id,
'partition_id': dst_part_id,
}
model_info = self._concat_provider_location(model_dict)
model_update = {"provider_location": model_info}
if src_part_id:
# clone the volume from the origin partition
commands = (
'Cinder-Snapshot', 'part', src_part_id, 'part', dst_part_id
)
self._execute('CreateReplica', *commands)
self._wait_replica_complete(dst_part_id)
# clone the volume from the snapshot
commands = (
'Cinder-Snapshot', 'si', raid_snapshot_id, 'part', dst_part_id
)
self._execute('CreateReplica', *commands)
self._wait_replica_complete(dst_part_id)
return model_update
@lockutils.synchronized('connection', 'infortrend-', True)
def initialize_connection(self, volume, connector):
if self.protocol == 'iSCSI':
multipath = connector.get('multipath', False)
return self._initialize_connection_iscsi(
volume, connector, multipath)
elif self.protocol == 'FC':
return self._initialize_connection_fc(
volume, connector)
else:
msg = _('Unknown protocol: %(protocol)s.') % {
'protocol': self.protocol}
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
def _initialize_connection_fc(self, volume, connector):
self._init_map_info(True)
self._update_map_info(True)
map_lun, target_wwpns, initiator_target_map = (
self._do_fc_connection(volume, connector)
)
properties = self._generate_fc_connection_properties(
map_lun, target_wwpns, initiator_target_map)
LOG.info(_LI('Successfully initialized connection. '
'target_wwn: %(target_wwn)s, '
'initiator_target_map: %(initiator_target_map)s, '
'lun: %(target_lun)s.'), properties['data'])
return properties
def _do_fc_connection(self, volume, connector):
volume_id = volume['id'].replace('-', '')
target_wwpns = []
partition_data = self._extract_all_provider_location(
volume['provider_location'])
part_id = partition_data['partition_id']
if part_id is None:
part_id = self._get_part_id(volume_id)
wwpn_list, wwpn_channel_info = self._get_wwpn_list()
initiator_target_map, target_wwpns = self._build_initiator_target_map(
connector, wwpn_list)
map_lun = self._get_common_lun_map_id(wwpn_channel_info)
for initiator_wwpn in initiator_target_map:
for target_wwpn in initiator_target_map[initiator_wwpn]:
channel_id = wwpn_channel_info[target_wwpn.upper()]['channel']
controller = wwpn_channel_info[target_wwpn.upper()]['slot']
self._create_map_with_lun_filter(
part_id, channel_id, map_lun, initiator_wwpn,
controller=controller)
return map_lun, target_wwpns, initiator_target_map
def _build_initiator_target_map(self, connector, all_target_wwpns):
initiator_target_map = {}
target_wwpns = []
if self.fc_lookup_service:
lookup_map = (
self.fc_lookup_service.get_device_mapping_from_network(
connector['wwpns'], all_target_wwpns)
)
for fabric_name in lookup_map:
fabric = lookup_map[fabric_name]
target_wwpns.extend(fabric['target_port_wwn_list'])
for initiator in fabric['initiator_port_wwn_list']:
initiator_target_map[initiator] = (
fabric['target_port_wwn_list']
)
else:
initiator_wwns = connector['wwpns']
target_wwpns = all_target_wwpns
for initiator in initiator_wwns:
initiator_target_map[initiator] = all_target_wwpns
return initiator_target_map, target_wwpns
def _generate_fc_connection_properties(
self, lun_id, target_wwpns, initiator_target_map):
return {
'driver_volume_type': 'fibre_channel',
'data': {
'target_discovered': True,
'target_lun': int(lun_id),
'target_wwn': target_wwpns,
'access_mode': 'rw',
'initiator_target_map': initiator_target_map,
},
}
@log_func
def _initialize_connection_iscsi(self, volume, connector, multipath):
self._init_map_info(multipath)
self._update_map_info(multipath)
volume_id = volume['id'].replace('-', '')
partition_data = self._extract_all_provider_location(
volume['provider_location']) # system_id, part_id
part_id = partition_data['partition_id']
if part_id is None:
part_id = self._get_part_id(volume_id)
self._set_host_iqn(connector['initiator'])
map_chl, map_lun, mcs_id = self._get_mapping_info(multipath)
lun_id = map_lun[0]
if self.iscsi_multipath or multipath:
channel_id = self._create_map_with_mcs(
part_id, map_chl['slot_a'], lun_id, connector['initiator'])
else:
channel_id = map_chl['slot_a'][0]
self._create_map_with_lun_filter(
part_id, channel_id, lun_id, connector['initiator'])
rc, net_list = self._execute('ShowNet')
ip = self._get_ip_by_channel(channel_id, net_list)
if ip is None:
msg = _(
'Failed to get ip on Channel %(channel_id)s '
'with volume: %(volume_id)s.') % {
'channel_id': channel_id, 'volume_id': volume_id}
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
partition_data = self._combine_channel_lun_target_id(
partition_data, mcs_id, lun_id, channel_id)
property_value = [{
'lun_id': partition_data['lun_id'],
'iqn': self._generate_iqn(partition_data),
'ip': ip,
'port': self.constants['ISCSI_PORT'],
}]
properties = self._generate_iscsi_connection_properties(
property_value, volume)
LOG.info(_LI('Successfully initialized connection '
'with volume: %(volume_id)s.'), properties['data'])
return properties
@log_func
def _combine_channel_lun_target_id(
self, partition_data, mcs_id, lun_id, channel_id):
target_id = self.target_dict['slot_a'][channel_id]
partition_data['mcs_id'] = mcs_id
partition_data['lun_id'] = lun_id
partition_data['target_id'] = target_id
partition_data['slot_id'] = 1
return partition_data
def _set_host_iqn(self, host_iqn):
rc, iqn_list = self._execute('ShowIQN')
check_iqn_exist = False
for entry in iqn_list:
if entry['IQN'] == host_iqn:
check_iqn_exist = True
if not check_iqn_exist:
self._execute(
'CreateIQN', host_iqn, self._truncate_host_name(host_iqn))
def _truncate_host_name(self, iqn):
if len(iqn) > 16:
return iqn[-16:]
else:
return iqn
@log_func
def _generate_iqn(self, partition_data):
return self.iqn % (
partition_data['system_id'],
partition_data['mcs_id'],
partition_data['target_id'],
partition_data['slot_id'])
@log_func
def _get_ip_by_channel(
self, channel_id, net_list, controller='slot_a'):
slot_name = 'slotA' if controller == 'slot_a' else 'slotB'
for entry in net_list:
if entry['ID'] == channel_id and entry['Slot'] == slot_name:
return entry['IPv4']
return
def _get_wwpn_list(self):
rc, wwn_list = self._execute('ShowWWN')
wwpn_list = []
wwpn_channel_info = {}
for entry in wwn_list:
channel_id = entry['CH']
if 'BID' in entry['ID']:
slot_name = 'slot_b'
else:
slot_name = 'slot_a'
if channel_id in self.map_dict[slot_name]:
wwpn_list.append(entry['WWPN'])
wwpn_channel_info[entry['WWPN']] = {
'channel': channel_id,
'slot': slot_name,
}
return wwpn_list, wwpn_channel_info
@log_func
def _generate_iscsi_connection_properties(
self, property_value, volume):
properties = {}
discovery_exist = False
specific_property = property_value[0]
discovery_ip = '%s:%s' % (
specific_property['ip'], specific_property['port'])
discovery_iqn = specific_property['iqn']
if self._do_iscsi_discovery(discovery_iqn, discovery_ip):
properties['target_portal'] = discovery_ip
properties['target_iqn'] = discovery_iqn
properties['target_lun'] = int(specific_property['lun_id'])
discovery_exist = True
if not discovery_exist:
msg = _(
'Could not find iSCSI target '
'for volume: %(volume_id)s.') % {
'volume_id': volume['id']}
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
properties['target_discovered'] = discovery_exist
properties['volume_id'] = volume['id']
if 'provider_auth' in volume:
auth = volume['provider_auth']
if auth:
(auth_method, auth_username, auth_secret) = auth.split()
properties['auth_method'] = auth_method
properties['auth_username'] = auth_username
properties['auth_password'] = auth_secret
return {
'driver_volume_type': 'iscsi',
'data': properties,
}
@log_func
def _do_iscsi_discovery(self, target_iqn, target_ip):
rc, out = self._execute(
'ExecuteCommand',
'iscsiadm', '-m', 'discovery',
'-t', 'sendtargets', '-p',
target_ip,
run_as_root=True)
if rc != 0:
LOG.error(_LE(
                'Cannot run iSCSI discovery on %(target_ip)s with %(target_iqn)s.'), {
'target_ip': target_ip, 'target_iqn': target_iqn})
return False
else:
for target in out.splitlines():
if target_iqn in target and target_ip in target:
return True
return False
def extend_volume(self, volume, new_size):
volume_id = volume['id'].replace('-', '')
part_id = self._extract_specific_provider_location(
volume['provider_location'], 'partition_id')
if part_id is None:
part_id = self._get_part_id(volume_id)
expand_size = new_size - volume['size']
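        # If the difference is fractional, e.g. 1.5 GB, the branch below
        # converts it to whole mebibytes: 'size=1536MB' (illustrative value).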
if '.' in ('%s' % expand_size):
expand_size = round(gi_to_mi(float(expand_size)))
expand_command = 'size=%sMB' % expand_size
else:
expand_command = 'size=%sGB' % expand_size
self._execute('SetPartition', 'expand', part_id, expand_command)
LOG.info(_LI(
'Successfully extended volume %(volume_id)s to size %(size)s.'), {
'volume_id': volume['id'], 'size': new_size})
@lockutils.synchronized('connection', 'infortrend-', True)
def terminate_connection(self, volume, connector):
volume_id = volume['id'].replace('-', '')
multipath = connector.get('multipath', False)
conn_info = None
part_id = self._extract_specific_provider_location(
volume['provider_location'], 'partition_id')
if part_id is None:
part_id = self._get_part_id(volume_id)
self._execute('DeleteMap', 'part', part_id, '-y')
if self.protocol == 'iSCSI':
self._execute(
'DeleteIQN', self._truncate_host_name(connector['initiator']))
map_info = self._update_map_info(multipath)
if self.protocol == 'FC' and self.fc_lookup_service:
lun_map_exist = self._check_initiator_has_lun_map(
connector['wwpns'], map_info)
if not lun_map_exist:
conn_info = {'driver_volume_type': 'fibre_channel',
'data': {}}
wwpn_list, wwpn_channel_info = self._get_wwpn_list()
init_target_map, target_wwpns = (
self._build_initiator_target_map(connector, wwpn_list)
)
conn_info['data']['initiator_target_map'] = init_target_map
LOG.info(_LI(
'Successfully terminated connection for volume: %(volume_id)s.'), {
'volume_id': volume['id']})
return conn_info
def migrate_volume(self, volume, host, new_extraspecs=None):
is_valid, dst_pool_id = (
self._is_valid_for_storage_assisted_migration(host)
)
if not is_valid:
return (False, None)
model_dict = self._migrate_volume_with_pool(
volume, dst_pool_id, new_extraspecs)
model_update = {
"provider_location": self._concat_provider_location(model_dict),
}
LOG.info(_LI('Migrate Volume %(volume_id)s completed.'), {
'volume_id': volume['id']})
return (True, model_update)
def _is_valid_for_storage_assisted_migration(self, host):
if 'pool_id' not in host['capabilities']:
LOG.warning(_LW('Failed to get target pool id.'))
return (False, None)
dst_pool_id = host['capabilities']['pool_id']
if dst_pool_id is None:
return (False, None)
return (True, dst_pool_id)
def _migrate_volume_with_pool(self, volume, dst_pool_id, extraspecs=None):
volume_id = volume['id'].replace('-', '')
# Get old partition data for delete map
partition_data = self._extract_all_provider_location(
volume['provider_location'])
src_part_id = partition_data['partition_id']
if src_part_id is None:
src_part_id = self._get_part_id(volume_id)
# Create New Partition
self._create_partition_with_pool(volume, dst_pool_id, extraspecs)
dst_part_id = self._get_part_id(
volume_id, pool_id=dst_pool_id)
if dst_part_id is None:
msg = _('Failed to get new part id in new pool: %(pool_id)s.') % {
'pool_id': dst_pool_id}
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
# Volume Mirror from old partition into new partition
commands = (
'Cinder-Migrate', 'part', src_part_id, 'part', dst_part_id,
'type=mirror'
)
self._execute('CreateReplica', *commands)
self._wait_replica_complete(dst_part_id)
self._execute('DeleteMap', 'part', src_part_id, '-y')
self._execute('DeletePartition', src_part_id, '-y')
model_dict = {
'system_id': partition_data['system_id'],
'partition_id': dst_part_id,
}
return model_dict
def _wait_replica_complete(self, part_id):
start_time = int(time.time())
timeout = self._replica_timeout
def _inner():
check_done = False
try:
rc, replica_list = self._execute('ShowReplica', '-l')
for entry in replica_list:
if (entry['Target'] == part_id and
self._check_replica_completed(entry)):
check_done = True
self._execute('DeleteReplica', entry['Pair-ID'], '-y')
except Exception:
check_done = False
LOG.exception(_LE('Cannot detect replica status.'))
if check_done:
raise loopingcall.LoopingCallDone()
if int(time.time()) - start_time > timeout:
msg = _('Timed out waiting for replica to complete.')
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
timer = loopingcall.FixedIntervalLoopingCall(_inner)
timer.start(interval=10).wait()
def _check_extraspec_value(self, extraspec, validvalues):
if not extraspec:
LOG.debug("The given extraspec is None.")
elif extraspec not in validvalues:
msg = _("The extraspec: %(extraspec)s is not valid.") % {
'extraspec': extraspec}
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
def _get_enable_specs_on_array(self):
enable_specs = {}
rc, license_list = self._execute('ShowLicense')
for key, value in license_list.items():
if value['Support']:
enable_specs[key] = value
return enable_specs
def manage_existing_get_size(self, volume, ref):
"""Return size of volume to be managed by manage_existing."""
volume_name = self._get_existing_volume_ref_name(ref)
part_entry = self._get_latter_volume_dict(volume_name)
if part_entry is None:
msg = _('Specified logical volume does not exist.')
LOG.error(msg)
raise exception.ManageExistingInvalidReference(
existing_ref=ref, reason=msg)
rc, map_info = self._execute('ShowMap', 'part=%s' % part_entry['ID'])
if len(map_info) != 0:
msg = _('The specified volume is mapped to a host.')
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
return int(math.ceil(mi_to_gi(float(part_entry['Size']))))
def manage_existing(self, volume, ref):
volume_name = self._get_existing_volume_ref_name(ref)
volume_id = volume['id'].replace('-', '')
part_entry = self._get_latter_volume_dict(volume_name)
if part_entry is None:
msg = _('Specified logical volume does not exist.')
LOG.error(msg)
raise exception.ManageExistingInvalidReference(
existing_ref=ref, reason=msg)
self._execute('SetPartition', part_entry['ID'], 'name=%s' % volume_id)
model_dict = {
'system_id': self._get_system_id(self.ip),
'partition_id': part_entry['ID'],
}
model_update = {
"provider_location": self._concat_provider_location(model_dict),
}
LOG.info(_LI('Rename Volume %(volume_id)s completed.'), {
'volume_id': volume['id']})
return model_update
def _get_existing_volume_ref_name(self, ref):
volume_name = None
if 'source-name' in ref:
volume_name = ref['source-name']
elif 'source-id' in ref:
volume_name = self._get_unmanaged_volume_name(
ref['source-id'].replace('-', ''))
else:
msg = _('Reference must contain source-id or source-name.')
LOG.error(msg)
raise exception.ManageExistingInvalidReference(
existing_ref=ref, reason=msg)
return volume_name
def unmanage(self, volume):
volume_id = volume['id'].replace('-', '')
part_id = self._extract_specific_provider_location(
volume['provider_location'], 'partition_id')
if part_id is None:
part_id = self._get_part_id(volume_id)
new_vol_name = self._get_unmanaged_volume_name(volume_id)
self._execute('SetPartition', part_id, 'name=%s' % new_vol_name)
LOG.info(_LI('Unmanage volume %(volume_id)s completed.'), {
'volume_id': volume_id})
def _get_unmanaged_volume_name(self, volume_id):
return self.unmanaged_prefix % volume_id[:-17]
def _get_specific_volume_dict(self, volume_id):
ref_dict = {}
rc, part_list = self._execute('ShowPartition')
for entry in part_list:
if entry['Name'] == volume_id:
ref_dict = entry
break
return ref_dict
def _get_latter_volume_dict(self, volume_name):
rc, part_list = self._execute('ShowPartition', '-l')
latest_timestamps = 0
ref_dict = None
for entry in part_list:
if entry['Name'] == volume_name:
timestamps = self._get_part_timestamps(
entry['Creation-time'])
if timestamps > latest_timestamps:
ref_dict = entry
latest_timestamps = timestamps
return ref_dict
def _get_part_timestamps(self, time_string):
"""Transform 'Sat, Jan 11 22:18:40 2020' into timestamps with sec."""
first, value = time_string.split(',')
timestamps = time.mktime(
time.strptime(value, " %b %d %H:%M:%S %Y"))
return timestamps
def _check_volume_attachment(self, volume):
if not volume['volume_attachment']:
return False
return True
def _check_volume_has_snapshot(self, volume):
part_id = self._extract_specific_provider_location(
volume['provider_location'], 'partition_id')
rc, snapshot_list = self._execute('ShowSnapshot', 'part=%s' % part_id)
if len(snapshot_list) > 0:
return True
return False
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type."""
if volume['host'] != host['host']:
if self._check_volume_attachment(volume):
LOG.warning(_LW(
'Volume %(volume_id)s cannot be retyped '
'during attachment.'), {
'volume_id': volume['id']})
return False
if self._check_volume_has_snapshot(volume):
LOG.warning(_LW(
'Volume %(volume_id)s cannot be retyped '
'because it has snapshots.'), {
'volume_id': volume['id']})
return False
new_extraspecs = new_type['extra_specs']
rc, model_update = self.migrate_volume(
volume, host, new_extraspecs)
if rc:
LOG.info(_LI(
'Retype Volume %(volume_id)s is done '
'and migrated to pool %(pool_id)s.'), {
'volume_id': volume['id'],
'pool_id': host['capabilities']['pool_id']})
return (rc, model_update)
else:
if ('infortrend_provisioning' in diff['extra_specs'] and
(diff['extra_specs']['infortrend_provisioning'][0] !=
diff['extra_specs']['infortrend_provisioning'][1])):
LOG.warning(_LW(
'The provisioning: %(provisioning)s '
'is not valid.'), {
'provisioning':
diff['extra_specs']['infortrend_provisioning'][1]})
return False
LOG.info(_LI('Retype Volume %(volume_id)s is completed.'), {
'volume_id': volume['id']})
return True
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
"""Return model update for migrated volume."""
src_volume_id = volume['id'].replace('-', '')
dst_volume_id = new_volume['id'].replace('-', '')
part_id = self._extract_specific_provider_location(
new_volume['provider_location'], 'partition_id')
if part_id is None:
part_id = self._get_part_id(dst_volume_id)
LOG.debug(
'Rename partition %(part_id)s '
'into new volume %(new_volume)s.', {
'part_id': part_id, 'new_volume': dst_volume_id})
try:
self._execute('SetPartition', part_id, 'name=%s' % src_volume_id)
except exception.InfortrendCliException:
LOG.exception(_LE('Failed to rename %(new_volume)s into '
'%(volume)s.'), {'new_volume': new_volume['id'],
'volume': volume['id']})
return {'_name_id': new_volume['_name_id'] or new_volume['id']}
LOG.info(_LI('Update migrated volume %(new_volume)s completed.'), {
'new_volume': new_volume['id']})
model_update = {
'_name_id': None,
'provider_location': new_volume['provider_location'],
}
return model_update
|
SCPR/firetracker
|
refs/heads/master
|
calfire_tracker/migrations/0010_auto__add_wildfiretweet.py
|
1
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'WildfireTweet'
db.create_table('calfire_tracker_wildfiretweet', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('twitter_hashtag', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='calwildfire_twitter_hashtag', null=True, to=orm['calfire_tracker.CalWildfire'])),
('tweet_text', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True, blank=True)),
('tweet_created_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('tweet_id', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True, blank=True)),
('tweet_screen_name', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True, blank=True)),
('tweet_profile_image_url', self.gf('django.db.models.fields.URLField')(max_length=1024, null=True, blank=True)),
))
db.send_create_signal('calfire_tracker', ['WildfireTweet'])
def backwards(self, orm):
# Deleting model 'WildfireTweet'
db.delete_table('calfire_tracker_wildfiretweet')
models = {
'calfire_tracker.calwildfire': {
'Meta': {'object_name': 'CalWildfire'},
'acres_burned': ('django.db.models.fields.IntegerField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'administrative_unit': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'air_quality_rating': ('django.db.models.fields.IntegerField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'asset_host_image_id': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'cause': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'computed_location': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'conditions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'containment_percent': ('django.db.models.fields.IntegerField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'cooperating_agencies': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'county_slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'created_fire_id': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'current_situation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'damage_assessment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'date_time_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'evacuations': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fire_name': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'fire_slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'injuries': ('django.db.models.fields.CharField', [], {'max_length': '2024', 'null': 'True', 'blank': 'True'}),
'last_scraped': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'location_geocode_error': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'location_latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'location_longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'more_info': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'phone_numbers': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'promoted_fire': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'road_closures': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'school_closures': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'structures_destroyed': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'structures_threatened': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'total_airtankers': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'total_dozers': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'total_fire_crews': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'total_fire_engines': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'total_fire_personnel': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'total_helicopters': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'total_water_tenders': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'training': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'twitter_hashtag': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'})
},
'calfire_tracker.wildfiretweet': {
'Meta': {'object_name': 'WildfireTweet'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tweet_created_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'tweet_id': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'tweet_profile_image_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'tweet_screen_name': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'tweet_text': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'twitter_hashtag': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'calwildfire_twitter_hashtag'", 'null': 'True', 'to': "orm['calfire_tracker.CalWildfire']"})
},
'calfire_tracker.wildfireupdate': {
'Meta': {'object_name': 'WildfireUpdate'},
'date_time_update': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'fire_name': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'calwildfire_fire_name'", 'null': 'True', 'to': "orm['calfire_tracker.CalWildfire']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'update_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['calfire_tracker']
|
TheKK/Shedskin
|
refs/heads/master
|
setup.py
|
3
|
#!/usr/bin/env python
from distutils.core import setup, Command
import os
class run_tests(Command):
description = "run testsuite"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
self.cwd = os.getcwd()
ss_dir = os.path.abspath(__file__).split(os.path.sep)[:-1]
ss_dir.append('tests')
self.tests_dir = os.path.sep.join(ss_dir)
def run(self):
os.chdir(self.tests_dir)
os.system('./run.py')
os.chdir(self.cwd)
setup(name='shedskin',
version='0.9.4',
description='Shed Skin is an experimental compiler that can translate pure, but implicitly statically typed, Python programs into optimized C++. It can generate stand-alone programs or extension modules that can be imported and used in larger Python programs.',
url='http://code.google.com/p/shedskin/',
scripts=['scripts/shedskin'],
cmdclass={'test':run_tests},
packages=['shedskin'],
package_data={'shedskin': ['lib/*.cpp', 'lib/*.hpp', 'lib/builtin/*.cpp', 'lib/builtin/*.hpp', 'lib/*.py', 'lib/os/*.cpp', 'lib/os/*.hpp', 'lib/os/*.py', 'FLAGS*', 'illegal']},
)
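# A minimal usage sketch, assuming the 'tests' directory (containing an
# executable run.py) described by the run_tests command above:
#     python setup.py test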
|
scripni/rethinkdb
|
refs/heads/next
|
external/v8_3.30.33.16/build/gyp/test/hello/gyptest-regyp.py
|
268
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that Makefiles get rebuilt when a source gyp file changes.
"""
import TestGyp
# Regenerating build files when a gyp file changes is currently only supported
# by the make generator.
test = TestGyp.TestGyp(formats=['make'])
test.run_gyp('hello.gyp')
test.build('hello.gyp', test.ALL)
test.run_built_executable('hello', stdout="Hello, world!\n")
# Sleep so that the changed gyp file will have a newer timestamp than the
# previously generated build files.
test.sleep()
test.write('hello.gyp', test.read('hello2.gyp'))
test.build('hello.gyp', test.ALL)
test.run_built_executable('hello', stdout="Hello, two!\n")
test.pass_test()
|
admiraltoad/ScotchPy
|
refs/heads/master
|
scripts/unpackfiles.py
|
1
|
"""
Unpack Files
"""
import os, sys, shutil, errno
from ScotchPy.application import Application, get_root_directory
from ScotchPy.utils import folder_utils, file_utils
class UnpackFilesApp(Application):
def __init__(self):
super(UnpackFilesApp, self).__init__("Unpack Files")
def run(self):
""" Run the application instance in the calling directory. """
self.unpackfiles(get_root_directory())
def unpackfiles(self, search_directory):
""" Move all files in subdirectories under [search_directory] to root. """
file_utils.remove_useless_files(search_directory)
for root, directories, filenames in os.walk(search_directory):
for filename in filenames:
source_filename = os.path.join(root, filename)
if os.path.isfile(source_filename):
destination = os.path.join(search_directory, filename)
file_utils.move_file(source_filename, destination)
self.log.write("moved_from:{0};moved_to:{1};\n".format(source_filename, destination))
for directory in directories:
subdirectory = os.path.join(root, directory)
folder_utils.remove_if_empty(subdirectory)
folder_utils.remove_if_empty(search_directory)
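# A minimal sketch of the effect of unpackfiles() (paths are hypothetical):
#     before: root/a/file1.txt, root/b/c/file2.txt
#     after:  root/file1.txt, root/file2.txt, with the emptied a/, b/c/ and b/
#             directories removed.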
if __name__ == "__main__":
try:
main = UnpackFilesApp()
main.run()
sys.exit(0)
except Exception as ex:
print("Error:", str(ex), "\n")
raise
sys.exit(-1)
|
MoritzS/django
|
refs/heads/master
|
tests/flatpages_tests/test_forms.py
|
43
|
from django.conf import settings
from django.contrib.flatpages.forms import FlatpageForm
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from django.test import TestCase, modify_settings, override_settings
from django.utils import translation
@modify_settings(INSTALLED_APPS={'append': ['django.contrib.flatpages', ]})
@override_settings(SITE_ID=1)
class FlatpageAdminFormTests(TestCase):
@classmethod
def setUpTestData(cls):
# don't use the manager because we want to ensure the site exists
# with pk=1, regardless of whether or not it already exists.
cls.site1 = Site(pk=1, domain='example.com', name='example.com')
cls.site1.save()
def setUp(self):
# Site fields cache needs to be cleared after flatpages is added to
# INSTALLED_APPS
Site._meta._expire_cache()
self.form_data = {
'title': "A test page",
'content': "This is a test",
'sites': [settings.SITE_ID],
}
def test_flatpage_admin_form_url_validation(self):
"The flatpage admin form correctly validates urls"
self.assertTrue(FlatpageForm(data=dict(url='/new_flatpage/', **self.form_data)).is_valid())
self.assertTrue(FlatpageForm(data=dict(url='/some.special~chars/', **self.form_data)).is_valid())
self.assertTrue(FlatpageForm(data=dict(url='/some.very_special~chars-here/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a space/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a % char/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a ! char/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a & char/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a ? char/', **self.form_data)).is_valid())
def test_flatpage_requires_leading_slash(self):
form = FlatpageForm(data=dict(url='no_leading_slash/', **self.form_data))
with translation.override('en'):
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['url'], ["URL is missing a leading slash."])
@override_settings(APPEND_SLASH=True, MIDDLEWARE=['django.middleware.common.CommonMiddleware'])
def test_flatpage_requires_trailing_slash_with_append_slash(self):
form = FlatpageForm(data=dict(url='/no_trailing_slash', **self.form_data))
with translation.override('en'):
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['url'], ["URL is missing a trailing slash."])
@override_settings(APPEND_SLASH=False, MIDDLEWARE=['django.middleware.common.CommonMiddleware'])
def test_flatpage_doesnt_requires_trailing_slash_without_append_slash(self):
form = FlatpageForm(data=dict(url='/no_trailing_slash', **self.form_data))
self.assertTrue(form.is_valid())
def test_flatpage_admin_form_url_uniqueness_validation(self):
"The flatpage admin form correctly enforces url uniqueness among flatpages of the same site"
data = dict(url='/myflatpage1/', **self.form_data)
FlatpageForm(data=data).save()
f = FlatpageForm(data=data)
with translation.override('en'):
self.assertFalse(f.is_valid())
self.assertEqual(
f.errors,
{'__all__': ['Flatpage with url /myflatpage1/ already exists for site example.com']})
def test_flatpage_admin_form_edit(self):
"""
Existing flatpages can be edited in the admin form without triggering
the url-uniqueness validation.
"""
existing = FlatPage.objects.create(
url="/myflatpage1/", title="Some page", content="The content")
existing.sites.add(settings.SITE_ID)
data = dict(url='/myflatpage1/', **self.form_data)
f = FlatpageForm(data=data, instance=existing)
self.assertTrue(f.is_valid(), f.errors)
updated = f.save()
self.assertEqual(updated.title, "A test page")
def test_flatpage_nosites(self):
data = dict(url='/myflatpage1/', **self.form_data)
data.update({'sites': ''})
f = FlatpageForm(data=data)
self.assertFalse(f.is_valid())
self.assertEqual(f.errors, {'sites': [translation.gettext('This field is required.')]})
|
40223144/2015cdafinal
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/antigravity.py
|
917
|
import webbrowser
import hashlib
webbrowser.open("http://xkcd.com/353/")
def geohash(latitude, longitude, datedow):
'''Compute geohash() using the Munroe algorithm.
>>> geohash(37.421542, -122.085589, b'2005-05-26-10458.68')
37.857713 -122.544543
'''
# http://xkcd.com/426/
h = hashlib.md5(datedow).hexdigest()
p, q = [('%f' % float.fromhex('0.' + x)) for x in (h[:16], h[16:32])]
print('%d%s %d%s' % (latitude, p[1:], longitude, q[1:]))
|
stgraber/snapcraft
|
refs/heads/master
|
integration_tests/test_clean.py
|
7
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from testtools.matchers import (
DirExists,
Not
)
import integration_tests
class CleanTestCase(integration_tests.TestCase):
def test_clean(self):
project_dir = 'simple-make'
self.run_snapcraft('snap', project_dir)
snap_dirs = ('stage', 'parts', 'prime')
for dir_ in snap_dirs:
self.assertThat(
os.path.join(project_dir, dir_), DirExists())
self.run_snapcraft('clean', project_dir)
for dir_ in snap_dirs:
self.assertThat(
os.path.join(project_dir, dir_), Not(DirExists()))
def test_clean_again(self):
# Clean a second time doesn't fail.
# Regression test for https://bugs.launchpad.net/snapcraft/+bug/1497371
project_dir = 'simple-make'
self.run_snapcraft('snap', project_dir)
self.run_snapcraft('clean', project_dir)
self.run_snapcraft('clean', project_dir)
|
arnaud-morvan/QGIS
|
refs/heads/master
|
python/plugins/processing/algs/qgis/HubDistancePoints.py
|
12
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
HubDistancePoints.py
---------------------
Date : May 2010
Copyright : (C) 2010 by Michael Minn
Email : pyqgis at michaelminn dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Michael Minn'
__date__ = 'May 2010'
__copyright__ = '(C) 2010, Michael Minn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.PyQt.QtCore import QVariant
from qgis.core import (QgsField,
QgsGeometry,
QgsFeatureSink,
QgsDistanceArea,
QgsFeature,
QgsFeatureRequest,
QgsSpatialIndex,
QgsWkbTypes,
QgsUnitTypes,
QgsProcessing,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterField,
QgsProcessingParameterEnum,
QgsProcessingParameterFeatureSink,
QgsProcessingException)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
class HubDistancePoints(QgisAlgorithm):
INPUT = 'INPUT'
HUBS = 'HUBS'
FIELD = 'FIELD'
UNIT = 'UNIT'
OUTPUT = 'OUTPUT'
LAYER_UNITS = 'LAYER_UNITS'
UNITS = [QgsUnitTypes.DistanceMeters,
QgsUnitTypes.DistanceFeet,
QgsUnitTypes.DistanceMiles,
QgsUnitTypes.DistanceKilometers,
LAYER_UNITS
]
def group(self):
return self.tr('Vector analysis')
def groupId(self):
return 'vectoranalysis'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.units = [self.tr('Meters'),
self.tr('Feet'),
self.tr('Miles'),
self.tr('Kilometers'),
self.tr('Layer units')]
self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
self.tr('Source points layer')))
self.addParameter(QgsProcessingParameterFeatureSource(self.HUBS,
self.tr('Destination hubs layer')))
self.addParameter(QgsProcessingParameterField(self.FIELD,
self.tr('Hub layer name attribute'), parentLayerParameterName=self.HUBS))
self.addParameter(QgsProcessingParameterEnum(self.UNIT,
self.tr('Measurement unit'), self.units))
self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT, self.tr('Hub distance'), QgsProcessing.TypeVectorPoint))
def name(self):
return 'distancetonearesthubpoints'
def displayName(self):
return self.tr('Distance to nearest hub (points)')
def processAlgorithm(self, parameters, context, feedback):
if parameters[self.INPUT] == parameters[self.HUBS]:
raise QgsProcessingException(
self.tr('Same layer given for both hubs and spokes'))
point_source = self.parameterAsSource(parameters, self.INPUT, context)
if point_source is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))
hub_source = self.parameterAsSource(parameters, self.HUBS, context)
if hub_source is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.HUBS))
fieldName = self.parameterAsString(parameters, self.FIELD, context)
units = self.UNITS[self.parameterAsEnum(parameters, self.UNIT, context)]
fields = point_source.fields()
fields.append(QgsField('HubName', QVariant.String))
fields.append(QgsField('HubDist', QVariant.Double))
(sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
fields, QgsWkbTypes.Point, point_source.sourceCrs())
if sink is None:
raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))
index = QgsSpatialIndex(hub_source.getFeatures(QgsFeatureRequest().setSubsetOfAttributes([]).setDestinationCrs(point_source.sourceCrs(), context.transformContext())))
distance = QgsDistanceArea()
distance.setSourceCrs(point_source.sourceCrs(), context.transformContext())
distance.setEllipsoid(context.project().ellipsoid())
# Scan source points, find nearest hub, and write to output file
features = point_source.getFeatures()
total = 100.0 / point_source.featureCount() if point_source.featureCount() else 0
for current, f in enumerate(features):
if feedback.isCanceled():
break
if not f.hasGeometry():
sink.addFeature(f, QgsFeatureSink.FastInsert)
continue
src = f.geometry().boundingBox().center()
neighbors = index.nearestNeighbor(src, 1)
ft = next(hub_source.getFeatures(QgsFeatureRequest().setFilterFid(neighbors[0]).setSubsetOfAttributes([fieldName], hub_source.fields()).setDestinationCrs(point_source.sourceCrs(), context.transformContext())))
closest = ft.geometry().boundingBox().center()
hubDist = distance.measureLine(src, closest)
if units != self.LAYER_UNITS:
hub_dist_in_desired_units = distance.convertLengthMeasurement(hubDist, units)
else:
hub_dist_in_desired_units = hubDist
attributes = f.attributes()
attributes.append(ft[fieldName])
attributes.append(hub_dist_in_desired_units)
feat = QgsFeature()
feat.setAttributes(attributes)
feat.setGeometry(QgsGeometry.fromPointXY(src))
sink.addFeature(feat, QgsFeatureSink.FastInsert)
feedback.setProgress(int(current * total))
return {self.OUTPUT: dest_id}
|
franky88/emperioanimesta
|
refs/heads/master
|
env/Lib/os.py
|
15
|
r"""OS routines for NT or Posix depending on what system we're on.
This exports:
- all functions from posix, nt or ce, e.g. unlink, stat, etc.
- os.path is either posixpath or ntpath
- os.name is either 'posix', 'nt' or 'ce'.
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator (always '.')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys, errno
import stat as st
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
"defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR",
"SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen",
"popen", "extsep"]
def _exists(name):
return name in globals()
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
# Any new dependencies of the os module and/or changes in path separator
# requires updating importlib as well.
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
__all__.append('_exit')
except ImportError:
pass
import posixpath as path
try:
from posix import _have_functions
except ImportError:
pass
import posix
__all__.extend(_get_exports_list(posix))
del posix
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
__all__.append('_exit')
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
try:
from nt import _have_functions
except ImportError:
pass
elif 'ce' in _names:
name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
__all__.append('_exit')
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
try:
from ce import _have_functions
except ImportError:
pass
else:
raise ImportError('no os specific module found')
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
if _exists("_have_functions"):
_globals = globals()
def _add(str, fn):
if (fn in _globals) and (str in _have_functions):
_set.add(_globals[fn])
_set = set()
_add("HAVE_FACCESSAT", "access")
_add("HAVE_FCHMODAT", "chmod")
_add("HAVE_FCHOWNAT", "chown")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_FUTIMESAT", "utime")
_add("HAVE_LINKAT", "link")
_add("HAVE_MKDIRAT", "mkdir")
_add("HAVE_MKFIFOAT", "mkfifo")
_add("HAVE_MKNODAT", "mknod")
_add("HAVE_OPENAT", "open")
_add("HAVE_READLINKAT", "readlink")
_add("HAVE_RENAMEAT", "rename")
_add("HAVE_SYMLINKAT", "symlink")
_add("HAVE_UNLINKAT", "unlink")
_add("HAVE_UNLINKAT", "rmdir")
_add("HAVE_UTIMENSAT", "utime")
supports_dir_fd = _set
_set = set()
_add("HAVE_FACCESSAT", "access")
supports_effective_ids = _set
_set = set()
_add("HAVE_FCHDIR", "chdir")
_add("HAVE_FCHMOD", "chmod")
_add("HAVE_FCHOWN", "chown")
_add("HAVE_FDOPENDIR", "listdir")
_add("HAVE_FEXECVE", "execve")
_set.add(stat) # fstat always works
_add("HAVE_FTRUNCATE", "truncate")
_add("HAVE_FUTIMENS", "utime")
_add("HAVE_FUTIMES", "utime")
_add("HAVE_FPATHCONF", "pathconf")
if _exists("statvfs") and _exists("fstatvfs"): # mac os x10.3
_add("HAVE_FSTATVFS", "statvfs")
supports_fd = _set
_set = set()
_add("HAVE_FACCESSAT", "access")
# Some platforms don't support lchmod(). Often the function exists
# anyway, as a stub that always returns ENOTSUP or perhaps EOPNOTSUPP.
# (No, I don't know why that's a good design.) ./configure will detect
# this and reject it--so HAVE_LCHMOD still won't be defined on such
# platforms. This is Very Helpful.
#
# However, sometimes platforms without a working lchmod() *do* have
# fchmodat(). (Examples: Linux kernel 3.2 with glibc 2.15,
# OpenIndiana 3.x.) And fchmodat() has a flag that theoretically makes
# it behave like lchmod(). So in theory it would be a suitable
# replacement for lchmod(). But when lchmod() doesn't work, fchmodat()'s
# flag doesn't work *either*. Sadly ./configure isn't sophisticated
# enough to detect this condition--it only determines whether or not
# fchmodat() minimally works.
#
# Therefore we simply ignore fchmodat() when deciding whether or not
# os.chmod supports follow_symlinks. Just checking lchmod() is
# sufficient. After all--if you have a working fchmodat(), your
# lchmod() almost certainly works too.
#
# _add("HAVE_FCHMODAT", "chmod")
_add("HAVE_FCHOWNAT", "chown")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_LCHFLAGS", "chflags")
_add("HAVE_LCHMOD", "chmod")
if _exists("lchown"): # mac os x10.3
_add("HAVE_LCHOWN", "chown")
_add("HAVE_LINKAT", "link")
_add("HAVE_LUTIMES", "utime")
_add("HAVE_LSTAT", "stat")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_UTIMENSAT", "utime")
_add("MS_WINDOWS", "stat")
supports_follow_symlinks = _set
del _set
del _have_functions
del _globals
del _add
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
# Other possible SEEK values are directly imported from posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works like
mkdir, except that any intermediate path segment (not just the rightmost)
will be created if it does not exist. If the target directory already
exists, raise an OSError if exist_ok is False. Otherwise no exception is
raised. This is recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode, exist_ok)
except FileExistsError:
# Defeats race condition when another thread created the path
pass
cdir = curdir
if isinstance(tail, bytes):
cdir = bytes(curdir, 'ASCII')
if tail == cdir: # xxx/newdir/. exists if xxx/newdir exists
return
try:
mkdir(name, mode)
except OSError:
# Cannot rely on checking for EEXIST, since the operating system
# could give priority to other errors like EACCES or EROFS
if not exist_ok or not path.isdir(name):
raise
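# A minimal usage sketch (the path is hypothetical):
#     makedirs('parent/child/leaf', exist_ok=True)  # creates all missing levels
#     makedirs('parent/child/leaf')                 # now raises FileExistsError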
def removedirs(name):
"""removedirs(name)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except OSError:
break
head, tail = path.split(head)
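# A minimal usage sketch (the path is hypothetical):
#     removedirs('a/b/c')   # removes 'a/b/c', then 'a/b' and 'a' while empty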
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
path segments of the old name will be pruned until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except OSError:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune the
search, or to impose a specific order of visiting. Modifying dirnames when
topdown is false is ineffective, since the directories in dirnames have
already been generated by the time dirnames itself is generated. No matter
the value of topdown, the list of subdirectories is retrieved before the
tuples for the directory and its subdirectories are generated.
By default errors from the os.scandir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an OSError instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
import os
from os.path import join, getsize
for root, dirs, files in os.walk('python/Lib/email'):
print(root, "consumes", end="")
print(sum([getsize(join(root, name)) for name in files]), end="")
print("bytes in", len(files), "non-directory files")
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
dirs = []
nondirs = []
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.walk
# always suppressed the exception in that case, rather than blowing up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
if name == 'nt' and isinstance(top, bytes):
scandir_it = _dummy_scandir(top)
else:
# Note that scandir is global in this module due
# to earlier import-*.
scandir_it = scandir(top)
entries = list(scandir_it)
except OSError as error:
if onerror is not None:
onerror(error)
return
for entry in entries:
try:
is_dir = entry.is_dir()
except OSError:
# If is_dir() raises an OSError, consider that the entry is not
# a directory, same behaviour as os.path.isdir().
is_dir = False
if is_dir:
dirs.append(entry.name)
else:
nondirs.append(entry.name)
if not topdown and is_dir:
# Bottom-up: recurse into sub-directory, but exclude symlinks to
# directories if followlinks is False
if followlinks:
walk_into = True
else:
try:
is_symlink = entry.is_symlink()
except OSError:
# If is_symlink() raises an OSError, consider that the
# entry is not a symbolic link, same behaviour as
# os.path.islink().
is_symlink = False
walk_into = not is_symlink
if walk_into:
yield from walk(entry.path, topdown, onerror, followlinks)
# Yield before recursion if going top down
if topdown:
yield top, dirs, nondirs
# Recurse into sub-directories
islink, join = path.islink, path.join
for dirname in dirs:
new_path = join(top, dirname)
# Issue #23605: os.path.islink() is used instead of caching
# entry.is_symlink() result during the loop on os.scandir() because
# the caller can replace the directory entry during the "yield"
# above.
if followlinks or not islink(new_path):
yield from walk(new_path, topdown, onerror, followlinks)
else:
# Yield after recursion if going bottom up
yield top, dirs, nondirs
class _DummyDirEntry:
"""Dummy implementation of DirEntry
Only used internally by os.walk(bytes). Since os.walk() doesn't need the
follow_symlinks parameter: don't implement it, always follow symbolic
links.
"""
def __init__(self, dir, name):
self.name = name
self.path = path.join(dir, name)
# Mimic FindFirstFile/FindNextFile: we should get file attributes
# while iterating on a directory
self._stat = None
self._lstat = None
try:
self.stat(follow_symlinks=False)
except OSError:
pass
def stat(self, *, follow_symlinks=True):
if follow_symlinks:
if self._stat is None:
self._stat = stat(self.path)
return self._stat
else:
if self._lstat is None:
self._lstat = stat(self.path, follow_symlinks=False)
return self._lstat
def is_dir(self):
if self._lstat is not None and not self.is_symlink():
# use the cached lstat
stat = self.stat(follow_symlinks=False)
return st.S_ISDIR(stat.st_mode)
stat = self.stat()
return st.S_ISDIR(stat.st_mode)
def is_symlink(self):
stat = self.stat(follow_symlinks=False)
return st.S_ISLNK(stat.st_mode)
def _dummy_scandir(dir):
# listdir-based implementation for bytes paths on Windows
for name in listdir(dir):
yield _DummyDirEntry(dir, name)
__all__.append("walk")
if {open, stat} <= supports_dir_fd and {listdir, stat} <= supports_fd:
def fwalk(top=".", topdown=True, onerror=None, *, follow_symlinks=False, dir_fd=None):
"""Directory tree generator.
This behaves exactly like walk(), except that it yields a 4-tuple
dirpath, dirnames, filenames, dirfd
`dirpath`, `dirnames` and `filenames` are identical to walk() output,
and `dirfd` is a file descriptor referring to the directory `dirpath`.
The advantage of fwalk() over walk() is that it's safe against symlink
races (when follow_symlinks is False).
If dir_fd is not None, it should be a file descriptor open to a directory,
and top should be relative; top will then be relative to that directory.
(dir_fd is always supported for fwalk.)
Caution:
Since fwalk() yields file descriptors, those are only valid until the
next iteration step, so you should dup() them if you want to keep them
for a longer period.
Example:
import os
for root, dirs, files, rootfd in os.fwalk('python/Lib/email'):
print(root, "consumes", end="")
print(sum([os.stat(name, dir_fd=rootfd).st_size for name in files]),
end="")
print("bytes in", len(files), "non-directory files")
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
# Note: To guard against symlink races, we use the standard
# lstat()/open()/fstat() trick.
orig_st = stat(top, follow_symlinks=False, dir_fd=dir_fd)
topfd = open(top, O_RDONLY, dir_fd=dir_fd)
try:
if (follow_symlinks or (st.S_ISDIR(orig_st.st_mode) and
path.samestat(orig_st, stat(topfd)))):
yield from _fwalk(topfd, top, topdown, onerror, follow_symlinks)
finally:
close(topfd)
def _fwalk(topfd, toppath, topdown, onerror, follow_symlinks):
# Note: This uses O(depth of the directory tree) file descriptors: if
# necessary, it can be adapted to only require O(1) FDs, see issue
# #13734.
names = listdir(topfd)
dirs, nondirs = [], []
for name in names:
try:
# Here, we don't use AT_SYMLINK_NOFOLLOW to be consistent with
# walk() which reports symlinks to directories as directories.
# We do however check for symlinks before recursing into
# a subdirectory.
if st.S_ISDIR(stat(name, dir_fd=topfd).st_mode):
dirs.append(name)
else:
nondirs.append(name)
except FileNotFoundError:
try:
# Add dangling symlinks, ignore disappeared files
if st.S_ISLNK(stat(name, dir_fd=topfd, follow_symlinks=False)
.st_mode):
nondirs.append(name)
except FileNotFoundError:
continue
if topdown:
yield toppath, dirs, nondirs, topfd
for name in dirs:
try:
orig_st = stat(name, dir_fd=topfd, follow_symlinks=follow_symlinks)
dirfd = open(name, O_RDONLY, dir_fd=topfd)
except OSError as err:
if onerror is not None:
onerror(err)
continue
try:
if follow_symlinks or path.samestat(orig_st, stat(dirfd)):
dirpath = path.join(toppath, name)
yield from _fwalk(dirfd, dirpath, topdown, onerror, follow_symlinks)
finally:
close(dirfd)
if not topdown:
yield toppath, dirs, nondirs, topfd
__all__.append("fwalk")
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execvp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execvpe(file, args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env , replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
if env is not None:
exec_func = execve
argrest = (args, env)
else:
exec_func = execv
argrest = (args,)
env = environ
head, tail = path.split(file)
if head:
exec_func(file, *argrest)
return
last_exc = saved_exc = None
saved_tb = None
path_list = get_exec_path(env)
if name != 'nt':
file = fsencode(file)
path_list = map(fsencode, path_list)
for dir in path_list:
fullname = path.join(dir, file)
try:
exec_func(fullname, *argrest)
except OSError as e:
last_exc = e
tb = sys.exc_info()[2]
if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
and saved_exc is None):
saved_exc = e
saved_tb = tb
if saved_exc:
raise saved_exc.with_traceback(saved_tb)
raise last_exc.with_traceback(tb)
def get_exec_path(env=None):
"""Returns the sequence of directories that will be searched for the
named executable (similar to a shell) when launching a process.
*env* must be an environment variable dict or None. If *env* is None,
os.environ will be used.
"""
# Use a local import instead of a global import to limit the number of
# modules loaded at startup: the os module is always loaded at startup by
# Python. It may also avoid a bootstrap issue.
import warnings
if env is None:
env = environ
# {b'PATH': ...}.get('PATH') and {'PATH': ...}.get(b'PATH') emit a
# BytesWarning when using python -b or python -bb: ignore the warning
with warnings.catch_warnings():
warnings.simplefilter("ignore", BytesWarning)
try:
path_list = env.get('PATH')
except TypeError:
path_list = None
if supports_bytes_environ:
try:
path_listb = env[b'PATH']
except (KeyError, TypeError):
pass
else:
if path_list is not None:
raise ValueError(
"env cannot contain 'PATH' and b'PATH' keys")
path_list = path_listb
if path_list is not None and isinstance(path_list, bytes):
path_list = fsdecode(path_list)
if path_list is None:
path_list = defpath
return path_list.split(pathsep)
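# A minimal usage sketch (POSIX, where pathsep is ':'):
#     get_exec_path({'PATH': '/usr/local/bin:/usr/bin'})
#         -> ['/usr/local/bin', '/usr/bin']
#     get_exec_path()   # consults os.environ, falling back to defpath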
# Change environ to automatically call putenv(), unsetenv if they exist.
from _collections_abc import MutableMapping
class _Environ(MutableMapping):
def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue, putenv, unsetenv):
self.encodekey = encodekey
self.decodekey = decodekey
self.encodevalue = encodevalue
self.decodevalue = decodevalue
self.putenv = putenv
self.unsetenv = unsetenv
self._data = data
def __getitem__(self, key):
try:
value = self._data[self.encodekey(key)]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
return self.decodevalue(value)
def __setitem__(self, key, value):
key = self.encodekey(key)
value = self.encodevalue(value)
self.putenv(key, value)
self._data[key] = value
def __delitem__(self, key):
encodedkey = self.encodekey(key)
self.unsetenv(encodedkey)
try:
del self._data[encodedkey]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
def __iter__(self):
for key in self._data:
yield self.decodekey(key)
def __len__(self):
return len(self._data)
def __repr__(self):
return 'environ({{{}}})'.format(', '.join(
('{!r}: {!r}'.format(self.decodekey(key), self.decodevalue(value))
for key, value in self._data.items())))
def copy(self):
return dict(self)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
try:
_putenv = putenv
except NameError:
_putenv = lambda key, value: None
else:
if "putenv" not in __all__:
__all__.append("putenv")
try:
_unsetenv = unsetenv
except NameError:
_unsetenv = lambda key: _putenv(key, "")
else:
if "unsetenv" not in __all__:
__all__.append("unsetenv")
def _createenviron():
if name == 'nt':
# Where Env Var Names Must Be UPPERCASE
def check_str(value):
if not isinstance(value, str):
raise TypeError("str expected, not %s" % type(value).__name__)
return value
encode = check_str
decode = str
def encodekey(key):
return encode(key).upper()
data = {}
for key, value in environ.items():
data[encodekey(key)] = value
else:
# Where Env Var Names Can Be Mixed Case
encoding = sys.getfilesystemencoding()
def encode(value):
if not isinstance(value, str):
raise TypeError("str expected, not %s" % type(value).__name__)
return value.encode(encoding, 'surrogateescape')
def decode(value):
return value.decode(encoding, 'surrogateescape')
encodekey = encode
data = environ
return _Environ(data,
encodekey, decode,
encode, decode,
_putenv, _unsetenv)
# unicode environ
environ = _createenviron()
del _createenviron
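# A minimal usage sketch (the variable name is hypothetical); assignments and
# deletions are forwarded to putenv()/unsetenv() by the _Environ wrapper above:
#     environ['MY_VAR'] = 'value'
#     del environ['MY_VAR']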
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are str."""
return environ.get(key, default)
supports_bytes_environ = (name != 'nt')
__all__.extend(("getenv", "supports_bytes_environ"))
if supports_bytes_environ:
def _check_bytes(value):
if not isinstance(value, bytes):
raise TypeError("bytes expected, not %s" % type(value).__name__)
return value
# bytes environ
environb = _Environ(environ._data,
_check_bytes, bytes,
_check_bytes, bytes,
_putenv, _unsetenv)
del _check_bytes
def getenvb(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are bytes."""
return environb.get(key, default)
__all__.extend(("environb", "getenvb"))
def _fscodec():
encoding = sys.getfilesystemencoding()
if encoding == 'mbcs':
errors = 'strict'
else:
errors = 'surrogateescape'
def fsencode(filename):
"""
Encode filename to the filesystem encoding with 'surrogateescape' error
handler, return bytes unchanged. On Windows, use 'strict' error handler if
the file system encoding is 'mbcs' (which is the default encoding).
"""
if isinstance(filename, bytes):
return filename
elif isinstance(filename, str):
return filename.encode(encoding, errors)
else:
raise TypeError("expect bytes or str, not %s" % type(filename).__name__)
def fsdecode(filename):
"""
Decode filename from the filesystem encoding with 'surrogateescape' error
handler, return str unchanged. On Windows, use 'strict' error handler if
the file system encoding is 'mbcs' (which is the default encoding).
"""
if isinstance(filename, str):
return filename
elif isinstance(filename, bytes):
return filename.decode(encoding, errors)
else:
raise TypeError("expect bytes or str, not %s" % type(filename).__name__)
return fsencode, fsdecode
fsencode, fsdecode = _fscodec()
del _fscodec
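# A minimal usage sketch, assuming the filesystem encoding is UTF-8:
#     fsencode('café')          ->  b'caf\xc3\xa9'
#     fsdecode(b'caf\xc3\xa9')  ->  'café'
#     fsencode(b'already-bytes') is returned unchanged.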
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
__all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"])
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise OSError("Not stopped, signaled or exited???")
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
# Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
__all__.extend(["spawnv", "spawnve", "spawnvp", "spawnvpe"])
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnl", "spawnle"])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnlp", "spawnlpe"])
# Supply os.popen()
def popen(cmd, mode="r", buffering=-1):
if not isinstance(cmd, str):
raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
if mode not in ("r", "w"):
raise ValueError("invalid mode %r" % mode)
if buffering == 0 or buffering is None:
raise ValueError("popen() does not support unbuffered streams")
import subprocess, io
if mode == "r":
proc = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdout), proc)
else:
proc = subprocess.Popen(cmd,
shell=True,
stdin=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdin), proc)
# Helper for popen() -- a proxy for a file whose close waits for the process
class _wrap_close:
def __init__(self, stream, proc):
self._stream = stream
self._proc = proc
def close(self):
self._stream.close()
returncode = self._proc.wait()
if returncode == 0:
return None
if name == 'nt':
return returncode
else:
return returncode << 8 # Shift left to match old behavior
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getattr__(self, name):
return getattr(self._stream, name)
def __iter__(self):
return iter(self._stream)
# Supply os.fdopen()
def fdopen(fd, *args, **kwargs):
if not isinstance(fd, int):
raise TypeError("invalid fd type (%s, expected integer)" % type(fd))
import io
return io.open(fd, *args, **kwargs)
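# A minimal usage sketch (an illustration, not part of the original module):
# popen() above wraps subprocess.Popen and returns a text-mode proxy whose
# close() reports the child's exit status; the 'echo' command is an assumed
# host shell built-in.
if __name__ == '__main__':
    with popen('echo hello') as _stream:
        print(_stream.read().strip())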
|
mastizada/kuma
|
refs/heads/master
|
vendor/packages/ipython/IPython/testing/plugin/setup.py
|
46
|
#!/usr/bin/env python
"""A Nose plugin to support IPython doctests.
"""
from setuptools import setup
setup(name='IPython doctest plugin',
version='0.1',
author='The IPython Team',
description = 'Nose plugin to load IPython-extended doctests',
license = 'LGPL',
py_modules = ['ipdoctest'],
entry_points = {
'nose.plugins.0.10': ['ipdoctest = ipdoctest:IPythonDoctest',
'extdoctest = ipdoctest:ExtensionDoctest',
],
},
)
|
RydrDojo/Ridr_app
|
refs/heads/master
|
pylotVenv/lib/python2.7/site-packages/wheel/test/test_basic.py
|
472
|
"""
Basic wheel tests.
"""
import os
import pkg_resources
import json
import sys
from pkg_resources import resource_filename
import wheel.util
import wheel.tool
from wheel import egg2wheel
from wheel.install import WheelFile
from zipfile import ZipFile
from shutil import rmtree
test_distributions = ("complex-dist", "simple.dist", "headers.dist")
def teardown_module():
"""Delete eggs/wheels created by tests."""
base = pkg_resources.resource_filename('wheel.test', '')
for dist in test_distributions:
for subdir in ('build', 'dist'):
try:
rmtree(os.path.join(base, dist, subdir))
except OSError:
pass
def setup_module():
build_wheel()
build_egg()
def build_wheel():
"""Build wheels from test distributions."""
for dist in test_distributions:
pwd = os.path.abspath(os.curdir)
distdir = pkg_resources.resource_filename('wheel.test', dist)
os.chdir(distdir)
try:
sys.argv = ['', 'bdist_wheel']
exec(compile(open('setup.py').read(), 'setup.py', 'exec'))
finally:
os.chdir(pwd)
def build_egg():
"""Build eggs from test distributions."""
for dist in test_distributions:
pwd = os.path.abspath(os.curdir)
distdir = pkg_resources.resource_filename('wheel.test', dist)
os.chdir(distdir)
try:
sys.argv = ['', 'bdist_egg']
exec(compile(open('setup.py').read(), 'setup.py', 'exec'))
finally:
os.chdir(pwd)
def test_findable():
"""Make sure pkg_resources can find us."""
assert pkg_resources.working_set.by_key['wheel'].version
def test_egg_re():
"""Make sure egg_info_re matches."""
egg_names = open(pkg_resources.resource_filename('wheel', 'eggnames.txt'))
for line in egg_names:
line = line.strip()
if not line:
continue
assert egg2wheel.egg_info_re.match(line), line
def test_compatibility_tags():
"""Test compatibilty tags are working."""
wf = WheelFile("package-1.0.0-cp32.cp33-noabi-noarch.whl")
assert (list(wf.compatibility_tags) ==
[('cp32', 'noabi', 'noarch'), ('cp33', 'noabi', 'noarch')])
assert (wf.arity == 2)
wf2 = WheelFile("package-1.0.0-1st-cp33-noabi-noarch.whl")
wf2_info = wf2.parsed_filename.groupdict()
assert wf2_info['build'] == '1st', wf2_info
def test_convert_egg():
base = pkg_resources.resource_filename('wheel.test', '')
for dist in test_distributions:
distdir = os.path.join(base, dist, 'dist')
eggs = [e for e in os.listdir(distdir) if e.endswith('.egg')]
wheel.tool.convert(eggs, distdir, verbose=False)
def test_unpack():
"""
Make sure 'wheel unpack' works.
This also verifies the integrity of our testing wheel files.
"""
for dist in test_distributions:
distdir = pkg_resources.resource_filename('wheel.test',
os.path.join(dist, 'dist'))
for wheelfile in (w for w in os.listdir(distdir) if w.endswith('.whl')):
wheel.tool.unpack(os.path.join(distdir, wheelfile), distdir)
def test_no_scripts():
"""Make sure entry point scripts are not generated."""
dist = "complex-dist"
basedir = pkg_resources.resource_filename('wheel.test', dist)
for (dirname, subdirs, filenames) in os.walk(basedir):
for filename in filenames:
if filename.endswith('.whl'):
whl = ZipFile(os.path.join(dirname, filename))
for entry in whl.infolist():
assert not '.data/scripts/' in entry.filename
def test_pydist():
"""Make sure pydist.json exists and validates against our schema."""
# XXX this test may need manual cleanup of older wheels
import jsonschema
def open_json(filename):
return json.loads(open(filename, 'rb').read().decode('utf-8'))
pymeta_schema = open_json(resource_filename('wheel.test',
'pydist-schema.json'))
valid = 0
for dist in ("simple.dist", "complex-dist"):
basedir = pkg_resources.resource_filename('wheel.test', dist)
for (dirname, subdirs, filenames) in os.walk(basedir):
for filename in filenames:
if filename.endswith('.whl'):
whl = ZipFile(os.path.join(dirname, filename))
for entry in whl.infolist():
if entry.filename.endswith('/metadata.json'):
pymeta = json.loads(whl.read(entry).decode('utf-8'))
jsonschema.validate(pymeta, pymeta_schema)
valid += 1
assert valid > 0, "No metadata.json found"
def test_util():
"""Test functions in util.py."""
for i in range(10):
before = b'*' * i
encoded = wheel.util.urlsafe_b64encode(before)
assert not encoded.endswith(b'=')
after = wheel.util.urlsafe_b64decode(encoded)
assert before == after
def test_pick_best():
"""Test the wheel ranking algorithm."""
def get_tags(res):
info = res[-1].parsed_filename.groupdict()
return info['pyver'], info['abi'], info['plat']
cand_tags = [('py27', 'noabi', 'noarch'), ('py26', 'noabi', 'noarch'),
('cp27', 'noabi', 'linux_i686'),
('cp26', 'noabi', 'linux_i686'),
('cp27', 'noabi', 'linux_x86_64'),
('cp26', 'noabi', 'linux_x86_64')]
cand_wheels = [WheelFile('testpkg-1.0-%s-%s-%s.whl' % t)
for t in cand_tags]
supported = [('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch')]
supported2 = [('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch'),
('cp26', 'noabi', 'linux_i686'), ('py26', 'noabi', 'noarch')]
supported3 = [('cp26', 'noabi', 'linux_i686'), ('py26', 'noabi', 'noarch'),
('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch')]
for supp in (supported, supported2, supported3):
context = lambda: list(supp)
for wheel in cand_wheels:
wheel.context = context
best = max(cand_wheels)
assert list(best.tags)[0] == supp[0]
# assert_equal(
# list(map(get_tags, pick_best(cand_wheels, supp, top=False))), supp)
|
edcomstock/werkzeug
|
refs/heads/master
|
werkzeug/testapp.py
|
364
|
# -*- coding: utf-8 -*-
"""
werkzeug.testapp
~~~~~~~~~~~~~~~~
Provide a small test application that can be used to test a WSGI server
and check it for WSGI compliance.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import werkzeug
from textwrap import wrap
from werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response
from werkzeug.utils import escape
import base64
logo = Response(base64.b64decode('''
R0lGODlhoACgAOMIAAEDACwpAEpCAGdgAJaKAM28AOnVAP3rAP/////////
//////////////////////yH5BAEKAAgALAAAAACgAKAAAAT+EMlJq704680R+F0ojmRpnuj0rWnrv
nB8rbRs33gu0bzu/0AObxgsGn3D5HHJbCUFyqZ0ukkSDlAidctNFg7gbI9LZlrBaHGtzAae0eloe25
7w9EDOX2fst/xenyCIn5/gFqDiVVDV4aGeYiKkhSFjnCQY5OTlZaXgZp8nJ2ekaB0SQOjqphrpnOiq
ncEn65UsLGytLVmQ6m4sQazpbtLqL/HwpnER8bHyLrLOc3Oz8PRONPU1crXN9na263dMt/g4SzjMeX
m5yDpLqgG7OzJ4u8lT/P69ej3JPn69kHzN2OIAHkB9RUYSFCFQYQJFTIkCDBiwoXWGnowaLEjRm7+G
p9A7Hhx4rUkAUaSLJlxHMqVMD/aSycSZkyTplCqtGnRAM5NQ1Ly5OmzZc6gO4d6DGAUKA+hSocWYAo
SlM6oUWX2O/o0KdaVU5vuSQLAa0ADwQgMEMB2AIECZhVSnTno6spgbtXmHcBUrQACcc2FrTrWS8wAf
78cMFBgwIBgbN+qvTt3ayikRBk7BoyGAGABAdYyfdzRQGV3l4coxrqQ84GpUBmrdR3xNIDUPAKDBSA
ADIGDhhqTZIWaDcrVX8EsbNzbkvCOxG8bN5w8ly9H8jyTJHC6DFndQydbguh2e/ctZJFXRxMAqqPVA
tQH5E64SPr1f0zz7sQYjAHg0In+JQ11+N2B0XXBeeYZgBZFx4tqBToiTCPv0YBgQv8JqA6BEf6RhXx
w1ENhRBnWV8ctEX4Ul2zc3aVGcQNC2KElyTDYyYUWvShdjDyMOGMuFjqnII45aogPhz/CodUHFwaDx
lTgsaOjNyhGWJQd+lFoAGk8ObghI0kawg+EV5blH3dr+digkYuAGSaQZFHFz2P/cTaLmhF52QeSb45
Jwxd+uSVGHlqOZpOeJpCFZ5J+rkAkFjQ0N1tah7JJSZUFNsrkeJUJMIBi8jyaEKIhKPomnC91Uo+NB
yyaJ5umnnpInIFh4t6ZSpGaAVmizqjpByDegYl8tPE0phCYrhcMWSv+uAqHfgH88ak5UXZmlKLVJhd
dj78s1Fxnzo6yUCrV6rrDOkluG+QzCAUTbCwf9SrmMLzK6p+OPHx7DF+bsfMRq7Ec61Av9i6GLw23r
idnZ+/OO0a99pbIrJkproCQMA17OPG6suq3cca5ruDfXCCDoS7BEdvmJn5otdqscn+uogRHHXs8cbh
EIfYaDY1AkrC0cqwcZpnM6ludx72x0p7Fo/hZAcpJDjax0UdHavMKAbiKltMWCF3xxh9k25N/Viud8
ba78iCvUkt+V6BpwMlErmcgc502x+u1nSxJSJP9Mi52awD1V4yB/QHONsnU3L+A/zR4VL/indx/y64
gqcj+qgTeweM86f0Qy1QVbvmWH1D9h+alqg254QD8HJXHvjQaGOqEqC22M54PcftZVKVSQG9jhkv7C
JyTyDoAJfPdu8v7DRZAxsP/ky9MJ3OL36DJfCFPASC3/aXlfLOOON9vGZZHydGf8LnxYJuuVIbl83y
Az5n/RPz07E+9+zw2A2ahz4HxHo9Kt79HTMx1Q7ma7zAzHgHqYH0SoZWyTuOLMiHwSfZDAQTn0ajk9
YQqodnUYjByQZhZak9Wu4gYQsMyEpIOAOQKze8CmEF45KuAHTvIDOfHJNipwoHMuGHBnJElUoDmAyX
c2Qm/R8Ah/iILCCJOEokGowdhDYc/yoL+vpRGwyVSCWFYZNljkhEirGXsalWcAgOdeAdoXcktF2udb
qbUhjWyMQxYO01o6KYKOr6iK3fE4MaS+DsvBsGOBaMb0Y6IxADaJhFICaOLmiWTlDAnY1KzDG4ambL
cWBA8mUzjJsN2KjSaSXGqMCVXYpYkj33mcIApyhQf6YqgeNAmNvuC0t4CsDbSshZJkCS1eNisKqlyG
cF8G2JeiDX6tO6Mv0SmjCa3MFb0bJaGPMU0X7c8XcpvMaOQmCajwSeY9G0WqbBmKv34DsMIEztU6Y2
KiDlFdt6jnCSqx7Dmt6XnqSKaFFHNO5+FmODxMCWBEaco77lNDGXBM0ECYB/+s7nKFdwSF5hgXumQe
EZ7amRg39RHy3zIjyRCykQh8Zo2iviRKyTDn/zx6EefptJj2Cw+Ep2FSc01U5ry4KLPYsTyWnVGnvb
UpyGlhjBUljyjHhWpf8OFaXwhp9O4T1gU9UeyPPa8A2l0p1kNqPXEVRm1AOs1oAGZU596t6SOR2mcB
Oco1srWtkaVrMUzIErrKri85keKqRQYX9VX0/eAUK1hrSu6HMEX3Qh2sCh0q0D2CtnUqS4hj62sE/z
aDs2Sg7MBS6xnQeooc2R2tC9YrKpEi9pLXfYXp20tDCpSP8rKlrD4axprb9u1Df5hSbz9QU0cRpfgn
kiIzwKucd0wsEHlLpe5yHXuc6FrNelOl7pY2+11kTWx7VpRu97dXA3DO1vbkhcb4zyvERYajQgAADs
='''), mimetype='image/png')
TEMPLATE = u'''\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<title>WSGI Information</title>
<style type="text/css">
@import url(http://fonts.googleapis.com/css?family=Ubuntu);
body { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
'Verdana', sans-serif; background-color: white; color: #000;
font-size: 15px; text-align: center; }
#logo { float: right; padding: 0 0 10px 10px; }
div.box { text-align: left; width: 45em; margin: auto; padding: 50px 0;
background-color: white; }
h1, h2 { font-family: 'Ubuntu', 'Lucida Grande', 'Lucida Sans Unicode',
'Geneva', 'Verdana', sans-serif; font-weight: normal; }
h1 { margin: 0 0 30px 0; }
h2 { font-size: 1.4em; margin: 1em 0 0.5em 0; }
table { width: 100%%; border-collapse: collapse; border: 1px solid #AFC5C9 }
table th { background-color: #AFC1C4; color: white; font-size: 0.72em;
font-weight: normal; width: 18em; vertical-align: top;
padding: 0.5em 0 0.1em 0.5em; }
table td { border: 1px solid #AFC5C9; padding: 0.1em 0 0.1em 0.5em; }
code { font-family: 'Consolas', 'Monaco', 'Bitstream Vera Sans Mono',
monospace; font-size: 0.7em; }
ul li { line-height: 1.5em; }
ul.path { font-size: 0.7em; margin: 0 -30px; padding: 8px 30px;
list-style: none; background: #E8EFF0; }
ul.path li { line-height: 1.6em; }
li.virtual { color: #999; text-decoration: underline; }
li.exp { background: white; }
</style>
<div class="box">
<img src="?resource=logo" id="logo" alt="[The Werkzeug Logo]" />
<h1>WSGI Information</h1>
<p>
This page displays all available information about the WSGI server and
the underlying Python interpreter.
<h2 id="python-interpreter">Python Interpreter</h2>
<table>
<tr>
<th>Python Version
<td>%(python_version)s
<tr>
<th>Platform
<td>%(platform)s [%(os)s]
<tr>
<th>API Version
<td>%(api_version)s
<tr>
<th>Byteorder
<td>%(byteorder)s
<tr>
<th>Werkzeug Version
<td>%(werkzeug_version)s
</table>
<h2 id="wsgi-environment">WSGI Environment</h2>
<table>%(wsgi_env)s</table>
<h2 id="installed-eggs">Installed Eggs</h2>
<p>
The following python packages were installed on the system as
Python eggs:
<ul>%(python_eggs)s</ul>
<h2 id="sys-path">System Path</h2>
<p>
The following paths are the current contents of the load path. The
following entries are looked up for Python packages. Note that not
all items in this path are folders. Gray and underlined items are
entries pointing to invalid resources or used by custom import hooks
such as the zip importer.
<p>
Items with a bright background were expanded for display from a relative
path. If you encounter such paths in the output you might want to check
your setup as relative paths are usually problematic in multithreaded
environments.
<ul class="path">%(sys_path)s</ul>
</div>
'''
def iter_sys_path():
if os.name == 'posix':
def strip(x):
prefix = os.path.expanduser('~')
if x.startswith(prefix):
x = '~' + x[len(prefix):]
return x
else:
strip = lambda x: x
cwd = os.path.abspath(os.getcwd())
for item in sys.path:
path = os.path.join(cwd, item or os.path.curdir)
yield strip(os.path.normpath(path)), \
not os.path.isdir(path), path != item
def render_testapp(req):
try:
import pkg_resources
except ImportError:
eggs = ()
else:
eggs = sorted(pkg_resources.working_set,
key=lambda x: x.project_name.lower())
python_eggs = []
for egg in eggs:
try:
version = egg.version
except (ValueError, AttributeError):
version = 'unknown'
python_eggs.append('<li>%s <small>[%s]</small>' % (
escape(egg.project_name),
escape(version)
))
wsgi_env = []
sorted_environ = sorted(req.environ.items(),
key=lambda x: repr(x[0]).lower())
for key, value in sorted_environ:
wsgi_env.append('<tr><th>%s<td><code>%s</code>' % (
escape(str(key)),
' '.join(wrap(escape(repr(value))))
))
sys_path = []
for item, virtual, expanded in iter_sys_path():
class_ = []
if virtual:
class_.append('virtual')
if expanded:
class_.append('exp')
sys_path.append('<li%s>%s' % (
class_ and ' class="%s"' % ' '.join(class_) or '',
escape(item)
))
return (TEMPLATE % {
'python_version': '<br>'.join(escape(sys.version).splitlines()),
'platform': escape(sys.platform),
'os': escape(os.name),
'api_version': sys.api_version,
'byteorder': sys.byteorder,
'werkzeug_version': werkzeug.__version__,
'python_eggs': '\n'.join(python_eggs),
'wsgi_env': '\n'.join(wsgi_env),
'sys_path': '\n'.join(sys_path)
}).encode('utf-8')
def test_app(environ, start_response):
"""Simple test application that dumps the environment. You can use
it to check if Werkzeug is working properly:
.. sourcecode:: pycon
>>> from werkzeug.serving import run_simple
>>> from werkzeug.testapp import test_app
>>> run_simple('localhost', 3000, test_app)
* Running on http://localhost:3000/
The application displays important information from the WSGI environment,
the Python interpreter and the installed libraries.
"""
req = Request(environ, populate_request=False)
if req.args.get('resource') == 'logo':
response = logo
else:
response = Response(render_testapp(req), mimetype='text/html')
return response(environ, start_response)
if __name__ == '__main__':
from werkzeug.serving import run_simple
run_simple('localhost', 5000, test_app, use_reloader=True)
|
FlintHill/SUAS-Competition
|
refs/heads/master
|
env/lib/python2.7/site-packages/pip/_vendor/cachecontrol/cache.py
|
74
|
"""
The cache object API for implementing caches. The default is a thread
safe in-memory dictionary.
"""
from threading import Lock
class BaseCache(object):
def get(self, key):
raise NotImplementedError()
def set(self, key, value):
raise NotImplementedError()
def delete(self, key):
raise NotImplementedError()
def close(self):
pass
class DictCache(BaseCache):
def __init__(self, init_dict=None):
self.lock = Lock()
self.data = init_dict or {}
def get(self, key):
return self.data.get(key, None)
def set(self, key, value):
with self.lock:
self.data.update({key: value})
def delete(self, key):
with self.lock:
if key in self.data:
self.data.pop(key)
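# A minimal usage sketch (an illustration, not part of the original module):
# DictCache implements the BaseCache interface, so it can stand in for any
# other cache backend when exercising code written against this API.
if __name__ == '__main__':
    cache = DictCache()
    cache.set('key', b'value')
    assert cache.get('key') == b'value'
    cache.delete('key')
    assert cache.get('key') is None
    cache.close()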
|
Venturi/cms
|
refs/heads/master
|
env/lib/python2.7/site-packages/cms/migrations/0010_migrate_use_structure.py
|
7
|
# -*- coding: utf-8 -*-
import warnings
from django.contrib.auth.models import Permission, Group
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.db import models, migrations
def forwards(apps, schema_editor):
user_model = apps.get_model(settings.AUTH_USER_MODEL)
ph_model = apps.get_model('cms', 'Placeholder')
page_model = apps.get_model('cms', 'Page')
try:
ph_ctype = ContentType.objects.get_for_model(ph_model)
page_ctype = ContentType.objects.get_for_model(page_model)
permission, __ = Permission.objects.get_or_create(
codename='use_structure', content_type=ph_ctype, name=u"Can use Structure mode")
page_permission, __ = Permission.objects.get_or_create(codename='change_page', content_type=page_ctype)
for user in user_model.objects.filter(is_superuser=False, is_staff=True):
if user.user_permissions.filter(codename='change_page', content_type_id=page_ctype.pk).exists():
user.user_permissions.add(permission.pk)
for group in Group.objects.all():
if page_permission in group.permissions.all():
group.permissions.add(permission.pk)
except Exception:
warnings.warn(u'Users not migrated to use_structure permission, please add the permission manually')
def backwards(apps, schema_editor):
user_model = apps.get_model(settings.AUTH_USER_MODEL)
ph_model = apps.get_model('cms', 'Placeholder')
ph_ctype = ContentType.objects.get(app_label=ph_model._meta.app_label, model=ph_model._meta.model_name)
try:
permission, __ = Permission.objects.get_or_create(
codename='use_structure', content_type=ph_ctype, name=u"Can use Structure mode")
for user in user_model.objects.filter(is_superuser=False, is_staff=True):
user.user_permissions.remove(permission.pk)
for group in Group.objects.all():
if permission in group.permissions.all():
group.permissions.remove(permission.pk)
except Exception:
warnings.warn(u'use_structure not removed from all the users, please check the permission manually')
class Migration(migrations.Migration):
dependencies = [
('cms', '0009_merge'),
('contenttypes', '__latest__'),
]
operations = [
migrations.AlterModelOptions(
name='placeholder',
options={'permissions': (('use_structure', 'Can use Structure mode'),)},
),
migrations.RunPython(forwards, backwards)
]
|
zverevalexei/trex-http-proxy
|
refs/heads/master
|
trex_client/external_libs/pyzmq-14.5.0/python3/fedora18/64bit/zmq/sugar/__init__.py
|
33
|
"""pure-Python sugar wrappers for core 0MQ objects."""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
from zmq.sugar import (
constants, context, frame, poll, socket, tracker, version
)
from zmq import error
__all__ = ['constants']
for submod in (
constants, context, error, frame, poll, socket, tracker, version
):
__all__.extend(submod.__all__)
from zmq.error import *
from zmq.sugar.context import *
from zmq.sugar.tracker import *
from zmq.sugar.socket import *
from zmq.sugar.constants import *
from zmq.sugar.frame import *
from zmq.sugar.poll import *
# from zmq.sugar.stopwatch import *
# from zmq.sugar._device import *
from zmq.sugar.version import *
|
thonkify/thonkify
|
refs/heads/master
|
src/lib/google/protobuf/type_pb2.py
|
8
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/type.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
from google.protobuf import source_context_pb2 as google_dot_protobuf_dot_source__context__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/type.proto',
package='google.protobuf',
syntax='proto3',
serialized_pb=_b('\n\x1agoogle/protobuf/type.proto\x12\x0fgoogle.protobuf\x1a\x19google/protobuf/any.proto\x1a$google/protobuf/source_context.proto\"\xd7\x01\n\x04Type\x12\x0c\n\x04name\x18\x01 \x01(\t\x12&\n\x06\x66ields\x18\x02 \x03(\x0b\x32\x16.google.protobuf.Field\x12\x0e\n\x06oneofs\x18\x03 \x03(\t\x12(\n\x07options\x18\x04 \x03(\x0b\x32\x17.google.protobuf.Option\x12\x36\n\x0esource_context\x18\x05 \x01(\x0b\x32\x1e.google.protobuf.SourceContext\x12\'\n\x06syntax\x18\x06 \x01(\x0e\x32\x17.google.protobuf.Syntax\"\xd5\x05\n\x05\x46ield\x12)\n\x04kind\x18\x01 \x01(\x0e\x32\x1b.google.protobuf.Field.Kind\x12\x37\n\x0b\x63\x61rdinality\x18\x02 \x01(\x0e\x32\".google.protobuf.Field.Cardinality\x12\x0e\n\x06number\x18\x03 \x01(\x05\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\x10\n\x08type_url\x18\x06 \x01(\t\x12\x13\n\x0boneof_index\x18\x07 \x01(\x05\x12\x0e\n\x06packed\x18\x08 \x01(\x08\x12(\n\x07options\x18\t \x03(\x0b\x32\x17.google.protobuf.Option\x12\x11\n\tjson_name\x18\n \x01(\t\x12\x15\n\rdefault_value\x18\x0b \x01(\t\"\xc8\x02\n\x04Kind\x12\x10\n\x0cTYPE_UNKNOWN\x10\x00\x12\x0f\n\x0bTYPE_DOUBLE\x10\x01\x12\x0e\n\nTYPE_FLOAT\x10\x02\x12\x0e\n\nTYPE_INT64\x10\x03\x12\x0f\n\x0bTYPE_UINT64\x10\x04\x12\x0e\n\nTYPE_INT32\x10\x05\x12\x10\n\x0cTYPE_FIXED64\x10\x06\x12\x10\n\x0cTYPE_FIXED32\x10\x07\x12\r\n\tTYPE_BOOL\x10\x08\x12\x0f\n\x0bTYPE_STRING\x10\t\x12\x0e\n\nTYPE_GROUP\x10\n\x12\x10\n\x0cTYPE_MESSAGE\x10\x0b\x12\x0e\n\nTYPE_BYTES\x10\x0c\x12\x0f\n\x0bTYPE_UINT32\x10\r\x12\r\n\tTYPE_ENUM\x10\x0e\x12\x11\n\rTYPE_SFIXED32\x10\x0f\x12\x11\n\rTYPE_SFIXED64\x10\x10\x12\x0f\n\x0bTYPE_SINT32\x10\x11\x12\x0f\n\x0bTYPE_SINT64\x10\x12\"t\n\x0b\x43\x61rdinality\x12\x17\n\x13\x43\x41RDINALITY_UNKNOWN\x10\x00\x12\x18\n\x14\x43\x41RDINALITY_OPTIONAL\x10\x01\x12\x18\n\x14\x43\x41RDINALITY_REQUIRED\x10\x02\x12\x18\n\x14\x43\x41RDINALITY_REPEATED\x10\x03\"\xce\x01\n\x04\x45num\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\tenumvalue\x18\x02 \x03(\x0b\x32\x1a.google.protobuf.EnumValue\x12(\n\x07options\x18\x03 \x03(\x0b\x32\x17.google.protobuf.Option\x12\x36\n\x0esource_context\x18\x04 \x01(\x0b\x32\x1e.google.protobuf.SourceContext\x12\'\n\x06syntax\x18\x05 \x01(\x0e\x32\x17.google.protobuf.Syntax\"S\n\tEnumValue\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x02 \x01(\x05\x12(\n\x07options\x18\x03 \x03(\x0b\x32\x17.google.protobuf.Option\";\n\x06Option\x12\x0c\n\x04name\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any*.\n\x06Syntax\x12\x11\n\rSYNTAX_PROTO2\x10\x00\x12\x11\n\rSYNTAX_PROTO3\x10\x01\x42L\n\x13\x63om.google.protobufB\tTypeProtoP\x01\xa0\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_any__pb2.DESCRIPTOR,google_dot_protobuf_dot_source__context__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_SYNTAX = _descriptor.EnumDescriptor(
name='Syntax',
full_name='google.protobuf.Syntax',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='SYNTAX_PROTO2', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SYNTAX_PROTO3', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1413,
serialized_end=1459,
)
_sym_db.RegisterEnumDescriptor(_SYNTAX)
Syntax = enum_type_wrapper.EnumTypeWrapper(_SYNTAX)
SYNTAX_PROTO2 = 0
SYNTAX_PROTO3 = 1
_FIELD_KIND = _descriptor.EnumDescriptor(
name='Kind',
full_name='google.protobuf.Field.Kind',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='TYPE_UNKNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_DOUBLE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_FLOAT', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_INT64', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_UINT64', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_INT32', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_FIXED64', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_FIXED32', index=7, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_BOOL', index=8, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_STRING', index=9, number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_GROUP', index=10, number=10,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_MESSAGE', index=11, number=11,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_BYTES', index=12, number=12,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_UINT32', index=13, number=13,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_ENUM', index=14, number=14,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_SFIXED32', index=15, number=15,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_SFIXED64', index=16, number=16,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_SINT32', index=17, number=17,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TYPE_SINT64', index=18, number=18,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=610,
serialized_end=938,
)
_sym_db.RegisterEnumDescriptor(_FIELD_KIND)
_FIELD_CARDINALITY = _descriptor.EnumDescriptor(
name='Cardinality',
full_name='google.protobuf.Field.Cardinality',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='CARDINALITY_UNKNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CARDINALITY_OPTIONAL', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CARDINALITY_REQUIRED', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CARDINALITY_REPEATED', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=940,
serialized_end=1056,
)
_sym_db.RegisterEnumDescriptor(_FIELD_CARDINALITY)
_TYPE = _descriptor.Descriptor(
name='Type',
full_name='google.protobuf.Type',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.Type.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='fields', full_name='google.protobuf.Type.fields', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='oneofs', full_name='google.protobuf.Type.oneofs', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.Type.options', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='source_context', full_name='google.protobuf.Type.source_context', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='syntax', full_name='google.protobuf.Type.syntax', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=113,
serialized_end=328,
)
_FIELD = _descriptor.Descriptor(
name='Field',
full_name='google.protobuf.Field',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='kind', full_name='google.protobuf.Field.kind', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cardinality', full_name='google.protobuf.Field.cardinality', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number', full_name='google.protobuf.Field.number', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.Field.name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type_url', full_name='google.protobuf.Field.type_url', index=4,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='oneof_index', full_name='google.protobuf.Field.oneof_index', index=5,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='packed', full_name='google.protobuf.Field.packed', index=6,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.Field.options', index=7,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='json_name', full_name='google.protobuf.Field.json_name', index=8,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='default_value', full_name='google.protobuf.Field.default_value', index=9,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_FIELD_KIND,
_FIELD_CARDINALITY,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=331,
serialized_end=1056,
)
_ENUM = _descriptor.Descriptor(
name='Enum',
full_name='google.protobuf.Enum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.Enum.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='enumvalue', full_name='google.protobuf.Enum.enumvalue', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.Enum.options', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='source_context', full_name='google.protobuf.Enum.source_context', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='syntax', full_name='google.protobuf.Enum.syntax', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1059,
serialized_end=1265,
)
_ENUMVALUE = _descriptor.Descriptor(
name='EnumValue',
full_name='google.protobuf.EnumValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.EnumValue.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='number', full_name='google.protobuf.EnumValue.number', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.EnumValue.options', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1267,
serialized_end=1350,
)
_OPTION = _descriptor.Descriptor(
name='Option',
full_name='google.protobuf.Option',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.Option.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.Option.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1352,
serialized_end=1411,
)
_TYPE.fields_by_name['fields'].message_type = _FIELD
_TYPE.fields_by_name['options'].message_type = _OPTION
_TYPE.fields_by_name['source_context'].message_type = google_dot_protobuf_dot_source__context__pb2._SOURCECONTEXT
_TYPE.fields_by_name['syntax'].enum_type = _SYNTAX
_FIELD.fields_by_name['kind'].enum_type = _FIELD_KIND
_FIELD.fields_by_name['cardinality'].enum_type = _FIELD_CARDINALITY
_FIELD.fields_by_name['options'].message_type = _OPTION
_FIELD_KIND.containing_type = _FIELD
_FIELD_CARDINALITY.containing_type = _FIELD
_ENUM.fields_by_name['enumvalue'].message_type = _ENUMVALUE
_ENUM.fields_by_name['options'].message_type = _OPTION
_ENUM.fields_by_name['source_context'].message_type = google_dot_protobuf_dot_source__context__pb2._SOURCECONTEXT
_ENUM.fields_by_name['syntax'].enum_type = _SYNTAX
_ENUMVALUE.fields_by_name['options'].message_type = _OPTION
_OPTION.fields_by_name['value'].message_type = google_dot_protobuf_dot_any__pb2._ANY
DESCRIPTOR.message_types_by_name['Type'] = _TYPE
DESCRIPTOR.message_types_by_name['Field'] = _FIELD
DESCRIPTOR.message_types_by_name['Enum'] = _ENUM
DESCRIPTOR.message_types_by_name['EnumValue'] = _ENUMVALUE
DESCRIPTOR.message_types_by_name['Option'] = _OPTION
DESCRIPTOR.enum_types_by_name['Syntax'] = _SYNTAX
Type = _reflection.GeneratedProtocolMessageType('Type', (_message.Message,), dict(
DESCRIPTOR = _TYPE,
__module__ = 'google.protobuf.type_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.Type)
))
_sym_db.RegisterMessage(Type)
Field = _reflection.GeneratedProtocolMessageType('Field', (_message.Message,), dict(
DESCRIPTOR = _FIELD,
__module__ = 'google.protobuf.type_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.Field)
))
_sym_db.RegisterMessage(Field)
Enum = _reflection.GeneratedProtocolMessageType('Enum', (_message.Message,), dict(
DESCRIPTOR = _ENUM,
__module__ = 'google.protobuf.type_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.Enum)
))
_sym_db.RegisterMessage(Enum)
EnumValue = _reflection.GeneratedProtocolMessageType('EnumValue', (_message.Message,), dict(
DESCRIPTOR = _ENUMVALUE,
__module__ = 'google.protobuf.type_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.EnumValue)
))
_sym_db.RegisterMessage(EnumValue)
Option = _reflection.GeneratedProtocolMessageType('Option', (_message.Message,), dict(
DESCRIPTOR = _OPTION,
__module__ = 'google.protobuf.type_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.Option)
))
_sym_db.RegisterMessage(Option)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\023com.google.protobufB\tTypeProtoP\001\240\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes'))
# @@protoc_insertion_point(module_scope)
|
Rhadow/leetcode
|
refs/heads/master
|
lintcode/Medium/140_Fast_Power.py
|
1
|
class Solution:
"""
@param a, b, n: 32bit integers
@return: An integer
"""
    def fastPower(self, a, b, n):
        # Compute (a ** n) % b by repeated squaring, reducing every
        # intermediate value modulo b so the numbers stay small.
        res = 1
        a %= b
        while n > 0:
            if n % 2 == 1:
                res = res * a % b
            n //= 2
            a = a * a % b
        return res % b
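# A quick sanity check (an illustration, not part of the original solution):
# 2 ** 10 % 1000 == 24, so fastPower(2, 1000, 10) should return 24.
if __name__ == '__main__':
    assert Solution().fastPower(2, 1000, 10) == 24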
|
jesopo/bitbot
|
refs/heads/master
|
src/utils/datetime/common.py
|
1
|
import datetime as _datetime
import enum
ISO8601_FORMAT_DT = "%Y-%m-%dT%H:%M:%S"
ISO8601_FORMAT_TZ = "%z"
TIME_HUMAN = "%H:%M:%S"
DATE_HUMAN = "%Y-%m-%d"
class TimeSpec(enum.Enum):
NORMAL = 1
MILLISECOND = 2
TIME_SECOND = 1
TIME_MINUTE = TIME_SECOND*60
TIME_HOUR = TIME_MINUTE*60
TIME_DAY = TIME_HOUR*24
TIME_WEEK = TIME_DAY*7
SECONDS_MINUTES = 60
SECONDS_HOURS = SECONDS_MINUTES*60
SECONDS_DAYS = SECONDS_HOURS*24
SECONDS_WEEKS = SECONDS_DAYS*7
UNIT_MINIMUM = 6
UNIT_SECOND = 5
UNIT_MINUTE = 4
UNIT_HOUR = 3
UNIT_DAY = 2
UNIT_WEEK = 1
UNIT_MONTH = 1
UNIT_YEAR = 1
def utcnow() -> _datetime.datetime:
return _datetime.datetime.utcnow().replace(tzinfo=_datetime.timezone.utc)
def timestamp(seconds: float) -> _datetime.datetime:
return _datetime.datetime.fromtimestamp(seconds).replace(
tzinfo=_datetime.timezone.utc)
def seconds_since(dt: _datetime.datetime) -> float:
return (utcnow()-dt).total_seconds()
class RelativeDirection(enum.Enum):
FORWARD = 1
BACKWARD = 2
|
sam-tsai/django-old
|
refs/heads/master
|
tests/modeltests/transactions/models.py
|
11
|
"""
15. Transactions
Django handles transactions in three different ways. The default is to commit
each transaction upon a write, but you can decorate a function to get
commit-on-success behavior. Alternatively, you can manage the transaction
manually.
"""
from django.db import models, DEFAULT_DB_ALIAS
class Reporter(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
email = models.EmailField()
class Meta:
ordering = ('first_name', 'last_name')
def __unicode__(self):
return u"%s %s" % (self.first_name, self.last_name)
__test__ = {'API_TESTS':"""
>>> from django.db import connection, transaction
"""}
from django.conf import settings
building_docs = getattr(settings, 'BUILDING_DOCS', False)
if building_docs or settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'] != 'django.db.backends.mysql':
__test__['API_TESTS'] += """
# the default behavior is to autocommit after each save() action
>>> def create_a_reporter_then_fail(first, last):
... a = Reporter(first_name=first, last_name=last)
... a.save()
... raise Exception("I meant to do that")
...
>>> create_a_reporter_then_fail("Alice", "Smith")
Traceback (most recent call last):
...
Exception: I meant to do that
# The object created before the exception still exists
>>> Reporter.objects.all()
[<Reporter: Alice Smith>]
# the autocommit decorator works exactly the same as the default behavior
>>> autocomitted_create_then_fail = transaction.autocommit(create_a_reporter_then_fail)
>>> autocomitted_create_then_fail("Ben", "Jones")
Traceback (most recent call last):
...
Exception: I meant to do that
# Same behavior as before
>>> Reporter.objects.all()
[<Reporter: Alice Smith>, <Reporter: Ben Jones>]
# the autocommit decorator also works with a using argument
>>> using_autocomitted_create_then_fail = transaction.autocommit(using='default')(create_a_reporter_then_fail)
>>> using_autocomitted_create_then_fail("Carol", "Doe")
Traceback (most recent call last):
...
Exception: I meant to do that
# Same behavior as before
>>> Reporter.objects.all()
[<Reporter: Alice Smith>, <Reporter: Ben Jones>, <Reporter: Carol Doe>]
# With the commit_on_success decorator, the transaction is only committed if the
# function doesn't throw an exception
>>> committed_on_success = transaction.commit_on_success(create_a_reporter_then_fail)
>>> committed_on_success("Dirk", "Gently")
Traceback (most recent call last):
...
Exception: I meant to do that
# This time the object never got saved
>>> Reporter.objects.all()
[<Reporter: Alice Smith>, <Reporter: Ben Jones>, <Reporter: Carol Doe>]
# commit_on_success decorator also works with a using argument
>>> using_committed_on_success = transaction.commit_on_success(using='default')(create_a_reporter_then_fail)
>>> using_committed_on_success("Dirk", "Gently")
Traceback (most recent call last):
...
Exception: I meant to do that
# This time the object never got saved
>>> Reporter.objects.all()
[<Reporter: Alice Smith>, <Reporter: Ben Jones>, <Reporter: Carol Doe>]
# If there aren't any exceptions, the data will get saved
>>> def remove_a_reporter():
... r = Reporter.objects.get(first_name="Alice")
... r.delete()
...
>>> remove_comitted_on_success = transaction.commit_on_success(remove_a_reporter)
>>> remove_comitted_on_success()
>>> Reporter.objects.all()
[<Reporter: Ben Jones>, <Reporter: Carol Doe>]
# You can manually manage transactions if you really want to, but you
# have to remember to commit/rollback
>>> def manually_managed():
... r = Reporter(first_name="Dirk", last_name="Gently")
... r.save()
... transaction.commit()
>>> manually_managed = transaction.commit_manually(manually_managed)
>>> manually_managed()
>>> Reporter.objects.all()
[<Reporter: Ben Jones>, <Reporter: Carol Doe>, <Reporter: Dirk Gently>]
# If you forget, you'll get bad errors
>>> def manually_managed_mistake():
... r = Reporter(first_name="Edward", last_name="Woodward")
... r.save()
... # oops, I forgot to commit/rollback!
>>> manually_managed_mistake = transaction.commit_manually(manually_managed_mistake)
>>> manually_managed_mistake()
Traceback (most recent call last):
...
TransactionManagementError: Transaction managed block ended with pending COMMIT/ROLLBACK
# commit_manually also works with a using argument
>>> using_manually_managed_mistake = transaction.commit_manually(using='default')(manually_managed_mistake)
>>> using_manually_managed_mistake()
Traceback (most recent call last):
...
TransactionManagementError: Transaction managed block ended with pending COMMIT/ROLLBACK
"""
# Regression for #11900: If a function wrapped by commit_on_success writes a
# transaction that can't be committed, that transaction should be rolled back.
# The bug is only visible using the psycopg2 backend, though
# the fix is generally a good idea.
pgsql_backends = ('django.db.backends.postgresql_psycopg2', 'postgresql_psycopg2',)
if building_docs or settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'] in pgsql_backends:
__test__['API_TESTS'] += """
>>> def execute_bad_sql():
... cursor = connection.cursor()
... cursor.execute("INSERT INTO transactions_reporter (first_name, last_name) VALUES ('Douglas', 'Adams');")
... transaction.set_dirty()
...
>>> execute_bad_sql = transaction.commit_on_success(execute_bad_sql)
>>> execute_bad_sql()
Traceback (most recent call last):
...
IntegrityError: null value in column "email" violates not-null constraint
<BLANKLINE>
>>> transaction.rollback()
"""
|
barryrobison/arsenalsuite
|
refs/heads/master
|
cpp/lib/PyQt4/examples/itemviews/editabletreemodel/ui_mainwindow.py
|
15
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created: Fri Mar 27 22:12:50 2009
# by: PyQt4 UI code generator 4.4.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(573, 468)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.vboxlayout = QtGui.QVBoxLayout(self.centralwidget)
self.vboxlayout.setMargin(0)
self.vboxlayout.setSpacing(0)
self.vboxlayout.setObjectName("vboxlayout")
self.view = QtGui.QTreeView(self.centralwidget)
self.view.setAlternatingRowColors(True)
self.view.setSelectionBehavior(QtGui.QAbstractItemView.SelectItems)
self.view.setHorizontalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel)
self.view.setAnimated(False)
self.view.setAllColumnsShowFocus(True)
self.view.setObjectName("view")
self.vboxlayout.addWidget(self.view)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 573, 31))
self.menubar.setObjectName("menubar")
self.fileMenu = QtGui.QMenu(self.menubar)
self.fileMenu.setObjectName("fileMenu")
self.actionsMenu = QtGui.QMenu(self.menubar)
self.actionsMenu.setObjectName("actionsMenu")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.exitAction = QtGui.QAction(MainWindow)
self.exitAction.setObjectName("exitAction")
self.insertRowAction = QtGui.QAction(MainWindow)
self.insertRowAction.setObjectName("insertRowAction")
self.removeRowAction = QtGui.QAction(MainWindow)
self.removeRowAction.setObjectName("removeRowAction")
self.insertColumnAction = QtGui.QAction(MainWindow)
self.insertColumnAction.setObjectName("insertColumnAction")
self.removeColumnAction = QtGui.QAction(MainWindow)
self.removeColumnAction.setObjectName("removeColumnAction")
self.insertChildAction = QtGui.QAction(MainWindow)
self.insertChildAction.setObjectName("insertChildAction")
self.fileMenu.addAction(self.exitAction)
self.actionsMenu.addAction(self.insertRowAction)
self.actionsMenu.addAction(self.insertColumnAction)
self.actionsMenu.addSeparator()
self.actionsMenu.addAction(self.removeRowAction)
self.actionsMenu.addAction(self.removeColumnAction)
self.actionsMenu.addSeparator()
self.actionsMenu.addAction(self.insertChildAction)
self.menubar.addAction(self.fileMenu.menuAction())
self.menubar.addAction(self.actionsMenu.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "Editable Tree Model", None, QtGui.QApplication.UnicodeUTF8))
self.fileMenu.setTitle(QtGui.QApplication.translate("MainWindow", "&File", None, QtGui.QApplication.UnicodeUTF8))
self.actionsMenu.setTitle(QtGui.QApplication.translate("MainWindow", "&Actions", None, QtGui.QApplication.UnicodeUTF8))
self.exitAction.setText(QtGui.QApplication.translate("MainWindow", "E&xit", None, QtGui.QApplication.UnicodeUTF8))
self.exitAction.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+Q", None, QtGui.QApplication.UnicodeUTF8))
self.insertRowAction.setText(QtGui.QApplication.translate("MainWindow", "Insert Row", None, QtGui.QApplication.UnicodeUTF8))
self.insertRowAction.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+I, R", None, QtGui.QApplication.UnicodeUTF8))
self.removeRowAction.setText(QtGui.QApplication.translate("MainWindow", "Remove Row", None, QtGui.QApplication.UnicodeUTF8))
self.removeRowAction.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+R, R", None, QtGui.QApplication.UnicodeUTF8))
self.insertColumnAction.setText(QtGui.QApplication.translate("MainWindow", "Insert Column", None, QtGui.QApplication.UnicodeUTF8))
self.insertColumnAction.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+I, C", None, QtGui.QApplication.UnicodeUTF8))
self.removeColumnAction.setText(QtGui.QApplication.translate("MainWindow", "Remove Column", None, QtGui.QApplication.UnicodeUTF8))
self.removeColumnAction.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+R, C", None, QtGui.QApplication.UnicodeUTF8))
self.insertChildAction.setText(QtGui.QApplication.translate("MainWindow", "Insert Child", None, QtGui.QApplication.UnicodeUTF8))
self.insertChildAction.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+N", None, QtGui.QApplication.UnicodeUTF8))
import editabletreemodel_rc
|
lizaifang/zinnia
|
refs/heads/master
|
python/test.py
|
12
|
#!/usr/bin/python
import zinnia
input = "(character (width 1000)(height 1000)(strokes ((243 273)(393 450))((700 253)(343 486)(280 716)(393 866)(710 880))))";
try:
s = zinnia.Character()
r = zinnia.Recognizer()
r.open("/usr/local/lib/zinnia/model/tomoe/handwriting-ja.model")
if (not s.parse(input)):
print s.what()
result = r.classify(s, 10)
size = result.size()
for i in range(0, (size - 1)):
print "%s\t%f" % (result.value(i), result.score(i))
s.clear();
s.set_width(300)
s.set_height(300)
s.add(0, 51, 29)
s.add(0, 117, 41)
s.add(1, 99, 65)
s.add(1, 219, 77)
s.add(2, 27, 131)
s.add(2, 261, 131)
s.add(3, 129, 17)
s.add(3, 57, 203)
s.add(4, 111, 71)
s.add(4, 219, 173)
s.add(5, 81, 161)
s.add(5, 93, 281)
s.add(6, 99, 167)
s.add(6, 207, 167)
s.add(6, 189, 245)
s.add(7, 99, 227)
s.add(7, 189, 227)
s.add(8, 111, 257)
s.add(8, 189, 245)
result = r.classify(s, 10)
size = result.size()
for i in range(0, (size - 1)):
print "%s\t%f" % (result.value(i), result.score(i))
except RuntimeError, e:
print "RuntimeError: ", e,
|
timm/timmnix
|
refs/heads/master
|
pypy3-v5.5.0-linux64/lib-python/3/test/test_email/test_policy.py
|
34
|
import io
import types
import textwrap
import unittest
import email.policy
import email.parser
import email.generator
from email import headerregistry
def make_defaults(base_defaults, differences):
defaults = base_defaults.copy()
defaults.update(differences)
return defaults
class PolicyAPITests(unittest.TestCase):
longMessage = True
# Base default values.
compat32_defaults = {
'max_line_length': 78,
'linesep': '\n',
'cte_type': '8bit',
'raise_on_defect': False,
}
# These default values are the ones set on email.policy.default.
# If any of these defaults change, the docs must be updated.
policy_defaults = compat32_defaults.copy()
policy_defaults.update({
'raise_on_defect': False,
'header_factory': email.policy.EmailPolicy.header_factory,
'refold_source': 'long',
})
# For each policy under test, we give here what we expect the defaults to
# be for that policy. The second argument to make defaults is the
# difference between the base defaults and that for the particular policy.
new_policy = email.policy.EmailPolicy()
policies = {
email.policy.compat32: make_defaults(compat32_defaults, {}),
email.policy.default: make_defaults(policy_defaults, {}),
email.policy.SMTP: make_defaults(policy_defaults,
{'linesep': '\r\n'}),
email.policy.HTTP: make_defaults(policy_defaults,
{'linesep': '\r\n',
'max_line_length': None}),
email.policy.strict: make_defaults(policy_defaults,
{'raise_on_defect': True}),
new_policy: make_defaults(policy_defaults, {}),
}
# Creating a new policy creates a new header factory. There is a test
# later that proves this.
policies[new_policy]['header_factory'] = new_policy.header_factory
def test_defaults(self):
for policy, expected in self.policies.items():
for attr, value in expected.items():
self.assertEqual(getattr(policy, attr), value,
("change {} docs/docstrings if defaults have "
"changed").format(policy))
def test_all_attributes_covered(self):
for policy, expected in self.policies.items():
for attr in dir(policy):
if (attr.startswith('_') or
isinstance(getattr(email.policy.EmailPolicy, attr),
types.FunctionType)):
continue
else:
self.assertIn(attr, expected,
"{} is not fully tested".format(attr))
def test_abc(self):
with self.assertRaises(TypeError) as cm:
email.policy.Policy()
msg = str(cm.exception)
abstract_methods = ('fold',
'fold_binary',
'header_fetch_parse',
'header_source_parse',
'header_store_parse')
for method in abstract_methods:
self.assertIn(method, msg)
def test_policy_is_immutable(self):
for policy, defaults in self.policies.items():
for attr in defaults:
with self.assertRaisesRegex(AttributeError, attr+".*read-only"):
setattr(policy, attr, None)
with self.assertRaisesRegex(AttributeError, 'no attribute.*foo'):
policy.foo = None
def test_set_policy_attrs_when_cloned(self):
# None of the attributes has a default value of None, so we set them
# all to None in the clone call and check that it worked.
for policyclass, defaults in self.policies.items():
testattrdict = {attr: None for attr in defaults}
policy = policyclass.clone(**testattrdict)
for attr in defaults:
self.assertIsNone(getattr(policy, attr))
def test_reject_non_policy_keyword_when_called(self):
for policyclass in self.policies:
with self.assertRaises(TypeError):
policyclass(this_keyword_should_not_be_valid=None)
with self.assertRaises(TypeError):
policyclass(newtline=None)
def test_policy_addition(self):
expected = self.policy_defaults.copy()
p1 = email.policy.default.clone(max_line_length=100)
p2 = email.policy.default.clone(max_line_length=50)
added = p1 + p2
expected.update(max_line_length=50)
for attr, value in expected.items():
self.assertEqual(getattr(added, attr), value)
added = p2 + p1
expected.update(max_line_length=100)
for attr, value in expected.items():
self.assertEqual(getattr(added, attr), value)
added = added + email.policy.default
for attr, value in expected.items():
self.assertEqual(getattr(added, attr), value)
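# The assertions above illustrate the policy-addition rule exercised by this
# test: values set on the right-hand operand override those of the left, and
# adding the unmodified default policy afterwards leaves the result untouched.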
def test_register_defect(self):
class Dummy:
def __init__(self):
self.defects = []
obj = Dummy()
defect = object()
policy = email.policy.EmailPolicy()
policy.register_defect(obj, defect)
self.assertEqual(obj.defects, [defect])
defect2 = object()
policy.register_defect(obj, defect2)
self.assertEqual(obj.defects, [defect, defect2])
class MyObj:
def __init__(self):
self.defects = []
class MyDefect(Exception):
pass
def test_handle_defect_raises_on_strict(self):
foo = self.MyObj()
defect = self.MyDefect("the telly is broken")
with self.assertRaisesRegex(self.MyDefect, "the telly is broken"):
email.policy.strict.handle_defect(foo, defect)
def test_handle_defect_registers_defect(self):
foo = self.MyObj()
defect1 = self.MyDefect("one")
email.policy.default.handle_defect(foo, defect1)
self.assertEqual(foo.defects, [defect1])
defect2 = self.MyDefect("two")
email.policy.default.handle_defect(foo, defect2)
self.assertEqual(foo.defects, [defect1, defect2])
class MyPolicy(email.policy.EmailPolicy):
defects = None
def __init__(self, *args, **kw):
super().__init__(*args, defects=[], **kw)
def register_defect(self, obj, defect):
self.defects.append(defect)
def test_overridden_register_defect_still_raises(self):
foo = self.MyObj()
defect = self.MyDefect("the telly is broken")
with self.assertRaisesRegex(self.MyDefect, "the telly is broken"):
self.MyPolicy(raise_on_defect=True).handle_defect(foo, defect)
def test_overriden_register_defect_works(self):
foo = self.MyObj()
defect1 = self.MyDefect("one")
my_policy = self.MyPolicy()
my_policy.handle_defect(foo, defect1)
self.assertEqual(my_policy.defects, [defect1])
self.assertEqual(foo.defects, [])
defect2 = self.MyDefect("two")
my_policy.handle_defect(foo, defect2)
self.assertEqual(my_policy.defects, [defect1, defect2])
self.assertEqual(foo.defects, [])
def test_default_header_factory(self):
h = email.policy.default.header_factory('Test', 'test')
self.assertEqual(h.name, 'Test')
self.assertIsInstance(h, headerregistry.UnstructuredHeader)
self.assertIsInstance(h, headerregistry.BaseHeader)
class Foo:
parse = headerregistry.UnstructuredHeader.parse
def test_each_Policy_gets_unique_factory(self):
policy1 = email.policy.EmailPolicy()
policy2 = email.policy.EmailPolicy()
policy1.header_factory.map_to_type('foo', self.Foo)
h = policy1.header_factory('foo', 'test')
self.assertIsInstance(h, self.Foo)
self.assertNotIsInstance(h, headerregistry.UnstructuredHeader)
h = policy2.header_factory('foo', 'test')
self.assertNotIsInstance(h, self.Foo)
self.assertIsInstance(h, headerregistry.UnstructuredHeader)
def test_clone_copies_factory(self):
policy1 = email.policy.EmailPolicy()
policy2 = policy1.clone()
policy1.header_factory.map_to_type('foo', self.Foo)
h = policy1.header_factory('foo', 'test')
self.assertIsInstance(h, self.Foo)
h = policy2.header_factory('foo', 'test')
self.assertIsInstance(h, self.Foo)
def test_new_factory_overrides_default(self):
mypolicy = email.policy.EmailPolicy()
myfactory = mypolicy.header_factory
newpolicy = mypolicy + email.policy.strict
self.assertEqual(newpolicy.header_factory, myfactory)
newpolicy = email.policy.strict + mypolicy
self.assertEqual(newpolicy.header_factory, myfactory)
def test_adding_default_policies_preserves_default_factory(self):
newpolicy = email.policy.default + email.policy.strict
self.assertEqual(newpolicy.header_factory,
email.policy.EmailPolicy.header_factory)
self.assertEqual(newpolicy.__dict__, {'raise_on_defect': True})
# XXX: Need subclassing tests.
# For adding subclassed objects, make sure the usual rules apply (subclass
# wins), but that the order still works (right overrides left).
class TestPolicyPropagation(unittest.TestCase):
# The abstract methods are used by the parser but not by the wrapper
# functions that call it, so if the exception gets raised we know that the
# policy was actually propagated all the way to feedparser.
class MyPolicy(email.policy.Policy):
def badmethod(self, *args, **kw):
raise Exception("test")
fold = fold_binary = header_fetch_parser = badmethod
header_source_parse = header_store_parse = badmethod
def test_message_from_string(self):
with self.assertRaisesRegex(Exception, "^test$"):
email.message_from_string("Subject: test\n\n",
policy=self.MyPolicy)
def test_message_from_bytes(self):
with self.assertRaisesRegex(Exception, "^test$"):
email.message_from_bytes(b"Subject: test\n\n",
policy=self.MyPolicy)
def test_message_from_file(self):
f = io.StringIO('Subject: test\n\n')
with self.assertRaisesRegex(Exception, "^test$"):
email.message_from_file(f, policy=self.MyPolicy)
def test_message_from_binary_file(self):
f = io.BytesIO(b'Subject: test\n\n')
with self.assertRaisesRegex(Exception, "^test$"):
email.message_from_binary_file(f, policy=self.MyPolicy)
# These are redundant, but we need them for black-box completeness.
def test_parser(self):
p = email.parser.Parser(policy=self.MyPolicy)
with self.assertRaisesRegex(Exception, "^test$"):
p.parsestr('Subject: test\n\n')
def test_bytes_parser(self):
p = email.parser.BytesParser(policy=self.MyPolicy)
with self.assertRaisesRegex(Exception, "^test$"):
p.parsebytes(b'Subject: test\n\n')
# Now that we've established that all the parse methods get the
# policy in to feedparser, we can use message_from_string for
# the rest of the propagation tests.
def _make_msg(self, source='Subject: test\n\n', policy=None):
self.policy = email.policy.default.clone() if policy is None else policy
return email.message_from_string(source, policy=self.policy)
def test_parser_propagates_policy_to_message(self):
msg = self._make_msg()
self.assertIs(msg.policy, self.policy)
def test_parser_propagates_policy_to_sub_messages(self):
msg = self._make_msg(textwrap.dedent("""\
Subject: mime test
MIME-Version: 1.0
Content-Type: multipart/mixed, boundary="XXX"
--XXX
Content-Type: text/plain
test
--XXX
Content-Type: text/plain
test2
--XXX--
"""))
for part in msg.walk():
self.assertIs(part.policy, self.policy)
def test_message_policy_propagates_to_generator(self):
msg = self._make_msg("Subject: test\nTo: foo\n\n",
policy=email.policy.default.clone(linesep='X'))
s = io.StringIO()
g = email.generator.Generator(s)
g.flatten(msg)
self.assertEqual(s.getvalue(), "Subject: testXTo: fooXX")
def test_message_policy_used_by_as_string(self):
msg = self._make_msg("Subject: test\nTo: foo\n\n",
policy=email.policy.default.clone(linesep='X'))
self.assertEqual(msg.as_string(), "Subject: testXTo: fooXX")
if __name__ == '__main__':
unittest.main()
|
jlmadurga/microbot
|
refs/heads/master
|
permabots/views/hooks/kik_hook.py
|
2
|
from rest_framework.views import APIView
from permabots.serializers import KikMessageSerializer
from permabots.models import KikBot, KikUser, KikChat, KikMessage
from rest_framework.response import Response
from rest_framework import status
import logging
from permabots.tasks import handle_message
from datetime import datetime
from permabots import caching
import sys
import traceback
logger = logging.getLogger(__name__)
class OnlyTextMessages(Exception):
pass
class KikHookView(APIView):
"""
View for Kik webhook.
"""
def create_user(self, username):
try:
user = caching.get_or_set(KikUser, username)
except KikUser.DoesNotExist:
user, _ = KikUser.objects.get_or_create(username=username)
return user
def create_message(self, serializer, bot):
sender = self.create_user(serializer.data['from'])
try:
chat = caching.get_or_set(KikChat, serializer.data['chatId'])
except KikChat.DoesNotExist:
chat, _ = KikChat.objects.get_or_create(id=serializer.data['chatId'])
if 'participants' in serializer.data:
for participant in serializer.data['participants']:
chat.participants.add(self.create_user(participant))
if serializer.data['type'] == 'start-chatting':
body = "/start"
elif serializer.data['type'] == 'scan-data':
body = "/start"
else:
body = serializer.data['body']
message, _ = KikMessage.objects.get_or_create(message_id=serializer.data['id'],
from_user=sender,
timestamp=datetime.fromtimestamp(serializer.data['timestamp']),
chat=chat,
body=body)
caching.set(message)
return message
def accepted_types(self, serializer):
return serializer.data['type'] == 'start-chatting' or serializer.data['type'] == 'text' or serializer.data['type'] == 'scan-data'
def post(self, request, hook_id):
"""
Process Kik webhook:
1. Get an enabled Kik bot
2. Verify Kik signature
3. Serialize each message
4. For each message create :class:`KikMessage <permabots.models.kik_api.KikMessage>` and :class:`KikUser <permabots.models.kik_api.KikUser>`
5. Delay each message processing to a task
6. Response provider
"""
try:
bot = caching.get_or_set(KikBot, hook_id)
except KikBot.DoesNotExist:
logger.warning("Hook id %s not associated to a bot" % hook_id)
return Response(status=status.HTTP_404_NOT_FOUND)
signature = request.META.get('HTTP_X_KIK_SIGNATURE')
if signature:
signature.encode('utf-8')
if not bot._bot.verify_signature(signature, request.stream.body):
logger.debug("Kik Bot data %s not verified %s" % (request.data, signature))
return Response(status=403)
logger.debug("Kik Bot data %s verified" % (request.data))
for kik_message in request.data['messages']:
serializer = KikMessageSerializer(data=kik_message)
logger.debug("Kik message %s serialized" % (kik_message))
if serializer.is_valid():
try:
if not self.accepted_types(serializer):
raise OnlyTextMessages
message = self.create_message(serializer, bot)
if bot.enabled:
logger.debug("Kik Bot %s attending request %s" % (bot, kik_message))
handle_message.delay(message.id, bot.id)
else:
logger.error("Message %s ignored by disabled bot %s" % (message, bot))
except OnlyTextMessages:
logger.warning("Not text message %s for bot %s" % (kik_message, hook_id))
return Response(status=status.HTTP_200_OK)
except:
exc_info = sys.exc_info()
traceback.print_exception(*exc_info)
logger.error("Error processing %s for bot %s" % (kik_message, hook_id))
return Response(serializer.errors, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
else:
logger.error("Validation error: %s from kik message %s" % (serializer.errors, kik_message))
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
return Response(serializer.data, status=status.HTTP_200_OK)
|
jounex/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/tests/template_tests/test_unicode.py
|
110
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.template import Template, TemplateEncodingError, Context
from django.utils.safestring import SafeData
from django.utils import six
from django.utils.unittest import TestCase
class UnicodeTests(TestCase):
def test_template(self):
# Templates can be created from unicode strings.
t1 = Template('ŠĐĆŽćžšđ {{ var }}')
# Templates can also be created from bytestrings. These are assumed to
# be encoded using UTF-8.
s = b'\xc5\xa0\xc4\x90\xc4\x86\xc5\xbd\xc4\x87\xc5\xbe\xc5\xa1\xc4\x91 {{ var }}'
t2 = Template(s)
s = b'\x80\xc5\xc0'
self.assertRaises(TemplateEncodingError, Template, s)
# Contexts can be constructed from unicode or UTF-8 bytestrings.
c1 = Context({b"var": b"foo"})
c2 = Context({"var": b"foo"})
c3 = Context({b"var": "Đđ"})
c4 = Context({"var": b"\xc4\x90\xc4\x91"})
# Since both templates and all four contexts represent the same thing,
# they all render the same (and are returned as unicode objects and
# "safe" objects as well, for auto-escaping purposes).
self.assertEqual(t1.render(c3), t2.render(c3))
self.assertIsInstance(t1.render(c3), six.text_type)
self.assertIsInstance(t1.render(c3), SafeData)
|
goodwinnk/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/utils/dateformat.py
|
234
|
"""
PHP date() style date formatting
See http://www.php.net/date for format strings
Usage:
>>> import datetime
>>> d = datetime.datetime.now()
>>> df = DateFormat(d)
>>> print df.format('jS F Y H:i')
7th October 2003 11:39
>>>
"""
import re
import time
import calendar
from django.utils.dates import MONTHS, MONTHS_3, MONTHS_ALT, MONTHS_AP, WEEKDAYS, WEEKDAYS_ABBR
from django.utils.tzinfo import LocalTimezone
from django.utils.translation import ugettext as _
from django.utils.encoding import force_unicode
re_formatchars = re.compile(r'(?<!\\)([aAbBcdDEfFgGhHiIjlLmMnNOPrsStTUuwWyYzZ])')
re_escaped = re.compile(r'\\(.)')
class Formatter(object):
def format(self, formatstr):
pieces = []
for i, piece in enumerate(re_formatchars.split(force_unicode(formatstr))):
if i % 2:
pieces.append(force_unicode(getattr(self, piece)()))
elif piece:
pieces.append(re_escaped.sub(r'\1', piece))
return u''.join(pieces)
class TimeFormat(Formatter):
def __init__(self, t):
self.data = t
def a(self):
"'a.m.' or 'p.m.'"
if self.data.hour > 11:
return _('p.m.')
return _('a.m.')
def A(self):
"'AM' or 'PM'"
if self.data.hour > 11:
return _('PM')
return _('AM')
def B(self):
"Swatch Internet time"
raise NotImplementedError
def f(self):
"""
Time, in 12-hour hours and minutes, with minutes left off if they're
zero.
Examples: '1', '1:30', '2:05', '2'
Proprietary extension.
"""
if self.data.minute == 0:
return self.g()
return u'%s:%s' % (self.g(), self.i())
def g(self):
"Hour, 12-hour format without leading zeros; i.e. '1' to '12'"
if self.data.hour == 0:
return 12
if self.data.hour > 12:
return self.data.hour - 12
return self.data.hour
def G(self):
"Hour, 24-hour format without leading zeros; i.e. '0' to '23'"
return self.data.hour
def h(self):
"Hour, 12-hour format; i.e. '01' to '12'"
return u'%02d' % self.g()
def H(self):
"Hour, 24-hour format; i.e. '00' to '23'"
return u'%02d' % self.G()
def i(self):
"Minutes; i.e. '00' to '59'"
return u'%02d' % self.data.minute
def P(self):
"""
Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off
if they're zero and the strings 'midnight' and 'noon' if appropriate.
Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.'
Proprietary extension.
"""
if self.data.minute == 0 and self.data.hour == 0:
return _('midnight')
if self.data.minute == 0 and self.data.hour == 12:
return _('noon')
return u'%s %s' % (self.f(), self.a())
def s(self):
"Seconds; i.e. '00' to '59'"
return u'%02d' % self.data.second
def u(self):
"Microseconds"
return self.data.microsecond
class DateFormat(TimeFormat):
year_days = [None, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]
def __init__(self, dt):
# Accepts either a datetime or date object.
self.data = dt
self.timezone = getattr(dt, 'tzinfo', None)
if hasattr(self.data, 'hour') and not self.timezone:
self.timezone = LocalTimezone(dt)
def b(self):
"Month, textual, 3 letters, lowercase; e.g. 'jan'"
return MONTHS_3[self.data.month]
def c(self):
"""
ISO 8601 Format
Example : '2008-01-02T10:30:00.000123'
"""
return self.data.isoformat()
def d(self):
"Day of the month, 2 digits with leading zeros; i.e. '01' to '31'"
return u'%02d' % self.data.day
def D(self):
"Day of the week, textual, 3 letters; e.g. 'Fri'"
return WEEKDAYS_ABBR[self.data.weekday()]
def E(self):
"Alternative month names as required by some locales. Proprietary extension."
return MONTHS_ALT[self.data.month]
def F(self):
"Month, textual, long; e.g. 'January'"
return MONTHS[self.data.month]
def I(self):
"'1' if Daylight Savings Time, '0' otherwise."
if self.timezone and self.timezone.dst(self.data):
return u'1'
else:
return u'0'
def j(self):
"Day of the month without leading zeros; i.e. '1' to '31'"
return self.data.day
def l(self):
"Day of the week, textual, long; e.g. 'Friday'"
return WEEKDAYS[self.data.weekday()]
def L(self):
"Boolean for whether it is a leap year; i.e. True or False"
return calendar.isleap(self.data.year)
def m(self):
"Month; i.e. '01' to '12'"
return u'%02d' % self.data.month
def M(self):
"Month, textual, 3 letters; e.g. 'Jan'"
return MONTHS_3[self.data.month].title()
def n(self):
"Month without leading zeros; i.e. '1' to '12'"
return self.data.month
def N(self):
"Month abbreviation in Associated Press style. Proprietary extension."
return MONTHS_AP[self.data.month]
def O(self):
"Difference to Greenwich time in hours; e.g. '+0200'"
seconds = self.Z()
return u"%+03d%02d" % (seconds // 3600, (seconds // 60) % 60)
def r(self):
"RFC 2822 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'"
return self.format('D, j M Y H:i:s O')
def S(self):
"English ordinal suffix for the day of the month, 2 characters; i.e. 'st', 'nd', 'rd' or 'th'"
if self.data.day in (11, 12, 13): # Special case
return u'th'
last = self.data.day % 10
if last == 1:
return u'st'
if last == 2:
return u'nd'
if last == 3:
return u'rd'
return u'th'
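# Illustrative values (a sketch, not part of the original Django source):
#   day 1 -> 'st', day 2 -> 'nd', day 3 -> 'rd', day 4 -> 'th',
#   days 11, 12 and 13 -> 'th' (the special case), day 21 -> 'st'.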
def t(self):
"Number of days in the given month; i.e. '28' to '31'"
return u'%02d' % calendar.monthrange(self.data.year, self.data.month)[1]
def T(self):
"Time zone of this machine; e.g. 'EST' or 'MDT'"
name = self.timezone and self.timezone.tzname(self.data) or None
if name is None:
name = self.format('O')
return unicode(name)
def U(self):
"Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)"
if getattr(self.data, 'tzinfo', None):
return int(calendar.timegm(self.data.utctimetuple()))
else:
return int(time.mktime(self.data.timetuple()))
def w(self):
"Day of the week, numeric, i.e. '0' (Sunday) to '6' (Saturday)"
return (self.data.weekday() + 1) % 7
def W(self):
"ISO-8601 week number of year, weeks starting on Monday"
# Algorithm from http://www.personal.ecu.edu/mccartyr/ISOwdALG.txt
week_number = None
jan1_weekday = self.data.replace(month=1, day=1).weekday() + 1
weekday = self.data.weekday() + 1
day_of_year = self.z()
if day_of_year <= (8 - jan1_weekday) and jan1_weekday > 4:
if jan1_weekday == 5 or (jan1_weekday == 6 and calendar.isleap(self.data.year-1)):
week_number = 53
else:
week_number = 52
else:
if calendar.isleap(self.data.year):
i = 366
else:
i = 365
if (i - day_of_year) < (4 - weekday):
week_number = 1
else:
j = day_of_year + (7 - weekday) + (jan1_weekday - 1)
week_number = j // 7
if jan1_weekday > 4:
week_number -= 1
return week_number
def y(self):
"Year, 2 digits; e.g. '99'"
return unicode(self.data.year)[2:]
def Y(self):
"Year, 4 digits; e.g. '1999'"
return self.data.year
def z(self):
"Day of the year; i.e. '0' to '365'"
doy = self.year_days[self.data.month] + self.data.day
if self.L() and self.data.month > 2:
doy += 1
return doy
def Z(self):
"""
Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for
timezones west of UTC is always negative, and for those east of UTC is
always positive.
"""
if not self.timezone:
return 0
offset = self.timezone.utcoffset(self.data)
# Only days can be negative, so negative offsets have days=-1 and
# seconds positive. Positive offsets have days=0
return offset.days * 86400 + offset.seconds
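# Worked examples (added for clarity, derived from the arithmetic above): a
# UTC+2 offset has days=0, seconds=7200, giving 7200; a UTC-5 offset has
# days=-1, seconds=68400, giving -86400 + 68400 = -18000.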
def format(value, format_string):
"Convenience function"
df = DateFormat(value)
return df.format(format_string)
def time_format(value, format_string):
"Convenience function"
tf = TimeFormat(value)
return tf.format(format_string)
|
alorchhota/bioinformatics-algorithms-1
|
refs/heads/master
|
week9/code/7.SuffixArrayConstruction.py
|
1
|
import os
import csv
import sys
import re
import importlib
import networkx as nx
# settings
#curDir = 'E:/GitHub/bioinformatics-algorithms-1/week9'
curDir = 'D:/GitHub/bioinformatics-algorithms-1/week9'
inputFile = './data/7.SuffixArrayConstruction-2.txt'
inputFile = 'C:/Users/Ashis/Downloads/dataset_310_2.txt'
outputFile = './results/7.SuffixArrayConstruction.txt'
# set current directory
os.chdir(curDir)
## read input
with open(inputFile) as f:
inputs = f.readlines()
genome = inputs[0].strip()
#genome = 'GFEDCBA'
#genome = genome[0:50000]
genomeLen = len(genome)
suffix = lambda idx: genome[idx:]
## function to find longest common prefix
def longestCommonPrefix(str1, str2):
n = min([len(str1), len(str2)])
i = 0
while i < n and str1[i]==str2[i]:
i += 1
prefix = str1[0:i]
return prefix
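# Example behaviour (an illustrative sketch, not in the original script):
#   longestCommonPrefix('banana', 'bandana') -> 'ban'
#   longestCommonPrefix('abc', 'xyz') -> '' (no shared prefix)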
## function to build a suffix tree from a genome
def suffixTree(genome):
## build suffix tree
g = nx.DiGraph()
g.add_node(1) # add root with id 1
# two required functions
neighborsWithLabelPrefix = lambda node, prefix: [e[1] for e in g.edges_iter(node, data=True) if genome[e[2]['labelIdx'][0]] == prefix]
getNewNode = lambda : len(g.nodes())+1
#print(longestCommonPrefix('abc','ab'))
genomeLen = len(genome)
for idx in range(genomeLen):
# traverse as long as pattern matches
curNode = 1
i = idx
while(i < genomeLen):
# find the edge with the first prefix character
nextNode = neighborsWithLabelPrefix(curNode, genome[i])
# if there is no edge with the first prefix character,
# it must be a new edge with the rest of the string.
if len(nextNode) == 0:
newNode = getNewNode()
g.add_edge(curNode, newNode, {'labelIdx':[i,genomeLen]})
g.node[newNode]['startIdx'] = idx
break
# get the edge label
nextNode = nextNode[0]
edgeLabelIndices = g.edge[curNode][nextNode]['labelIdx']
edgeLabel = genome[edgeLabelIndices[0]:edgeLabelIndices[1]]
edgeLabelLen = len(edgeLabel)
# if the rest of the string starts with edgeLabel,
# move to the next node
if genome[i:i+edgeLabelLen] == edgeLabel:
curNode = nextNode
i += edgeLabelLen
else:
# edgeLabel matches partially
prefix = longestCommonPrefix(genome[i:i+edgeLabelLen], edgeLabel)
prefixLen = len(prefix)
# create two new nodes, one intermediate, another for the unmatched string
intermediateNode = getNewNode()
unmatchedNode = intermediateNode + 1
# remove existing edge from curNode to nextNode
g.remove_edge(curNode, nextNode)
# add edge from curNode to intermediateNode
g.add_edge(curNode, intermediateNode, {'labelIdx':(edgeLabelIndices[0],edgeLabelIndices[0]+prefixLen)})
# add edge from intermediateNode to nextNode
g.add_edge(intermediateNode, nextNode, {'labelIdx':(edgeLabelIndices[0]+prefixLen, edgeLabelIndices[1])})
# add edge from intermediateNode to unmatchedNode
g.add_edge(intermediateNode, unmatchedNode, {'labelIdx':(i+prefixLen, genomeLen)})
g.node[unmatchedNode]['startIdx'] = idx
break
return g
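# Sketch of the resulting structure (restating the code above): node 1 is the
# root, every edge carries a 'labelIdx' (start, end) slice into `genome`, and
# each leaf stores the starting index of its suffix in the 'startIdx' attribute.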
#tree = suffixTree(genome)
#indexList = list(range(genomeLen))
#indexList.sort(key=lambda x: genome[x:])
#genome = 'AACGATAGCGGTAGA$'
def merge_sort(m):
if len(m) <= 1:
return m
middle = int(len(m) / 2)
left = m[:middle]
right = m[middle:]
left = merge_sort(left)
right = merge_sort(right)
return list(merge(left, right))
def merge(left, right):
result = []
left_idx, right_idx = 0, 0
while left_idx < len(left) and right_idx < len(right):
# change the direction of this comparison to change the direction of the sort
if suffix(left[left_idx]) <= suffix(right[right_idx]):
result.append(left[left_idx])
left_idx += 1
else:
result.append(right[right_idx])
right_idx += 1
if left:
result.extend(left[left_idx:])
if right:
result.extend(right[right_idx:])
return result
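# The merge sort above orders suffix start positions by comparing the suffixes
# themselves, which yields the suffix array. A small illustrative example
# (hypothetical input, not from the assignment data): for genome = 'banana$',
# merge_sort(list(range(7))) gives [6, 5, 3, 1, 0, 4, 2], i.e. the suffixes
# '$', 'a$', 'ana$', 'anana$', 'banana$', 'na$', 'nana$' in sorted order.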
indexList = list(range(genomeLen))
sortedList = merge_sort(indexList)
#print(sortedList)
## output
with open(outputFile, "w") as f:
f.writelines(', '.join([str(i) for i in sortedList]))
print('done.')
|
chachan/nodeshot
|
refs/heads/master
|
nodeshot/core/websockets/urls.py
|
8
|
from django.conf.urls import patterns, url
from django.conf import settings
urlpatterns = patterns('',)
if settings.DEBUG:
urlpatterns += patterns('nodeshot.core.websockets.views',
url(r'^test/$', 'test', name='websocket_test'),
)
|
vasilenkomike/xen-api
|
refs/heads/master
|
ocaml/idl/binding_sanity_checks/create_vm.py
|
34
|
#!/usr/bin/env python
import XenAPI
import provision
import sanitychecklib
#log in
session=sanitychecklib.getsession()
sx=session.xenapi
#find the template for Debian Etch
vms = sx.VM.get_all()
print "Server", sanitychecklib.server, "has ", len(vms), "VMs",
etch_template_list = [x for x in vms if (('Etch' in sx.VM.get_name_label(x)) and (sx.VM.get_is_a_template(x)))]
print "including", len(etch_template_list), "template for 'Etch'"
etch_template=etch_template_list[0]
print "We pick the first template: "
print "name: ", sx.VM.get_name_label(etch_template)
print "description:", sx.VM.get_name_description(etch_template)
#Make a copy of the template
print "Cloning..."
clone=sx.VM.clone(etch_template, sanitychecklib.test_vm_name)
#find out where to put the new machine's disks by getting the first pool (I don't think there can be more than one)
#and using its default storage repository
pool_list=sx.pool.get_all()
if len(pool_list)==1:
print "There's only one pool"
else:
print "There are", len(pool_list), "pools"
print "We pick the first one:"
first_pool=pool_list[0]
print "name:", sx.pool.get_name_label(first_pool)
print "description: ", sx.pool.get_name_description(first_pool)
default_SR=sx.pool.get_default_SR(first_pool)
print "The default SR is: "
print "Name:", sx.SR.get_name_label(default_SR)
print "Description:", sx.SR.get_name_description(default_SR)
#set the new copy to have its disks in the default SR
#this is a debian template specific hack which allows us to create Debian VMs easily
spec=provision.getProvisionSpec(session, clone)
spec.setSR(sx.SR.get_uuid(default_SR))
provision.setProvisionSpec(session, clone, spec)
#now 'provision' it, which causes the disks to actually be created.
print "provisioning...."
sx.VM.provision(clone)
print "provisioned"
#now find out which network to attach the new machine to
#by finding out what the pool master host is connected to.
pool_master=sx.pool.get_master(first_pool)
master_PIFs=sx.host.get_PIFs(pool_master)
primary_PIF=master_PIFs[0]
master_network=sx.PIF.get_network(primary_PIF)
#attach new VM to default SR and master network
print "Creating VIF..."
new_vif = { 'device': '0',
'network': master_network,
'VM': clone,
'MAC': "",
'MTU': "1500",
"qos_algorithm_type": "",
"qos_algorithm_params": {},
"other_config": {} }
sx.VIF.create(new_vif)
#Another Debian template specific hack. If 'noninteractive' is passed on the kernel command line,
#the new machine will boot without asking for its root and VNC passwords to be set, and just use 'xensource'.
print "Adding noninteractive to the kernel commandline"
print "This is a hack in the template to enable the root account to be created with password 'xensource'"
sx.VM.set_PV_args(clone, "noninteractive")
#Should be all set now. Fire up the machine.
print "booting..."
sx.VM.start(clone, False, True)
#log out
print "logging out"
session.logout()
|
harish1696/Midterm-Project
|
refs/heads/master
|
vendor/googletest/googletest/test/gtest_break_on_failure_unittest.py
|
2140
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
import os
import sys
# Constants.
IS_WINDOWS = os.name == 'nt'
# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'
# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'
# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'
# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'
# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_break_on_failure_unittest_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
def Run(command):
"""Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""
p = gtest_test_utils.Subprocess(command, env=environ)
if p.terminated_by_signal:
return 1
else:
return 0
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable or
the --gtest_break_on_failure flag to turn assertion failures into
segmentation faults.
"""
def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
"""Runs gtest_break_on_failure_unittest_ and verifies that it does
(or does not) have a seg-fault.
Args:
env_var_value: value of the GTEST_BREAK_ON_FAILURE environment
variable; None if the variable should be unset.
flag_value: value of the --gtest_break_on_failure flag;
None if the flag should not be present.
expect_seg_fault: 1 if the program is expected to generate a seg-fault;
0 otherwise.
"""
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
else:
flag = '--%s' % BREAK_ON_FAILURE_FLAG
command = [EXE_PATH]
if flag:
command.append(flag)
if expect_seg_fault:
should_or_not = 'should'
else:
should_or_not = 'should not'
has_seg_fault = Run(command)
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
(BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
should_or_not))
self.assert_(has_seg_fault == expect_seg_fault, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None,
flag_value=None,
expect_seg_fault=0)
def testEnvVar(self):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value=None,
expect_seg_fault=1)
def testFlag(self):
"""Tests using the --gtest_break_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
def testFlagOverridesEnvVar(self):
"""Tests that the flag overrides the environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='0',
flag_value='1',
expect_seg_fault=1)
self.RunAndVerify(env_var_value='1',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
def testBreakOnFailureOverridesThrowOnFailure(self):
"""Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)
if IS_WINDOWS:
def testCatchExceptionsDoesNotInterfere(self):
"""Tests that gtest_catch_exceptions doesn't interfere."""
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
if __name__ == '__main__':
gtest_test_utils.Main()
|
alivecor/tensorflow
|
refs/heads/master
|
tensorflow/python/keras/_impl/keras/layers/recurrent.py
|
6
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Recurrent layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras._impl.keras import activations
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras import constraints
from tensorflow.python.keras._impl.keras import initializers
from tensorflow.python.keras._impl.keras import regularizers
from tensorflow.python.keras._impl.keras.engine import InputSpec
from tensorflow.python.keras._impl.keras.engine import Layer
# pylint: disable=access-member-before-definition
def _time_distributed_dense(x,
w,
b=None,
dropout=None,
input_dim=None,
output_dim=None,
timesteps=None,
training=None):
"""Apply `y . w + b` for every temporal slice y of x.
Arguments:
x: input tensor.
w: weight matrix.
b: optional bias vector.
dropout: whether to apply dropout (same dropout mask
for every temporal slice of the input).
input_dim: integer; optional dimensionality of the input.
output_dim: integer; optional dimensionality of the output.
timesteps: integer; optional number of timesteps.
training: training phase tensor or boolean.
Returns:
Output tensor.
"""
if not input_dim:
input_dim = K.shape(x)[2]
if not timesteps:
timesteps = K.shape(x)[1]
if not output_dim:
output_dim = K.shape(w)[1]
if dropout is not None and 0. < dropout < 1.:
# apply the same dropout pattern at every timestep
ones = K.ones_like(K.reshape(x[:, 0, :], (-1, input_dim)))
dropout_matrix = K.dropout(ones, dropout)
expanded_dropout_matrix = K.repeat(dropout_matrix, timesteps)
x = K.in_train_phase(x * expanded_dropout_matrix, x, training=training)
# collapse time dimension and batch dimension together
x = K.reshape(x, (-1, input_dim))
x = K.dot(x, w)
if b is not None:
x = K.bias_add(x, b)
# reshape to 3D tensor
if K.backend() == 'tensorflow':
x = K.reshape(x, K.stack([-1, timesteps, output_dim]))
x.set_shape([None, None, output_dim])
else:
x = K.reshape(x, (-1, timesteps, output_dim))
return x
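# Shape sketch (derived from the function above, assuming a 3D input): an input
# of shape (batch, timesteps, input_dim) is flattened to
# (batch * timesteps, input_dim), multiplied by `w` of shape
# (input_dim, output_dim), then reshaped back to (batch, timesteps, output_dim).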
class Recurrent(Layer):
"""Abstract base class for recurrent layers.
Do not use in a model -- it's not a valid layer!
Use its children classes `LSTM`, `GRU` and `SimpleRNN` instead.
All recurrent layers (`LSTM`, `GRU`, `SimpleRNN`) also
follow the specifications of this class and accept
the keyword arguments listed below.
Example:
```python
# as the first layer in a Sequential model
model = Sequential()
model.add(LSTM(32, input_shape=(10, 64)))
# now model.output_shape == (None, 32)
# note: `None` is the batch dimension.
# for subsequent layers, no need to specify the input size:
model.add(LSTM(16))
# to stack recurrent layers, you must use return_sequences=True
# on any recurrent layer that feeds into another recurrent layer.
# note that you only need to specify the input size on the first layer.
model = Sequential()
model.add(LSTM(64, input_dim=64, input_length=10, return_sequences=True))
model.add(LSTM(32, return_sequences=True))
model.add(LSTM(10))
```
Arguments:
weights: list of Numpy arrays to set as initial weights.
The list should have 3 elements, of shapes:
`[(input_dim, output_dim), (output_dim, output_dim), (output_dim,)]`.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
implementation: one of {0, 1, or 2}.
If set to 0, the RNN will use
an implementation that uses fewer, larger matrix products,
thus running faster on CPU but consuming more memory.
If set to 1, the RNN will use more matrix products,
but smaller ones, thus running slower
(may actually be faster on GPU) while consuming less memory.
If set to 2 (LSTM/GRU only),
the RNN will combine the input gate,
the forget gate and the output gate into a single matrix,
enabling more time-efficient parallelization on the GPU.
Note: RNN dropout must be shared for all gates,
resulting in a slightly reduced regularization.
input_dim: dimensionality of the input (integer).
This argument (or alternatively, the keyword argument `input_shape`)
is required when using this layer as the first layer in a model.
input_length: Length of input sequences, to be specified
when it is constant.
This argument is required if you are going to connect
`Flatten` then `Dense` layers upstream
(without it, the shape of the dense outputs cannot be computed).
Note that if the recurrent layer is not the first layer
in your model, you would need to specify the input length
at the level of the first layer
(e.g. via the `input_shape` argument)
Input shape:
3D tensor with shape `(batch_size, timesteps, input_dim)`,
(Optional) 2D tensors with shape `(batch_size, output_dim)`.
Output shape:
- if `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each with shape `(batch_size, units)`.
- if `return_sequences`: 3D tensor with shape
`(batch_size, timesteps, units)`.
- else, 2D tensor with shape `(batch_size, units)`.
# Masking
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an `Embedding` layer with the `mask_zero` parameter
set to `True`.
# Note on using statefulness in RNNs
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- specify `stateful=True` in the layer constructor.
- specify a fixed batch size for your model, by passing
if sequential model:
`batch_input_shape=(...)` to the first layer in your model.
else for functional model with 1 or more Input layers:
`batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
- specify `shuffle=False` when calling fit().
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
# Note on specifying the initial state of RNNs
You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`. The value of
`initial_state` should be a tensor or list of tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by
calling `reset_states` with the keyword argument `states`. The value of
`states` should be a numpy array or list of numpy arrays representing
the initial state of the RNN layer.
"""
def __init__(self,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
implementation=0,
**kwargs):
super(Recurrent, self).__init__(**kwargs)
self.return_sequences = return_sequences
self.return_state = return_state
self.go_backwards = go_backwards
self.stateful = stateful
self.unroll = unroll
self.implementation = implementation
self.supports_masking = True
self.input_spec = [InputSpec(ndim=3)]
self.state_spec = None
self.dropout = 0
self.recurrent_dropout = 0
def _compute_output_shape(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.return_sequences:
output_shape = (input_shape[0], input_shape[1], self.units)
else:
output_shape = (input_shape[0], self.units)
if self.return_state:
state_shape = [tensor_shape.TensorShape(
(input_shape[0], self.units)) for _ in self.states]
return [tensor_shape.TensorShape(output_shape)] + state_shape
return tensor_shape.TensorShape(output_shape)
def compute_mask(self, inputs, mask):
if isinstance(mask, list):
mask = mask[0]
output_mask = mask if self.return_sequences else None
if self.return_state:
state_mask = [None for _ in self.states]
return [output_mask] + state_mask
return output_mask
def step(self, inputs, states):
raise NotImplementedError
def get_constants(self, inputs, training=None):
return []
def get_initial_state(self, inputs):
# build an all-zero tensor of shape (samples, output_dim)
initial_state = K.zeros_like(inputs) # (samples, timesteps, input_dim)
initial_state = K.sum(initial_state, axis=(1, 2)) # (samples,)
initial_state = K.expand_dims(initial_state) # (samples, 1)
initial_state = K.tile(initial_state, [1,
self.units]) # (samples, output_dim)
initial_state = [initial_state for _ in range(len(self.states))]
return initial_state
def preprocess_input(self, inputs, training=None):
return inputs
def __call__(self, inputs, initial_state=None, **kwargs):
if (isinstance(inputs, (list, tuple)) and
len(inputs) > 1
and initial_state is None):
initial_state = inputs[1:]
inputs = inputs[0]
# If `initial_state` is specified,
# and if it a Keras tensor,
# then add it to the inputs and temporarily
# modify the input spec to include the state.
if initial_state is None:
return super(Recurrent, self).__call__(inputs, **kwargs)
if not isinstance(initial_state, (list, tuple)):
initial_state = [initial_state]
is_keras_tensor = hasattr(initial_state[0], '_keras_history')
for tensor in initial_state:
if hasattr(tensor, '_keras_history') != is_keras_tensor:
raise ValueError('The initial state of an RNN layer cannot be'
' specified with a mix of Keras tensors and'
' non-Keras tensors')
if is_keras_tensor:
# Compute the full input spec, including state
input_spec = self.input_spec
state_spec = self.state_spec
if not isinstance(input_spec, list):
input_spec = [input_spec]
if not isinstance(state_spec, list):
state_spec = [state_spec]
self.input_spec = input_spec + state_spec
# Compute the full inputs, including state
inputs = [inputs] + list(initial_state)
# Perform the call
output = super(Recurrent, self).__call__(inputs, **kwargs)
# Restore original input spec
self.input_spec = input_spec
return output
else:
kwargs['initial_state'] = initial_state
return super(Recurrent, self).__call__(inputs, **kwargs)
def call(self, inputs, mask=None, training=None, initial_state=None):
# input shape: `(samples, time (padded with zeros), input_dim)`
# note that the .build() method of subclasses MUST define
# self.input_spec and self.state_spec with complete input shapes.
if isinstance(inputs, list):
initial_state = inputs[1:]
inputs = inputs[0]
elif initial_state is not None:
pass
elif self.stateful:
initial_state = self.states
else:
initial_state = self.get_initial_state(inputs)
if isinstance(mask, list):
mask = mask[0]
if len(initial_state) != len(self.states):
raise ValueError('Layer has ' + str(len(self.states)) +
' states but was passed ' + str(len(initial_state)) +
' initial states.')
input_shape = K.int_shape(inputs)
if self.unroll and input_shape[1] is None:
raise ValueError('Cannot unroll a RNN if the '
'time dimension is undefined. \n'
'- If using a Sequential model, '
'specify the time dimension by passing '
'an `input_shape` or `batch_input_shape` '
'argument to your first layer. If your '
'first layer is an Embedding, you can '
'also use the `input_length` argument.\n'
'- If using the functional API, specify '
'the time dimension by passing a `shape` '
'or `batch_shape` argument to your Input layer.')
constants = self.get_constants(inputs, training=None)
preprocessed_input = self.preprocess_input(inputs, training=None)
last_output, outputs, states = K.rnn(
self.step,
preprocessed_input,
initial_state,
go_backwards=self.go_backwards,
mask=mask,
constants=constants,
unroll=self.unroll)
if self.stateful:
updates = []
for i in range(len(states)):
updates.append((self.states[i], states[i]))
self.add_update(updates, inputs)
# Properly set learning phase
if 0 < self.dropout + self.recurrent_dropout:
last_output._uses_learning_phase = True
outputs._uses_learning_phase = True
if not self.return_sequences:
outputs = last_output
if self.return_state:
if not isinstance(states, (list, tuple)):
states = [states]
else:
states = list(states)
return [outputs] + states
return outputs
def reset_states(self, states=None):
if not self.stateful:
raise AttributeError('Layer must be stateful.')
batch_size = self.input_spec[0].shape[0]
if not batch_size:
raise ValueError('If a RNN is stateful, it needs to know '
'its batch size. Specify the batch size '
'of your input tensors: \n'
'- If using a Sequential model, '
'specify the batch size by passing '
'a `batch_input_shape` '
'argument to your first layer.\n'
'- If using the functional API, specify '
'the time dimension by passing a '
'`batch_shape` argument to your Input layer.')
# initialize state if None
if self.states[0] is None:
self.states = [K.zeros((batch_size, self.units)) for _ in self.states]
elif states is None:
for state in self.states:
K.set_value(state, np.zeros((batch_size, self.units)))
else:
if not isinstance(states, (list, tuple)):
states = [states]
if len(states) != len(self.states):
raise ValueError('Layer ' + self.name + ' expects ' +
str(len(self.states)) + ' states, '
'but it received ' + str(len(states)) +
' state values. Input received: ' + str(states))
for index, (value, state) in enumerate(zip(states, self.states)):
if value.shape != (batch_size, self.units):
raise ValueError('State ' + str(index) +
' is incompatible with layer ' + self.name +
': expected shape=' + str((batch_size, self.units)) +
', found shape=' + str(value.shape))
K.set_value(state, value)
def get_config(self):
config = {
'return_sequences': self.return_sequences,
'return_state': self.return_state,
'go_backwards': self.go_backwards,
'stateful': self.stateful,
'unroll': self.unroll,
'implementation': self.implementation
}
base_config = super(Recurrent, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class SimpleRNN(Recurrent):
"""Fully-connected RNN where the output is to be fed back to input.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
References:
- [A Theoretically Grounded Application of Dropout in Recurrent Neural
Networks](http://arxiv.org/abs/1512.05287)
"""
def __init__(self,
units,
activation='tanh',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
super(SimpleRNN, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.state_spec = InputSpec(shape=(None, self.units))
def build(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
input_shape = tensor_shape.TensorShape(input_shape).as_list()
batch_size = input_shape[0] if self.stateful else None
self.input_dim = input_shape[2]
self.input_spec[0] = InputSpec(shape=(batch_size, None, self.input_dim))
self.states = [None]
if self.stateful:
self.reset_states()
self.kernel = self.add_weight(
shape=(self.input_dim, self.units),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
self.bias = self.add_weight(
shape=(self.units,),
name='bias',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.built = True
def preprocess_input(self, inputs, training=None):
if self.implementation > 0:
return inputs
else:
input_shape = inputs.get_shape().as_list()
input_dim = input_shape[2]
timesteps = input_shape[1]
return _time_distributed_dense(
inputs,
self.kernel,
self.bias,
self.dropout,
input_dim,
self.units,
timesteps,
training=training)
def step(self, inputs, states):
if self.implementation == 0:
h = inputs
else:
if 0 < self.dropout < 1:
h = K.dot(inputs * states[1], self.kernel)
else:
h = K.dot(inputs, self.kernel)
if self.bias is not None:
h = K.bias_add(h, self.bias)
prev_output = states[0]
if 0 < self.recurrent_dropout < 1:
prev_output *= states[2]
output = h + K.dot(prev_output, self.recurrent_kernel)
if self.activation is not None:
output = self.activation(output)
# Properly set learning phase on output tensor.
if 0 < self.dropout + self.recurrent_dropout:
output._uses_learning_phase = True
return output, [output]
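# In equation form (a restatement of the step above, ignoring dropout masks):
#   output_t = activation(dot(x_t, kernel) + bias + dot(output_{t-1}, recurrent_kernel))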
def get_constants(self, inputs, training=None):
constants = []
if self.implementation != 0 and 0 < self.dropout < 1:
input_shape = K.int_shape(inputs)
input_dim = input_shape[-1]
ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, int(input_dim)))
def dropped_inputs():
return K.dropout(ones, self.dropout)
dp_mask = K.in_train_phase(dropped_inputs, ones, training=training)
constants.append(dp_mask)
else:
constants.append(K.cast_to_floatx(1.))
if 0 < self.recurrent_dropout < 1:
ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, self.units))
def dropped_inputs(): # pylint: disable=function-redefined
return K.dropout(ones, self.recurrent_dropout)
rec_dp_mask = K.in_train_phase(dropped_inputs, ones, training=training)
constants.append(rec_dp_mask)
else:
constants.append(K.cast_to_floatx(1.))
return constants
def get_config(self):
config = {
'units': self.units,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout
}
base_config = super(SimpleRNN, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class GRU(Recurrent):
"""Gated Recurrent Unit - Cho et al.
2014.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
References:
- [On the Properties of Neural Machine Translation: Encoder-Decoder
Approaches](https://arxiv.org/abs/1409.1259)
- [Empirical Evaluation of Gated Recurrent Neural Networks on Sequence
Modeling](http://arxiv.org/abs/1412.3555v1)
- [A Theoretically Grounded Application of Dropout in Recurrent Neural
Networks](http://arxiv.org/abs/1512.05287)
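Example (an illustrative sketch added by the editor, not part of the original
docstring; it assumes the usual Keras `Sequential` model and `Dense` layer
from the same package):
    model = Sequential()
    # Map sequences of shape (timesteps=10, features=8) to one scalar each.
    model.add(GRU(32, input_shape=(10, 8), dropout=0.2))
    model.add(Dense(1))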
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
super(GRU, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.state_spec = InputSpec(shape=(None, self.units))
def build(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
input_shape = tensor_shape.TensorShape(input_shape).as_list()
batch_size = input_shape[0] if self.stateful else None
self.input_dim = input_shape[2]
self.input_spec[0] = InputSpec(shape=(batch_size, None, self.input_dim))
self.states = [None]
if self.stateful:
self.reset_states()
self.kernel = self.add_weight(
shape=(self.input_dim, self.units * 3),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 3),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
self.bias = self.add_weight(
shape=(self.units * 3,),
name='bias',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.kernel_z = self.kernel[:, :self.units]
self.recurrent_kernel_z = self.recurrent_kernel[:, :self.units]
self.kernel_r = self.kernel[:, self.units:self.units * 2]
self.recurrent_kernel_r = self.recurrent_kernel[:, self.units:
self.units * 2]
self.kernel_h = self.kernel[:, self.units * 2:]
self.recurrent_kernel_h = self.recurrent_kernel[:, self.units * 2:]
if self.use_bias:
self.bias_z = self.bias[:self.units]
self.bias_r = self.bias[self.units:self.units * 2]
self.bias_h = self.bias[self.units * 2:]
else:
self.bias_z = None
self.bias_r = None
self.bias_h = None
self.built = True
def preprocess_input(self, inputs, training=None):
if self.implementation == 0:
input_shape = inputs.get_shape().as_list()
input_dim = input_shape[2]
timesteps = input_shape[1]
x_z = _time_distributed_dense(
inputs,
self.kernel_z,
self.bias_z,
self.dropout,
input_dim,
self.units,
timesteps,
training=training)
x_r = _time_distributed_dense(
inputs,
self.kernel_r,
self.bias_r,
self.dropout,
input_dim,
self.units,
timesteps,
training=training)
x_h = _time_distributed_dense(
inputs,
self.kernel_h,
self.bias_h,
self.dropout,
input_dim,
self.units,
timesteps,
training=training)
return K.concatenate([x_z, x_r, x_h], axis=2)
else:
return inputs
def get_constants(self, inputs, training=None):
constants = []
if self.implementation != 0 and 0 < self.dropout < 1:
input_shape = K.int_shape(inputs)
input_dim = input_shape[-1]
ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, int(input_dim)))
def dropped_inputs():
return K.dropout(ones, self.dropout)
dp_mask = [
K.in_train_phase(dropped_inputs, ones, training=training)
for _ in range(3)
]
constants.append(dp_mask)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(3)])
if 0 < self.recurrent_dropout < 1:
ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, self.units))
def dropped_inputs(): # pylint: disable=function-redefined
return K.dropout(ones, self.recurrent_dropout)
rec_dp_mask = [
K.in_train_phase(dropped_inputs, ones, training=training)
for _ in range(3)
]
constants.append(rec_dp_mask)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(3)])
return constants
def step(self, inputs, states):
h_tm1 = states[0] # previous memory
dp_mask = states[1] # dropout matrices for recurrent units
rec_dp_mask = states[2]
if self.implementation == 2:
matrix_x = K.dot(inputs * dp_mask[0], self.kernel)
if self.use_bias:
matrix_x = K.bias_add(matrix_x, self.bias)
matrix_inner = K.dot(h_tm1 * rec_dp_mask[0],
self.recurrent_kernel[:, :2 * self.units])
x_z = matrix_x[:, :self.units]
x_r = matrix_x[:, self.units:2 * self.units]
recurrent_z = matrix_inner[:, :self.units]
recurrent_r = matrix_inner[:, self.units:2 * self.units]
z = self.recurrent_activation(x_z + recurrent_z)
r = self.recurrent_activation(x_r + recurrent_r)
x_h = matrix_x[:, 2 * self.units:]
recurrent_h = K.dot(r * h_tm1 * rec_dp_mask[0],
self.recurrent_kernel[:, 2 * self.units:])
hh = self.activation(x_h + recurrent_h)
else:
if self.implementation == 0:
x_z = inputs[:, :self.units]
x_r = inputs[:, self.units:2 * self.units]
x_h = inputs[:, 2 * self.units:]
elif self.implementation == 1:
x_z = K.dot(inputs * dp_mask[0], self.kernel_z)
x_r = K.dot(inputs * dp_mask[1], self.kernel_r)
x_h = K.dot(inputs * dp_mask[2], self.kernel_h)
if self.use_bias:
x_z = K.bias_add(x_z, self.bias_z)
x_r = K.bias_add(x_r, self.bias_r)
x_h = K.bias_add(x_h, self.bias_h)
else:
raise ValueError('Unknown `implementation` mode.')
z = self.recurrent_activation(x_z + K.dot(h_tm1 * rec_dp_mask[0],
self.recurrent_kernel_z))
r = self.recurrent_activation(x_r + K.dot(h_tm1 * rec_dp_mask[1],
self.recurrent_kernel_r))
hh = self.activation(x_h + K.dot(r * h_tm1 * rec_dp_mask[2],
self.recurrent_kernel_h))
h = z * h_tm1 + (1 - z) * hh
if 0 < self.dropout + self.recurrent_dropout:
h._uses_learning_phase = True
return h, [h]
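# For reference (comment added for clarity, not in the original source), the
# step above computes the standard GRU update:
#   z  = recurrent_activation(W_z . x + U_z . h_tm1)      # update gate
#   r  = recurrent_activation(W_r . x + U_r . h_tm1)      # reset gate
#   hh = activation(W_h . x + U_h . (r * h_tm1))          # candidate state
#   h  = z * h_tm1 + (1 - z) * hh
# with optional biases and dropout masks applied to x and h_tm1.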
def get_config(self):
config = {
'units': self.units,
'activation': activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout
}
base_config = super(GRU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class LSTM(Recurrent):
"""Long-Short Term Memory unit - Hochreiter 1997.
For a step-by-step description of the algorithm, see
[this tutorial](http://deeplearning.net/tutorial/lstm.html).
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Setting it to true will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et
al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
References:
- [Long short-term
memory](http://www.bioinf.jku.at/publications/older/2604.pdf)
(original 1997 paper)
- [Supervised sequence labeling with recurrent neural
networks](http://www.cs.toronto.edu/~graves/preprint.pdf)
- [A Theoretically Grounded Application of Dropout in Recurrent Neural
Networks](http://arxiv.org/abs/1512.05287)
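Example (an illustrative sketch added by the editor, not part of the original
docstring; it assumes the usual Keras `Sequential` model and `Dense` layer
from the same package):
    model = Sequential()
    # Map sequences of shape (timesteps=10, features=8) to one scalar each.
    model.add(LSTM(64, input_shape=(10, 8), recurrent_dropout=0.2))
    model.add(Dense(1))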
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
super(LSTM, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.state_spec = [
InputSpec(shape=(None, self.units)),
InputSpec(shape=(None, self.units))
]
def build(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
input_shape = tensor_shape.TensorShape(input_shape).as_list()
batch_size = input_shape[0] if self.stateful else None
self.input_dim = input_shape[2]
self.input_spec[0] = InputSpec(shape=(batch_size, None, self.input_dim))
self.states = [None, None]
if self.stateful:
self.reset_states()
self.kernel = self.add_weight(
shape=(self.input_dim, self.units * 4),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 4),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
if self.unit_forget_bias:
def bias_initializer(_, *args, **kwargs):
return K.concatenate([
self.bias_initializer((self.units,), *args, **kwargs),
initializers.Ones()((self.units,), *args, **kwargs),
self.bias_initializer((self.units * 2,), *args, **kwargs),
])
else:
bias_initializer = self.bias_initializer
self.bias = self.add_weight(
shape=(self.units * 4,),
name='bias',
initializer=bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.kernel_i = self.kernel[:, :self.units]
self.kernel_f = self.kernel[:, self.units:self.units * 2]
self.kernel_c = self.kernel[:, self.units * 2:self.units * 3]
self.kernel_o = self.kernel[:, self.units * 3:]
self.recurrent_kernel_i = self.recurrent_kernel[:, :self.units]
self.recurrent_kernel_f = self.recurrent_kernel[:, self.units:
self.units * 2]
self.recurrent_kernel_c = self.recurrent_kernel[:, self.units * 2:
self.units * 3]
self.recurrent_kernel_o = self.recurrent_kernel[:, self.units * 3:]
if self.use_bias:
self.bias_i = self.bias[:self.units]
self.bias_f = self.bias[self.units:self.units * 2]
self.bias_c = self.bias[self.units * 2:self.units * 3]
self.bias_o = self.bias[self.units * 3:]
else:
self.bias_i = None
self.bias_f = None
self.bias_c = None
self.bias_o = None
self.built = True
def preprocess_input(self, inputs, training=None):
if self.implementation == 0:
input_shape = inputs.get_shape().as_list()
input_dim = input_shape[2]
timesteps = input_shape[1]
x_i = _time_distributed_dense(
inputs,
self.kernel_i,
self.bias_i,
self.dropout,
input_dim,
self.units,
timesteps,
training=training)
x_f = _time_distributed_dense(
inputs,
self.kernel_f,
self.bias_f,
self.dropout,
input_dim,
self.units,
timesteps,
training=training)
x_c = _time_distributed_dense(
inputs,
self.kernel_c,
self.bias_c,
self.dropout,
input_dim,
self.units,
timesteps,
training=training)
x_o = _time_distributed_dense(
inputs,
self.kernel_o,
self.bias_o,
self.dropout,
input_dim,
self.units,
timesteps,
training=training)
return K.concatenate([x_i, x_f, x_c, x_o], axis=2)
else:
return inputs
def get_constants(self, inputs, training=None):
constants = []
if self.implementation != 0 and 0 < self.dropout < 1:
input_shape = K.int_shape(inputs)
input_dim = input_shape[-1]
ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, int(input_dim)))
def dropped_inputs():
return K.dropout(ones, self.dropout)
dp_mask = [
K.in_train_phase(dropped_inputs, ones, training=training)
for _ in range(4)
]
constants.append(dp_mask)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(4)])
if 0 < self.recurrent_dropout < 1:
ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, self.units))
def dropped_inputs(): # pylint: disable=function-redefined
return K.dropout(ones, self.recurrent_dropout)
rec_dp_mask = [
K.in_train_phase(dropped_inputs, ones, training=training)
for _ in range(4)
]
constants.append(rec_dp_mask)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(4)])
return constants
def step(self, inputs, states):
h_tm1 = states[0]
c_tm1 = states[1]
dp_mask = states[2]
rec_dp_mask = states[3]
if self.implementation == 2:
z = K.dot(inputs * dp_mask[0], self.kernel)
z += K.dot(h_tm1 * rec_dp_mask[0], self.recurrent_kernel)
if self.use_bias:
z = K.bias_add(z, self.bias)
z0 = z[:, :self.units]
z1 = z[:, self.units:2 * self.units]
z2 = z[:, 2 * self.units:3 * self.units]
z3 = z[:, 3 * self.units:]
i = self.recurrent_activation(z0)
f = self.recurrent_activation(z1)
c = f * c_tm1 + i * self.activation(z2)
o = self.recurrent_activation(z3)
else:
if self.implementation == 0:
x_i = inputs[:, :self.units]
x_f = inputs[:, self.units:2 * self.units]
x_c = inputs[:, 2 * self.units:3 * self.units]
x_o = inputs[:, 3 * self.units:]
elif self.implementation == 1:
x_i = K.dot(inputs * dp_mask[0], self.kernel_i) + self.bias_i
x_f = K.dot(inputs * dp_mask[1], self.kernel_f) + self.bias_f
x_c = K.dot(inputs * dp_mask[2], self.kernel_c) + self.bias_c
x_o = K.dot(inputs * dp_mask[3], self.kernel_o) + self.bias_o
else:
raise ValueError('Unknown `implementation` mode.')
i = self.recurrent_activation(x_i + K.dot(h_tm1 * rec_dp_mask[0],
self.recurrent_kernel_i))
f = self.recurrent_activation(x_f + K.dot(h_tm1 * rec_dp_mask[1],
self.recurrent_kernel_f))
c = f * c_tm1 + i * self.activation(
x_c + K.dot(h_tm1 * rec_dp_mask[2], self.recurrent_kernel_c))
o = self.recurrent_activation(x_o + K.dot(h_tm1 * rec_dp_mask[3],
self.recurrent_kernel_o))
h = o * self.activation(c)
if 0 < self.dropout + self.recurrent_dropout:
h._uses_learning_phase = True
return h, [h, c]
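# For reference (comment added for clarity, not in the original source), the
# step above computes the standard LSTM update:
#   i = recurrent_activation(W_i . x + U_i . h_tm1)       # input gate
#   f = recurrent_activation(W_f . x + U_f . h_tm1)       # forget gate
#   c = f * c_tm1 + i * activation(W_c . x + U_c . h_tm1) # new cell state
#   o = recurrent_activation(W_o . x + U_o . h_tm1)       # output gate
#   h = o * activation(c)
# with optional biases and dropout masks applied to x and h_tm1.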
def get_config(self):
config = {
'units': self.units,
'activation': activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'unit_forget_bias': self.unit_forget_bias,
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout
}
base_config = super(LSTM, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
|
gundalow/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/facts/system/lsb.py
|
95
|
# Collect facts related to LSB (Linux Standard Base)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.module_utils.facts.utils import get_file_lines
from ansible.module_utils.facts.collector import BaseFactCollector
class LSBFactCollector(BaseFactCollector):
name = 'lsb'
_fact_ids = set()
STRIP_QUOTES = r'\'\"\\'
def _lsb_release_bin(self, lsb_path, module):
lsb_facts = {}
if not lsb_path:
return lsb_facts
rc, out, err = module.run_command([lsb_path, "-a"], errors='surrogate_then_replace')
if rc != 0:
return lsb_facts
for line in out.splitlines():
if len(line) < 1 or ':' not in line:
continue
value = line.split(':', 1)[1].strip()
if 'LSB Version:' in line:
lsb_facts['release'] = value
elif 'Distributor ID:' in line:
lsb_facts['id'] = value
elif 'Description:' in line:
lsb_facts['description'] = value
elif 'Release:' in line:
lsb_facts['release'] = value
elif 'Codename:' in line:
lsb_facts['codename'] = value
return lsb_facts
def _lsb_release_file(self, etc_lsb_release_location):
lsb_facts = {}
if not os.path.exists(etc_lsb_release_location):
return lsb_facts
for line in get_file_lines(etc_lsb_release_location):
if '=' not in line:
    continue
value = line.split('=', 1)[1].strip()
if 'DISTRIB_ID' in line:
lsb_facts['id'] = value
elif 'DISTRIB_RELEASE' in line:
lsb_facts['release'] = value
elif 'DISTRIB_DESCRIPTION' in line:
lsb_facts['description'] = value
elif 'DISTRIB_CODENAME' in line:
lsb_facts['codename'] = value
return lsb_facts
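# Illustrative example (added by the editor, not part of the original module):
# given an /etc/lsb-release containing lines such as
#   DISTRIB_ID=Ubuntu
#   DISTRIB_RELEASE=20.04
#   DISTRIB_CODENAME=focal
#   DISTRIB_DESCRIPTION="Ubuntu 20.04 LTS"
# _lsb_release_file() would return
#   {'id': 'Ubuntu', 'release': '20.04', 'codename': 'focal',
#    'description': '"Ubuntu 20.04 LTS"'}
# and collect() later strips the surrounding quotes via STRIP_QUOTES.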
def collect(self, module=None, collected_facts=None):
facts_dict = {}
lsb_facts = {}
if not module:
return facts_dict
lsb_path = module.get_bin_path('lsb_release')
# try the 'lsb_release' script first
if lsb_path:
lsb_facts = self._lsb_release_bin(lsb_path,
module=module)
# no lsb_release, try looking in /etc/lsb-release
if not lsb_facts:
lsb_facts = self._lsb_release_file('/etc/lsb-release')
if lsb_facts and 'release' in lsb_facts:
lsb_facts['major_release'] = lsb_facts['release'].split('.')[0]
for k, v in lsb_facts.items():
if v:
lsb_facts[k] = v.strip(LSBFactCollector.STRIP_QUOTES)
facts_dict['lsb'] = lsb_facts
return facts_dict
|
chevah/python-cffi
|
refs/heads/master
|
demo/readdir_build.py
|
12
|
import sys
from cffi import FFI
if not sys.platform.startswith('linux'):
raise Exception("Linux-only demo")
ffi = FFI()
ffi.cdef("""
typedef void DIR;
typedef long ino_t;
typedef long off_t;
struct dirent {
ino_t d_ino; /* inode number */
off_t d_off; /* offset to the next dirent */
unsigned short d_reclen; /* length of this record */
unsigned char d_type; /* type of file; not supported
by all file system types */
char d_name[256]; /* filename */
};
int readdir_r(DIR *dirp, struct dirent *entry, struct dirent **result);
int openat(int dirfd, const char *pathname, int flags);
DIR *fdopendir(int fd);
int closedir(DIR *dirp);
""")
ffi.set_source("_readdir", None)
if __name__ == '__main__':
ffi.compile()
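# Usage sketch (added by the editor, not part of the original demo; names such
# as `some_fd` are placeholders). After this script runs, the generated
# out-of-line ABI module can be used roughly like:
#   from _readdir import ffi
#   lib = ffi.dlopen(None)  # resolve the declared symbols from the C library
#   dirp = lib.fdopendir(some_fd)
#   entry = ffi.new("struct dirent *")
#   result = ffi.new("struct dirent **")
#   while lib.readdir_r(dirp, entry, result) == 0 and result[0] != ffi.NULL:
#       print(ffi.string(entry.d_name))
#   lib.closedir(dirp)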
|
eul721/The-Perfect-Pokemon-Team-Balancer
|
refs/heads/master
|
libs/env/Lib/encodings/unicode_escape.py
|
852
|
""" Python 'unicode-escape' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.unicode_escape_encode
decode = codecs.unicode_escape_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.unicode_escape_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.unicode_escape_decode(input, self.errors)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='unicode-escape',
encode=Codec.encode,
decode=Codec.decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
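# Usage note (added by the editor, not part of the original codec module):
# this module backs the standard 'unicode_escape' codec, e.g.
#   'a\nb'.encode('unicode_escape')    # -> b'a\\nb'
#   b'a\\nb'.decode('unicode_escape')  # -> 'a\nb'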
|
runtimejs/runtime
|
refs/heads/master
|
deps/v8/tools/testrunner/local/statusfile.py
|
2
|
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
# These outcomes can occur in a TestCase's outcomes list:
SKIP = "SKIP"
FAIL = "FAIL"
PASS = "PASS"
OKAY = "OKAY"
TIMEOUT = "TIMEOUT"
CRASH = "CRASH"
SLOW = "SLOW"
FAST_VARIANTS = "FAST_VARIANTS"
NO_IGNITION = "NO_IGNITION"
NO_VARIANTS = "NO_VARIANTS"
# These are just for the status files and are mapped below in DEFS:
FAIL_OK = "FAIL_OK"
PASS_OR_FAIL = "PASS_OR_FAIL"
FAIL_SLOPPY = "FAIL_SLOPPY"
ALWAYS = "ALWAYS"
KEYWORDS = {}
for key in [SKIP, FAIL, PASS, OKAY, TIMEOUT, CRASH, SLOW, FAIL_OK,
FAST_VARIANTS, NO_IGNITION, NO_VARIANTS, PASS_OR_FAIL, FAIL_SLOPPY,
ALWAYS]:
KEYWORDS[key] = key
DEFS = {FAIL_OK: [FAIL, OKAY],
PASS_OR_FAIL: [PASS, FAIL]}
# Support arches, modes to be written as keywords instead of strings.
VARIABLES = {ALWAYS: True}
for var in ["debug", "release", "big", "little",
"android_arm", "android_arm64", "android_ia32", "android_x87",
"android_x64", "arm", "arm64", "ia32", "mips", "mipsel", "mips64",
"mips64el", "x64", "x87", "nacl_ia32", "nacl_x64", "ppc", "ppc64",
"s390", "s390x", "macos", "windows", "linux", "aix"]:
VARIABLES[var] = var
def DoSkip(outcomes):
return SKIP in outcomes
def IsSlow(outcomes):
return SLOW in outcomes
def NoIgnitionVariant(outcomes):
return NO_IGNITION in outcomes
def OnlyStandardVariant(outcomes):
return NO_VARIANTS in outcomes
def OnlyFastVariants(outcomes):
return FAST_VARIANTS in outcomes
def IsPassOrFail(outcomes):
return ((PASS in outcomes) and (FAIL in outcomes) and
        (CRASH not in outcomes) and (OKAY not in outcomes))
def IsFailOk(outcomes):
return (FAIL in outcomes) and (OKAY in outcomes)
def _AddOutcome(result, new):
global DEFS
if new in DEFS:
mapped = DEFS[new]
if type(mapped) == list:
for m in mapped:
_AddOutcome(result, m)
elif type(mapped) == str:
_AddOutcome(result, mapped)
else:
result.add(new)
def _ParseOutcomeList(rule, outcomes, target_dict, variables):
result = set([])
if type(outcomes) == str:
outcomes = [outcomes]
for item in outcomes:
if type(item) == str:
_AddOutcome(result, item)
elif type(item) == list:
if not eval(item[0], variables): continue
for outcome in item[1:]:
assert type(outcome) == str
_AddOutcome(result, outcome)
else:
assert False
if len(result) == 0: return
if rule in target_dict:
target_dict[rule] |= result
else:
target_dict[rule] = result
def ReadContent(path):
with open(path) as f:
global KEYWORDS
return eval(f.read(), KEYWORDS)
def ReadStatusFile(path, variables):
contents = ReadContent(path)
rules = {}
wildcards = {}
variables.update(VARIABLES)
for section in contents:
assert type(section) == list
assert len(section) == 2
if not eval(section[0], variables): continue
section = section[1]
assert type(section) == dict
for rule in section:
assert type(rule) == str
if rule[-1] == '*':
_ParseOutcomeList(rule, section[rule], wildcards, variables)
else:
_ParseOutcomeList(rule, section[rule], rules, variables)
return rules, wildcards
def PresubmitCheck(path):
contents = ReadContent(path)
root_prefix = os.path.basename(os.path.dirname(path)) + "/"
status = {"success": True}
def _assert(check, message): # Like "assert", but doesn't throw.
if not check:
print("%s: Error: %s" % (path, message))
status["success"] = False
try:
for section in contents:
_assert(type(section) == list, "Section must be a list")
_assert(len(section) == 2, "Section list must have exactly 2 entries")
section = section[1]
_assert(type(section) == dict,
"Second entry of section must be a dictionary")
for rule in section:
_assert(type(rule) == str, "Rule key must be a string")
_assert(not rule.startswith(root_prefix),
"Suite name prefix must not be used in rule keys")
_assert(not rule.endswith('.js'),
".js extension must not be used in rule keys.")
return status["success"]
except Exception as e:
print(e)
return False
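# Illustrative example (added by the editor, not part of the original file):
# a status file read by ReadStatusFile() is a list of [condition, rules]
# sections; the condition is evaluated against `variables` and the rules map
# test names (or 'prefix*' wildcards) to outcome lists, e.g.
#   [
#     [ALWAYS, {
#       'some-test': [PASS, SLOW],
#       'flaky-suite/*': [SKIP],
#     }],
#   ]
# Plain rules end up in the returned `rules` dict, '*' rules in `wildcards`.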
|
yxping/leetcode
|
refs/heads/master
|
solutions/102.Binary_Tree_Level_Order_Traversal/AC_dfs_n.py
|
7
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: illuz <iilluzen[at]gmail.com>
# File: AC_dfs_n.py
# Create Date: 2015-03-09 09:30:11
# Usage: AC_dfs_n.py
# Description:
# Definition for a binary tree node
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
# @param root, a tree node
# @return a list of lists of integers
def levelOrder(self, root):
ret = []
def dfs(root, level):
if root:
if level >= len(ret):
ret.append([])
ret[level].append(root.val)
dfs(root.left, level + 1)
dfs(root.right, level + 1)
dfs(root, 0)
return ret
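# Quick self-check (added by the editor, not part of the original solution):
#   root = TreeNode(3)
#   root.left = TreeNode(9)
#   root.right = TreeNode(20)
#   root.right.left = TreeNode(15)
#   root.right.right = TreeNode(7)
#   print(Solution().levelOrder(root))  # -> [[3], [9, 20], [15, 7]]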
|
40223119/2015cd_0505
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/copy.py
|
628
|
"""Generic (shallow and deep) copying operations.
Interface summary:
import copy
x = copy.copy(y) # make a shallow copy of y
x = copy.deepcopy(y) # make a deep copy of y
For module specific errors, copy.Error is raised.
The difference between shallow and deep copying is only relevant for
compound objects (objects that contain other objects, like lists or
class instances).
- A shallow copy constructs a new compound object and then (to the
extent possible) inserts *the same objects* into it that the
original contains.
- A deep copy constructs a new compound object and then, recursively,
inserts *copies* into it of the objects found in the original.
Two problems often exist with deep copy operations that don't exist
with shallow copy operations:
a) recursive objects (compound objects that, directly or indirectly,
contain a reference to themselves) may cause a recursive loop
b) because deep copy copies *everything* it may copy too much, e.g.
administrative data structures that should be shared even between
copies
Python's deep copy operation avoids these problems by:
a) keeping a table of objects already copied during the current
copying pass
b) letting user-defined classes override the copying operation or the
set of components copied
This version does not copy types like module, class, function, method,
nor stack trace, stack frame, nor file, socket, window, nor array, nor
any similar types.
Classes can use the same interfaces to control copying that they use
to control pickling: they can define methods called __getinitargs__(),
__getstate__() and __setstate__(). See the documentation for module
"pickle" for information on these methods.
"""
import types
import weakref
from copyreg import dispatch_table
import builtins
class Error(Exception):
pass
error = Error # backward compatibility
# module org.python.core does not exist in Brython, so lets just ignore
# this import request.
#try:
# from org.python.core import PyStringMap
#except ImportError:
# PyStringMap = None
PyStringMap = None
__all__ = ["Error", "copy", "deepcopy"]
def copy(x):
"""Shallow copy operation on arbitrary Python objects.
See the module's __doc__ string for more info.
"""
cls = type(x)
copier = _copy_dispatch.get(cls)
if copier:
return copier(x)
copier = getattr(cls, "__copy__", None)
if copier:
return copier(x)
reductor = dispatch_table.get(cls)
if reductor:
rv = reductor(x)
else:
reductor = getattr(x, "__reduce_ex__", None)
if reductor:
rv = reductor(2)
else:
reductor = getattr(x, "__reduce__", None)
if reductor:
rv = reductor()
else:
raise Error("un(shallow)copyable object of type %s" % cls)
return _reconstruct(x, rv, 0)
_copy_dispatch = d = {}
def _copy_immutable(x):
return x
for t in (type(None), int, float, bool, str, tuple,
frozenset, type, range,
types.BuiltinFunctionType, type(Ellipsis),
types.FunctionType, weakref.ref):
d[t] = _copy_immutable
t = getattr(types, "CodeType", None)
if t is not None:
d[t] = _copy_immutable
for name in ("complex", "unicode"):
t = getattr(builtins, name, None)
if t is not None:
d[t] = _copy_immutable
def _copy_with_constructor(x):
return type(x)(x)
for t in (list, dict, set):
d[t] = _copy_with_constructor
def _copy_with_copy_method(x):
return x.copy()
if PyStringMap is not None:
d[PyStringMap] = _copy_with_copy_method
del d
def deepcopy(x, memo=None, _nil=[]):
"""Deep copy operation on arbitrary Python objects.
See the module's __doc__ string for more info.
"""
if memo is None:
memo = {}
d = id(x)
y = memo.get(d, _nil)
if y is not _nil:
return y
cls = type(x)
copier = _deepcopy_dispatch.get(cls)
if copier:
y = copier(x, memo)
else:
try:
issc = issubclass(cls, type)
except TypeError: # cls is not a class (old Boost; see SF #502085)
issc = 0
if issc:
y = _deepcopy_atomic(x, memo)
else:
copier = getattr(x, "__deepcopy__", None)
if copier:
y = copier(memo)
else:
reductor = dispatch_table.get(cls)
if reductor:
rv = reductor(x)
else:
reductor = getattr(x, "__reduce_ex__", None)
if reductor:
rv = reductor(2)
else:
reductor = getattr(x, "__reduce__", None)
if reductor:
rv = reductor()
else:
raise Error(
"un(deep)copyable object of type %s" % cls)
y = _reconstruct(x, rv, 1, memo)
# If the copy turned out to be the original object itself, don't memoize it.
if y is not x:
memo[d] = y
_keep_alive(x, memo) # Make sure x lives at least as long as d
return y
_deepcopy_dispatch = d = {}
def _deepcopy_atomic(x, memo):
return x
d[type(None)] = _deepcopy_atomic
d[type(Ellipsis)] = _deepcopy_atomic
d[int] = _deepcopy_atomic
d[float] = _deepcopy_atomic
d[bool] = _deepcopy_atomic
try:
d[complex] = _deepcopy_atomic
except NameError:
pass
d[bytes] = _deepcopy_atomic
d[str] = _deepcopy_atomic
try:
d[types.CodeType] = _deepcopy_atomic
except AttributeError:
pass
d[type] = _deepcopy_atomic
d[range] = _deepcopy_atomic
d[types.BuiltinFunctionType] = _deepcopy_atomic
d[types.FunctionType] = _deepcopy_atomic
d[weakref.ref] = _deepcopy_atomic
def _deepcopy_list(x, memo):
y = []
memo[id(x)] = y
for a in x:
y.append(deepcopy(a, memo))
return y
d[list] = _deepcopy_list
def _deepcopy_tuple(x, memo):
y = []
for a in x:
y.append(deepcopy(a, memo))
# We're not going to put the tuple in the memo, but it's still important we
# check for it, in case the tuple contains recursive mutable structures.
try:
return memo[id(x)]
except KeyError:
pass
for i in range(len(x)):
if x[i] is not y[i]:
y = tuple(y)
break
else:
y = x
return y
d[tuple] = _deepcopy_tuple
def _deepcopy_dict(x, memo):
y = {}
memo[id(x)] = y
for key, value in x.items():
y[deepcopy(key, memo)] = deepcopy(value, memo)
return y
d[dict] = _deepcopy_dict
if PyStringMap is not None:
d[PyStringMap] = _deepcopy_dict
def _deepcopy_method(x, memo): # Copy instance methods
return type(x)(x.__func__, deepcopy(x.__self__, memo))
_deepcopy_dispatch[types.MethodType] = _deepcopy_method
def _keep_alive(x, memo):
"""Keeps a reference to the object x in the memo.
Because we remember objects by their id, we have
to assure that possibly temporary objects are kept
alive by referencing them.
We store a reference at the id of the memo, which should
normally not be used unless someone tries to deepcopy
the memo itself...
"""
try:
memo[id(memo)].append(x)
except KeyError:
# aha, this is the first one :-)
memo[id(memo)]=[x]
def _reconstruct(x, info, deep, memo=None):
if isinstance(info, str):
return x
assert isinstance(info, tuple)
if memo is None:
memo = {}
n = len(info)
assert n in (2, 3, 4, 5)
callable, args = info[:2]
if n > 2:
state = info[2]
else:
state = {}
if n > 3:
listiter = info[3]
else:
listiter = None
if n > 4:
dictiter = info[4]
else:
dictiter = None
if deep:
args = deepcopy(args, memo)
y = callable(*args)
memo[id(x)] = y
if state:
if deep:
state = deepcopy(state, memo)
if hasattr(y, '__setstate__'):
y.__setstate__(state)
else:
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
else:
slotstate = None
if state is not None:
y.__dict__.update(state)
if slotstate is not None:
for key, value in slotstate.items():
setattr(y, key, value)
if listiter is not None:
for item in listiter:
if deep:
item = deepcopy(item, memo)
y.append(item)
if dictiter is not None:
for key, value in dictiter:
if deep:
key = deepcopy(key, memo)
value = deepcopy(value, memo)
y[key] = value
return y
del d
del types
# Helper for instance creation without calling __init__
class _EmptyClass:
pass
|
lmazuel/azure-sdk-for-python
|
refs/heads/master
|
azure-servicefabric/azure/servicefabric/models/property_batch_operation.py
|
1
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PropertyBatchOperation(Model):
"""Represents the base type for property operations that can be put into a
batch and submitted.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: CheckExistsPropertyBatchOperation,
CheckSequencePropertyBatchOperation, CheckValuePropertyBatchOperation,
DeletePropertyBatchOperation, GetPropertyBatchOperation,
PutPropertyBatchOperation
:param property_name: The name of the Service Fabric property.
:type property_name: str
:param kind: Constant filled by server.
:type kind: str
"""
_validation = {
'property_name': {'required': True},
'kind': {'required': True},
}
_attribute_map = {
'property_name': {'key': 'PropertyName', 'type': 'str'},
'kind': {'key': 'Kind', 'type': 'str'},
}
_subtype_map = {
'kind': {'CheckExists': 'CheckExistsPropertyBatchOperation', 'CheckSequence': 'CheckSequencePropertyBatchOperation', 'CheckValue': 'CheckValuePropertyBatchOperation', 'Delete': 'DeletePropertyBatchOperation', 'Get': 'GetPropertyBatchOperation', 'Put': 'PutPropertyBatchOperation'}
}
def __init__(self, property_name):
super(PropertyBatchOperation, self).__init__()
self.property_name = property_name
self.kind = None
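# Usage note (added by the editor, not part of the generated file): this base
# class is normally not instantiated directly; the concrete sub-classes listed
# in `_subtype_map` above (e.g. CheckExistsPropertyBatchOperation) fill in
# `kind`. A purely illustrative direct construction would look like:
#   op = PropertyBatchOperation(property_name='my-property')
#   # op.kind is still None here; sub-classes set it.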
|
empeeu/numpy
|
refs/heads/master
|
numpy/polynomial/legendre.py
|
75
|
"""
Legendre Series (:mod:`numpy.polynomial.legendre`)
===================================================
.. currentmodule:: numpy.polynomial.legendre
This module provides a number of objects (mostly functions) useful for
dealing with Legendre series, including a `Legendre` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
.. autosummary::
:toctree: generated/
legdomain Legendre series default domain, [-1,1].
legzero Legendre series that evaluates identically to 0.
legone Legendre series that evaluates identically to 1.
legx Legendre series for the identity map, ``f(x) = x``.
Arithmetic
----------
.. autosummary::
:toctree: generated/
legmulx multiply a Legendre series in P_i(x) by x.
legadd add two Legendre series.
legsub subtract one Legendre series from another.
legmul multiply two Legendre series.
legdiv divide one Legendre series by another.
legpow raise a Legendre series to a positive integer power.
legval evaluate a Legendre series at given points.
legval2d evaluate a 2D Legendre series at given points.
legval3d evaluate a 3D Legendre series at given points.
leggrid2d evaluate a 2D Legendre series on a Cartesian product.
leggrid3d evaluate a 3D Legendre series on a Cartesian product.
Calculus
--------
.. autosummary::
:toctree: generated/
legder differentiate a Legendre series.
legint integrate a Legendre series.
Misc Functions
--------------
.. autosummary::
:toctree: generated/
legfromroots create a Legendre series with specified roots.
legroots find the roots of a Legendre series.
legvander Vandermonde-like matrix for Legendre polynomials.
legvander2d Vandermonde-like matrix for 2D Legendre series.
legvander3d Vandermonde-like matrix for 3D Legendre series.
leggauss Gauss-Legendre quadrature, points and weights.
legweight Legendre weight function.
legcompanion symmetrized companion matrix in Legendre form.
legfit least-squares fit returning a Legendre series.
legtrim trim leading coefficients from a Legendre series.
legline Legendre series representing given straight line.
leg2poly convert a Legendre series to a polynomial.
poly2leg convert a polynomial to a Legendre series.
Classes
-------
Legendre A Legendre series class.
See also
--------
numpy.polynomial.polynomial
numpy.polynomial.chebyshev
numpy.polynomial.laguerre
numpy.polynomial.hermite
numpy.polynomial.hermite_e
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'legzero', 'legone', 'legx', 'legdomain', 'legline', 'legadd',
'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', 'legder',
'legint', 'leg2poly', 'poly2leg', 'legfromroots', 'legvander',
'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d',
'leggrid2d', 'leggrid3d', 'legvander2d', 'legvander3d', 'legcompanion',
'leggauss', 'legweight']
legtrim = pu.trimcoef
def poly2leg(pol):
"""
Convert a polynomial to a Legendre series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Legendre series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Legendre
series.
See Also
--------
leg2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy import polynomial as P
>>> p = P.Polynomial(np.arange(4))
>>> p
Polynomial([ 0., 1., 2., 3.], [-1., 1.])
>>> c = P.Legendre(P.poly2leg(p.coef))
>>> c
Legendre([ 1. , 3.25, 1. , 0.75], [-1., 1.])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1):
res = legadd(legmulx(res), pol[i])
return res
def leg2poly(c):
"""
Convert a Legendre series to a polynomial.
Convert an array representing the coefficients of a Legendre series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Legendre series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2leg
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy import polynomial as P
>>> c = P.Legendre(range(4))
>>> c
Legendre([ 0., 1., 2., 3.], [-1., 1.])
>>> p = c.convert(kind=P.Polynomial)
>>> p
Polynomial([-1. , -3.5, 3. , 7.5], [-1., 1.])
>>> P.leg2poly(range(4))
array([-1. , -3.5, 3. , 7.5])
"""
from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
if n < 3:
return c
else:
c0 = c[-2]
c1 = c[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1):
tmp = c0
c0 = polysub(c[i - 2], (c1*(i - 1))/i)
c1 = polyadd(tmp, (polymulx(c1)*(2*i - 1))/i)
return polyadd(c0, polymulx(c1))
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Legendre
legdomain = np.array([-1, 1])
# Legendre coefficients representing zero.
legzero = np.array([0])
# Legendre coefficients representing one.
legone = np.array([1])
# Legendre coefficients representing the identity x.
legx = np.array([0, 1])
def legline(off, scl):
"""
Legendre series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Legendre series for
``off + scl*x``.
See Also
--------
polyline, chebline
Examples
--------
>>> import numpy.polynomial.legendre as L
>>> L.legline(3,2)
array([3, 2])
>>> L.legval(-3, L.legline(3,2)) # should be -3
-3.0
"""
if scl != 0:
return np.array([off, scl])
else:
return np.array([off])
def legfromroots(roots):
"""
Generate a Legendre series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in Legendre form, where the `r_n` are the roots specified in `roots`.
If a zero has multiplicity n, then it must appear in `roots` n times.
For instance, if 2 is a root of multiplicity three and 3 is a root of
multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in Legendre form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
polyfromroots, chebfromroots, lagfromroots, hermfromroots,
hermefromroots.
Examples
--------
>>> import numpy.polynomial.legendre as L
>>> L.legfromroots((-1,0,1)) # x^3 - x relative to the standard basis
array([ 0. , -0.4, 0. , 0.4])
>>> j = complex(0,1)
>>> L.legfromroots((-j,j)) # x^2 + 1 relative to the standard basis
array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j])
"""
if len(roots) == 0:
return np.ones(1)
else:
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [legline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [legmul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = legmul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def legadd(c1, c2):
"""
Add one Legendre series to another.
Returns the sum of two Legendre series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Legendre series of their sum.
See Also
--------
legsub, legmul, legdiv, legpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Legendre series
is a Legendre series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legadd(c1,c2)
array([ 4., 4., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def legsub(c1, c2):
"""
Subtract one Legendre series from another.
Returns the difference of two Legendre series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Legendre series coefficients representing their difference.
See Also
--------
legadd, legmul, legdiv, legpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Legendre
series is a Legendre series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legsub(c1,c2)
array([-2., 0., 2.])
>>> L.legsub(c2,c1) # -C.legsub(c1,c2)
array([ 2., 0., -2.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] -= c2
ret = c1
else:
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def legmulx(c):
"""Multiply a Legendre series by x.
Multiply the Legendre series `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
The multiplication uses the recursion relationship for Legendre
polynomials in the form
.. math::
xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1)
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1] = c[0]
for i in range(1, len(c)):
j = i + 1
k = i - 1
s = i + j
prd[j] = (c[i]*j)/s
prd[k] += (c[i]*i)/s
return prd
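# Worked example (added by the editor for illustration, not part of the
# original module): x*P_1(x) = x**2 = (2*P_2(x) + P_0(x))/3, so
#   legmulx([0, 1]) -> array([1/3, 0., 2/3])
# which matches the recursion above with i=1, j=2, k=0, s=3.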
def legmul(c1, c2):
"""
Multiply one Legendre series by another.
Returns the product of two Legendre series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Legendre series coefficients representing their product.
See Also
--------
legadd, legsub, legdiv, legpow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Legendre polynomial basis set. Thus, to express
the product as a Legendre series, it is necessary to "reproject" the
product onto said basis set, which may produce "unintuitive" (but
correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2)
>>> L.legmul(c1,c2) # multiplication requires "reprojection"
array([ 4.33333333, 10.4 , 11.66666667, 3.6 ])
"""
# s1, s2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c = c2
xs = c1
else:
c = c1
xs = c2
if len(c) == 1:
c0 = c[0]*xs
c1 = 0
elif len(c) == 2:
c0 = c[0]*xs
c1 = c[1]*xs
else:
nd = len(c)
c0 = c[-2]*xs
c1 = c[-1]*xs
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = legsub(c[-i]*xs, (c1*(nd - 1))/nd)
c1 = legadd(tmp, (legmulx(c1)*(2*nd - 1))/nd)
return legadd(c0, legmulx(c1))
def legdiv(c1, c2):
"""
Divide one Legendre series by another.
Returns the quotient-with-remainder of two Legendre series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
quo, rem : ndarrays
Of Legendre series coefficients representing the quotient and
remainder.
See Also
--------
legadd, legsub, legmul, legpow
Notes
-----
In general, the (polynomial) division of one Legendre series by another
results in quotient and remainder terms that are not in the Legendre
polynomial basis set. Thus, to express these results as a Legendre
series, it is necessary to "reproject" the results onto the Legendre
basis set, which may produce "unintuitive" (but correct) results; see
Examples section below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legdiv(c1,c2) # quotient "intuitive," remainder not
(array([ 3.]), array([-8., -4.]))
>>> c2 = (0,1,2,3)
>>> L.legdiv(c2,c1) # neither "intuitive"
(array([-0.07407407, 1.66666667]), array([-1.03703704, -2.51851852]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2:
return c1[:1]*0, c1
elif lc2 == 1:
return c1/c2[-1], c1[:1]*0
else:
quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
rem = c1
for i in range(lc1 - lc2, - 1, -1):
p = legmul([0]*i + [1], c2)
q = rem[-1]/p[-1]
rem = rem[:-1] - q*p[:-1]
quo[i] = q
return quo, pu.trimseq(rem)
def legpow(c, pow, maxpower=16):
"""Raise a Legendre series to a power.
Returns the Legendre series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high,
i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.``
Parameters
----------
c : array_like
1-D array of Legendre series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Legendre series of power.
See Also
--------
legadd, legsub, legmul, legdiv
Examples
--------
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
prd = c
for i in range(2, power + 1):
prd = legmul(prd, c)
return prd
def legder(c, m=1, scl=1, axis=0):
"""
Differentiate a Legendre series.
Returns the Legendre series coefficients `c` differentiated `m` times
along `axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2``
while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) +
2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
Array of Legendre series coefficients. If c is multidimensional the
different axis correspond to different variables with the degree in
each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Legendre series of the derivative.
See Also
--------
legint
Notes
-----
In general, the result of differentiating a Legendre series does not
resemble the same operation on a power series. Thus the result of this
function may be "unintuitive," albeit correct; see Examples section
below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c = (1,2,3,4)
>>> L.legder(c)
array([ 6., 9., 20.])
>>> L.legder(c, 3)
array([ 60.])
>>> L.legder(c, scl=-1)
array([ -6., -9., -20.])
>>> L.legder(c, 2,-1)
array([ 9., 60.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
n = len(c)
if cnt >= n:
c = c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 2, -1):
der[j - 1] = (2*j - 1)*c[j]
c[j - 2] += c[j]
if n > 1:
der[1] = 3*c[2]
der[0] = c[1]
c = der
c = np.rollaxis(c, 0, iaxis + 1)
return c
def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Legendre series.
Returns the Legendre series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]]
represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) +
2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Legendre series coefficients. If c is multidimensional the
different axis correspond to different variables with the degree in
each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Legendre series coefficient array of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
legder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
    :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c = (1,2,3)
>>> L.legint(c)
array([ 0.33333333, 0.4 , 0.66666667, 0.6 ])
>>> L.legint(c, 3)
array([ 1.66666667e-02, -1.78571429e-02, 4.76190476e-02,
-1.73472348e-18, 1.90476190e-02, 9.52380952e-03])
>>> L.legint(c, k=3)
array([ 3.33333333, 0.4 , 0.66666667, 0.6 ])
>>> L.legint(c, lbnd=-2)
array([ 7.33333333, 0.4 , 0.66666667, 0.6 ])
>>> L.legint(c, scl=2)
array([ 0.66666667, 0.8 , 1.33333333, 1.2 ])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]*0
tmp[1] = c[0]
if n > 1:
tmp[2] = c[1]/3
for j in range(2, n):
t = c[j]/(2*j + 1)
tmp[j + 1] = t
tmp[j - 1] -= t
tmp[0] += k[i] - legval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c
def legval(x, c, tensor=True):
"""
Evaluate a Legendre series at points x.
If `c` is of length `n + 1`, this function returns the value:
.. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
scalars have shape (,).
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
        or its elements must support addition and multiplication with
        themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
legval2d, leggrid2d, legval3d, leggrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
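    A few simple checks, exact because ``L_n(1) == 1`` and ``L_n(-1) == (-1)**n``
    (array output shown approximately):
    >>> from numpy.polynomial import legendre as L
    >>> L.legval(1, [1, 2, 3])
    6.0
    >>> L.legval(-1, [1, 2, 3])
    2.0
    >>> L.legval([-1, 0, 1], [1, 2, 3])
    array([ 2. , -0.5,  6. ])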
"""
c = np.array(c, ndmin=1, copy=0)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
if len(c) == 1:
c0 = c[0]
c1 = 0
elif len(c) == 2:
c0 = c[0]
c1 = c[1]
else:
nd = len(c)
c0 = c[-2]
c1 = c[-1]
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = c[-i] - (c1*(nd - 1))/nd
c1 = tmp + (c1*x*(2*nd - 1))/nd
return c0 + c1*x
def legval2d(x, y, c):
"""
Evaluate a 2-D Legendre series at points (x, y).
This function returns the values:
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y)
The parameters `x` and `y` are converted to arrays only if they are
    tuples or lists, otherwise they are treated as scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
    If `c` is a 1-D array, a 1 is implicitly appended to its shape to make
it 2-D. The shape of the result will be c.shape[2:] + x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and if it isn't an ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in ``c[i,j]``. If `c` has
dimension greater than two the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional Legendre series at points formed
from pairs of corresponding values from `x` and `y`.
See Also
--------
legval, leggrid2d, legval3d, leggrid3d
Notes
-----
.. versionadded::1.7.0
"""
try:
x, y = np.array((x, y), copy=0)
except:
raise ValueError('x, y are incompatible')
c = legval(x, c)
c = legval(y, c, tensor=False)
return c
def leggrid2d(x, y, c):
"""
Evaluate a 2-D Legendre series on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \sum_{i,j} c_{i,j} * L_i(a) * L_j(b)
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
    tuples or lists, otherwise they are treated as scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
        The values of the two dimensional Legendre series at points in the
Cartesian product of `x` and `y`.
See Also
--------
legval, legval2d, legval3d, leggrid3d
Notes
-----
.. versionadded::1.7.0
"""
c = legval(x, c)
c = legval(y, c)
return c
def legval3d(x, y, z, c):
"""
Evaluate a 3-D Legendre series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
    they are tuples or lists, otherwise they are treated as scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
legval, legval2d, leggrid2d, leggrid3d
Notes
-----
.. versionadded::1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
except:
raise ValueError('x, y, z are incompatible')
c = legval(x, c)
c = legval(y, c, tensor=False)
c = legval(z, c, tensor=False)
return c
def leggrid3d(x, y, z, c):
"""
Evaluate a 3-D Legendre series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
    are tuples or lists, otherwise they are treated as scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
        Array of coefficients ordered so that the coefficient of the term of
        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
        greater than three the remaining indices enumerate multiple sets of
        coefficients.
Returns
-------
values : ndarray, compatible object
        The values of the three dimensional polynomial at points in the
        Cartesian product of `x`, `y`, and `z`.
See Also
--------
legval, legval2d, leggrid2d, legval3d
Notes
-----
.. versionadded::1.7.0
"""
c = legval(x, c)
c = legval(y, c)
c = legval(z, c)
return c
def legvander(x, deg):
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = L_i(x)
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the degree of the Legendre polynomial.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
array ``V = legvander(x, n)``, then ``np.dot(V, c)`` and
``legval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of Legendre series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
        ``x.shape + (deg + 1,)``, where the last index is the degree of the
corresponding Legendre polynomial. The dtype will be the same as
the converted `x`.
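    Examples
    --------
    A small illustrative sketch of the ``np.dot(V, c)`` / ``legval(x, c)``
    equivalence described above (output shown approximately):
    >>> import numpy as np
    >>> from numpy.polynomial import legendre as L
    >>> x = np.array([-1., 0., 1.])
    >>> V = L.legvander(x, 2)
    >>> np.dot(V, [1, 2, 3])
    array([ 2. , -0.5,  6. ])
    >>> L.legval(x, [1, 2, 3])
    array([ 2. , -0.5,  6. ])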
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
# Use forward recursion to generate the entries. This is not as accurate
# as reverse recursion in this application but it is more efficient.
v[0] = x*0 + 1
if ideg > 0:
v[1] = x
for i in range(2, ideg + 1):
v[i] = (v[i-1]*x*(2*i - 1) - v[i-2]*(i - 1))/i
return np.rollaxis(v, 0, v.ndim)
def legvander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., deg[1]*i + j] = L_i(x) * L_j(y),
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the degrees of
the Legendre polynomials.
If ``V = legvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``legval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D Legendre
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
    legvander, legvander3d, legval2d, legval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = legvander(x, degx)
vy = legvander(y, degy)
v = vx[..., None]*vy[..., None,:]
return v.reshape(v.shape[:-2] + (-1,))
def legvander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
then The pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),
    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the degrees of the Legendre polynomials.
If ``V = legvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``legval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D Legendre
series of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
    legvander, legvander2d, legval2d, legval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = legvander(x, degx)
vy = legvander(y, degy)
vz = legvander(z, degz)
v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
return v.reshape(v.shape[:-3] + (-1,))
def legfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Legendre series to data.
Return the coefficients of a Legendre series of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x),
where `n` is `deg`.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
.. versionadded:: 1.5.0
Returns
-------
    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
Legendre coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
chebfit, polyfit, lagfit, hermfit, hermefit
legval : Evaluates a Legendre series.
legvander : Vandermonde matrix of Legendre series.
legweight : Legendre weight function (= 1).
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the Legendre series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where :math:`w_j` are the weights. This problem is solved by setting up
as the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, and `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Legendre series are usually better conditioned than fits
using power series, but much can depend on the distribution of the
sample points and the smoothness of the data. If the quality of the fit
is inadequate splines may be a good alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
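    A small round-trip sketch: with noise-free data generated from a known
    series, the coefficients are recovered up to roundoff (output rounded):
    >>> import numpy as np
    >>> from numpy.polynomial import legendre as L
    >>> x = np.linspace(-1, 1, 51)
    >>> y = L.legval(x, [1, 2, 3])
    >>> np.round(L.legfit(x, y, 2), 4)
    array([ 1.,  2.,  3.])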
"""
order = int(deg) + 1
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
# set up the least squares matrices in transposed form
lhs = legvander(x, deg).T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
if full:
return c, [resids, rank, s, rcond]
else:
return c
def legcompanion(c):
"""Return the scaled companion matrix of c.
The basis polynomials are scaled so that the companion matrix is
    symmetric when `c` is a Legendre basis polynomial. This provides
better eigenvalue estimates than the unscaled case and for basis
polynomials the eigenvalues are guaranteed to be real if
`numpy.linalg.eigvalsh` is used to obtain them.
Parameters
----------
c : array_like
1-D array of Legendre series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Scaled companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded::1.7.0
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
scl = 1./np.sqrt(2*np.arange(n) + 1)
top = mat.reshape(-1)[1::n+1]
bot = mat.reshape(-1)[n::n+1]
top[...] = np.arange(1, n)*scl[:n-1]*scl[1:n]
bot[...] = top
mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*(n/(2*n - 1))
return mat
def legroots(c):
"""
Compute the roots of a Legendre series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * L_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, chebroots, lagroots, hermroots, hermeroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such values.
Roots with multiplicity greater than 1 will also show larger errors as
the value of the series near such points is relatively insensitive to
errors in the roots. Isolated roots near the origin can be improved by
a few iterations of Newton's method.
The Legendre series basis polynomials aren't powers of ``x`` so the
results of this function may seem unintuitive.
Examples
--------
>>> import numpy.polynomial.legendre as leg
>>> leg.legroots((1, 2, 3, 4)) # 4L_3 + 3L_2 + 2L_1 + 1L_0, all real roots
array([-0.85099543, -0.11407192, 0.51506735])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-c[0]/c[1]])
m = legcompanion(c)
r = la.eigvals(m)
r.sort()
return r
def leggauss(deg):
"""
Gauss-Legendre quadrature.
Computes the sample points and weights for Gauss-Legendre quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
the weight function :math:`f(x) = 1`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
.. versionadded::1.7.0
The results have only been tested up to degree 100, higher degrees may
be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`L_n`, and then scaling the results to get
the right value when integrating 1.
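    Examples
    --------
    The two-point rule as an illustrative check (values shown approximately);
    its nodes are at ``+-1/sqrt(3)`` and both weights equal 1:
    >>> from numpy.polynomial import legendre as L
    >>> x, w = L.leggauss(2)
    >>> x
    array([-0.57735027,  0.57735027])
    >>> w
    array([ 1.,  1.])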
"""
ideg = int(deg)
if ideg != deg or ideg < 1:
raise ValueError("deg must be a non-negative integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0]*deg + [1])
m = legcompanion(c)
x = la.eigvalsh(m)
# improve roots by one application of Newton
dy = legval(x, c)
df = legval(x, legder(c))
x -= dy/df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
fm = legval(x, c[1:])
fm /= np.abs(fm).max()
df /= np.abs(df).max()
w = 1/(fm * df)
# for Legendre we can also symmetrize
w = (w + w[::-1])/2
x = (x - x[::-1])/2
# scale w to get the right value
w *= 2. / w.sum()
return x, w
def legweight(x):
"""
Weight function of the Legendre polynomials.
The weight function is :math:`1` and the interval of integration is
:math:`[-1, 1]`. The Legendre polynomials are orthogonal, but not
normalized, with respect to this weight function.
Parameters
----------
x : array_like
Values at which the weight function will be computed.
Returns
-------
w : ndarray
The weight function at `x`.
Notes
-----
.. versionadded::1.7.0
"""
w = x*0.0 + 1.0
return w
#
# Legendre series class
#
class Legendre(ABCPolyBase):
"""A Legendre series class.
The Legendre class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
attributes and methods listed in the `ABCPolyBase` documentation.
Parameters
----------
coef : array_like
Legendre coefficients in order of increasing degree, i.e.,
``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [-1, 1].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
"""
# Virtual Functions
_add = staticmethod(legadd)
_sub = staticmethod(legsub)
_mul = staticmethod(legmul)
_div = staticmethod(legdiv)
_pow = staticmethod(legpow)
_val = staticmethod(legval)
_int = staticmethod(legint)
_der = staticmethod(legder)
_fit = staticmethod(legfit)
_line = staticmethod(legline)
_roots = staticmethod(legroots)
_fromroots = staticmethod(legfromroots)
# Virtual properties
nickname = 'leg'
domain = np.array(legdomain)
window = np.array(legdomain)
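# A minimal usage sketch of the Legendre class above (illustrative only, not
# part of the original module); results shown approximately:
#
#     >>> p = Legendre([1, 2, 3])      # 1*P_0 + 2*P_1 + 3*P_2
#     >>> p(1.0)                       # P_n(1) == 1, so this is the coefficient sum
#     6.0
#     >>> p.deriv().coef               # same coefficients as legder([1, 2, 3])
#     array([ 2.,  9.])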
|
crossin/yuan-xin
|
refs/heads/master
|
settings.py
|
11
|
# -*- coding: utf-8 -*-
# Django settings for the example project.
import os
DEBUG = True
TEMPLATE_DEBUG = False
##LANGUAGE_CODE = 'zh-CN'
##LANGUAGE_CODE = 'fr'
LOCALE_PATHS = 'locale'
USE_I18N = True
TEMPLATE_LOADERS = ('django.template.loaders.filesystem.load_template_source',
                    'ziploader.zip_loader.load_template_source')
|
barma1309/Kalista
|
refs/heads/master
|
.virtualenvs/Kalista/lib/python3.4/site-packages/django/contrib/gis/measure.py
|
344
|
# Copyright (c) 2007, Robert Coup <robert.coup@onetrackmind.co.nz>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Distance nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
Distance and Area objects to allow for sensible and convenient calculation
and conversions.
Authors: Robert Coup, Justin Bronn, Riccardo Di Virgilio
Inspired by GeoPy (http://exogen.case.edu/projects/geopy/)
and Geoff Biggs' PhD work on dimensioned units for robotics.
"""
__all__ = ['A', 'Area', 'D', 'Distance']
from decimal import Decimal
from functools import total_ordering
from django.utils import six
NUMERIC_TYPES = six.integer_types + (float, Decimal)
AREA_PREFIX = "sq_"
def pretty_name(obj):
return obj.__name__ if obj.__class__ == type else obj.__class__.__name__
@total_ordering
class MeasureBase(object):
STANDARD_UNIT = None
ALIAS = {}
UNITS = {}
LALIAS = {}
def __init__(self, default_unit=None, **kwargs):
value, self._default_unit = self.default_units(kwargs)
setattr(self, self.STANDARD_UNIT, value)
if default_unit and isinstance(default_unit, six.string_types):
self._default_unit = default_unit
def _get_standard(self):
return getattr(self, self.STANDARD_UNIT)
def _set_standard(self, value):
setattr(self, self.STANDARD_UNIT, value)
standard = property(_get_standard, _set_standard)
def __getattr__(self, name):
if name in self.UNITS:
return self.standard / self.UNITS[name]
else:
raise AttributeError('Unknown unit type: %s' % name)
def __repr__(self):
return '%s(%s=%s)' % (pretty_name(self), self._default_unit,
getattr(self, self._default_unit))
def __str__(self):
return '%s %s' % (getattr(self, self._default_unit), self._default_unit)
# **** Comparison methods ****
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.standard == other.standard
else:
return NotImplemented
def __lt__(self, other):
if isinstance(other, self.__class__):
return self.standard < other.standard
else:
return NotImplemented
# **** Operators methods ****
def __add__(self, other):
if isinstance(other, self.__class__):
return self.__class__(default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard + other.standard)})
else:
raise TypeError('%(class)s must be added with %(class)s' % {"class": pretty_name(self)})
def __iadd__(self, other):
if isinstance(other, self.__class__):
self.standard += other.standard
return self
else:
raise TypeError('%(class)s must be added with %(class)s' % {"class": pretty_name(self)})
def __sub__(self, other):
if isinstance(other, self.__class__):
return self.__class__(default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard - other.standard)})
else:
raise TypeError('%(class)s must be subtracted from %(class)s' % {"class": pretty_name(self)})
def __isub__(self, other):
if isinstance(other, self.__class__):
self.standard -= other.standard
return self
else:
raise TypeError('%(class)s must be subtracted from %(class)s' % {"class": pretty_name(self)})
def __mul__(self, other):
if isinstance(other, NUMERIC_TYPES):
return self.__class__(default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard * other)})
else:
raise TypeError('%(class)s must be multiplied with number' % {"class": pretty_name(self)})
def __imul__(self, other):
if isinstance(other, NUMERIC_TYPES):
self.standard *= float(other)
return self
else:
raise TypeError('%(class)s must be multiplied with number' % {"class": pretty_name(self)})
def __rmul__(self, other):
return self * other
def __truediv__(self, other):
if isinstance(other, self.__class__):
return self.standard / other.standard
if isinstance(other, NUMERIC_TYPES):
return self.__class__(default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard / other)})
else:
raise TypeError('%(class)s must be divided with number or %(class)s' % {"class": pretty_name(self)})
def __div__(self, other): # Python 2 compatibility
return type(self).__truediv__(self, other)
def __itruediv__(self, other):
if isinstance(other, NUMERIC_TYPES):
self.standard /= float(other)
return self
else:
raise TypeError('%(class)s must be divided with number' % {"class": pretty_name(self)})
def __idiv__(self, other): # Python 2 compatibility
return type(self).__itruediv__(self, other)
def __bool__(self):
return bool(self.standard)
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
def default_units(self, kwargs):
"""
Return the unit value and the default units specified
from the given keyword arguments dictionary.
"""
val = 0.0
default_unit = self.STANDARD_UNIT
for unit, value in six.iteritems(kwargs):
if not isinstance(value, float):
value = float(value)
if unit in self.UNITS:
val += self.UNITS[unit] * value
default_unit = unit
elif unit in self.ALIAS:
u = self.ALIAS[unit]
val += self.UNITS[u] * value
default_unit = u
else:
lower = unit.lower()
if lower in self.UNITS:
val += self.UNITS[lower] * value
default_unit = lower
elif lower in self.LALIAS:
u = self.LALIAS[lower]
val += self.UNITS[u] * value
default_unit = u
else:
raise AttributeError('Unknown unit type: %s' % unit)
return val, default_unit
@classmethod
def unit_attname(cls, unit_str):
"""
Retrieves the unit attribute name for the given unit string.
For example, if the given unit string is 'metre', 'm' would be returned.
An exception is raised if an attribute cannot be found.
"""
lower = unit_str.lower()
if unit_str in cls.UNITS:
return unit_str
elif lower in cls.UNITS:
return lower
elif lower in cls.LALIAS:
return cls.LALIAS[lower]
else:
raise Exception('Could not find a unit keyword associated with "%s"' % unit_str)
class Distance(MeasureBase):
STANDARD_UNIT = "m"
UNITS = {
'chain': 20.1168,
'chain_benoit': 20.116782,
'chain_sears': 20.1167645,
'british_chain_benoit': 20.1167824944,
'british_chain_sears': 20.1167651216,
'british_chain_sears_truncated': 20.116756,
'cm': 0.01,
'british_ft': 0.304799471539,
'british_yd': 0.914398414616,
'clarke_ft': 0.3047972654,
'clarke_link': 0.201166195164,
'fathom': 1.8288,
'ft': 0.3048,
'german_m': 1.0000135965,
'gold_coast_ft': 0.304799710181508,
'indian_yd': 0.914398530744,
'inch': 0.0254,
'km': 1000.0,
'link': 0.201168,
'link_benoit': 0.20116782,
'link_sears': 0.20116765,
'm': 1.0,
'mi': 1609.344,
'mm': 0.001,
'nm': 1852.0,
'nm_uk': 1853.184,
'rod': 5.0292,
'sears_yd': 0.91439841,
'survey_ft': 0.304800609601,
'um': 0.000001,
'yd': 0.9144,
}
# Unit aliases for `UNIT` terms encountered in Spatial Reference WKT.
ALIAS = {
'centimeter': 'cm',
'foot': 'ft',
'inches': 'inch',
'kilometer': 'km',
'kilometre': 'km',
'meter': 'm',
'metre': 'm',
'micrometer': 'um',
'micrometre': 'um',
'millimeter': 'mm',
'millimetre': 'mm',
'mile': 'mi',
'yard': 'yd',
'British chain (Benoit 1895 B)': 'british_chain_benoit',
'British chain (Sears 1922)': 'british_chain_sears',
'British chain (Sears 1922 truncated)': 'british_chain_sears_truncated',
'British foot (Sears 1922)': 'british_ft',
'British foot': 'british_ft',
'British yard (Sears 1922)': 'british_yd',
'British yard': 'british_yd',
"Clarke's Foot": 'clarke_ft',
"Clarke's link": 'clarke_link',
'Chain (Benoit)': 'chain_benoit',
'Chain (Sears)': 'chain_sears',
'Foot (International)': 'ft',
'German legal metre': 'german_m',
'Gold Coast foot': 'gold_coast_ft',
'Indian yard': 'indian_yd',
'Link (Benoit)': 'link_benoit',
'Link (Sears)': 'link_sears',
'Nautical Mile': 'nm',
'Nautical Mile (UK)': 'nm_uk',
'US survey foot': 'survey_ft',
'U.S. Foot': 'survey_ft',
'Yard (Indian)': 'indian_yd',
'Yard (Sears)': 'sears_yd'
}
LALIAS = {k.lower(): v for k, v in ALIAS.items()}
def __mul__(self, other):
if isinstance(other, self.__class__):
return Area(default_unit=AREA_PREFIX + self._default_unit,
**{AREA_PREFIX + self.STANDARD_UNIT: (self.standard * other.standard)})
elif isinstance(other, NUMERIC_TYPES):
return self.__class__(default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard * other)})
else:
raise TypeError('%(distance)s must be multiplied with number or %(distance)s' % {
"distance": pretty_name(self.__class__),
})
class Area(MeasureBase):
STANDARD_UNIT = AREA_PREFIX + Distance.STANDARD_UNIT
# Getting the square units values and the alias dictionary.
UNITS = {'%s%s' % (AREA_PREFIX, k): v ** 2 for k, v in Distance.UNITS.items()}
ALIAS = {k: '%s%s' % (AREA_PREFIX, v) for k, v in Distance.ALIAS.items()}
LALIAS = {k.lower(): v for k, v in ALIAS.items()}
def __truediv__(self, other):
if isinstance(other, NUMERIC_TYPES):
return self.__class__(default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard / other)})
else:
raise TypeError('%(class)s must be divided by a number' % {"class": pretty_name(self)})
def __div__(self, other): # Python 2 compatibility
return type(self).__truediv__(self, other)
# Shortcuts
D = Distance
A = Area
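# Illustrative usage of the classes above (a sketch, not part of the original
# module). Unit keyword arguments are converted through the standard unit
# ("m" for Distance, "sq_m" for Area):
#
#     >>> d = D(mi=1)
#     >>> d.km                   # one mile expressed in kilometres
#     1.609344
#     >>> (d + D(km=1)).m        # arithmetic happens on the standard unit
#     2609.344
#     >>> (D(m=3) * D(m=4)).sq_m
#     12.0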
|
RoboJackets/robocup-software
|
refs/heads/staging
|
soccer/gameplay/plays/testing/test_one_touch_pass.py
|
2
|
import play
import behavior
import tactics.one_touch_pass
## Continually runs a one_touch_pass pass tactic
class TestOneTouchPass(play.Play):
def __init__(self):
super().__init__(continuous=True)
self.add_transition(behavior.Behavior.State.start,
behavior.Behavior.State.running, lambda: True,
'immediately')
pass_bhvr = tactics.one_touch_pass.OneTouchPass()
self.add_subbehavior(pass_bhvr, 'pass')
def execute_running(self):
pass_bhvr = self.subbehavior_with_name('pass')
if pass_bhvr.is_done_running():
pass_bhvr.restart()
|
nikhilgarg459/bcs
|
refs/heads/master
|
bcs_server/config.py
|
2
|
__doc__ = """
This module contains all the config parameters for the bcs server.
"""
# Storage config
DB_FILE = 'bank.db'
# Server config
TCP_IP = '127.0.0.1'
TCP_PORT = 5010
MAX_CONNECTIONS = 2
MESSAGE_LENGTH = 1024
# Superuser account
ADMIN_NAME = 'admin'
ADMIN_EMAIL = 'admin@bcs.com'
ADMIN_PASSWORD = 'admin'
# Log config
LOG_FILENAME = 'logs/bcs_server.log'
MAX_SIZE_IN_KB = 20
NUMBER_OF_FILES = 5
|
jacquesd/indico
|
refs/heads/master
|
indico/util/fossilize/__init__.py
|
2
|
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
"""
``fossilize`` allows us to "serialize" complex python objects into dictionaries
and lists. Such operation is very useful for generating JSON data structures
from business objects. It works as a wrapper around ``zope.interface``.
Some of the features are:
* Different "fossil" types for the same source class;
* Built-in inheritance support;
"""
import logging
import inspect
import re
import threading
import zope.interface
from types import NoneType
from itertools import ifilter
_fossil_cache = threading.local()
def fossilizes(*classList):
"""
Simple wrapper around 'implements'
"""
zope.interface.declarations._implements("fossilizes",
classList,
zope.interface.classImplements)
def addFossil(klazz, fossils):
"""
Declares fossils for a class
:param klazz: a class object
:type klass: class object
:param fossils: a fossil class (or a list of fossil classes)
"""
if not type(fossils) is list:
fossils = [fossils]
for fossil in fossils:
zope.interface.classImplements(klazz, fossil)
def clearCache():
"""
Shortcut for Fossilizable.clearCache()
"""
Fossilizable.clearCache()
class NonFossilizableException(Exception):
"""
Object is not fossilizable (doesn't implement Fossilizable)
"""
class InvalidFossilException(Exception):
"""
The fossil name doesn't follow the convention I(\w+)Fossil
or has an invalid method name and did not declare a .name tag for it
"""
class IFossil(zope.interface.Interface):
"""
Fossil base interface. All fossil classes should derive from this one.
"""
class Fossilizable(object):
"""
Base class for all the objects that can be fossilized
"""
__fossilNameRE = re.compile('^I(\w+)Fossil$')
__methodNameRE = re.compile('^get(\w+)|(has\w+)|(is\w+)$')
@classmethod
def __extractName(cls, name):
"""
'De-camelcase' the name
"""
if name in _fossil_cache.methodName:
return _fossil_cache.methodName[name]
else:
nmatch = cls.__methodNameRE.match(name)
if not nmatch:
raise InvalidFossilException("method name '%s' is not valid! "
"has to start by 'get', 'has', 'is' "
"or use 'name' tag" % name)
else:
group = nmatch.group(1) or nmatch.group(2) or nmatch.group(3)
extractedName = group[0:1].lower() + group[1:]
_fossil_cache.methodName[name] = extractedName
return extractedName
@classmethod
def __extractFossilName(cls, name):
"""
Extracts the fossil name from a I(.*)Fossil
class name.
IMyObjectBasicFossil -> myObjectBasic
"""
if name in _fossil_cache.fossilName:
fossilName = _fossil_cache.fossilName[name]
else:
fossilNameMatch = Fossilizable.__fossilNameRE.match(name)
if fossilNameMatch is None:
raise InvalidFossilException("Invalid fossil name: %s."
" A fossil name should follow the"
" pattern: I(\w+)Fossil." % name)
else:
fossilName = fossilNameMatch.group(1)[0].lower() + \
fossilNameMatch.group(1)[1:]
_fossil_cache.fossilName[name] = fossilName
return fossilName
@classmethod
def __obtainInterface(cls, obj, interfaceArg):
"""
Obtains the appropriate interface for this object.
:param interfaceArg: the target fossile type
:type interfaceArg: IFossil, NoneType, or dict
* If IFossil, we will use it.
* If None, we will take the default fossil
(the first one of this class's 'fossilizes' list)
* If a dict, we will use the objects class, class name, or full class name
as key.
Also verifies that the interface obtained through these 3 methods is
effectively provided by the object.
"""
if interfaceArg is None:
# we try to take the 1st interface declared with fossilizes
implementedInterfaces = list(
i for i in zope.interface.implementedBy(obj.__class__) \
if i.extends(IFossil) )
if not implementedInterfaces:
raise NonFossilizableException(
"Object %s of class %s cannot be fossilized,"
"no fossils were declared for it" %
(str(obj), obj.__class__.__name__))
else:
interface = implementedInterfaces[0]
elif type(interfaceArg) is dict:
className = obj.__class__.__module__ + '.' + \
obj.__class__.__name__
# interfaceArg is a dictionary of class:Fossil pairs
if className in interfaceArg:
interface = interfaceArg[className]
elif obj.__class__ in interfaceArg:
interface = interfaceArg[obj.__class__]
else:
raise NonFossilizableException(
"Object %s of class %s cannot be fossilized; "
"its class was not a key in the provided fossils dictionary" %
(str(obj), obj.__class__.__name__))
else:
interface = interfaceArg
return interface
@classmethod
def clearCache(cls):
"""
Clears the fossil attribute cache
"""
_fossil_cache.methodName = {}
_fossil_cache.fossilName = {}
_fossil_cache.fossilInterface = {}
_fossil_cache.fossilAttrs = {} # Attribute Cache for Fossils with
# fields that are repeated
@classmethod
def fossilizeIterable(cls, target, interface, useAttrCache=False, filterBy=None, **kwargs):
"""
Fossilizes an object, be it a 'direct' fossilizable
object, or an iterable (dict, list, set);
"""
if isinstance(target, Fossilizable):
return target.fossilize(interface, useAttrCache, **kwargs)
else:
ttype = type(target)
if ttype in [int, str, float, bool, NoneType]:
return target
elif ttype is dict:
container = {}
for key, value in target.iteritems():
container[key] = fossilize(value, interface, useAttrCache,
**kwargs)
return container
elif hasattr(target, '__iter__'):
if filterBy:
iterator = ifilter(filterBy, target)
else:
iterator = iter(target)
# we turn sets and tuples into lists since JSON does not
# have sets / tuples
return list(fossilize(elem,
interface,
useAttrCache, **kwargs) for elem in iterator)
# If the object is a wrapper for an iterable, by default we fossilize
# the iterable the object is wrapping. This behaviour is included in
# order to let objects like legacy PersistentLists to be fossilized
elif hasattr(target, '__dict__') and len(target.__dict__) == 1 and \
hasattr(target.__dict__.values()[0], '__iter__'):
return list(fossilize(elem,
interface,
useAttrCache,
**kwargs) for elem in target.__dict__.values()[0])
elif cls.__obtainInterface(target, interface):
return cls.fossilize_obj(target, interface, useAttrCache, **kwargs)
else:
raise NonFossilizableException("Type %s is not fossilizable!" %
ttype)
return fossilize(target, interface, useAttrCache, **kwargs)
def fossilize(self, interfaceArg=None, useAttrCache=False, **kwargs):
return self.fossilize_obj(self, interfaceArg=interfaceArg, useAttrCache=useAttrCache,
**kwargs)
@classmethod
def fossilize_obj(cls, obj, interfaceArg=None, useAttrCache=False, mapClassType=None, **kwargs):
"""
Fossilizes the object, using the fossil provided by `interface`.
:param interfaceArg: the target fossile type
:type interfaceArg: IFossil, NoneType, or dict
:param useAttrCache: use caching of attributes if same fields are
repeated for a fossil
:type useAttrCache: boolean
"""
mapClassType = dict(mapClassType or {}, AvatarUserWrapper='Avatar', AvatarProvisionalWrapper='Avatar',
EmailPrincipal='Email')
interface = cls.__obtainInterface(obj, interfaceArg)
name = interface.getName()
fossilName = cls.__extractFossilName(name)
result = {}
# cache method names for each interface
names = _fossil_cache.fossilInterface.get(interface)
if names is None:
names = interface.names(all=True)
_fossil_cache.fossilInterface[interface] = names
###
for methodName in names:
method = interface[methodName]
tags = method.getTaggedValueTags()
isAttribute = False
if 'onlyIf' in tags:
onlyIf = method.getTaggedValue('onlyIf')
                # If the condition is not in the kwargs or the condition is False, we do not fossilize the method
if not kwargs.get(onlyIf, False):
continue
# In some cases it is better to use the attribute cache to
# speed up the fossilization
cacheUsed = False
if useAttrCache:
try:
methodResult = _fossil_cache.fossilAttrs[obj._p_oid][methodName]
cacheUsed = True
except KeyError:
pass
if not cacheUsed:
# Please use 'produce' as little as possible;
# there is almost always a more elegant and modular solution!
if 'produce' in tags:
methodResult = method.getTaggedValue('produce')(obj)
else:
attr = getattr(obj, methodName)
if callable(attr):
try:
methodResult = attr()
except:
logging.getLogger('indico.fossilize').error("Problem fossilizing '%r' with '%s'" %
(obj, interfaceArg))
raise
else:
methodResult = attr
isAttribute = True
if hasattr(obj, "_p_oid"):
_fossil_cache.fossilAttrs.setdefault(obj._p_oid, {})[methodName] = methodResult
if 'filterBy' in tags:
if 'filters' not in kwargs:
raise Exception('No filters defined!')
filterName = method.getTaggedValue('filterBy')
if filterName in kwargs['filters']:
filterBy = kwargs['filters'][filterName]
else:
raise Exception("No filter '%s' defined!" % filterName)
else:
filterBy = None
# Result conversion
if 'result' in tags:
targetInterface = method.getTaggedValue('result')
#targetInterface = globals()[targetInterfaceName]
methodResult = Fossilizable.fossilizeIterable(
methodResult, targetInterface, filterBy=filterBy, mapClassType=mapClassType, **kwargs)
# Conversion function
if 'convert' in tags:
convertFunction = method.getTaggedValue('convert')
converterArgNames = inspect.getargspec(convertFunction)[0]
converterArgs = dict((name, kwargs[name])
for name in converterArgNames
if name in kwargs)
if '_obj' in converterArgNames:
converterArgs['_obj'] = obj
try:
methodResult = convertFunction(methodResult, **converterArgs)
except:
logging.getLogger('indico.fossilize').error("Problem fossilizing '%r' with '%s' (%s)" %
(obj, interfaceArg, methodName))
raise
# Re-name the attribute produced by the method
if 'name' in tags:
attrName = method.getTaggedValue('name')
elif isAttribute:
attrName = methodName
else:
attrName = cls.__extractName(methodName)
# In case the name contains dots, each of the 'domains' but the
# last one are translated into nested dictionnaries. For example,
# if we want to re-name an attribute into "foo.bar.tofu", the
# corresponding fossilized attribute will be of the form:
# {"foo":{"bar":{"tofu": res,...},...},...}
# instead of:
# {"foo.bar.tofu": res, ...}
current = result
attrList = attrName.split('.')
while len(attrList) > 1:
attr = attrList.pop(0)
if attr not in current:
current[attr] = {}
current = current[attr]
# For the last attribute level
current[attrList[0]] = methodResult
if "_type" in result or "_fossil" in result:
raise InvalidFossilException('"_type" or "_fossil"'
' cannot be a fossil attribute name')
else:
result["_type"] = mapClassType.get(obj.__class__.__name__, obj.__class__.__name__)
if fossilName: #we check that it's not an empty string
result["_fossil"] = fossilName
else:
result["_fossil"] = ""
return result
def fossilize(target, interfaceArg=None, useAttrCache=False, **kwargs):
"""
Method that allows the "fossilization" process to
be called on data structures (lists, dictionaries
and sets) as well as normal `Fossilizable` objects.
:param target: target object to be fossilized
:type target: Fossilizable
:param interfaceArg: target fossil type
:type interfaceArg: IFossil, NoneType, or dict
:param useAttrCache: use the attribute caching
:type useAttrCache: boolean
"""
return Fossilizable.fossilizeIterable(target, interfaceArg, useAttrCache,
**kwargs)
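# A minimal sketch of how the pieces above fit together (the interface and
# class names below are hypothetical, and it assumes Fossilizable.clearCache()
# has already been called in the current thread):
#
#     class IUserFossil(IFossil):
#         def getName(self):
#             """ user name """
#
#     class User(Fossilizable):
#         fossilizes(IUserFossil)
#         def __init__(self, name):
#             self._name = name
#         def getName(self):
#             return self._name
#
#     fossilize(User('ada'))
#     # -> {'name': 'ada', '_type': 'User', '_fossil': 'user'}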
|
bmcfee/gordon
|
refs/heads/master
|
examples/playdar_resolver/playdar.py
|
1
|
#!/usr/bin/python
# Copyright (C) 2010 Michael Mandel
#
# This file is part of Gordon.
#
# Gordon is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Gordon is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gordon. If not, see <http://www.gnu.org/licenses/>.
"""Basic script to search playdar for a track and get the url."""
import sys
import simplejson as json
import urllib
import time
base_url = 'http://localhost:60210'
wait_time = 2
def main():
artist, track = sys.argv[1:3]
# '/api/?method=resolve&artist=weezer&track=buddy%20holly'
res = get_json("%s/api" % base_url,
{"artist":artist, "track":track, "method":"resolve"})
qid = res["qid"]
# Wait a little for results
time.sleep(wait_time)
# '/api/?method=get_results&qid=386C50C9-9F9E-4633-B9AE-33EB41E31312'
res = get_json("%s/api" % base_url, {"method":"get_results","qid":qid})
if len(res["results"]) > 0:
print res["results"][0]
sid = res["results"][0]["sid"]
# '/sid/E042363C-075C-43AA-8BC2-CC3C18CA0008'
print "%s/sid/%s" % (base_url, sid)
def get_json(url, params):
"""Make a request at a url with a dictionary of parameters.
Return a dictionary parsed from a json response.
"""
p_enc = urllib.urlencode(params)
full_url = '%s/?%s' % (url, p_enc)
print full_url
f = urllib.urlopen(full_url)
return json.loads(f.read())
if __name__=='__main__':
main()
|
qilicun/python
|
refs/heads/master
|
python3/matplotlib/spines_demo_bounds.py
|
7
|
"""
Demo of spines using custom bounds to limit the extent of the spine.
"""
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(0, 2*np.pi, 50)
y = np.sin(x)
y2 = y + 0.1 * np.random.normal(size=x.shape)
fig, ax = plt.subplots()
ax.plot(x, y, 'k--')
ax.plot(x, y2, 'ro')
# set ticks and tick labels
ax.set_xlim((0, 2*np.pi))
ax.set_xticks([0, np.pi, 2*np.pi])
ax.set_xticklabels(['0', '$\pi$','2$\pi$'])
ax.set_ylim((-1.5, 1.5))
ax.set_yticks([-1, 0, 1])
# Only draw spine between the y-ticks
ax.spines['left'].set_bounds(-1, 1)
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
plt.show()
|
sidartaoliveira/ansible
|
refs/heads/devel
|
lib/ansible/compat/__init__.py
|
241
|
# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
'''
Compat library for ansible. This contains compatibility definitions for older python
When we need to import a module differently depending on python version, do it
here. Then in the code we can simply import from compat in order to get what we want.
'''
|
gleon99/rpyc
|
refs/heads/logs
|
rpyc/lib/colls.py
|
1
|
from __future__ import with_statement
import weakref
from threading import Lock
import logging
class WeakValueDict(object):
"""a light-weight version of weakref.WeakValueDictionary"""
__slots__ = ("_dict",)
def __init__(self):
self._dict = {}
def __repr__(self):
return repr(self._dict)
def __iter__(self):
return self.iterkeys()
def __len__(self):
return len(self._dict)
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
else:
return True
def get(self, key, default = None):
try:
return self[key]
except KeyError:
return default
def __getitem__(self, key):
obj = self._dict[key]()
if obj is None:
raise KeyError(key)
return obj
def __setitem__(self, key, value):
def remover(wr, _dict = self._dict, key = key):
_dict.pop(key, None)
self._dict[key] = weakref.ref(value, remover)
def __delitem__(self, key):
del self._dict[key]
def iterkeys(self):
return self._dict.keys()
def keys(self):
return self._dict.keys()
def itervalues(self):
for k in self:
yield self[k]
def values(self):
return list(self.itervalues())
def iteritems(self):
for k in self:
yield k, self[k]
def items(self):
return list(self.iteritems())
def clear(self):
self._dict.clear()
class RefCountingColl(object):
"""a set-like object that implements refcounting on its contained objects"""
__slots__ = ("_lock", "_dict")
def __init__(self):
self._lock = Lock()
self._dict = {}
def __repr__(self):
logging.info("repr({0})".format(self._dict))
return repr(self._dict)
def add(self, obj):
with self._lock:
logging.info("add({0}),id={1}".format(obj, id(obj)))
key = id(obj)
slot = self._dict.get(key, None)
if slot is None:
slot = [obj, 0]
else:
slot[1] += 1
self._dict[key] = slot
logging.info("slot[1]={0}".format(slot[1]))
def clear(self):
with self._lock:
logging.info("clr({0})".format(self._dict))
self._dict.clear()
def decref(self, key):
with self._lock:
logging.info("dec({0})".format(key))
slot = self._dict[key]
if slot[1] < 1:
logging.info("del {0}".format(slot[0]))
del self._dict[key]
else:
slot[1] -= 1
self._dict[key] = slot
logging.info("slot={0}".format(slot))
def __getitem__(self, key):
with self._lock:
logging.info("get({0})".format(key))
slot = self._dict.get(key, None)
logging.info("slot={0}".format(slot))
return slot[0]
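# Illustrative use of RefCountingColl (a sketch, not part of the original
# module): objects are keyed by id() and removed once decref() has been
# called as many times as the extra add() calls:
#
#     coll = RefCountingColl()
#     obj = object()
#     coll.add(obj); coll.add(obj)   # internal count for id(obj) is now 1
#     assert coll[id(obj)] is obj
#     coll.decref(id(obj))           # still present (count drops to 0)
#     coll.decref(id(obj))           # now removed from the collection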
|